863 files changed, 105124 insertions, 7583 deletions
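Note on the bfdd changes that follow: session key lookup is reworked so that the local address and interface name act as optional wildcards. The hash is computed over a copy with those fields zeroed, and the comparator only reports a mismatch when both sides have the field set and the values differ. The snippet below is a minimal standalone sketch of that matching rule, not FRR code; struct example_key and its helpers are illustrative stand-ins that merely mirror the struct bfd_key fields touched by the diff.

/*
 * Illustrative sketch only (assumed names, not FRR API): wildcard
 * matching of optional key fields, in the spirit of the reworked
 * bfd_key_hash_do()/bfd_key_hash_cmp() in bfdd/bfd.c below.
 */
#include <stdbool.h>
#include <string.h>

struct example_key {
	int family;
	bool mhop;
	unsigned char peer[16];  /* peer address */
	unsigned char local[16]; /* optional local address */
	char ifname[16];         /* optional interface name */
	char vrfname[36];        /* always part of the key */
};

/* Empty on either side acts as a wildcard; otherwise compare bytes. */
static bool field_matches(const void *a, const void *b, size_t len)
{
	static const unsigned char zero[36];

	if (memcmp(a, zero, len) == 0 || memcmp(b, zero, len) == 0)
		return true;
	return memcmp(a, b, len) == 0;
}

static bool example_key_match(const struct example_key *k1,
			      const struct example_key *k2)
{
	/* Mandatory fields must match exactly. */
	if (k1->family != k2->family || k1->mhop != k2->mhop)
		return false;
	if (memcmp(k1->peer, k2->peer, sizeof(k1->peer)))
		return false;
	if (memcmp(k1->vrfname, k2->vrfname, sizeof(k1->vrfname)))
		return false;

	/* Optional fields: an unset side matches anything. */
	return field_matches(k1->local, k2->local, sizeof(k1->local))
	       && field_matches(k1->ifname, k2->ifname, sizeof(k1->ifname));
}

Because the optional fields are excluded from the hash, a plain hash_lookup() with a fully populated key still finds a session stored with only the mandatory fields, which is why bfd_key_lookup() and bfd_key_delete() lose their partial-match fallback walks in the diff.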
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 61bc0ade65..c3a0fdedf0 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -2,6 +2,7 @@ # git blame --ignore-revs-file .git-blame-ignore-revs <...> # or to make it permanent # git config blame.ignoreRevsFile .git-blame-ignore-revs +9fa6ec14737b94fdfb41539d96c7e4f84f3514b6 701a01920eee5431d2052aad92aefbdf50ac2139 bf2394f08bdc91a6cbd3784a1bfa3af3247bb06f 0157c327715ca367d13b7f02b2981f3484ccdeeb diff --git a/Makefile.am b/Makefile.am index b6e8a2bce8..90c8407010 100644 --- a/Makefile.am +++ b/Makefile.am @@ -158,6 +158,7 @@ include bfdd/subdir.am include yang/subdir.am include yang/libyang_plugins/subdir.am include vrrpd/subdir.am +include pathd/subdir.am include vtysh/subdir.am include tests/subdir.am diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c index d37e0b608f..85d79bdc3b 100644 --- a/babeld/babel_interface.c +++ b/babeld/babel_interface.c @@ -693,7 +693,7 @@ interface_recalculate(struct interface *ifp) rc = resize_receive_buffer(mtu); if(rc < 0) - zlog_warn("couldn't resize receive buffer for interface %s (%d) (%d bytes).\n", + zlog_warn("couldn't resize receive buffer for interface %s (%d) (%d bytes).", ifp->name, ifp->ifindex, mtu); memset(&mreq, 0, sizeof(mreq)); diff --git a/babeld/message.c b/babeld/message.c index c9a10cb1c0..edb9806011 100644 --- a/babeld/message.c +++ b/babeld/message.c @@ -643,7 +643,7 @@ parse_packet(const unsigned char *from, struct interface *ifp, return; rtt = MAX(0, local_waiting_us - remote_waiting_us); - debugf(BABEL_DEBUG_COMMON, "RTT to %s on %s sample result: %d us.\n", + debugf(BABEL_DEBUG_COMMON, "RTT to %s on %s sample result: %d us.", format_address(from), ifp->name, rtt); old_rttcost = neighbour_rttcost(neigh); diff --git a/bfdd/bfd.c b/bfdd/bfd.c index 9667ba8708..3e45bf0e04 100644 --- a/bfdd/bfd.c +++ b/bfdd/bfd.c @@ -313,6 +313,13 @@ int bfd_session_enable(struct bfd_session *bs) } } + if (!vrf_is_backend_netns() && vrf && vrf->vrf_id != VRF_DEFAULT + && !if_lookup_by_name(vrf->name, vrf->vrf_id)) { + zlog_err("session-enable: vrf interface %s not available yet", + vrf->name); + return 0; + } + if (bs->key.ifname[0]) { if (vrf) ifp = if_lookup_by_name(bs->key.ifname, vrf->vrf_id); @@ -320,14 +327,16 @@ int bfd_session_enable(struct bfd_session *bs) ifp = if_lookup_by_name_all_vrf(bs->key.ifname); if (ifp == NULL) { zlog_err( - "session-enable: specified interface doesn't exists."); + "session-enable: specified interface %s (VRF %s) doesn't exist.", + bs->key.ifname, vrf ? vrf->name : "<all>"); return 0; } if (bs->key.ifname[0] && !vrf) { vrf = vrf_lookup_by_id(ifp->vrf_id); if (vrf == NULL) { zlog_err( - "session-enable: specified VRF doesn't exists."); + "session-enable: specified VRF %u doesn't exist.", + ifp->vrf_id); return 0; } } @@ -1663,15 +1672,54 @@ static bool bfd_id_hash_cmp(const void *n1, const void *n2) static unsigned int bfd_key_hash_do(const void *p) { const struct bfd_session *bs = p; + struct bfd_key key = bs->key; - return jhash(&bs->key, sizeof(bs->key), 0); + /* + * Local address and interface name are optional and + * can be filled any time after session creation. + * Hash key should not depend on these fields. 
+ */ + memset(&key.local, 0, sizeof(key.local)); + memset(key.ifname, 0, sizeof(key.ifname)); + + return jhash(&key, sizeof(key), 0); } static bool bfd_key_hash_cmp(const void *n1, const void *n2) { const struct bfd_session *bs1 = n1, *bs2 = n2; - return memcmp(&bs1->key, &bs2->key, sizeof(bs1->key)) == 0; + if (bs1->key.family != bs2->key.family) + return false; + if (bs1->key.mhop != bs2->key.mhop) + return false; + if (memcmp(&bs1->key.peer, &bs2->key.peer, sizeof(bs1->key.peer))) + return false; + if (memcmp(bs1->key.vrfname, bs2->key.vrfname, + sizeof(bs1->key.vrfname))) + return false; + + /* + * Local address is optional and can be empty. + * If both addresses are not empty and different, + * then the keys are different. + */ + if (memcmp(&bs1->key.local, &zero_addr, sizeof(bs1->key.local)) + && memcmp(&bs2->key.local, &zero_addr, sizeof(bs2->key.local)) + && memcmp(&bs1->key.local, &bs2->key.local, sizeof(bs1->key.local))) + return false; + + /* + * Interface name is optional and can be empty. + * If both names are not empty and different, + * then the keys are different. + */ + if (bs1->key.ifname[0] && bs2->key.ifname[0] + && memcmp(bs1->key.ifname, bs2->key.ifname, + sizeof(bs1->key.ifname))) + return false; + + return true; } @@ -1689,117 +1737,13 @@ struct bfd_session *bfd_id_lookup(uint32_t id) return hash_lookup(bfd_id_hash, &bs); } -struct bfd_key_walk_partial_lookup { - struct bfd_session *given; - struct bfd_session *result; -}; - -/* ignore some parameters */ -static int bfd_key_lookup_ignore_partial_walker(struct hash_bucket *b, - void *data) -{ - struct bfd_key_walk_partial_lookup *ctx = - (struct bfd_key_walk_partial_lookup *)data; - struct bfd_session *given = ctx->given; - struct bfd_session *parsed = b->data; - - if (given->key.family != parsed->key.family) - return HASHWALK_CONTINUE; - if (given->key.mhop != parsed->key.mhop) - return HASHWALK_CONTINUE; - if (memcmp(&given->key.peer, &parsed->key.peer, - sizeof(struct in6_addr))) - return HASHWALK_CONTINUE; - if (memcmp(given->key.vrfname, parsed->key.vrfname, MAXNAMELEN)) - return HASHWALK_CONTINUE; - ctx->result = parsed; - /* ignore localaddr or interface */ - return HASHWALK_ABORT; -} - struct bfd_session *bfd_key_lookup(struct bfd_key key) { - struct bfd_session bs, *bsp; - struct bfd_key_walk_partial_lookup ctx; - char peer_buf[INET6_ADDRSTRLEN]; - - bs.key = key; - bsp = hash_lookup(bfd_key_hash, &bs); - if (bsp) - return bsp; - - inet_ntop(bs.key.family, &bs.key.peer, peer_buf, - sizeof(peer_buf)); - /* Handle cases where local-address is optional. */ - if (memcmp(&bs.key.local, &zero_addr, sizeof(bs.key.local))) { - memset(&bs.key.local, 0, sizeof(bs.key.local)); - bsp = hash_lookup(bfd_key_hash, &bs); - if (bsp) { - if (bglobal.debug_peer_event) { - char addr_buf[INET6_ADDRSTRLEN]; - inet_ntop(bs.key.family, &key.local, addr_buf, - sizeof(addr_buf)); - zlog_debug( - " peer %s found, but loc-addr %s ignored", - peer_buf, addr_buf); - } - return bsp; - } - } - - bs.key = key; - /* Handle cases where ifname is optional. */ - if (bs.key.ifname[0]) { - memset(bs.key.ifname, 0, sizeof(bs.key.ifname)); - bsp = hash_lookup(bfd_key_hash, &bs); - if (bsp) { - if (bglobal.debug_peer_event) - zlog_debug(" peer %s found, but ifp %s ignored", - peer_buf, key.ifname); - return bsp; - } - } + struct bfd_session bs; - /* Handle cases where local-address and ifname are optional. 
*/ - if (bs.key.family == AF_INET) { - memset(&bs.key.local, 0, sizeof(bs.key.local)); - bsp = hash_lookup(bfd_key_hash, &bs); - if (bsp) { - if (bglobal.debug_peer_event) { - char addr_buf[INET6_ADDRSTRLEN]; - inet_ntop(bs.key.family, &bs.key.local, - addr_buf, sizeof(addr_buf)); - zlog_debug( - " peer %s found, but ifp %s and loc-addr %s ignored", - peer_buf, key.ifname, addr_buf); - } - return bsp; - } - } bs.key = key; - /* Handle case where a context more complex ctx is present. - * input has no iface nor local-address, but a context may - * exist. - * - * Only applies to IPv4, because IPv6 requires either - * local-address or interface. - */ - if (!bs.key.mhop && bs.key.family == AF_INET) { - ctx.result = NULL; - ctx.given = &bs; - hash_walk(bfd_key_hash, &bfd_key_lookup_ignore_partial_walker, - &ctx); - /* change key */ - if (ctx.result) { - bsp = ctx.result; - if (bglobal.debug_peer_event) - zlog_debug( - " peer %s found, but ifp and/or loc-addr params ignored", - peer_buf); - } - } - return bsp; + return hash_lookup(bfd_key_hash, &bs); } /* @@ -1823,16 +1767,11 @@ struct bfd_session *bfd_id_delete(uint32_t id) struct bfd_session *bfd_key_delete(struct bfd_key key) { - struct bfd_session bs, *bsp; + struct bfd_session bs; bs.key = key; - bsp = hash_lookup(bfd_key_hash, &bs); - if (bsp == NULL && key.ifname[0]) { - memset(bs.key.ifname, 0, sizeof(bs.key.ifname)); - bsp = hash_lookup(bfd_key_hash, &bs); - } - return hash_release(bfd_key_hash, bsp); + return hash_release(bfd_key_hash, &bs); } /* Iteration functions. */ diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c index 0a71c18a42..12bb52cf67 100644 --- a/bfdd/bfd_packet.c +++ b/bfdd/bfd_packet.c @@ -165,7 +165,7 @@ void ptm_bfd_echo_snd(struct bfd_session *bfd) salen = sizeof(sin6); } else { sd = bvrf->bg_echo; - memset(&sin6, 0, sizeof(sin6)); + memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; memcpy(&sin.sin_addr, &bfd->key.peer, sizeof(sin.sin_addr)); sin.sin_port = htons(BFD_DEF_ECHO_PORT); @@ -543,6 +543,7 @@ int bfd_recv_cb(struct thread *t) ifindex_t ifindex = IFINDEX_INTERNAL; struct sockaddr_any local, peer; uint8_t msgbuf[1516]; + struct interface *ifp = NULL; struct bfd_vrf_global *bvrf = THREAD_ARG(t); vrfid = bvrf->vrf->vrf_id; @@ -572,6 +573,15 @@ int bfd_recv_cb(struct thread *t) &local, &peer); } + /* update vrf-id because when in vrf-lite mode, + * the socket is on default namespace + */ + if (ifindex) { + ifp = if_lookup_by_index(ifindex, vrfid); + if (ifp) + vrfid = ifp->vrf_id; + } + /* Implement RFC 5880 6.8.6 */ if (mlen < BFD_PKT_LEN) { cp_debug(is_mhop, &peer, &local, ifindex, vrfid, @@ -951,8 +961,9 @@ int bp_peer_socket(const struct bfd_session *bs) if (bs->key.ifname[0]) device_to_bind = (const char *)bs->key.ifname; - else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) - && bs->key.vrfname[0]) + else if ((!vrf_is_backend_netns() && bs->vrf->vrf_id != VRF_DEFAULT) + || ((CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + && bs->key.vrfname[0]))) device_to_bind = (const char *)bs->key.vrfname; frr_with_privs(&bglobal.bfdd_privs) { @@ -1018,8 +1029,9 @@ int bp_peer_socketv6(const struct bfd_session *bs) if (bs->key.ifname[0]) device_to_bind = (const char *)bs->key.ifname; - else if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) - && bs->key.vrfname[0]) + else if ((!vrf_is_backend_netns() && bs->vrf->vrf_id != VRF_DEFAULT) + || ((CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH) + && bs->key.vrfname[0]))) device_to_bind = (const char *)bs->key.vrfname; frr_with_privs(&bglobal.bfdd_privs) { diff --git a/bfdd/bfdd.c 
b/bfdd/bfdd.c index b8a059708f..0cac990108 100644 --- a/bfdd/bfdd.c +++ b/bfdd/bfdd.c @@ -371,10 +371,6 @@ int main(int argc, char *argv[]) snprintf(ctl_path, sizeof(ctl_path), BFDD_CONTROL_SOCKET, "/", bfdd_di.pathspace); -#if 0 /* TODO add support for JSON configuration files. */ - parse_config(conf); -#endif - /* Initialize FRR infrastructure. */ master = frr_init(); diff --git a/bfdd/ptm_adapter.c b/bfdd/ptm_adapter.c index 44519c47b5..0c70600f20 100644 --- a/bfdd/ptm_adapter.c +++ b/bfdd/ptm_adapter.c @@ -669,17 +669,24 @@ static void bfdd_sessions_enable_interface(struct interface *ifp) struct bfd_session *bs; struct vrf *vrf; + vrf = vrf_lookup_by_id(ifp->vrf_id); + if (!vrf) + return; + TAILQ_FOREACH(bso, &bglobal.bg_obslist, bso_entry) { bs = bso->bso_bs; - /* Interface name mismatch. */ - if (strcmp(ifp->name, bs->key.ifname)) - continue; - vrf = vrf_lookup_by_id(ifp->vrf_id); - if (!vrf) - continue; + /* check vrf name */ if (bs->key.vrfname[0] && strcmp(vrf->name, bs->key.vrfname)) continue; + + /* If Interface matches vrfname, then bypass iface check */ + if (vrf_is_backend_netns() || strcmp(ifp->name, vrf->name)) { + /* Interface name mismatch. */ + if (strcmp(ifp->name, bs->key.ifname)) + continue; + } + /* Skip enabled sessions. */ if (bs->sock != -1) continue; @@ -696,11 +703,15 @@ static void bfdd_sessions_disable_interface(struct interface *ifp) TAILQ_FOREACH(bso, &bglobal.bg_obslist, bso_entry) { bs = bso->bso_bs; - if (strcmp(ifp->name, bs->key.ifname)) + + if (bs->ifp != ifp) continue; + /* Skip disabled sessions. */ - if (bs->sock == -1) + if (bs->sock == -1) { + bs->ifp = NULL; continue; + } bfd_session_disable(bs); bs->ifp = NULL; @@ -759,7 +770,8 @@ void bfdd_sessions_disable_vrf(struct vrf *vrf) static int bfd_ifp_destroy(struct interface *ifp) { if (bglobal.debug_zebra) - zlog_debug("zclient: delete interface %s", ifp->name); + zlog_debug("zclient: delete interface %s (VRF %u)", ifp->name, + ifp->vrf_id); bfdd_sessions_disable_interface(ifp); @@ -812,10 +824,10 @@ static int bfdd_interface_address_update(ZAPI_CALLBACK_ARGS) return 0; if (bglobal.debug_zebra) - zlog_debug("zclient: %s local address %pFX", + zlog_debug("zclient: %s local address %pFX (VRF %u)", cmd == ZEBRA_INTERFACE_ADDRESS_ADD ? "add" : "delete", - ifc->address); + ifc->address, vrf_id); if (cmd == ZEBRA_INTERFACE_ADDRESS_ADD) bfdd_sessions_enable_address(ifc); @@ -828,8 +840,8 @@ static int bfdd_interface_address_update(ZAPI_CALLBACK_ARGS) static int bfd_ifp_create(struct interface *ifp) { if (bglobal.debug_zebra) - zlog_debug("zclient: add interface %s", ifp->name); - + zlog_debug("zclient: add interface %s (VRF %u)", ifp->name, + ifp->vrf_id); bfdd_sessions_enable_interface(ifp); return 0; diff --git a/bgpd/bgp_advertise.h b/bgpd/bgp_advertise.h index f2cf78efde..745a0dffce 100644 --- a/bgpd/bgp_advertise.h +++ b/bgpd/bgp_advertise.h @@ -83,6 +83,9 @@ struct bgp_adj_out { /* Advertisement information. */ struct bgp_advertise *adv; + + /* Attribute hash */ + uint32_t attr_hash; }; RB_HEAD(bgp_adj_out_rb, bgp_adj_out); diff --git a/bgpd/bgp_aspath.h b/bgpd/bgp_aspath.h index 9df352fcd6..4b16818167 100644 --- a/bgpd/bgp_aspath.h +++ b/bgpd/bgp_aspath.h @@ -32,7 +32,7 @@ /* Private AS range defined in RFC2270. */ #define BGP_PRIVATE_AS_MIN 64512U -#define BGP_PRIVATE_AS_MAX 65535U +#define BGP_PRIVATE_AS_MAX UINT16_MAX /* Private 4 byte AS range defined in RFC6996. */ #define BGP_PRIVATE_AS4_MIN 4200000000U @@ -40,7 +40,7 @@ /* we leave BGP_AS_MAX as the 16bit AS MAX number. 
*/ #define BGP_AS_ZERO 0 -#define BGP_AS_MAX 65535U +#define BGP_AS_MAX UINT16_MAX #define BGP_AS4_MAX 4294967295U /* Transition 16Bit AS as defined by IANA */ #define BGP_AS_TRANS 23456U diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c index ce22e8404d..dc8cc81042 100644 --- a/bgpd/bgp_attr.c +++ b/bgpd/bgp_attr.c @@ -1753,14 +1753,22 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args) attr->aggregator_as = aggregator_as; attr->aggregator_addr.s_addr = stream_get_ipv4(peer->curr); - /* Set atomic aggregate flag. */ - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR); - /* Codification of AS 0 Processing */ - if (aggregator_as == BGP_AS_ZERO) + if (aggregator_as == BGP_AS_ZERO) { flog_err(EC_BGP_ATTR_LEN, - "AGGREGATOR AS number is 0 for aspath: %s", - aspath_print(attr->aspath)); + "%s: AGGREGATOR AS number is 0 for aspath: %s", + peer->host, aspath_print(attr->aspath)); + + if (bgp_debug_update(peer, NULL, NULL, 1)) { + char attr_str[BUFSIZ] = {0}; + + bgp_dump_attr(attr, attr_str, sizeof(attr_str)); + + zlog_debug("%s: attributes: %s", __func__, attr_str); + } + } else { + attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR); + } return BGP_ATTR_PARSE_PROCEED; } @@ -1784,16 +1792,26 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args, } aggregator_as = stream_getl(peer->curr); + *as4_aggregator_as = aggregator_as; as4_aggregator_addr->s_addr = stream_get_ipv4(peer->curr); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR); - /* Codification of AS 0 Processing */ - if (aggregator_as == BGP_AS_ZERO) + if (aggregator_as == BGP_AS_ZERO) { flog_err(EC_BGP_ATTR_LEN, - "AS4_AGGREGATOR AS number is 0 for aspath: %s", - aspath_print(attr->aspath)); + "%s: AS4_AGGREGATOR AS number is 0 for aspath: %s", + peer->host, aspath_print(attr->aspath)); + + if (bgp_debug_update(peer, NULL, NULL, 1)) { + char attr_str[BUFSIZ] = {0}; + + bgp_dump_attr(attr, attr_str, sizeof(attr_str)); + + zlog_debug("%s: attributes: %s", __func__, attr_str); + } + } else { + attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR); + } return BGP_ATTR_PARSE_PROCEED; } @@ -3395,7 +3413,8 @@ void bgp_attr_extcom_tunnel_type(struct attr *attr, bgp_encap_types *tunnel_type) { struct ecommunity *ecom; - int i; + uint32_t i; + if (!attr) return; @@ -3900,7 +3919,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, /* Is ASN representable in 2-bytes? Or must AS_TRANS be * used? */ - if (attr->aggregator_as > 65535) { + if (attr->aggregator_as > UINT16_MAX) { stream_putw(s, BGP_AS_TRANS); /* we have to send AS4_AGGREGATOR, too. 
@@ -4021,7 +4040,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, uint8_t *pnt; int tbit; int ecom_tr_size = 0; - int i; + uint32_t i; for (i = 0; i < attr->ecommunity->size; i++) { pnt = attr->ecommunity->val + (i * 8); diff --git a/bgpd/bgp_attr_evpn.c b/bgpd/bgp_attr_evpn.c index 7cc9ecd79e..1df646c346 100644 --- a/bgpd/bgp_attr_evpn.c +++ b/bgpd/bgp_attr_evpn.c @@ -89,7 +89,7 @@ char *ecom_mac2str(char *ecom_mac) /* Fetch router-mac from extended community */ bool bgp_attr_rmac(struct attr *attr, struct ethaddr *rmac) { - int i = 0; + uint32_t i = 0; struct ecommunity *ecom; ecom = attr->ecommunity; @@ -122,7 +122,7 @@ bool bgp_attr_rmac(struct attr *attr, struct ethaddr *rmac) uint8_t bgp_attr_default_gw(struct attr *attr) { struct ecommunity *ecom; - int i; + uint32_t i; ecom = attr->ecommunity; if (!ecom || !ecom->size) @@ -153,7 +153,7 @@ uint8_t bgp_attr_default_gw(struct attr *attr) uint16_t bgp_attr_df_pref_from_ec(struct attr *attr, uint8_t *alg) { struct ecommunity *ecom; - int i; + uint32_t i; uint16_t df_pref = 0; *alg = EVPN_MH_DF_ALG_SERVICE_CARVING; @@ -190,7 +190,7 @@ uint16_t bgp_attr_df_pref_from_ec(struct attr *attr, uint8_t *alg) uint32_t bgp_attr_mac_mobility_seqnum(struct attr *attr, uint8_t *sticky) { struct ecommunity *ecom; - int i; + uint32_t i; uint8_t flags = 0; ecom = attr->ecommunity; @@ -237,7 +237,7 @@ void bgp_attr_evpn_na_flag(struct attr *attr, uint8_t *router_flag, bool *proxy) { struct ecommunity *ecom; - int i; + uint32_t i; uint8_t val; ecom = attr->ecommunity; diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index 82e27884cf..c93713668f 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -434,10 +434,19 @@ static struct stream *bmp_peerstate(struct peer *peer, bool down) case PEER_DOWN_CLOSE_SESSION: type = BMP_PEERDOWN_REMOTE_CLOSE; break; + case PEER_DOWN_WAITING_NHT: + type = BMP_PEERDOWN_LOCAL_FSM; + stream_putw(s, BGP_FSM_TcpConnectionFails); + break; + /* + * TODO: Map remaining PEER_DOWN_* reasons to RFC event codes. + * TODO: Implement BMP_PEERDOWN_LOCAL_NOTIFY. + * + * See RFC7854 ss. 4.9 + */ default: - type = BMP_PEERDOWN_LOCAL_NOTIFY; - stream_put(s, peer->last_reset_cause, - peer->last_reset_cause_size); + type = BMP_PEERDOWN_LOCAL_FSM; + stream_putw(s, BMP_PEER_DOWN_NO_RELEVANT_EVENT_CODE); break; } stream_putc_at(s, type_pos, type); diff --git a/bgpd/bgp_bmp.h b/bgpd/bgp_bmp.h index d6b22d0cbc..2c3ba570ee 100644 --- a/bgpd/bgp_bmp.h +++ b/bgpd/bgp_bmp.h @@ -269,6 +269,8 @@ struct bmp_bgp_peer { /* per struct bgp * data */ PREDECL_HASH(bmp_bgph) +#define BMP_PEER_DOWN_NO_RELEVANT_EVENT_CODE 0x00 + struct bmp_bgp { struct bmp_bgph_item bbi; diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 247d758f8c..e17cce3ff6 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -654,86 +654,6 @@ static bool ecommunity_regexp_match(struct ecommunity *ecom, regex_t *reg) return false; } -#if 0 -/* Delete community attribute using regular expression match. Return - modified communites attribute. */ -static struct community * -community_regexp_delete (struct community *com, regex_t * reg) -{ - int i; - uint32_t comval; - /* Maximum is "65535:65535" + '\0'. 
*/ - char c[12]; - const char *str; - - if (!com) - return NULL; - - i = 0; - while (i < com->size) - { - memcpy (&comval, com_nthval (com, i), sizeof(uint32_t)); - comval = ntohl (comval); - - switch (comval) { - case COMMUNITY_INTERNET: - str = "internet"; - break; - case COMMUNITY_ACCEPT_OWN: - str = "accept-own"; - break; - case COMMUNITY_ROUTE_FILTER_TRANSLATED_v4: - str = "route-filter-translated-v4"; - break; - case COMMUNITY_ROUTE_FILTER_v4: - str = "route-filter-v4"; - break; - case COMMUNITY_ROUTE_FILTER_TRANSLATED_v6: - str = "route-filter-translated-v6"; - break; - case COMMUNITY_ROUTE_FILTER_v6: - str = "route-filter-v6"; - break; - case COMMUNITY_LLGR_STALE: - str = "llgr-stale"; - break; - case COMMUNITY_NO_LLGR: - str = "no-llgr"; - break; - case COMMUNITY_ACCEPT_OWN_NEXTHOP: - str = "accept-own-nexthop"; - break; - case COMMUNITY_BLACKHOLE: - str = "blackhole"; - break; - case COMMUNITY_NO_EXPORT: - str = "no-export"; - break; - case COMMUNITY_NO_ADVERTISE: - str = "no-advertise"; - break; - case COMMUNITY_LOCAL_AS: - str = "local-AS"; - break; - case COMMUNITY_NO_PEER: - str = "no-peer"; - break; - default: - sprintf (c, "%d:%d", (comval >> 16) & 0xFFFF, - comval & 0xFFFF); - str = c; - break; - } - - if (regexec (reg, str, 0, NULL, 0) == 0) - community_del_val (com, com_nthval (com, i)); - else - i++; - } - return com; -} -#endif - /* When given community attribute matches to the community-list return 1 else return 0. */ bool community_list_match(struct community *com, struct community_list *list) @@ -1110,11 +1030,14 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom, } /* Helper to check if every octet do not exceed UINT_MAX */ -static bool lcommunity_list_valid(const char *community) +bool lcommunity_list_valid(const char *community, int style) { int octets; char **splits, **communities; + char *endptr; int num, num_communities; + regex_t *regres; + int invalid = 0; frrstr_split(community, " ", &communities, &num_communities); @@ -1123,25 +1046,43 @@ static bool lcommunity_list_valid(const char *community) frrstr_split(communities[j], ":", &splits, &num); for (int i = 0; i < num; i++) { - if (strtoul(splits[i], NULL, 10) > UINT_MAX) - return false; - if (strlen(splits[i]) == 0) - return false; + /* There is no digit to check */ + invalid++; + + if (style == LARGE_COMMUNITY_LIST_STANDARD) { + if (*splits[i] == '-') + /* Must not be negative */ + invalid++; + else if (strtoul(splits[i], &endptr, 10) + > UINT_MAX) + /* Larger than 4 octets */ + invalid++; + else if (*endptr) + /* Not all characters were digits */ + invalid++; + } else { + regres = bgp_regcomp(communities[j]); + if (!regres) + /* malformed regex */ + invalid++; + else + bgp_regex_free(regres); + } octets++; XFREE(MTYPE_TMP, splits[i]); } XFREE(MTYPE_TMP, splits); - if (octets < 3) - return false; + if (octets != 3) + invalid++; XFREE(MTYPE_TMP, communities[j]); } XFREE(MTYPE_TMP, communities); - return true; + return (invalid > 0) ? false : true; } /* Set lcommunity-list. 
*/ @@ -1176,7 +1117,7 @@ int lcommunity_list_set(struct community_list_handler *ch, const char *name, } if (str) { - if (!lcommunity_list_valid(str)) + if (!lcommunity_list_valid(str, style)) return COMMUNITY_LIST_ERR_MALFORMED_VAL; if (style == LARGE_COMMUNITY_LIST_STANDARD) diff --git a/bgpd/bgp_clist.h b/bgpd/bgp_clist.h index f7d46525a0..632e1730cc 100644 --- a/bgpd/bgp_clist.h +++ b/bgpd/bgp_clist.h @@ -154,6 +154,7 @@ extern int extcommunity_list_unset(struct community_list_handler *ch, extern int lcommunity_list_set(struct community_list_handler *ch, const char *name, const char *str, const char *seq, int direct, int style); +extern bool lcommunity_list_valid(const char *community, int style); extern int lcommunity_list_unset(struct community_list_handler *ch, const char *name, const char *str, const char *seq, int direct, int style); diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c index b6cc2b839f..43138b82f6 100644 --- a/bgpd/bgp_community.c +++ b/bgpd/bgp_community.c @@ -24,6 +24,7 @@ #include "hash.h" #include "memory.h" #include "jhash.h" +#include "frrstr.h" #include "bgpd/bgp_memory.h" #include "bgpd/bgp_community.h" @@ -55,7 +56,7 @@ void community_free(struct community **com) } /* Add one community value to the community. */ -static void community_add_val(struct community *com, uint32_t val) +void community_add_val(struct community *com, uint32_t val) { com->size++; if (com->val) @@ -648,6 +649,31 @@ enum community_token { community_token_unknown }; +/* Helper to check if a given community is valid */ +static bool community_valid(const char *community) +{ + int octets = 0; + char **splits; + int num; + int invalid = 0; + + frrstr_split(community, ":", &splits, &num); + + for (int i = 0; i < num; i++) { + if (strtoul(splits[i], NULL, 10) > UINT16_MAX) + invalid++; + + if (strlen(splits[i]) == 0) + invalid++; + + octets++; + XFREE(MTYPE_TMP, splits[i]); + } + XFREE(MTYPE_TMP, splits); + + return (octets < 2 || invalid) ? false : true; +} + /* Get next community token from string. 
*/ static const char * community_gettoken(const char *buf, enum community_token *token, uint32_t *val) @@ -780,6 +806,11 @@ community_gettoken(const char *buf, enum community_token *token, uint32_t *val) uint32_t community_low = 0; uint32_t community_high = 0; + if (!community_valid(p)) { + *token = community_token_unknown; + return NULL; + } + while (isdigit((unsigned char)*p) || *p == ':') { if (*p == ':') { if (separator) { @@ -810,11 +841,6 @@ community_gettoken(const char *buf, enum community_token *token, uint32_t *val) return NULL; } - if (community_low > UINT16_MAX) { - *token = community_token_unknown; - return NULL; - } - *val = community_high + community_low; *token = community_token_val; return p; diff --git a/bgpd/bgp_community.h b/bgpd/bgp_community.h index b99f38ab64..2a1fbf526a 100644 --- a/bgpd/bgp_community.h +++ b/bgpd/bgp_community.h @@ -88,6 +88,7 @@ extern struct community *community_delete(struct community *, struct community *); extern struct community *community_dup(struct community *); extern bool community_include(struct community *, uint32_t); +extern void community_add_val(struct community *com, uint32_t val); extern void community_del_val(struct community *, uint32_t *); extern unsigned long community_count(void); extern struct hash *community_hash(void); diff --git a/bgpd/bgp_conditional_adv.c b/bgpd/bgp_conditional_adv.c index b5cd1b52b7..b9ea26e862 100644 --- a/bgpd/bgp_conditional_adv.c +++ b/bgpd/bgp_conditional_adv.c @@ -19,8 +19,7 @@ */ #include "bgpd/bgp_conditional_adv.h" - -const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json); +#include "bgpd/bgp_vty.h" static route_map_result_t bgp_check_rmap_prefixes_in_bgp_table(struct bgp_table *table, @@ -198,10 +197,6 @@ static int bgp_conditional_adv_timer(struct thread *t) continue; FOREACH_AFI_SAFI (afi, safi) { - if (strmatch(get_afi_safi_str(afi, safi, true), - "Unknown")) - continue; - if (!peer->afc_nego[afi][safi]) continue; diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c index 94a27ead0e..b740979b82 100644 --- a/bgpd/bgp_damp.c +++ b/bgpd/bgp_damp.c @@ -35,22 +35,117 @@ #include "bgpd/bgp_route.h" #include "bgpd/bgp_attr.h" #include "bgpd/bgp_advertise.h" +#include "bgpd/bgp_vty.h" -const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json); +static void bgp_reuselist_add(struct reuselist *list, + struct bgp_damp_info *info) +{ + struct reuselist_node *new_node; + + assert(info); + new_node = XCALLOC(MTYPE_BGP_DAMP_REUSELIST, sizeof(*new_node)); + new_node->info = info; + SLIST_INSERT_HEAD(list, new_node, entry); +} + +static void bgp_reuselist_del(struct reuselist *list, + struct reuselist_node **node) +{ + if ((*node) == NULL) + return; + assert(list && node && *node); + SLIST_REMOVE(list, (*node), reuselist_node, entry); + XFREE(MTYPE_BGP_DAMP_REUSELIST, (*node)); + *node = NULL; +} + +static void bgp_reuselist_switch(struct reuselist *source, + struct reuselist_node *node, + struct reuselist *target) +{ + assert(source && target && node); + SLIST_REMOVE(source, node, reuselist_node, entry); + SLIST_INSERT_HEAD(target, node, entry); +} + +static void bgp_reuselist_free(struct reuselist *list) +{ + struct reuselist_node *rn; + + assert(list); + while ((rn = SLIST_FIRST(list)) != NULL) + bgp_reuselist_del(list, &rn); +} + +static struct reuselist_node *bgp_reuselist_find(struct reuselist *list, + struct bgp_damp_info *info) +{ + struct reuselist_node *rn; + + assert(list && info); + SLIST_FOREACH (rn, list, entry) { + if (rn->info == info) + return rn; + } + return NULL; +} + 
+static void bgp_damp_info_unclaim(struct bgp_damp_info *bdi) +{ + struct reuselist_node *node; -/* Global variable to access damping configuration */ -static struct bgp_damp_config damp[AFI_MAX][SAFI_MAX]; + assert(bdi && bdi->config); + if (bdi->index == BGP_DAMP_NO_REUSE_LIST_INDEX) { + node = bgp_reuselist_find(&bdi->config->no_reuse_list, bdi); + if (node) + bgp_reuselist_del(&bdi->config->no_reuse_list, &node); + } else { + node = bgp_reuselist_find(&bdi->config->reuse_list[bdi->index], + bdi); + if (node) + bgp_reuselist_del(&bdi->config->reuse_list[bdi->index], + &node); + } + bdi->config = NULL; +} + +static void bgp_damp_info_claim(struct bgp_damp_info *bdi, + struct bgp_damp_config *bdc) +{ + assert(bdc && bdi); + if (bdi->config == NULL) { + bdi->config = bdc; + return; + } + bgp_damp_info_unclaim(bdi); + bdi->config = bdc; + bdi->afi = bdc->afi; + bdi->safi = bdc->safi; +} -/* Utility macro to add and delete BGP dampening information to no - used list. */ -#define BGP_DAMP_LIST_ADD(N, A) BGP_PATH_INFO_ADD(N, A, no_reuse_list) -#define BGP_DAMP_LIST_DEL(N, A) BGP_PATH_INFO_DEL(N, A, no_reuse_list) +struct bgp_damp_config *get_active_bdc_from_pi(struct bgp_path_info *pi, + afi_t afi, safi_t safi) +{ + if (!pi) + return NULL; + if (CHECK_FLAG(pi->peer->af_flags[afi][safi], + PEER_FLAG_CONFIG_DAMPENING)) + return &pi->peer->damp[afi][safi]; + if (peer_group_active(pi->peer)) + if (CHECK_FLAG(pi->peer->group->conf->af_flags[afi][safi], + PEER_FLAG_CONFIG_DAMPENING)) + return &pi->peer->group->conf->damp[afi][safi]; + if (CHECK_FLAG(pi->peer->bgp->af_flags[afi][safi], + BGP_CONFIG_DAMPENING)) + return &pi->peer->bgp->damp[afi][safi]; + return NULL; +} /* Calculate reuse list index by penalty value. */ static int bgp_reuse_index(int penalty, struct bgp_damp_config *bdc) { unsigned int i; - int index; + unsigned int index; /* * reuse_limit can't be zero, this is for Coverity @@ -73,27 +168,45 @@ static int bgp_reuse_index(int penalty, struct bgp_damp_config *bdc) static void bgp_reuse_list_add(struct bgp_damp_info *bdi, struct bgp_damp_config *bdc) { - int index; - - index = bdi->index = bgp_reuse_index(bdi->penalty, bdc); - - bdi->prev = NULL; - bdi->next = bdc->reuse_list[index]; - if (bdc->reuse_list[index]) - bdc->reuse_list[index]->prev = bdi; - bdc->reuse_list[index] = bdi; + bgp_damp_info_claim(bdi, bdc); + bdi->index = bgp_reuse_index(bdi->penalty, bdc); + bgp_reuselist_add(&bdc->reuse_list[bdi->index], bdi); } /* Delete BGP dampening information from reuse list. 
*/ static void bgp_reuse_list_delete(struct bgp_damp_info *bdi, struct bgp_damp_config *bdc) { - if (bdi->next) - bdi->next->prev = bdi->prev; - if (bdi->prev) - bdi->prev->next = bdi->next; - else - bdc->reuse_list[bdi->index] = bdi->next; + struct reuselist *list; + struct reuselist_node *rn; + + list = &bdc->reuse_list[bdi->index]; + rn = bgp_reuselist_find(list, bdi); + bgp_damp_info_unclaim(bdi); + bgp_reuselist_del(list, &rn); +} + +static void bgp_no_reuse_list_add(struct bgp_damp_info *bdi, + struct bgp_damp_config *bdc) +{ + bgp_damp_info_claim(bdi, bdc); + bdi->index = BGP_DAMP_NO_REUSE_LIST_INDEX; + bgp_reuselist_add(&bdc->no_reuse_list, bdi); +} + +static void bgp_no_reuse_list_delete(struct bgp_damp_info *bdi, + struct bgp_damp_config *bdc) +{ + struct reuselist_node *rn; + + assert(bdc && bdi); + if (bdi->config == NULL) { + bgp_damp_info_unclaim(bdi); + return; + } + bdi->config = NULL; + rn = bgp_reuselist_find(&bdc->no_reuse_list, bdi); + bgp_reuselist_del(&bdc->no_reuse_list, &rn); } /* Return decayed penalty value. */ @@ -116,32 +229,34 @@ int bgp_damp_decay(time_t tdiff, int penalty, struct bgp_damp_config *bdc) is evaluated. RFC2439 Section 4.8.7. */ static int bgp_reuse_timer(struct thread *t) { + struct bgp_damp_config *bdc = THREAD_ARG(t); struct bgp_damp_info *bdi; - struct bgp_damp_info *next; + struct reuselist plist; + struct reuselist_node *node; + struct bgp *bgp; time_t t_now, t_diff; - struct bgp_damp_config *bdc = THREAD_ARG(t); - - bdc->t_reuse = NULL; thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE, &bdc->t_reuse); t_now = bgp_clock(); - /* 1. save a pointer to the current zeroth queue head and zero the - list head entry. */ - bdi = bdc->reuse_list[bdc->reuse_offset]; - bdc->reuse_list[bdc->reuse_offset] = NULL; + /* 1. save a pointer to the current queue head and zero the list head + * list head entry. */ + assert(bdc->reuse_offset < bdc->reuse_list_size); + plist = bdc->reuse_list[bdc->reuse_offset]; + node = SLIST_FIRST(&plist); + SLIST_INIT(&bdc->reuse_list[bdc->reuse_offset]); /* 2. set offset = modulo reuse-list-size ( offset + 1 ), thereby rotating the circular queue of list-heads. */ bdc->reuse_offset = (bdc->reuse_offset + 1) % bdc->reuse_list_size; + assert(bdc->reuse_offset < bdc->reuse_list_size); /* 3. if ( the saved list head pointer is non-empty ) */ - for (; bdi; bdi = next) { - struct bgp *bgp = bdi->path->peer->bgp; - - next = bdi->next; + while ((node = SLIST_FIRST(&plist)) != NULL) { + bdi = node->info; + bgp = bdi->path->peer->bgp; /* Set t-diff = t-now - t-updated. */ t_diff = t_now - bdi->t_updated; @@ -170,16 +285,27 @@ static int bgp_reuse_timer(struct thread *t) bdi->safi); } - if (bdi->penalty <= bdc->reuse_limit / 2.0) - bgp_damp_info_free(bdi, 1, bdc->afi, bdc->safi); - else - BGP_DAMP_LIST_ADD(bdc, bdi); - } else + if (bdi->penalty <= bdc->reuse_limit / 2.0) { + bgp_damp_info_free(&bdi, bdc, 1, bdi->afi, + bdi->safi); + bgp_reuselist_del(&plist, &node); + } else { + node->info->index = + BGP_DAMP_NO_REUSE_LIST_INDEX; + bgp_reuselist_switch(&plist, node, + &bdc->no_reuse_list); + } + } else { /* Re-insert into another list (See RFC2439 Section * 4.8.6). 
*/ - bgp_reuse_list_add(bdi, bdc); + bdi->index = bgp_reuse_index(bdi->penalty, bdc); + bgp_reuselist_switch(&plist, node, + &bdc->reuse_list[bdi->index]); + } } + assert(SLIST_EMPTY(&plist)); + return 0; } @@ -190,10 +316,13 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest, time_t t_now; struct bgp_damp_info *bdi = NULL; unsigned int last_penalty = 0; - struct bgp_damp_config *bdc = &damp[afi][safi]; + struct bgp_damp_config *bdc; - t_now = bgp_clock(); + bdc = get_active_bdc_from_pi(path, afi, safi); + if (!bdc) + return BGP_DAMP_USED; + t_now = bgp_clock(); /* Processing Unreachable Messages. */ if (path->extra) bdi = path->extra->damp_info; @@ -215,12 +344,13 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest, bdi->flap = 1; bdi->start_time = t_now; bdi->suppress_time = 0; - bdi->index = -1; + bdi->index = BGP_DAMP_NO_REUSE_LIST_INDEX; bdi->afi = afi; bdi->safi = safi; (bgp_path_info_extra_get(path))->damp_info = bdi; - BGP_DAMP_LIST_ADD(bdc, bdi); + bgp_no_reuse_list_add(bdi, bdc); } else { + bgp_damp_info_claim(bdi, bdc); last_penalty = bdi->penalty; /* 1. Set t-diff = t-now - t-updated. */ @@ -246,7 +376,7 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest, /* Remove the route from a reuse list if it is on one. */ if (CHECK_FLAG(bdi->path->flags, BGP_PATH_DAMPED)) { /* If decay rate isn't equal to 0, reinsert brn. */ - if (bdi->penalty != last_penalty && bdi->index >= 0) { + if (bdi->penalty != last_penalty) { bgp_reuse_list_delete(bdi, bdc); bgp_reuse_list_add(bdi, bdc); } @@ -258,10 +388,9 @@ int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest, if (bdi->penalty >= bdc->suppress_value) { bgp_path_info_set_flag(dest, path, BGP_PATH_DAMPED); bdi->suppress_time = t_now; - BGP_DAMP_LIST_DEL(bdc, bdi); + bgp_no_reuse_list_delete(bdi, bdc); bgp_reuse_list_add(bdi, bdc); } - return BGP_DAMP_USED; } @@ -271,7 +400,10 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest, time_t t_now; struct bgp_damp_info *bdi; int status; - struct bgp_damp_config *bdc = &damp[afi][safi]; + struct bgp_damp_config *bdc; + + bdc = get_active_bdc_from_pi(path, afi, safi); + assert(bdc); if (!path->extra || !((bdi = path->extra->damp_info))) return BGP_DAMP_USED; @@ -290,7 +422,7 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest, && (bdi->penalty < bdc->reuse_limit)) { bgp_path_info_unset_flag(dest, path, BGP_PATH_DAMPED); bgp_reuse_list_delete(bdi, bdc); - BGP_DAMP_LIST_ADD(bdc, bdi); + bgp_no_reuse_list_add(bdi, bdc); bdi->suppress_time = 0; status = BGP_DAMP_USED; } else @@ -298,36 +430,29 @@ int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest, if (bdi->penalty > bdc->reuse_limit / 2.0) bdi->t_updated = t_now; - else - bgp_damp_info_free(bdi, 0, afi, safi); + else { + bgp_damp_info_unclaim(bdi); + bgp_damp_info_free(&bdi, bdc, 0, afi, safi); + } return status; } -void bgp_damp_info_free(struct bgp_damp_info *bdi, int withdraw, afi_t afi, - safi_t safi) +void bgp_damp_info_free(struct bgp_damp_info **bdi, struct bgp_damp_config *bdc, + int withdraw, afi_t afi, safi_t safi) { - struct bgp_path_info *path; - struct bgp_damp_config *bdc = &damp[afi][safi]; + assert(bdc && bdi && *bdi); - if (!bdi) + if ((*bdi)->path == NULL) { + XFREE(MTYPE_BGP_DAMP_INFO, (*bdi)); return; + } - path = bdi->path; - path->extra->damp_info = NULL; - - if (CHECK_FLAG(path->flags, BGP_PATH_DAMPED)) - bgp_reuse_list_delete(bdi, bdc); - else - BGP_DAMP_LIST_DEL(bdc, bdi); - - 
bgp_path_info_unset_flag(bdi->dest, path, + (*bdi)->path->extra->damp_info = NULL; + bgp_path_info_unset_flag((*bdi)->dest, (*bdi)->path, BGP_PATH_HISTORY | BGP_PATH_DAMPED); - - if (bdi->lastrecord == BGP_RECORD_WITHDRAW && withdraw) - bgp_path_info_delete(bdi->dest, path); - - XFREE(MTYPE_BGP_DAMP_INFO, bdi); + if ((*bdi)->lastrecord == BGP_RECORD_WITHDRAW && withdraw) + bgp_path_info_delete((*bdi)->dest, (*bdi)->path); } static void bgp_damp_parameter_set(int hlife, int reuse, int sup, int maxsup, @@ -370,8 +495,7 @@ static void bgp_damp_parameter_set(int hlife, int reuse, int sup, int maxsup, bdc->reuse_list = XCALLOC(MTYPE_BGP_DAMP_ARRAY, - bdc->reuse_list_size * sizeof(struct bgp_reuse_node *)); - + bdc->reuse_list_size * sizeof(struct reuselist)); /* Reuse-array computations */ bdc->reuse_index = XCALLOC(MTYPE_BGP_DAMP_ARRAY, sizeof(int) * bdc->reuse_index_size); @@ -398,7 +522,7 @@ static void bgp_damp_parameter_set(int hlife, int reuse, int sup, int maxsup, int bgp_damp_enable(struct bgp *bgp, afi_t afi, safi_t safi, time_t half, unsigned int reuse, unsigned int suppress, time_t max) { - struct bgp_damp_config *bdc = &damp[afi][safi]; + struct bgp_damp_config *bdc = &bgp->damp[afi][safi]; if (CHECK_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING)) { if (bdc->half_life == half && bdc->reuse_limit == reuse @@ -410,6 +534,8 @@ int bgp_damp_enable(struct bgp *bgp, afi_t afi, safi_t safi, time_t half, SET_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING); bgp_damp_parameter_set(half, reuse, suppress, max, bdc); + bdc->afi = afi; + bdc->safi = safi; /* Register reuse timer. */ thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE, @@ -418,8 +544,30 @@ int bgp_damp_enable(struct bgp *bgp, afi_t afi, safi_t safi, time_t half, return 0; } -static void bgp_damp_config_clean(struct bgp_damp_config *bdc) +/* Clean all the bgp_damp_info stored in reuse_list and no_reuse_list. */ +void bgp_damp_info_clean(struct bgp_damp_config *bdc, afi_t afi, safi_t safi) { + struct bgp_damp_info *bdi; + struct reuselist_node *rn; + struct reuselist *list; + unsigned int i; + + bdc->reuse_offset = 0; + for (i = 0; i < bdc->reuse_list_size; ++i) { + list = &bdc->reuse_list[i]; + while ((rn = SLIST_FIRST(list)) != NULL) { + bdi = rn->info; + bgp_reuselist_del(list, &rn); + bgp_damp_info_free(&bdi, bdc, 1, afi, safi); + } + } + + while ((rn = SLIST_FIRST(&bdc->no_reuse_list)) != NULL) { + bdi = rn->info; + bgp_reuselist_del(&bdc->no_reuse_list, &rn); + bgp_damp_info_free(&bdi, bdc, 1, afi, safi); + } + /* Free decay array */ XFREE(MTYPE_BGP_DAMP_ARRAY, bdc->decay_array); bdc->decay_array_size = 0; @@ -429,96 +577,81 @@ static void bgp_damp_config_clean(struct bgp_damp_config *bdc) bdc->reuse_index_size = 0; /* Free reuse list array. */ + for (i = 0; i < bdc->reuse_list_size; ++i) + bgp_reuselist_free(&bdc->reuse_list[i]); + XFREE(MTYPE_BGP_DAMP_ARRAY, bdc->reuse_list); bdc->reuse_list_size = 0; -} - -/* Clean all the bgp_damp_info stored in reuse_list. 
*/ -void bgp_damp_info_clean(afi_t afi, safi_t safi) -{ - unsigned int i; - struct bgp_damp_info *bdi, *next; - struct bgp_damp_config *bdc = &damp[afi][safi]; - - bdc->reuse_offset = 0; - - for (i = 0; i < bdc->reuse_list_size; i++) { - if (!bdc->reuse_list[i]) - continue; - - for (bdi = bdc->reuse_list[i]; bdi; bdi = next) { - next = bdi->next; - bgp_damp_info_free(bdi, 1, afi, safi); - } - bdc->reuse_list[i] = NULL; - } - for (bdi = bdc->no_reuse_list; bdi; bdi = next) { - next = bdi->next; - bgp_damp_info_free(bdi, 1, afi, safi); - } - bdc->no_reuse_list = NULL; + THREAD_OFF(bdc->t_reuse); } +/* Disable route flap dampening for a bgp instance. + * + * Please note that this function also gets used to free memory when deleting a + * bgp instance. + */ int bgp_damp_disable(struct bgp *bgp, afi_t afi, safi_t safi) { - struct bgp_damp_config *bdc = &damp[afi][safi]; + struct bgp_damp_config *bdc; + + bdc = &bgp->damp[afi][safi]; + if (!bdc) + return 0; + /* If it wasn't enabled, there's nothing to do. */ if (!CHECK_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING)) return 0; /* Cancel reuse event. */ - thread_cancel(&(bdc->t_reuse)); + thread_cancel(&bdc->t_reuse); /* Clean BGP dampening information. */ - bgp_damp_info_clean(afi, safi); - - /* Clear configuration */ - bgp_damp_config_clean(bdc); + bgp_damp_info_clean(bdc, afi, safi); UNSET_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING); + return 0; } -void bgp_config_write_damp(struct vty *vty, afi_t afi, safi_t safi) +void bgp_config_write_damp(struct vty *vty, struct bgp *bgp, afi_t afi, + safi_t safi) { - if (damp[afi][safi].half_life == DEFAULT_HALF_LIFE * 60 - && damp[afi][safi].reuse_limit == DEFAULT_REUSE - && damp[afi][safi].suppress_value == DEFAULT_SUPPRESS - && damp[afi][safi].max_suppress_time - == damp[afi][safi].half_life * 4) + struct bgp_damp_config *bdc; + + bdc = &bgp->damp[afi][safi]; + if (bdc->half_life == DEFAULT_HALF_LIFE * 60 + && bdc->reuse_limit == DEFAULT_REUSE + && bdc->suppress_value == DEFAULT_SUPPRESS + && bdc->max_suppress_time == bdc->half_life * 4) vty_out(vty, " bgp dampening\n"); - else if (damp[afi][safi].half_life != DEFAULT_HALF_LIFE * 60 - && damp[afi][safi].reuse_limit == DEFAULT_REUSE - && damp[afi][safi].suppress_value == DEFAULT_SUPPRESS - && damp[afi][safi].max_suppress_time - == damp[afi][safi].half_life * 4) - vty_out(vty, " bgp dampening %lld\n", - damp[afi][safi].half_life / 60LL); + else if (bdc->half_life != DEFAULT_HALF_LIFE * 60 + && bdc->reuse_limit == DEFAULT_REUSE + && bdc->suppress_value == DEFAULT_SUPPRESS + && bdc->max_suppress_time == bdc->half_life * 4) + vty_out(vty, " bgp dampening %lld\n", bdc->half_life / 60LL); else vty_out(vty, " bgp dampening %lld %d %d %lld\n", - damp[afi][safi].half_life / 60LL, - damp[afi][safi].reuse_limit, - damp[afi][safi].suppress_value, - damp[afi][safi].max_suppress_time / 60LL); + bdc->half_life / 60LL, bdc->reuse_limit, + bdc->suppress_value, bdc->max_suppress_time / 60LL); } -static const char *bgp_get_reuse_time(unsigned int penalty, char *buf, - size_t len, afi_t afi, safi_t safi, - bool use_json, json_object *json) +static const char *bgp_get_reuse_time(struct bgp_damp_config *bdc, + unsigned int penalty, char *buf, + size_t len, bool use_json, + json_object *json) { time_t reuse_time = 0; struct tm tm; int time_store = 0; - if (penalty > damp[afi][safi].reuse_limit) { + if (penalty > bdc->reuse_limit) { reuse_time = (int)(DELTA_T - * ((log((double)damp[afi][safi].reuse_limit - / penalty)) - / (log(damp[afi][safi].decay_array[1])))); 
+ * ((log((double)bdc->reuse_limit / penalty)) + / (log(bdc->decay_array[1])))); - if (reuse_time > damp[afi][safi].max_suppress_time) - reuse_time = damp[afi][safi].max_suppress_time; + if (reuse_time > bdc->max_suppress_time) + reuse_time = bdc->max_suppress_time; gmtime_r(&reuse_time, &tm); } else @@ -570,14 +703,15 @@ static const char *bgp_get_reuse_time(unsigned int penalty, char *buf, return buf; } -void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path, afi_t afi, - safi_t safi, json_object *json_path) +void bgp_damp_info_vty(struct vty *vty, struct bgp *bgp, + struct bgp_path_info *path, afi_t afi, safi_t safi, + json_object *json_path) { struct bgp_damp_info *bdi; time_t t_now, t_diff; char timebuf[BGP_UPTIME_LEN]; int penalty; - struct bgp_damp_config *bdc = &damp[afi][safi]; + struct bgp_damp_config *bdc = &bgp->damp[afi][safi]; if (!path->extra) return; @@ -603,8 +737,8 @@ void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path, afi_t afi, if (CHECK_FLAG(path->flags, BGP_PATH_DAMPED) && !CHECK_FLAG(path->flags, BGP_PATH_HISTORY)) - bgp_get_reuse_time(penalty, timebuf, BGP_UPTIME_LEN, - afi, safi, 1, json_path); + bgp_get_reuse_time(bdc, penalty, timebuf, + BGP_UPTIME_LEN, 1, json_path); } else { vty_out(vty, " Dampinfo: penalty %d, flapped %d times in %s", @@ -615,14 +749,15 @@ void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path, afi_t afi, if (CHECK_FLAG(path->flags, BGP_PATH_DAMPED) && !CHECK_FLAG(path->flags, BGP_PATH_HISTORY)) vty_out(vty, ", reuse in %s", - bgp_get_reuse_time(penalty, timebuf, - BGP_UPTIME_LEN, afi, safi, 0, + bgp_get_reuse_time(bdc, penalty, timebuf, + BGP_UPTIME_LEN, 0, json_path)); vty_out(vty, "\n"); } } + const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path, char *timebuf, size_t len, afi_t afi, safi_t safi, bool use_json, @@ -631,7 +766,11 @@ const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path, struct bgp_damp_info *bdi; time_t t_now, t_diff; int penalty; - struct bgp_damp_config *bdc = &damp[afi][safi]; + struct bgp_damp_config *bdc; + + bdc = get_active_bdc_from_pi(path, afi, safi); + if (!bdc) + return NULL; if (!path->extra) return NULL; @@ -649,24 +788,23 @@ const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path, t_diff = t_now - bdi->t_updated; penalty = bgp_damp_decay(t_diff, bdi->penalty, bdc); - return bgp_get_reuse_time(penalty, timebuf, len, afi, safi, use_json, - json); + return bgp_get_reuse_time(bdc, penalty, timebuf, len, use_json, json); } + static int bgp_print_dampening_parameters(struct bgp *bgp, struct vty *vty, afi_t afi, safi_t safi) { + struct bgp_damp_config *bdc; if (CHECK_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING)) { + bdc = &bgp->damp[afi][safi]; vty_out(vty, "Half-life time: %lld min\n", - (long long)damp[afi][safi].half_life / 60); - vty_out(vty, "Reuse penalty: %d\n", - damp[afi][safi].reuse_limit); - vty_out(vty, "Suppress penalty: %d\n", - damp[afi][safi].suppress_value); + (long long)bdc->half_life / 60); + vty_out(vty, "Reuse penalty: %d\n", bdc->reuse_limit); + vty_out(vty, "Suppress penalty: %d\n", bdc->suppress_value); vty_out(vty, "Max suppress time: %lld min\n", - (long long)damp[afi][safi].max_suppress_time / 60); - vty_out(vty, "Max suppress penalty: %u\n", - damp[afi][safi].ceiling); + (long long)bdc->max_suppress_time / 60); + vty_out(vty, "Max suppress penalty: %u\n", bdc->ceiling); vty_out(vty, "\n"); } else vty_out(vty, "dampening not enabled for %s\n", @@ -679,8 +817,8 @@ int 
bgp_show_dampening_parameters(struct vty *vty, afi_t afi, safi_t safi, uint8_t show_flags) { struct bgp *bgp; - bgp = bgp_get_default(); + bgp = bgp_get_default(); if (bgp == NULL) { vty_out(vty, "No BGP process is configured\n"); return CMD_WARNING; @@ -719,3 +857,132 @@ int bgp_show_dampening_parameters(struct vty *vty, afi_t afi, safi_t safi, } return CMD_SUCCESS; } + +void bgp_peer_damp_enable(struct peer *peer, afi_t afi, safi_t safi, + time_t half, unsigned int reuse, + unsigned int suppress, time_t max) +{ + struct bgp_damp_config *bdc; + + if (!peer) + return; + bdc = &peer->damp[afi][safi]; + if (peer_af_flag_check(peer, afi, safi, PEER_FLAG_CONFIG_DAMPENING)) { + if (bdc->half_life == half && bdc->reuse_limit == reuse + && bdc->suppress_value == suppress + && bdc->max_suppress_time == max) + return; + bgp_peer_damp_disable(peer, afi, safi); + } + SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_CONFIG_DAMPENING); + bgp_damp_parameter_set(half, reuse, suppress, max, bdc); + bdc->afi = afi; + bdc->safi = safi; + thread_add_timer(bm->master, bgp_reuse_timer, bdc, DELTA_REUSE, + &bdc->t_reuse); +} + +/* Disable route flap dampening for a peer. + * + * Please note that this function also gets used to free memory when deleting a + * peer or peer group. + */ +void bgp_peer_damp_disable(struct peer *peer, afi_t afi, safi_t safi) +{ + struct bgp_damp_config *bdc; + + if (!peer_af_flag_check(peer, afi, safi, PEER_FLAG_CONFIG_DAMPENING)) + return; + bdc = &peer->damp[afi][safi]; + if (!bdc) + return; + bgp_damp_info_clean(bdc, afi, safi); + UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_CONFIG_DAMPENING); +} + +void bgp_config_write_peer_damp(struct vty *vty, struct peer *peer, afi_t afi, + safi_t safi) +{ + struct bgp_damp_config *bdc; + + bdc = &peer->damp[afi][safi]; + if (bdc->half_life == DEFAULT_HALF_LIFE * 60 + && bdc->reuse_limit == DEFAULT_REUSE + && bdc->suppress_value == DEFAULT_SUPPRESS + && bdc->max_suppress_time == bdc->half_life * 4) + vty_out(vty, " neighbor %s dampening\n", peer->host); + else if (bdc->half_life != DEFAULT_HALF_LIFE * 60 + && bdc->reuse_limit == DEFAULT_REUSE + && bdc->suppress_value == DEFAULT_SUPPRESS + && bdc->max_suppress_time == bdc->half_life * 4) + vty_out(vty, " neighbor %s dampening %lld\n", peer->host, + bdc->half_life / 60LL); + else + vty_out(vty, " neighbor %s dampening %lld %d %d %lld\n", + peer->host, bdc->half_life / 60LL, bdc->reuse_limit, + bdc->suppress_value, bdc->max_suppress_time / 60LL); +} + +static void bgp_print_peer_dampening_parameters(struct vty *vty, + struct peer *peer, afi_t afi, + safi_t safi, bool use_json, + json_object *json) +{ + struct bgp_damp_config *bdc; + + if (!peer) + return; + if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_CONFIG_DAMPENING)) { + bdc = &peer->damp[afi][safi]; + if (!bdc) + return; + if (use_json) { + json_object_int_add(json, "halfLifeSecs", + bdc->half_life); + json_object_int_add(json, "reusePenalty", + bdc->reuse_limit); + json_object_int_add(json, "suppressPenalty", + bdc->suppress_value); + json_object_int_add(json, "maxSuppressTimeSecs", + bdc->max_suppress_time); + json_object_int_add(json, "maxSuppressPenalty", + bdc->ceiling); + } else { + vty_out(vty, "Half-life time: %lld min\n", + (long long)bdc->half_life / 60); + vty_out(vty, "Reuse penalty: %d\n", bdc->reuse_limit); + vty_out(vty, "Suppress penalty: %d\n", + bdc->suppress_value); + vty_out(vty, "Max suppress time: %lld min\n", + (long long)bdc->max_suppress_time / 60); + vty_out(vty, "Max suppress penalty: %u\n", + bdc->ceiling); 
+ vty_out(vty, "\n"); + } + } else if (!use_json) + vty_out(vty, "neighbor dampening not enabled for %s\n", + get_afi_safi_str(afi, safi, false)); +} + +void bgp_show_peer_dampening_parameters(struct vty *vty, struct peer *peer, + afi_t afi, safi_t safi, bool use_json) +{ + json_object *json; + + if (use_json) { + json = json_object_new_object(); + json_object_string_add(json, "addressFamily", + get_afi_safi_str(afi, safi, false)); + bgp_print_peer_dampening_parameters(vty, peer, afi, safi, true, + json); + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } else { + vty_out(vty, "\nFor address family: %s\n", + get_afi_safi_str(afi, safi, false)); + bgp_print_peer_dampening_parameters(vty, peer, afi, safi, false, + NULL); + } +} diff --git a/bgpd/bgp_damp.h b/bgpd/bgp_damp.h index 604706300b..521f59b296 100644 --- a/bgpd/bgp_damp.h +++ b/bgpd/bgp_damp.h @@ -25,11 +25,6 @@ /* Structure maintained on a per-route basis. */ struct bgp_damp_info { - /* Doubly linked list. This information must be linked to - reuse_list or no_reuse_list. */ - struct bgp_damp_info *next; - struct bgp_damp_info *prev; - /* Figure-of-merit. */ unsigned int penalty; @@ -45,6 +40,9 @@ struct bgp_damp_info { /* Time of route start to be suppressed. */ time_t suppress_time; + /* Back reference to associated dampening configuration. */ + struct bgp_damp_config *config; + /* Back reference to bgp_path_info. */ struct bgp_path_info *path; @@ -53,6 +51,8 @@ struct bgp_damp_info { /* Current index in the reuse_list. */ int index; +#define BGP_DAMP_NO_REUSE_LIST_INDEX \ + (-1) /* index for elements on no_reuse_list */ /* Last time message type. */ uint8_t lastrecord; @@ -63,6 +63,13 @@ struct bgp_damp_info { safi_t safi; }; +struct reuselist_node { + SLIST_ENTRY(reuselist_node) entry; + struct bgp_damp_info *info; +}; + +SLIST_HEAD(reuselist, reuselist_node); + /* Specified parameter set configuration. */ struct bgp_damp_config { /* Value over which routes suppressed. */ @@ -100,11 +107,11 @@ struct bgp_damp_config { int *reuse_index; /* Reuse list array per-set based. */ - struct bgp_damp_info **reuse_list; - int reuse_offset; + struct reuselist *reuse_list; + unsigned int reuse_offset; /* All dampening information which is not on reuse list. */ - struct bgp_damp_info *no_reuse_list; + struct reuselist no_reuse_list; /* Reuse timer thread per-set base. 
*/ struct thread *t_reuse; @@ -132,6 +139,8 @@ struct bgp_damp_config { #define REUSE_LIST_SIZE 256 #define REUSE_ARRAY_SIZE 1024 +extern struct bgp_damp_config *get_active_bdc_from_pi(struct bgp_path_info *pi, + afi_t afi, safi_t safi); extern int bgp_damp_enable(struct bgp *, afi_t, safi_t, time_t, unsigned int, unsigned int, time_t); extern int bgp_damp_disable(struct bgp *, afi_t, safi_t); @@ -139,13 +148,18 @@ extern int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest, afi_t afi, safi_t safi, int attr_change); extern int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest, afi_t afi, safi_t saff); -extern void bgp_damp_info_free(struct bgp_damp_info *path, int withdraw, +extern void bgp_damp_info_free(struct bgp_damp_info **path, + struct bgp_damp_config *bdc, int withdraw, afi_t afi, safi_t safi); -extern void bgp_damp_info_clean(afi_t afi, safi_t safi); +extern void bgp_damp_info_clean(struct bgp_damp_config *bdc, afi_t afi, + safi_t safi); +extern void bgp_damp_config_clean(struct bgp_damp_config *bdc); extern int bgp_damp_decay(time_t, int, struct bgp_damp_config *damp); -extern void bgp_config_write_damp(struct vty *, afi_t afi, safi_t safi); -extern void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path, - afi_t afi, safi_t safi, json_object *json_path); +extern void bgp_config_write_damp(struct vty *vty, struct bgp *bgp, afi_t afi, + safi_t safi); +extern void bgp_damp_info_vty(struct vty *vty, struct bgp *bgp, + struct bgp_path_info *path, afi_t afi, + safi_t safi, json_object *json_path); extern const char *bgp_damp_reuse_time_vty(struct vty *vty, struct bgp_path_info *path, char *timebuf, size_t len, afi_t afi, @@ -153,5 +167,14 @@ extern const char *bgp_damp_reuse_time_vty(struct vty *vty, json_object *json); extern int bgp_show_dampening_parameters(struct vty *vty, afi_t, safi_t, uint8_t); +extern void bgp_peer_damp_enable(struct peer *peer, afi_t afi, safi_t safi, + time_t half, unsigned int reuse, + unsigned int suppress, time_t max); +extern void bgp_peer_damp_disable(struct peer *peer, afi_t afi, safi_t safi); +extern void bgp_config_write_peer_damp(struct vty *vty, struct peer *peer, + afi_t afi, safi_t safi); +extern void bgp_show_peer_dampening_parameters(struct vty *vty, + struct peer *peer, afi_t afi, + safi_t safi, bool use_json); #endif /* _QUAGGA_BGP_DAMP_H */ diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 2c076fb80b..3afa6eaf09 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -38,6 +38,7 @@ #include "bgpd/bgp_attr.h" #include "bgpd/bgp_debug.h" #include "bgpd/bgp_community.h" +#include "bgpd/bgp_lcommunity.h" #include "bgpd/bgp_updgrp.h" #include "bgpd/bgp_mplsvpn.h" #include "bgpd/bgp_ecommunity.h" @@ -118,7 +119,7 @@ static const struct message bgp_notify_msg[] = { {BGP_NOTIFY_HOLD_ERR, "Hold Timer Expired"}, {BGP_NOTIFY_FSM_ERR, "Neighbor Events Error"}, {BGP_NOTIFY_CEASE, "Cease"}, - {BGP_NOTIFY_CAPABILITY_ERR, "CAPABILITY Message Error"}, + {BGP_NOTIFY_ROUTE_REFRESH_ERR, "ROUTE-REFRESH Message Error"}, {0}}; static const struct message bgp_notify_head_msg[] = { @@ -166,11 +167,9 @@ static const struct message bgp_notify_cease_msg[] = { {BGP_NOTIFY_CEASE_OUT_OF_RESOURCE, "/Out of Resource"}, {0}}; -static const struct message bgp_notify_capability_msg[] = { +static const struct message bgp_notify_route_refresh_msg[] = { {BGP_NOTIFY_SUBCODE_UNSPECIFIC, "/Unspecific"}, - {BGP_NOTIFY_CAPABILITY_INVALID_ACTION, "/Invalid Action Value"}, - {BGP_NOTIFY_CAPABILITY_INVALID_LENGTH, "/Invalid Capability 
Length"}, - {BGP_NOTIFY_CAPABILITY_MALFORMED_CODE, "/Malformed Capability Value"}, + {BGP_NOTIFY_ROUTE_REFRESH_INVALID_MSG_LEN, "/Invalid Message Length"}, {0}}; static const struct message bgp_notify_fsm_msg[] = { @@ -411,6 +410,11 @@ bool bgp_dump_attr(struct attr *attr, char *buf, size_t size) ", community %s", community_str(attr->community, false)); + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES))) + snprintf(buf + strlen(buf), size - strlen(buf), + ", large-community %s", + lcommunity_str(attr->lcommunity, false)); + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) snprintf(buf + strlen(buf), size - strlen(buf), ", extcommunity %s", ecommunity_str(attr->ecommunity)); @@ -487,8 +491,8 @@ const char *bgp_notify_subcode_str(char code, char subcode) case BGP_NOTIFY_CEASE: return lookup_msg(bgp_notify_cease_msg, subcode, "Unrecognized Error Subcode"); - case BGP_NOTIFY_CAPABILITY_ERR: - return lookup_msg(bgp_notify_capability_msg, subcode, + case BGP_NOTIFY_ROUTE_REFRESH_ERR: + return lookup_msg(bgp_notify_route_refresh_msg, subcode, "Unrecognized Error Subcode"); } return ""; diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index 43bfb3e2bc..7f6f61e141 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -95,7 +95,7 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom, bool unique, bool overwrite, uint8_t ecom_size) { - int c, ins_idx; + uint32_t c, ins_idx; const struct ecommunity_val *eval4 = (struct ecommunity_val *)eval; const struct ecommunity_val_ipv6 *eval6 = (struct ecommunity_val_ipv6 *)eval; @@ -113,7 +113,7 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom, /* check also if the extended community itself exists. */ c = 0; - ins_idx = -1; + ins_idx = UINT32_MAX; for (uint8_t *p = ecom->val; c < ecom->size; p += ecom_size, c++) { if (unique) { @@ -145,12 +145,12 @@ static bool ecommunity_add_val_internal(struct ecommunity *ecom, if (ret > 0) { if (!unique) break; - if (ins_idx == -1) + if (ins_idx == UINT32_MAX) ins_idx = c; } } - if (ins_idx == -1) + if (ins_idx == UINT32_MAX) ins_idx = c; /* Add the value to the structure with numerical sorting. */ @@ -193,7 +193,7 @@ static struct ecommunity * ecommunity_uniq_sort_internal(struct ecommunity *ecom, unsigned short ecom_size) { - int i; + uint32_t i; struct ecommunity *new; const void *eval; @@ -895,11 +895,10 @@ static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt) */ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) { - int i; + uint32_t i; uint8_t *pnt; uint8_t type = 0; uint8_t sub_type = 0; -#define ECOMMUNITY_STRLEN 64 int str_size; char *str_buf; @@ -1176,8 +1175,8 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) bool ecommunity_match(const struct ecommunity *ecom1, const struct ecommunity *ecom2) { - int i = 0; - int j = 0; + uint32_t i = 0; + uint32_t j = 0; if (ecom1 == NULL && ecom2 == NULL) return true; @@ -1209,7 +1208,7 @@ extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *ecom, uint8_t type, uint8_t subtype) { uint8_t *p; - int c; + uint32_t c; /* If the value already exists in the structure return 0. */ c = 0; @@ -1230,7 +1229,7 @@ bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, uint8_t subtype) { uint8_t *p, *q, *new; - int c, found = 0; + uint32_t c, found = 0; /* When this is fist value, just add it. 
*/ if (ecom == NULL || ecom->val == NULL) return false; @@ -1278,7 +1277,7 @@ bool ecommunity_strip(struct ecommunity *ecom, uint8_t type, bool ecommunity_del_val(struct ecommunity *ecom, struct ecommunity_val *eval) { uint8_t *p; - int c, found = 0; + uint32_t c, found = 0; /* Make sure specified value exists. */ if (ecom == NULL || ecom->val == NULL) @@ -1512,7 +1511,7 @@ void bgp_remove_ecomm_from_aggregate_hash(struct bgp_aggregate *aggregate, const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, uint32_t *bw) { const uint8_t *eval; - int i; + uint32_t i; if (bw) *bw = 0; diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h index 6318e7edb1..03b23fcd37 100644 --- a/bgpd/bgp_ecommunity.h +++ b/bgpd/bgp_ecommunity.h @@ -103,6 +103,9 @@ /* Extended Communities type flag. */ #define ECOMMUNITY_FLAG_NON_TRANSITIVE 0x40 +/* Extended Community readable string length */ +#define ECOMMUNITY_STRLEN 64 + /* Extended Communities attribute. */ struct ecommunity { /* Reference counter. */ @@ -114,7 +117,7 @@ struct ecommunity { uint8_t unit_size; /* Size of Extended Communities attribute. */ - int size; + uint32_t size; /* Extended Communities value. */ uint8_t *val; diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c index 96f4b0aa78..c976632678 100644 --- a/bgpd/bgp_evpn.c +++ b/bgpd/bgp_evpn.c @@ -870,7 +870,7 @@ static void add_mac_mobility_to_attr(uint32_t seq_num, struct attr *attr) struct ecommunity ecom_tmp; struct ecommunity_val eval; uint8_t *ecom_val_ptr; - int i; + uint32_t i; uint8_t *pnt; int type = 0; int sub_type = 0; @@ -2710,7 +2710,7 @@ static int is_route_matching_for_vrf(struct bgp *bgp_vrf, { struct attr *attr = pi->attr; struct ecommunity *ecom; - int i; + uint32_t i; assert(attr); /* Route should have valid RT to be even considered. */ @@ -2777,7 +2777,7 @@ static int is_route_matching_for_vni(struct bgp *bgp, struct bgpevpn *vpn, { struct attr *attr = pi->attr; struct ecommunity *ecom; - int i; + uint32_t i; assert(attr); /* Route should have valid RT to be even considered. 
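/*
 * The ecommunity hunks above switch loop counters and ecom->size to
 * uint32_t, so the old "-1 means no insertion point yet" sentinel becomes
 * UINT32_MAX.  A small standalone sketch of the same search over a sorted
 * array of fixed-size records (find_insert_idx is an illustrative name,
 * not an FRR function):
 */
#include <stdint.h>
#include <string.h>

static uint32_t find_insert_idx(const uint8_t *vals, uint32_t nrecords,
				const uint8_t rec[8])
{
	uint32_t i, ins_idx = UINT32_MAX;

	for (i = 0; i < nrecords; i++) {
		/* first existing record that sorts >= the new one */
		if (memcmp(vals + i * 8, rec, 8) >= 0) {
			ins_idx = i;
			break;
		}
	}

	if (ins_idx == UINT32_MAX)
		ins_idx = nrecords;	/* nothing found: append at the end */

	return ins_idx;
}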
*/ @@ -3260,7 +3260,7 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi, struct prefix_evpn *evp = (struct prefix_evpn *)p; struct attr *attr = pi->attr; struct ecommunity *ecom; - int i; + uint32_t i; struct prefix_evpn ad_evp; assert(attr); @@ -4906,7 +4906,7 @@ int bgp_nlri_parse_evpn(struct peer *peer, struct attr *attr, */ void bgp_evpn_map_vrf_to_its_rts(struct bgp *bgp_vrf) { - int i = 0; + uint32_t i = 0; struct ecommunity_val *eval = NULL; struct listnode *node = NULL, *nnode = NULL; struct ecommunity *ecom = NULL; @@ -4926,7 +4926,7 @@ void bgp_evpn_map_vrf_to_its_rts(struct bgp *bgp_vrf) */ void bgp_evpn_unmap_vrf_from_its_rts(struct bgp *bgp_vrf) { - int i; + uint32_t i; struct ecommunity_val *eval; struct listnode *node, *nnode; struct ecommunity *ecom; @@ -4963,7 +4963,7 @@ void bgp_evpn_unmap_vrf_from_its_rts(struct bgp *bgp_vrf) */ void bgp_evpn_map_vni_to_its_rts(struct bgp *bgp, struct bgpevpn *vpn) { - int i; + uint32_t i; struct ecommunity_val *eval; struct listnode *node, *nnode; struct ecommunity *ecom; @@ -4983,7 +4983,7 @@ void bgp_evpn_map_vni_to_its_rts(struct bgp *bgp, struct bgpevpn *vpn) */ void bgp_evpn_unmap_vni_from_its_rts(struct bgp *bgp, struct bgpevpn *vpn) { - int i; + uint32_t i; struct ecommunity_val *eval; struct listnode *node, *nnode; struct ecommunity *ecom; @@ -5370,11 +5370,11 @@ int bgp_evpn_local_l3vni_add(vni_t l3vni, vrf_id_t vrf_id, switch (ret) { case BGP_ERR_AS_MISMATCH: flog_err(EC_BGP_EVPN_AS_MISMATCH, - "BGP is already running; AS is %u\n", as); + "BGP is already running; AS is %u", as); return -1; case BGP_ERR_INSTANCE_MISMATCH: flog_err(EC_BGP_EVPN_INSTANCE_MISMATCH, - "BGP instance name and AS number mismatch\n"); + "BGP instance name and AS number mismatch"); return -1; } diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c index b3e66d607f..2dec0863c0 100644 --- a/bgpd/bgp_evpn_mh.c +++ b/bgpd/bgp_evpn_mh.c @@ -69,8 +69,9 @@ static uint32_t bgp_evpn_es_get_active_vtep_cnt(struct bgp_evpn_es *es); static void bgp_evpn_l3nhg_update_on_vtep_chg(struct bgp_evpn_es *es); static struct bgp_evpn_es *bgp_evpn_es_new(struct bgp *bgp, const esi_t *esi); static void bgp_evpn_es_free(struct bgp_evpn_es *es, const char *caller); -static void bgp_evpn_es_path_all_update(struct bgp_evpn_es_vtep *es_vtep, - bool active); +static void +bgp_evpn_es_path_update_on_vtep_chg(struct bgp_evpn_es_vtep *es_vtep, + bool active); esi_t zero_esi_buf, *zero_esi = &zero_esi_buf; @@ -990,6 +991,10 @@ static void bgp_evpn_local_type1_evi_route_add(struct bgp *bgp, struct prefix_evpn p; struct bgp_evpn_es_evi *es_evi; + /* EAD-per-EVI routes have been suppressed */ + if (!bgp_mh_info->ead_evi_tx) + return; + if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI)) /* EAD-EVI route add for this ES is already done */ return; @@ -1243,7 +1248,7 @@ static void bgp_evpn_es_vtep_re_eval_active(struct bgp *bgp, * removed. 
*/ bgp_evpn_l3nhg_update_on_vtep_chg(es_vtep->es); - bgp_evpn_es_path_all_update(es_vtep, new_active); + bgp_evpn_es_path_update_on_vtep_chg(es_vtep, new_active); /* queue up the es for background consistency checks */ bgp_evpn_es_cons_checks_pend_add(es_vtep->es); @@ -1441,18 +1446,19 @@ void bgp_evpn_path_es_link(struct bgp_path_info *pi, vni_t vni, esi_t *esi) listnode_add(es->macip_path_list, &es_info->es_listnode); } -static void bgp_evpn_es_path_all_update(struct bgp_evpn_es_vtep *es_vtep, - bool active) +static void +bgp_evpn_es_path_update_on_vtep_chg(struct bgp_evpn_es_vtep *es_vtep, + bool active) { struct listnode *node; struct bgp_path_es_info *es_info; struct bgp_path_info *pi; struct bgp_path_info *parent_pi; struct bgp_evpn_es *es = es_vtep->es; - char prefix_buf[PREFIX_STRLEN]; if (BGP_DEBUG(evpn_mh, EVPN_MH_RT)) - zlog_debug("update all paths linked to es %s", es->esi_str); + zlog_debug("update paths linked to es %s on vtep chg", + es->esi_str); for (ALL_LIST_ELEMENTS_RO(es->macip_path_list, node, es_info)) { pi = es_info->pi; @@ -1470,10 +1476,9 @@ static void bgp_evpn_es_path_all_update(struct bgp_evpn_es_vtep *es_vtep, continue; if (BGP_DEBUG(evpn_mh, EVPN_MH_RT)) - zlog_debug("update path %s linked to es %s", - prefix2str(&parent_pi->net->p, prefix_buf, - sizeof(prefix_buf)), - es->esi_str); + zlog_debug( + "update path %pFX linked to es %s on vtep chg", + &parent_pi->net->p, es->esi_str); bgp_evpn_import_route_in_vrfs(parent_pi, active ? 1 : 0); } } @@ -2717,14 +2722,20 @@ static void bgp_evpn_es_evi_vtep_re_eval_active(struct bgp *bgp, { bool old_active; bool new_active; + uint32_t ead_activity_flags; old_active = !!CHECK_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE); - /* Both EAD-per-ES and EAD-per-EVI routes must be rxed from a PE - * before it can be activated. - */ - if ((evi_vtep->flags & BGP_EVPN_EVI_VTEP_EAD) == - BGP_EVPN_EVI_VTEP_EAD) + if (bgp_mh_info->ead_evi_rx) + /* Both EAD-per-ES and EAD-per-EVI routes must be rxed from a PE + * before it can be activated. + */ + ead_activity_flags = BGP_EVPN_EVI_VTEP_EAD; + else + /* EAD-per-ES is sufficent to activate the PE */ + ead_activity_flags = BGP_EVPN_EVI_VTEP_EAD_PER_ES; + + if ((evi_vtep->flags & ead_activity_flags) == ead_activity_flags) SET_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE); else UNSET_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE); @@ -3075,9 +3086,9 @@ int bgp_evpn_local_es_evi_add(struct bgp *bgp, esi_t *esi, vni_t vni) bgp_evpn_es_evi_local_info_set(es_evi); /* generate an EAD-EVI for this new VNI */ - build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG, - &es->esi, es->originator_ip); if (CHECK_FLAG(es->flags, BGP_EVPNES_ADV_EVI)) { + build_evpn_type1_prefix(&p, BGP_EVPN_AD_EVI_ETH_TAG, &es->esi, + es->originator_ip); if (bgp_evpn_type1_route_update(bgp, es, vpn, &p)) flog_err(EC_BGP_EVPN_ROUTE_CREATE, "%u: EAD-EVI route creation failure for ESI %s VNI %u", @@ -3170,13 +3181,30 @@ int bgp_evpn_remote_es_evi_del(struct bgp *bgp, struct bgpevpn *vpn, vpn->vni, &p->prefix.ead_addr.ip.ipaddr_v4); es = bgp_evpn_es_find(&p->prefix.ead_addr.esi); - if (!es) - /* XXX - error logs */ + if (!es) { + if (BGP_DEBUG(evpn_mh, EVPN_MH_ES)) + zlog_debug("del remote %s es %s evi %u vtep %pI4, NO es", + p->prefix.ead_addr.eth_tag ? 
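/*
 * bgp_evpn_es_evi_vtep_re_eval_active() above now derives the required
 * flag set from the new ead_evi_rx knob: with the knob on, both the
 * EAD-per-ES and EAD-per-EVI routes must have been received before the
 * remote VTEP is considered active; with it off, EAD-per-ES alone is
 * enough.  A standalone sketch of that check (the VTEP_FLAG_* names are
 * illustrative stand-ins for the BGP_EVPN_EVI_VTEP_* bits):
 */
#include <stdbool.h>
#include <stdint.h>

#define VTEP_FLAG_EAD_PER_ES  (1u << 0)
#define VTEP_FLAG_EAD_PER_EVI (1u << 1)

static bool vtep_is_active(uint32_t vtep_flags, bool ead_evi_rx)
{
	uint32_t required = ead_evi_rx
				    ? (VTEP_FLAG_EAD_PER_ES
				       | VTEP_FLAG_EAD_PER_EVI)
				    : VTEP_FLAG_EAD_PER_ES;

	/* active only when every required bit is present */
	return (vtep_flags & required) == required;
}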
"ead-es" + : "ead-evi", + esi_to_str(&p->prefix.ead_addr.esi, buf, + sizeof(buf)), + vpn->vni, + &p->prefix.ead_addr.ip.ipaddr_v4); return 0; + } es_evi = bgp_evpn_es_evi_find(es, vpn); - if (!es_evi) - /* XXX - error logs */ + if (!es_evi) { + if (BGP_DEBUG(evpn_mh, EVPN_MH_ES)) + zlog_debug( + "del remote %s es %s evi %u vtep %pI4, NO es-evi", + p->prefix.ead_addr.eth_tag ? "ead-es" + : "ead-evi", + esi_to_str(&p->prefix.ead_addr.esi, buf, + sizeof(buf)), + vpn->vni, + &p->prefix.ead_addr.ip.ipaddr_v4); return 0; + } ead_es = !!p->prefix.ead_addr.eth_tag; bgp_evpn_es_evi_vtep_del(bgp, es_evi, p->prefix.ead_addr.ip.ipaddr_v4, @@ -3264,7 +3292,7 @@ static void bgp_evpn_es_evi_json_vtep_fill(json_object *json_vteps, if (evi_vtep->flags & BGP_EVPN_EVI_VTEP_EAD_PER_ES) json_array_string_add(json_flags, "ead-per-es"); if (evi_vtep->flags & BGP_EVPN_EVI_VTEP_EAD_PER_EVI) - json_array_string_add(json_flags, "ed-per-evi"); + json_array_string_add(json_flags, "ead-per-evi"); json_object_object_add(json_vtep_entry, "flags", json_flags); } @@ -3700,6 +3728,9 @@ void bgp_evpn_mh_init(void) bgp_mh_info->pend_es_list = list_new(); listset_app_node_mem(bgp_mh_info->pend_es_list); + bgp_mh_info->ead_evi_rx = BGP_EVPN_MH_EAD_EVI_RX_DEF; + bgp_mh_info->ead_evi_tx = BGP_EVPN_MH_EAD_EVI_TX_DEF; + /* config knobs - XXX add cli to control it */ bgp_mh_info->ead_evi_adv_for_down_links = true; bgp_mh_info->consistency_checking = true; diff --git a/bgpd/bgp_evpn_mh.h b/bgpd/bgp_evpn_mh.h index d2f6a7b054..6199113e87 100644 --- a/bgpd/bgp_evpn_mh.h +++ b/bgpd/bgp_evpn_mh.h @@ -100,7 +100,7 @@ struct bgp_evpn_es { /* List of ES-VRFs associated with this ES */ struct list *es_vrf_list; - /* List of MAC-IP global routes using this ES as destination - + /* List of MAC-IP VNI paths using this ES as destination - * element is bgp_path_info_extra->es_info */ struct list *macip_path_list; @@ -261,6 +261,15 @@ struct bgp_evpn_mh_info { /* Use L3 NHGs for host routes in symmetric IRB */ bool install_l3nhg; bool host_routes_use_l3nhg; + /* Some vendors are not generating the EAD-per-EVI route. This knob + * can be turned off to activate a remote ES-PE when the EAD-per-ES + * route is rxed i.e. not wait on the EAD-per-EVI route + */ + bool ead_evi_rx; +#define BGP_EVPN_MH_EAD_EVI_RX_DEF true + /* Skip EAD-EVI advertisements by turning off this knob */ + bool ead_evi_tx; +#define BGP_EVPN_MH_EAD_EVI_TX_DEF true }; /****************************************************************************/ diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h index 6a7d124938..77b3746344 100644 --- a/bgpd/bgp_evpn_private.h +++ b/bgpd/bgp_evpn_private.h @@ -630,13 +630,5 @@ extern struct bgp_dest * bgp_global_evpn_node_lookup(struct bgp_table *table, afi_t afi, safi_t safi, const struct prefix_evpn *evp, struct prefix_rd *prd); -extern struct bgp_node *bgp_global_evpn_node_get(struct bgp_table *table, - afi_t afi, safi_t safi, - const struct prefix_evpn *evp, - struct prefix_rd *prd); -extern struct bgp_node * -bgp_global_evpn_node_lookup(struct bgp_table *table, afi_t afi, safi_t safi, - const struct prefix_evpn *evp, - struct prefix_rd *prd); extern void bgp_evpn_import_route_in_vrfs(struct bgp_path_info *pi, int import); #endif /* _BGP_EVPN_PRIVATE_H */ diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index f957103df7..5b0b3bb6e5 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -2621,10 +2621,14 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp, /* RD header and legend - once overall. 
*/ if (rd_header && !json) { vty_out(vty, + "EVPN type-1 prefix: [1]:[ESI]:[EthTag]:[IPlen]:[VTEP-IP]\n"); + vty_out(vty, "EVPN type-2 prefix: [2]:[EthTag]:[MAClen]:[MAC]\n"); vty_out(vty, "EVPN type-3 prefix: [3]:[EthTag]:[IPlen]:[OrigIP]\n"); vty_out(vty, + "EVPN type-4 prefix: [4]:[ESI]:[IPlen]:[OrigIP]\n"); + vty_out(vty, "EVPN type-5 prefix: [5]:[EthTag]:[IPlen]:[IP]\n\n"); rd_header = 0; } @@ -3757,6 +3761,26 @@ DEFPY (bgp_evpn_use_es_l3nhg, return CMD_SUCCESS; } +DEFPY (bgp_evpn_ead_evi_rx_disable, + bgp_evpn_ead_evi_rx_disable_cmd, + "[no$no] disable-ead-evi-rx", + NO_STR + "Activate PE on EAD-ES even if EAD-EVI is not received\n") +{ + bgp_mh_info->ead_evi_rx = no? true :false; + return CMD_SUCCESS; +} + +DEFPY (bgp_evpn_ead_evi_tx_disable, + bgp_evpn_ead_evi_tx_disable_cmd, + "[no$no] disable-ead-evi-tx", + NO_STR + "Don't advertise EAD-EVI for local ESs\n") +{ + bgp_mh_info->ead_evi_tx = no? true :false; + return CMD_SUCCESS; +} + DEFPY (bgp_evpn_advertise_pip_ip_mac, bgp_evpn_advertise_pip_ip_mac_cmd, "[no$no] advertise-pip [ip <A.B.C.D> [mac <X:X:X:X:X:X|X:X:X:X:X:X/M>]]", @@ -4076,7 +4100,7 @@ DEFPY(show_bgp_l2vpn_evpn_es_vrf, show_bgp_l2vpn_evpn_es_vrf_cmd, */ DEFUN(show_bgp_l2vpn_evpn_summary, show_bgp_l2vpn_evpn_summary_cmd, - "show bgp [vrf VRFNAME] l2vpn evpn summary [established|failed] [json]", + "show bgp [vrf VRFNAME] l2vpn evpn summary [established|failed] [wide] [json]", SHOW_STR BGP_STR "bgp vrf\n" @@ -4086,23 +4110,30 @@ DEFUN(show_bgp_l2vpn_evpn_summary, "Summary of BGP neighbor status\n" "Show only sessions in Established state\n" "Show only sessions not in Established state\n" + "Increase table width for longer output\n" JSON_STR) { int idx_vrf = 0; - bool uj = use_json(argc, argv); + int idx = 0; char *vrf = NULL; - bool show_failed = false; - bool show_established = false; + uint8_t show_flags = 0; if (argv_find(argv, argc, "vrf", &idx_vrf)) vrf = argv[++idx_vrf]->arg; - if (argv_find(argv, argc, "failed", &idx_vrf)) - show_failed = true; - if (argv_find(argv, argc, "established", &idx_vrf)) - show_established = true; - return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, show_failed, - show_established, uj); + if (argv_find(argv, argc, "failed", &idx)) + SET_FLAG(show_flags, BGP_SHOW_OPT_FAILED); + + if (argv_find(argv, argc, "established", &idx)) + SET_FLAG(show_flags, BGP_SHOW_OPT_ESTABLISHED); + + if (argv_find(argv, argc, "wide", &idx)) + SET_FLAG(show_flags, BGP_SHOW_OPT_WIDE); + + if (use_json(argc, argv)) + SET_FLAG(show_flags, BGP_SHOW_OPT_JSON); + + return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, show_flags); } int bgp_evpn_cli_parse_type(int *type, struct cmd_token **argv, int argc) @@ -5751,6 +5782,20 @@ void bgp_config_write_evpn_info(struct vty *vty, struct bgp *bgp, afi_t afi, vty_out(vty, " no use-es-l3nhg\n"); } + if (bgp_mh_info->ead_evi_rx != BGP_EVPN_MH_EAD_EVI_RX_DEF) { + if (bgp_mh_info->ead_evi_rx) + vty_out(vty, " no disable-ead-evi-rx\n"); + else + vty_out(vty, " disable-ead-evi-rx\n"); + } + + if (bgp_mh_info->ead_evi_tx != BGP_EVPN_MH_EAD_EVI_TX_DEF) { + if (bgp_mh_info->ead_evi_tx) + vty_out(vty, " no disable-ead-evi-tx\n"); + else + vty_out(vty, " disable-ead-evi-tx\n"); + } + if (!bgp->evpn_info->dup_addr_detect) vty_out(vty, " no dup-addr-detection\n"); @@ -5896,6 +5941,8 @@ void bgp_ethernetvpn_init(void) install_element(BGP_EVPN_NODE, &bgp_evpn_flood_control_cmd); install_element(BGP_EVPN_NODE, &bgp_evpn_advertise_pip_ip_mac_cmd); install_element(BGP_EVPN_NODE, &bgp_evpn_use_es_l3nhg_cmd); + 
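/*
 * The summary command above folds its independent options (failed,
 * established, wide, json) into a single uint8_t show_flags word built
 * with SET_FLAG instead of passing separate booleans around.  A compact
 * standalone sketch of the same pattern (the SHOW_OPT_* names are
 * illustrative stand-ins for the BGP_SHOW_OPT_* bits):
 */
#include <stdbool.h>
#include <stdint.h>

#define SHOW_OPT_FAILED      (1u << 0)
#define SHOW_OPT_ESTABLISHED (1u << 1)
#define SHOW_OPT_WIDE        (1u << 2)
#define SHOW_OPT_JSON        (1u << 3)

static uint8_t build_show_flags(bool failed, bool established, bool wide,
				bool json)
{
	uint8_t flags = 0;

	if (failed)
		flags |= SHOW_OPT_FAILED;
	if (established)
		flags |= SHOW_OPT_ESTABLISHED;
	if (wide)
		flags |= SHOW_OPT_WIDE;
	if (json)
		flags |= SHOW_OPT_JSON;

	return flags;	/* consumers test bits with (flags & SHOW_OPT_x) */
}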
install_element(BGP_EVPN_NODE, &bgp_evpn_ead_evi_rx_disable_cmd); + install_element(BGP_EVPN_NODE, &bgp_evpn_ead_evi_tx_disable_cmd); /* test commands */ install_element(BGP_EVPN_NODE, &test_es_add_cmd); diff --git a/bgpd/bgp_flowspec_util.c b/bgpd/bgp_flowspec_util.c index 15b891f25a..b244c87258 100644 --- a/bgpd/bgp_flowspec_util.c +++ b/bgpd/bgp_flowspec_util.c @@ -637,7 +637,7 @@ int bgp_flowspec_match_rules_fill(uint8_t *nlri_content, int len, offset += ret; break; default: - flog_err(EC_LIB_DEVELOPMENT, "%s: unknown type %d\n", + flog_err(EC_LIB_DEVELOPMENT, "%s: unknown type %d", __func__, type); } } diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index ce665feb4e..b69e2d71b6 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -55,10 +55,11 @@ #include "bgpd/bgp_keepalives.h" #include "bgpd/bgp_io.h" #include "bgpd/bgp_zebra.h" +#include "bgpd/bgp_vty.h" DEFINE_HOOK(peer_backward_transition, (struct peer * peer), (peer)) DEFINE_HOOK(peer_status_changed, (struct peer * peer), (peer)) -extern const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json); + /* Definition of display strings corresponding to FSM events. This should be * kept consistent with the events defined in bgpd.h */ @@ -109,9 +110,9 @@ static int bgp_peer_reg_with_nht(struct peer *peer) && !CHECK_FLAG(peer->bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK)) connected = 1; - return bgp_find_or_add_nexthop( - peer->bgp, peer->bgp, family2afi(peer->su.sa.sa_family), - NULL, peer, connected); + return bgp_find_or_add_nexthop(peer->bgp, peer->bgp, + family2afi(peer->su.sa.sa_family), + SAFI_UNICAST, NULL, peer, connected); } static void peer_xfer_stats(struct peer *peer_dst, struct peer *peer_src) @@ -468,6 +469,7 @@ void bgp_timer_set(struct peer *peer) BGP_TIMER_OFF(peer->t_gr_restart); BGP_TIMER_OFF(peer->t_gr_stale); BGP_TIMER_OFF(peer->t_pmax_restart); + BGP_TIMER_OFF(peer->t_refresh_stalepath); /* fallthru */ case Clearing: BGP_TIMER_OFF(peer->t_start); @@ -1282,6 +1284,16 @@ int bgp_stop(struct peer *peer) peer->nsf[afi][safi] = 0; } + /* Stop route-refresh stalepath timer */ + if (peer->t_refresh_stalepath) { + BGP_TIMER_OFF(peer->t_refresh_stalepath); + + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s: route-refresh restart stalepath timer stopped", + peer->host); + } + /* If peer reset before receiving EOR, decrement EOR count and * cancel the selection deferral timer if there are no * pending EOR messages to be received @@ -1408,19 +1420,6 @@ int bgp_stop(struct peer *peer) peer->update_time = 0; -/* Until we are sure that there is no problem about prefix count - this should be commented out.*/ -#if 0 - /* Reset prefix count */ - peer->pcount[AFI_IP][SAFI_UNICAST] = 0; - peer->pcount[AFI_IP][SAFI_MULTICAST] = 0; - peer->pcount[AFI_IP][SAFI_LABELED_UNICAST] = 0; - peer->pcount[AFI_IP][SAFI_MPLS_VPN] = 0; - peer->pcount[AFI_IP6][SAFI_UNICAST] = 0; - peer->pcount[AFI_IP6][SAFI_MULTICAST] = 0; - peer->pcount[AFI_IP6][SAFI_LABELED_UNICAST] = 0; -#endif /* 0 */ - if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE) && !(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) { peer_delete(peer); @@ -2065,14 +2064,16 @@ static int bgp_establish(struct peer *peer) PEER_CAP_ORF_PREFIX_SM_ADV)) { if (CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_RM_RCV)) - bgp_route_refresh_send(peer, afi, safi, - ORF_TYPE_PREFIX, - REFRESH_IMMEDIATE, 0); + bgp_route_refresh_send( + peer, afi, safi, ORF_TYPE_PREFIX, + REFRESH_IMMEDIATE, 0, + BGP_ROUTE_REFRESH_NORMAL); else if (CHECK_FLAG(peer->af_cap[afi][safi], 
PEER_CAP_ORF_PREFIX_RM_OLD_RCV)) - bgp_route_refresh_send(peer, afi, safi, - ORF_TYPE_PREFIX_OLD, - REFRESH_IMMEDIATE, 0); + bgp_route_refresh_send( + peer, afi, safi, ORF_TYPE_PREFIX_OLD, + REFRESH_IMMEDIATE, 0, + BGP_ROUTE_REFRESH_NORMAL); } } diff --git a/bgpd/bgp_label.c b/bgpd/bgp_label.c index 4f440cd1f8..5a31bd0243 100644 --- a/bgpd/bgp_label.c +++ b/bgpd/bgp_label.c @@ -120,6 +120,65 @@ mpls_label_t bgp_adv_label(struct bgp_dest *dest, struct bgp_path_info *pi, return dest->local_label; } +static void bgp_send_fec_register_label_msg(struct bgp_dest *dest, bool reg, + uint32_t label_index) +{ + struct stream *s; + int command; + const struct prefix *p; + uint16_t flags = 0; + size_t flags_pos = 0; + mpls_label_t *local_label = &(dest->local_label); + bool have_label_to_reg = + bgp_is_valid_label(local_label) + && label_pton(local_label) != MPLS_LABEL_IMPLICIT_NULL; + + p = bgp_dest_get_prefix(dest); + + /* Check socket. */ + if (!zclient || zclient->sock < 0) + return; + + if (BGP_DEBUG(labelpool, LABELPOOL)) + zlog_debug("%s: FEC %sregister %pRN label_index=%u label=%u", + __func__, reg ? "" : "un", bgp_dest_to_rnode(dest), + label_index, label_pton(local_label)); + /* If the route node has a local_label assigned or the + * path node has an MPLS SR label index allowing zebra to + * derive the label, proceed with registration. */ + s = zclient->obuf; + stream_reset(s); + command = (reg) ? ZEBRA_FEC_REGISTER : ZEBRA_FEC_UNREGISTER; + zclient_create_header(s, command, VRF_DEFAULT); + flags_pos = stream_get_endp(s); /* save position of 'flags' */ + stream_putw(s, flags); /* initial flags */ + stream_putw(s, PREFIX_FAMILY(p)); + stream_put_prefix(s, p); + if (reg) { + /* label index takes precedence over auto-assigned label. */ + if (label_index != 0) { + flags |= ZEBRA_FEC_REGISTER_LABEL_INDEX; + stream_putl(s, label_index); + } else if (have_label_to_reg) { + flags |= ZEBRA_FEC_REGISTER_LABEL; + stream_putl(s, label_pton(local_label)); + } + SET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL); + } else + UNSET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL); + + /* Set length and flags */ + stream_putw_at(s, 0, stream_get_endp(s)); + + /* + * We only need to write new flags if this is a register + */ + if (reg) + stream_putw_at(s, flags_pos, flags); + + zclient_send_message(zclient); +} + /** * This is passed as the callback function to bgp_labelpool.c:bgp_lp_get() * by bgp_reg_dereg_for_label() when a label needs to be obtained from @@ -130,20 +189,21 @@ mpls_label_t bgp_adv_label(struct bgp_dest *dest, struct bgp_path_info *pi, int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, bool allocated) { - struct bgp_path_info *pi; struct bgp_dest *dest; - pi = labelid; - /* Is this path still valid? */ - if (!bgp_path_info_unlock(pi)) { - if (BGP_DEBUG(labelpool, LABELPOOL)) - zlog_debug( - "%s: bgp_path_info is no longer valid, ignoring", - __func__); + dest = labelid; + + /* + * if the route had been removed or the request has gone then reject + * the allocated label. 
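/*
 * bgp_send_fec_register_label_msg() above uses the usual stream trick:
 * the 16-bit flags word is written as a placeholder, its offset saved,
 * and the final value patched in with stream_putw_at() once the body is
 * encoded; the total length is back-filled the same way at offset 0.
 * A standalone sketch of that reserve-then-backfill pattern over a plain
 * byte buffer; the 0x1 "label present" bit and the function names are
 * illustrative only, and the caller must supply a large-enough buffer.
 */
#include <stddef.h>
#include <stdint.h>

static size_t put_u16(uint8_t *buf, size_t at, uint16_t val)
{
	buf[at] = (uint8_t)(val >> 8);
	buf[at + 1] = (uint8_t)val;
	return at + 2;
}

static size_t encode_register(uint8_t *buf, uint32_t label, int with_label)
{
	size_t endp = 0, flags_pos;
	uint16_t flags = 0;

	endp = put_u16(buf, endp, 0);		/* length placeholder */
	flags_pos = endp;
	endp = put_u16(buf, endp, flags);	/* flags placeholder */

	if (with_label) {
		flags |= 0x1;			/* illustrative flag bit */
		buf[endp++] = (uint8_t)(label >> 24);
		buf[endp++] = (uint8_t)(label >> 16);
		buf[endp++] = (uint8_t)(label >> 8);
		buf[endp++] = (uint8_t)label;
	}

	put_u16(buf, 0, (uint16_t)endp);	/* backfill total length */
	put_u16(buf, flags_pos, flags);		/* backfill final flags */
	return endp;
}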
The requesting code will have done what is + * required to allocate the correct label + */ + if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) { + bgp_dest_unlock_node(dest); return -1; } - dest = pi->net; + bgp_dest_unlock_node(dest); if (BGP_DEBUG(labelpool, LABELPOOL)) zlog_debug("%s: FEC %pRN label=%u, allocated=%d", __func__, @@ -151,47 +211,15 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, if (!allocated) { /* - * previously-allocated label is now invalid + * previously-allocated label is now invalid, set to implicit + * null until new label arrives */ - if (pi->attr->label_index == MPLS_INVALID_LABEL_INDEX - && pi->attr->label != MPLS_LABEL_NONE - && CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)) { - bgp_unregister_for_label(dest); + if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)) { + UNSET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED); label_ntop(MPLS_LABEL_IMPLICIT_NULL, 1, &dest->local_label); bgp_set_valid_label(&dest->local_label); } - return 0; - } - - /* - * label index is assigned, this should be handled by SR-related code, - * so retry FEC registration and then reject label allocation for - * it to be released to label pool - */ - if (pi->attr->label_index != MPLS_INVALID_LABEL_INDEX) { - flog_err( - EC_BGP_LABEL, - "%s: FEC %pRN Rejecting allocated label %u as Label Index is %u", - __func__, bgp_dest_to_rnode(dest), new_label, - pi->attr->label_index); - - bgp_register_for_label(pi->net, pi); - - return -1; - } - - if (pi->attr->label != MPLS_INVALID_LABEL) { - if (new_label == pi->attr->label) { - /* already have same label, accept but do nothing */ - return 0; - } - /* Shouldn't happen: different label allocation */ - flog_err(EC_BGP_LABEL, - "%s: %pRN had label %u but got new assignment %u", - __func__, bgp_dest_to_rnode(dest), pi->attr->label, - new_label); - /* continue means use new one */ } label_ntop(new_label, 1, &dest->local_label); @@ -200,7 +228,7 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid, /* * Get back to registering the FEC */ - bgp_register_for_label(pi->net, pi); + bgp_send_fec_register_label_msg(dest, true, 0); return 0; } @@ -209,20 +237,12 @@ void bgp_reg_dereg_for_label(struct bgp_dest *dest, struct bgp_path_info *pi, bool reg) { bool with_label_index = false; - struct stream *s; const struct prefix *p; - mpls_label_t *local_label; - int command; - uint16_t flags = 0; - size_t flags_pos = 0; + bool have_label_to_reg = + bgp_is_valid_label(&dest->local_label) + && label_pton(&dest->local_label) != MPLS_LABEL_IMPLICIT_NULL; p = bgp_dest_get_prefix(dest); - local_label = &(dest->local_label); - /* this prevents the loop when we're called by - * bgp_reg_for_label_callback() - */ - bool have_label_to_reg = bgp_is_valid_label(local_label) - && label_pton(local_label) != MPLS_LABEL_IMPLICIT_NULL; if (reg) { assert(pi); @@ -234,67 +254,37 @@ void bgp_reg_dereg_for_label(struct bgp_dest *dest, struct bgp_path_info *pi, ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID)) && pi->attr->label_index != BGP_INVALID_LABEL_INDEX) { with_label_index = true; + UNSET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED); } else { /* - * If no label index was provided -- assume any label + * If no label has been registered -- assume any label * from label pool will do. This means that label index * always takes precedence over auto-assigned labels. 
*/ if (!have_label_to_reg) { + SET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED); if (BGP_DEBUG(labelpool, LABELPOOL)) zlog_debug( "%s: Requesting label from LP for %pFX", __func__, p); - - /* bgp_reg_for_label_callback() will call back - * __func__ when it gets a label from the pool. - * This means we'll never register FECs without - * valid labels. + /* bgp_reg_for_label_callback() will deal with + * fec registration when it gets a label from + * the pool. This means we'll never register + * FECs withoutvalid labels. */ - bgp_lp_get(LP_TYPE_BGP_LU, pi, - bgp_reg_for_label_callback); + bgp_lp_get(LP_TYPE_BGP_LU, dest, + bgp_reg_for_label_callback); return; } } + } else { + UNSET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED); + bgp_lp_release(LP_TYPE_BGP_LU, dest, + label_pton(&dest->local_label)); } - /* Check socket. */ - if (!zclient || zclient->sock < 0) - return; - - /* If the route node has a local_label assigned or the - * path node has an MPLS SR label index allowing zebra to - * derive the label, proceed with registration. */ - s = zclient->obuf; - stream_reset(s); - command = (reg) ? ZEBRA_FEC_REGISTER : ZEBRA_FEC_UNREGISTER; - zclient_create_header(s, command, VRF_DEFAULT); - flags_pos = stream_get_endp(s); /* save position of 'flags' */ - stream_putw(s, flags); /* initial flags */ - stream_putw(s, PREFIX_FAMILY(p)); - stream_put_prefix(s, p); - if (reg) { - if (have_label_to_reg) { - flags |= ZEBRA_FEC_REGISTER_LABEL; - stream_putl(s, label_pton(local_label)); - } else if (with_label_index) { - flags |= ZEBRA_FEC_REGISTER_LABEL_INDEX; - stream_putl(s, pi->attr->label_index); - } - SET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL); - } else - UNSET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL); - - /* Set length and flags */ - stream_putw_at(s, 0, stream_get_endp(s)); - - /* - * We only need to write new flags if this is a register - */ - if (reg) - stream_putw_at(s, flags_pos, flags); - - zclient_send_message(zclient); + bgp_send_fec_register_label_msg( + dest, reg, with_label_index ? 
pi->attr->label_index : 0); } static int bgp_nlri_get_labels(struct peer *peer, uint8_t *pnt, uint8_t plen, diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c index e8d8167c35..001340be35 100644 --- a/bgpd/bgp_labelpool.c +++ b/bgpd/bgp_labelpool.c @@ -182,18 +182,18 @@ void bgp_lp_init(struct thread_master *master, struct labelpool *pool) lp->callback_q->spec.max_retries = 0; } -/* check if a label callback was for a BGP LU path, and if so, unlock it */ +/* check if a label callback was for a BGP LU node, and if so, unlock it */ static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb) { if (lcb->type == LP_TYPE_BGP_LU) - bgp_path_info_unlock(lcb->labelid); + bgp_dest_unlock_node(lcb->labelid); } -/* check if a label callback was for a BGP LU path, and if so, lock it */ +/* check if a label callback was for a BGP LU node, and if so, lock it */ static void check_bgp_lu_cb_lock(struct lp_lcb *lcb) { if (lcb->type == LP_TYPE_BGP_LU) - bgp_path_info_lock(lcb->labelid); + bgp_dest_lock_node(lcb->labelid); } void bgp_lp_finish(void) @@ -356,7 +356,7 @@ void bgp_lp_get( q->labelid = lcb->labelid; q->allocated = true; - /* if this is a LU request, lock path info before queueing */ + /* if this is a LU request, lock node before queueing */ check_bgp_lu_cb_lock(lcb); work_queue_add(lp->callback_q, q); @@ -384,7 +384,7 @@ void bgp_lp_get( sizeof(struct lp_fifo)); lf->lcb = *lcb; - /* if this is a LU request, lock path info before queueing */ + /* if this is a LU request, lock node before queueing */ check_bgp_lu_cb_lock(lcb); lp_fifo_add_tail(&lp->requests, lf); @@ -394,7 +394,7 @@ void bgp_lp_get( return; if (zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE, MPLS_LABEL_BASE_ANY) - == ZCLIENT_SEND_FAILURE) + != ZCLIENT_SEND_FAILURE) lp->pending_count += LP_CHUNK_SIZE; } } @@ -461,6 +461,9 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last) zlog_debug("%s: labelid %p: request no longer in effect", __func__, labelid); } + /* if this was a BGP_LU request, unlock node + */ + check_bgp_lu_cb_unlock(lcb); goto finishedrequest; } @@ -472,7 +475,7 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last) __func__, labelid, lcb->label, lcb->label, lcb); } - /* if this was a BGP_LU request, unlock path info node + /* if this was a BGP_LU request, unlock node */ check_bgp_lu_cb_unlock(lcb); @@ -538,6 +541,7 @@ void bgp_lp_event_zebra_up(void) struct lp_lcb *lcb; int lm_init_ok; + lp->reconnect_count++; /* * Get label chunk allocation request dispatched to zebra */ @@ -607,3 +611,371 @@ void bgp_lp_event_zebra_up(void) skiplist_delete_first(lp->inuse); } } + +DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd, + "show bgp labelpool summary [json]", + SHOW_STR BGP_STR + "BGP Labelpool information\n" + "BGP Labelpool summary\n" JSON_STR) +{ + bool uj = use_json(argc, argv); + json_object *json = NULL; + + if (!lp) { + if (uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, "No existing BGP labelpool\n"); + return (CMD_WARNING); + } + + if (uj) { + json = json_object_new_object(); + json_object_int_add(json, "Ledger", skiplist_count(lp->ledger)); + json_object_int_add(json, "InUse", skiplist_count(lp->inuse)); + json_object_int_add(json, "Requests", + lp_fifo_count(&lp->requests)); + json_object_int_add(json, "LabelChunks", listcount(lp->chunks)); + json_object_int_add(json, "Pending", lp->pending_count); + json_object_int_add(json, "Reconnects", lp->reconnect_count); + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + 
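/*
 * The bgp_label.c rework above keys label-pool requests on the bgp_dest
 * itself and guards them with BGP_NODE_LABEL_REQUESTED: the flag is set
 * when the pool is asked, cleared on deregister, and the allocation
 * callback rejects labels for nodes whose request has since gone away
 * (the labelpool likewise locks and unlocks the dest around queued
 * callbacks).  A standalone sketch of that request/callback handshake;
 * dest_stub and the NODE_LABEL_* bits are illustrative stand-ins, not
 * FRR symbols.
 */
#include <stdbool.h>
#include <stdint.h>

#define NODE_LABEL_REQUESTED  (1u << 0)
#define NODE_LABEL_REGISTERED (1u << 1)

struct dest_stub {
	uint32_t flags;
	uint32_t local_label;
};

/* Ask the pool: mark the node so a late callback can tell whether the
 * request is still wanted. */
static void request_label(struct dest_stub *dest)
{
	dest->flags |= NODE_LABEL_REQUESTED;
	/* ... hand dest to the allocator as the callback cookie ... */
}

/* Allocation callback: refuse a stale label so the pool reclaims it. */
static int label_allocated(struct dest_stub *dest, uint32_t label)
{
	if (!(dest->flags & NODE_LABEL_REQUESTED))
		return -1;

	dest->local_label = label;
	dest->flags |= NODE_LABEL_REGISTERED;
	return 0;
}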
json_object_free(json); + } else { + vty_out(vty, "Labelpool Summary\n"); + vty_out(vty, "-----------------\n"); + vty_out(vty, "%-13s %d\n", + "Ledger:", skiplist_count(lp->ledger)); + vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse)); + vty_out(vty, "%-13s %zu\n", + "Requests:", lp_fifo_count(&lp->requests)); + vty_out(vty, "%-13s %d\n", + "LabelChunks:", listcount(lp->chunks)); + vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count); + vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count); + } + return CMD_SUCCESS; +} + +DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd, + "show bgp labelpool ledger [json]", + SHOW_STR BGP_STR + "BGP Labelpool information\n" + "BGP Labelpool ledger\n" JSON_STR) +{ + bool uj = use_json(argc, argv); + json_object *json = NULL, *json_elem = NULL; + struct lp_lcb *lcb = NULL; + struct bgp_dest *dest; + void *cursor = NULL; + const struct prefix *p; + int rc, count; + + if (!lp) { + if (uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, "No existing BGP labelpool\n"); + return (CMD_WARNING); + } + + if (uj) { + count = skiplist_count(lp->ledger); + if (!count) { + vty_out(vty, "{}\n"); + return CMD_SUCCESS; + } + json = json_object_new_array(); + } else { + vty_out(vty, "Prefix Label\n"); + vty_out(vty, "---------------------------\n"); + } + + for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb, + &cursor); + !rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb, + &cursor)) { + if (uj) { + json_elem = json_object_new_object(); + json_object_array_add(json, json_elem); + } + switch (lcb->type) { + case LP_TYPE_BGP_LU: + if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) + if (uj) { + json_object_string_add( + json_elem, "prefix", "INVALID"); + json_object_int_add(json_elem, "label", + lcb->label); + } else + vty_out(vty, "%-18s %u\n", + "INVALID", lcb->label); + else { + char buf[PREFIX2STR_BUFFER]; + p = bgp_dest_get_prefix(dest); + prefix2str(p, buf, sizeof(buf)); + if (uj) { + json_object_string_add(json_elem, + "prefix", buf); + json_object_int_add(json_elem, "label", + lcb->label); + } else + vty_out(vty, "%-18s %u\n", buf, + lcb->label); + } + break; + case LP_TYPE_VRF: + if (uj) { + json_object_string_add(json_elem, "prefix", + "VRF"); + json_object_int_add(json_elem, "label", + lcb->label); + } else + vty_out(vty, "%-18s %u\n", "VRF", + lcb->label); + + break; + } + } + if (uj) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } + return CMD_SUCCESS; +} + +DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd, + "show bgp labelpool inuse [json]", + SHOW_STR BGP_STR + "BGP Labelpool information\n" + "BGP Labelpool inuse\n" JSON_STR) +{ + bool uj = use_json(argc, argv); + json_object *json = NULL, *json_elem = NULL; + struct bgp_dest *dest; + mpls_label_t label; + struct lp_lcb *lcb; + void *cursor = NULL; + const struct prefix *p; + int rc, count; + + if (!lp) { + vty_out(vty, "No existing BGP labelpool\n"); + return (CMD_WARNING); + } + if (!lp) { + if (uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, "No existing BGP labelpool\n"); + return (CMD_WARNING); + } + + if (uj) { + count = skiplist_count(lp->inuse); + if (!count) { + vty_out(vty, "{}\n"); + return CMD_SUCCESS; + } + json = json_object_new_array(); + } else { + vty_out(vty, "Prefix Label\n"); + vty_out(vty, "---------------------------\n"); + } + for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest, + &cursor); + !rc; rc = 
skiplist_next(lp->ledger, (void **)&label, + (void **)&dest, &cursor)) { + if (skiplist_search(lp->ledger, dest, (void **)&lcb)) + continue; + + if (uj) { + json_elem = json_object_new_object(); + json_object_array_add(json, json_elem); + } + + switch (lcb->type) { + case LP_TYPE_BGP_LU: + if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) + if (uj) { + json_object_string_add( + json_elem, "prefix", "INVALID"); + json_object_int_add(json_elem, "label", + label); + } else + vty_out(vty, "INVALID %u\n", + label); + else { + char buf[PREFIX2STR_BUFFER]; + p = bgp_dest_get_prefix(dest); + prefix2str(p, buf, sizeof(buf)); + if (uj) { + json_object_string_add(json_elem, + "prefix", buf); + json_object_int_add(json_elem, "label", + label); + } else + vty_out(vty, "%-18s %u\n", buf, + label); + } + break; + case LP_TYPE_VRF: + if (uj) { + json_object_string_add(json_elem, "prefix", + "VRF"); + json_object_int_add(json_elem, "label", label); + } else + vty_out(vty, "%-18s %u\n", "VRF", + label); + break; + } + } + if (uj) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } + return CMD_SUCCESS; +} + +DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd, + "show bgp labelpool requests [json]", + SHOW_STR BGP_STR + "BGP Labelpool information\n" + "BGP Labelpool requests\n" JSON_STR) +{ + bool uj = use_json(argc, argv); + json_object *json = NULL, *json_elem = NULL; + struct bgp_dest *dest; + const struct prefix *p; + char buf[PREFIX2STR_BUFFER]; + struct lp_fifo *item, *next; + int count; + + if (!lp) { + if (uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, "No existing BGP labelpool\n"); + return (CMD_WARNING); + } + + if (uj) { + count = lp_fifo_count(&lp->requests); + if (!count) { + vty_out(vty, "{}\n"); + return CMD_SUCCESS; + } + json = json_object_new_array(); + } else { + vty_out(vty, "Prefix \n"); + vty_out(vty, "----------------\n"); + } + + for (item = lp_fifo_first(&lp->requests); item; item = next) { + next = lp_fifo_next_safe(&lp->requests, item); + dest = item->lcb.labelid; + if (uj) { + json_elem = json_object_new_object(); + json_object_array_add(json, json_elem); + } + switch (item->lcb.type) { + case LP_TYPE_BGP_LU: + if (!CHECK_FLAG(dest->flags, + BGP_NODE_LABEL_REQUESTED)) { + if (uj) + json_object_string_add( + json_elem, "prefix", "INVALID"); + else + vty_out(vty, "INVALID\n"); + } else { + p = bgp_dest_get_prefix(dest); + prefix2str(p, buf, sizeof(buf)); + if (uj) + json_object_string_add(json_elem, + "prefix", buf); + else + vty_out(vty, "%-18s\n", buf); + } + break; + case LP_TYPE_VRF: + if (uj) + json_object_string_add(json_elem, "prefix", + "VRF"); + else + vty_out(vty, "VRF\n"); + break; + } + } + if (uj) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } + return CMD_SUCCESS; +} + +DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd, + "show bgp labelpool chunks [json]", + SHOW_STR BGP_STR + "BGP Labelpool information\n" + "BGP Labelpool chunks\n" JSON_STR) +{ + bool uj = use_json(argc, argv); + json_object *json = NULL, *json_elem; + struct listnode *node; + struct lp_chunk *chunk; + int count; + + if (!lp) { + if (uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, "No existing BGP labelpool\n"); + return (CMD_WARNING); + } + + if (uj) { + count = listcount(lp->chunks); + if (!count) { + vty_out(vty, "{}\n"); + return CMD_SUCCESS; + } + json = json_object_new_array(); + } else { + vty_out(vty, 
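/*
 * Each of the new "show bgp labelpool ..." commands builds its JSON
 * output the same way: an array, one object per entry, pretty-printed
 * and then freed.  A standalone sketch of that shape using plain json-c;
 * print_entries_json is an illustrative name, and FRR's vty_out and
 * json helper wrappers are replaced here by printf and raw json-c calls.
 */
#include <stdio.h>
#include <json-c/json.h>

static void print_entries_json(const char *prefixes[],
			       const unsigned int labels[], size_t n)
{
	struct json_object *arr = json_object_new_array();

	for (size_t i = 0; i < n; i++) {
		struct json_object *elem = json_object_new_object();

		json_object_object_add(elem, "prefix",
				       json_object_new_string(prefixes[i]));
		json_object_object_add(elem, "label",
				       json_object_new_int64(labels[i]));
		json_object_array_add(arr, elem); /* arr owns elem now */
	}

	printf("%s\n", json_object_to_json_string_ext(
			       arr, JSON_C_TO_STRING_PRETTY));
	json_object_put(arr);	/* releases the array and every element */
}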
"First Last\n"); + vty_out(vty, "--------------\n"); + } + + for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) { + if (uj) { + json_elem = json_object_new_object(); + json_object_array_add(json, json_elem); + json_object_int_add(json_elem, "first", chunk->first); + json_object_int_add(json_elem, "last", chunk->last); + } else + vty_out(vty, "%-10u %-10u\n", chunk->first, + chunk->last); + } + if (uj) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } + return CMD_SUCCESS; +} + +void bgp_lp_vty_init(void) +{ + install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd); + install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd); + install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd); + install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd); + install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd); +} diff --git a/bgpd/bgp_labelpool.h b/bgpd/bgp_labelpool.h index eaa3fce20b..d9f64acfe4 100644 --- a/bgpd/bgp_labelpool.h +++ b/bgpd/bgp_labelpool.h @@ -40,6 +40,7 @@ struct labelpool { struct lp_fifo_head requests; /* blocked on zebra */ struct work_queue *callback_q; uint32_t pending_count; /* requested from zebra */ + uint32_t reconnect_count; /* zebra reconnections */ }; extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool); @@ -50,5 +51,6 @@ extern void bgp_lp_release(int type, void *labelid, mpls_label_t label); extern void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last); extern void bgp_lp_event_zebra_down(void); extern void bgp_lp_event_zebra_up(void); +extern void bgp_lp_vty_init(void); #endif /* _FRR_BGP_LABELPOOL_H */ diff --git a/bgpd/bgp_lcommunity.c b/bgpd/bgp_lcommunity.c index 5900fcf862..88a85c979e 100644 --- a/bgpd/bgp_lcommunity.c +++ b/bgpd/bgp_lcommunity.c @@ -348,16 +348,12 @@ void lcommunity_finish(void) lcomhash = NULL; } -/* Large Communities token enum. */ -enum lcommunity_token { - lcommunity_token_unknown = 0, - lcommunity_token_val, -}; - -/* Get next Large Communities token from the string. */ +/* Get next Large Communities token from the string. + * Assumes str is space-delimeted and describes 0 or more + * valid large communities + */ static const char *lcommunity_gettoken(const char *str, - struct lcommunity_val *lval, - enum lcommunity_token *token) + struct lcommunity_val *lval) { const char *p = str; @@ -372,60 +368,55 @@ static const char *lcommunity_gettoken(const char *str, return NULL; /* Community value. 
*/ - if (isdigit((unsigned char)*p)) { - int separator = 0; - int digit = 0; - uint32_t globaladmin = 0; - uint32_t localdata1 = 0; - uint32_t localdata2 = 0; - - while (isdigit((unsigned char)*p) || *p == ':') { - if (*p == ':') { - if (separator == 2) { - *token = lcommunity_token_unknown; - return NULL; - } else { - separator++; - digit = 0; - if (separator == 1) { - globaladmin = localdata2; - } else { - localdata1 = localdata2; - } - localdata2 = 0; - } + int separator = 0; + int digit = 0; + uint32_t globaladmin = 0; + uint32_t localdata1 = 0; + uint32_t localdata2 = 0; + + while (*p && *p != ' ') { + /* large community valid chars */ + assert(isdigit((unsigned char)*p) || *p == ':'); + + if (*p == ':') { + separator++; + digit = 0; + if (separator == 1) { + globaladmin = localdata2; } else { - digit = 1; - localdata2 *= 10; - localdata2 += (*p - '0'); + localdata1 = localdata2; } - p++; + localdata2 = 0; + } else { + digit = 1; + /* left shift the accumulated value and add current + * digit + */ + localdata2 *= 10; + localdata2 += (*p - '0'); } - if (!digit) { - *token = lcommunity_token_unknown; - return NULL; - } - - /* - * Copy the large comm. - */ - lval->val[0] = (globaladmin >> 24) & 0xff; - lval->val[1] = (globaladmin >> 16) & 0xff; - lval->val[2] = (globaladmin >> 8) & 0xff; - lval->val[3] = globaladmin & 0xff; - lval->val[4] = (localdata1 >> 24) & 0xff; - lval->val[5] = (localdata1 >> 16) & 0xff; - lval->val[6] = (localdata1 >> 8) & 0xff; - lval->val[7] = localdata1 & 0xff; - lval->val[8] = (localdata2 >> 24) & 0xff; - lval->val[9] = (localdata2 >> 16) & 0xff; - lval->val[10] = (localdata2 >> 8) & 0xff; - lval->val[11] = localdata2 & 0xff; - - *token = lcommunity_token_val; - return p; + p++; } - *token = lcommunity_token_unknown; + + /* Assert str was a valid large community */ + assert(separator == 2 && digit == 1); + + /* + * Copy the large comm. + */ + lval->val[0] = (globaladmin >> 24) & 0xff; + lval->val[1] = (globaladmin >> 16) & 0xff; + lval->val[2] = (globaladmin >> 8) & 0xff; + lval->val[3] = globaladmin & 0xff; + lval->val[4] = (localdata1 >> 24) & 0xff; + lval->val[5] = (localdata1 >> 16) & 0xff; + lval->val[6] = (localdata1 >> 8) & 0xff; + lval->val[7] = localdata1 & 0xff; + lval->val[8] = (localdata2 >> 24) & 0xff; + lval->val[9] = (localdata2 >> 16) & 0xff; + lval->val[10] = (localdata2 >> 8) & 0xff; + lval->val[11] = localdata2 & 0xff; + return p; } @@ -439,23 +430,16 @@ static const char *lcommunity_gettoken(const char *str, struct lcommunity *lcommunity_str2com(const char *str) { struct lcommunity *lcom = NULL; - enum lcommunity_token token = lcommunity_token_unknown; struct lcommunity_val lval; + if (!lcommunity_list_valid(str, LARGE_COMMUNITY_LIST_STANDARD)) + return NULL; + do { - str = lcommunity_gettoken(str, &lval, &token); - switch (token) { - case lcommunity_token_val: - if (lcom == NULL) - lcom = lcommunity_new(); - lcommunity_add_val(lcom, &lval); - break; - case lcommunity_token_unknown: - default: - if (lcom) - lcommunity_free(&lcom); - return NULL; - } + str = lcommunity_gettoken(str, &lval); + if (lcom == NULL) + lcom = lcommunity_new(); + lcommunity_add_val(lcom, &lval); } while (str); return lcom; diff --git a/bgpd/bgp_lcommunity.h b/bgpd/bgp_lcommunity.h index c96df8482d..6ccb6b7879 100644 --- a/bgpd/bgp_lcommunity.h +++ b/bgpd/bgp_lcommunity.h @@ -23,6 +23,7 @@ #include "lib/json.h" #include "bgpd/bgp_route.h" +#include "bgpd/bgp_clist.h" /* Large Communities value is twelve octets long. 
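/*
 * lcommunity_gettoken() above now assumes the whole string has already
 * passed lcommunity_list_valid(), so it only has to split one
 * "GLOBAL:LOCAL1:LOCAL2" token and pack the three 32-bit fields
 * big-endian into the 12-byte value.  A standalone sketch of that
 * packing step; lcomm_pack is an illustrative name and uses sscanf in
 * place of the hand-rolled digit loop.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int lcomm_pack(const char *str, uint8_t out[12])
{
	uint32_t part[3];

	if (sscanf(str, "%" SCNu32 ":%" SCNu32 ":%" SCNu32,
		   &part[0], &part[1], &part[2]) != 3)
		return -1;

	for (int i = 0; i < 3; i++) {
		out[i * 4] = (uint8_t)(part[i] >> 24);
		out[i * 4 + 1] = (uint8_t)(part[i] >> 16);
		out[i * 4 + 2] = (uint8_t)(part[i] >> 8);
		out[i * 4 + 3] = (uint8_t)part[i];
	}

	return 0;
}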
*/ #define LCOMMUNITY_SIZE 12 diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index f961647778..3cb3d06217 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -60,6 +60,7 @@ #include "bgpd/bgp_keepalives.h" #include "bgpd/bgp_network.h" #include "bgpd/bgp_errors.h" +#include "bgpd/bgp_script.h" #include "lib/routing_nb.h" #include "bgpd/bgp_nb.h" #include "bgpd/bgp_evpn_mh.h" @@ -253,6 +254,7 @@ static __attribute__((__noreturn__)) void bgp_exit(int status) bf_free(bm->rd_idspace); list_delete(&bm->bgp); + list_delete(&bm->addresses); bgp_lp_finish(); @@ -404,12 +406,16 @@ int main(int argc, char **argv) int tmp_port; int bgp_port = BGP_PORT_DEFAULT; - char *bgp_address = NULL; + struct list *addresses = list_new(); int no_fib_flag = 0; int no_zebra_flag = 0; int skip_runas = 0; int instance = 0; int buffer_size = BGP_SOCKET_SNDBUF_SIZE; + char *address; + struct listnode *node; + + addresses->cmp = (int (*)(void *, void *))strcmp; frr_preinit(&bgpd_di, argc, argv); frr_opt_add( @@ -463,7 +469,7 @@ int main(int argc, char **argv) break; } case 'l': - bgp_address = optarg; + listnode_add_sort_nodup(addresses, optarg); /* listenon implies -n */ /* fallthru */ case 'n': @@ -493,11 +499,10 @@ int main(int argc, char **argv) memset(&bgpd_privs, 0, sizeof(bgpd_privs)); /* BGP master init. */ - bgp_master_init(frr_init(), buffer_size); + bgp_master_init(frr_init(), buffer_size, addresses); bm->port = bgp_port; if (bgp_port == 0) bgp_option_set(BGP_OPT_NO_LISTEN); - bm->address = bgp_address; if (no_fib_flag || no_zebra_flag) bgp_option_set(BGP_OPT_NO_FIB); if (no_zebra_flag) @@ -506,6 +511,10 @@ int main(int argc, char **argv) /* Initializations. */ bgp_vrf_init(); +#ifdef HAVE_SCRIPTING + bgp_script_init(); +#endif + hook_register(routing_conf_event, routing_control_plane_protocols_name_validate); @@ -513,8 +522,16 @@ int main(int argc, char **argv) /* BGP related initialization. */ bgp_init((unsigned short)instance); - snprintf(bgpd_di.startinfo, sizeof(bgpd_di.startinfo), ", bgp@%s:%d", - (bm->address ? 
bm->address : "<all>"), bm->port); + if (list_isempty(bm->addresses)) { + snprintf(bgpd_di.startinfo, sizeof(bgpd_di.startinfo), + ", bgp@<all>:%d", bm->port); + } else { + for (ALL_LIST_ELEMENTS_RO(bm->addresses, node, address)) + snprintf(bgpd_di.startinfo + strlen(bgpd_di.startinfo), + sizeof(bgpd_di.startinfo) + - strlen(bgpd_di.startinfo), + ", bgp@%s:%d", address, bm->port); + } frr_config_fork(); /* must be called after fork() */ diff --git a/bgpd/bgp_memory.c b/bgpd/bgp_memory.c index f9aac35d05..0013eb2df2 100644 --- a/bgpd/bgp_memory.c +++ b/bgpd/bgp_memory.c @@ -98,6 +98,7 @@ DEFINE_MTYPE(BGPD, PEER_UPDATE_SOURCE, "BGP peer update interface") DEFINE_MTYPE(BGPD, PEER_CONF_IF, "BGP peer config interface") DEFINE_MTYPE(BGPD, BGP_DAMP_INFO, "Dampening info") DEFINE_MTYPE(BGPD, BGP_DAMP_ARRAY, "BGP Dampening array") +DEFINE_MTYPE(BGPD, BGP_DAMP_REUSELIST, "BGP Dampening reuse list") DEFINE_MTYPE(BGPD, BGP_REGEXP, "BGP regexp") DEFINE_MTYPE(BGPD, BGP_AGGREGATE, "BGP aggregate") DEFINE_MTYPE(BGPD, BGP_ADDR, "BGP own address") diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h index a95d9ef931..63998e95ac 100644 --- a/bgpd/bgp_memory.h +++ b/bgpd/bgp_memory.h @@ -94,6 +94,7 @@ DECLARE_MTYPE(PEER_UPDATE_SOURCE) DECLARE_MTYPE(PEER_CONF_IF) DECLARE_MTYPE(BGP_DAMP_INFO) DECLARE_MTYPE(BGP_DAMP_ARRAY) +DECLARE_MTYPE(BGP_DAMP_REUSELIST) DECLARE_MTYPE(BGP_REGEXP) DECLARE_MTYPE(BGP_AGGREGATE) DECLARE_MTYPE(BGP_ADDR) diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index 3bc4c03233..d9acda8bd0 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -419,8 +419,7 @@ int vpn_leak_label_callback( static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2) { - int i; - int j; + uint32_t i, j; if (!e1 || !e2) return false; @@ -591,8 +590,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */ * TBD do we need to do anything about the * 'connected' parameter? */ - nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop, - afi, bpi, NULL, 0); + nh_valid = bgp_find_or_add_nexthop( + bgp, bgp_nexthop, afi, safi, bpi, NULL, 0); if (debug) zlog_debug("%s: nexthop is %svalid (in vrf %s)", @@ -657,8 +656,8 @@ leak_update(struct bgp *bgp, /* destination bgp instance */ * TBD do we need to do anything about the * 'connected' parameter? 
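/*
 * bgp_main.c above turns the single "-l" listen address into a list kept
 * sorted and duplicate-free by listnode_add_sort_nodup() with strcmp as
 * the comparator, so "-l" may be given several times and repeats are
 * ignored.  A standalone sketch of the same insert-sorted-unless-duplicate
 * behaviour over a fixed-capacity array; add_listen_address is an
 * illustrative name, not an FRR function.
 */
#include <stddef.h>
#include <string.h>

static size_t add_listen_address(const char **addrs, size_t count,
				 size_t max, const char *addr)
{
	size_t i;

	for (i = 0; i < count; i++) {
		int cmp = strcmp(addr, addrs[i]);

		if (cmp == 0)
			return count;	/* duplicate: list unchanged */
		if (cmp < 0)
			break;		/* found the sorted slot */
	}

	if (count == max)
		return count;		/* full: nothing inserted */

	memmove(&addrs[i + 1], &addrs[i], (count - i) * sizeof(addrs[0]));
	addrs[i] = addr;
	return count + 1;
}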
*/ - nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop, - afi, new, NULL, 0); + nh_valid = bgp_find_or_add_nexthop(bgp, bgp_nexthop, afi, safi, + new, NULL, 0); if (debug) zlog_debug("%s: nexthop is %svalid (in vrf %s)", diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index df2544d608..91a073d5d7 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -83,6 +83,21 @@ extern void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp, void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp, afi_t afi, safi_t safi); +static inline bool is_bgp_vrf_mplsvpn(struct bgp *bgp) +{ + afi_t afi; + + if (bgp->inst_type == BGP_INSTANCE_TYPE_VRF) + for (afi = 0; afi < AFI_MAX; ++afi) { + if (CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST], + BGP_CONFIG_VRF_TO_MPLSVPN_EXPORT) + || CHECK_FLAG(bgp->af_flags[afi][SAFI_UNICAST], + BGP_CONFIG_MPLSVPN_TO_VRF_IMPORT)) + return true; + } + return false; +} + static inline int vpn_leak_to_vpn_active(struct bgp *bgp_vrf, afi_t afi, const char **pmsg) { diff --git a/bgpd/bgp_mplsvpn_snmp.c b/bgpd/bgp_mplsvpn_snmp.c new file mode 100644 index 0000000000..055bae8432 --- /dev/null +++ b/bgpd/bgp_mplsvpn_snmp.c @@ -0,0 +1,1689 @@ +/* MPLS/BGP L3VPN MIB + * Copyright (C) 2020 Volta Networks Inc + * + * This file is part of FRR. + * + * FRRouting is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRRouting is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include <net-snmp/net-snmp-config.h> +#include <net-snmp/net-snmp-includes.h> + +#include "if.h" +#include "log.h" +#include "prefix.h" +#include "command.h" +#include "thread.h" +#include "smux.h" +#include "filter.h" +#include "hook.h" +#include "libfrr.h" +#include "version.h" + +#include "bgpd/bgpd.h" +#include "bgpd/bgp_route.h" +#include "bgpd/bgp_attr.h" +#include "bgpd/bgp_ecommunity.h" +#include "bgpd/bgp_mplsvpn.h" +#include "bgpd/bgp_mplsvpn_snmp.h" + +#define BGP_mplsvpn_notif_enable_true 1 +#define BGP_mplsvpn_notif_enable_false 2 + +/* MPLSL3VPN MIB described in RFC4382 */ +#define MPLSL3VPNMIB 1, 3, 6, 1, 2, 1, 10, 166, 11 + +/* MPLSL3VPN Scalars */ +#define MPLSL3VPNCONFIGUREDVRFS 1 +#define MPLSL3VPNACTIVEVRFS 2 +#define MPLSL3VPNCONNECTEDINTERFACES 3 +#define MPLSL3VPNNOTIFICATIONENABLE 4 +#define MPLSL3VPNCONFMAXPOSSRTS 5 +#define MPLSL3VPNVRFCONFRTEMXTHRSHTIME 6 +#define MPLSL3VPNILLLBLRCVTHRSH 7 + +/* MPLSL3VPN IFConf Table */ +#define MPLSL3VPNIFVPNCLASSIFICATION 1 +#define MPLSL3VPNIFCONFSTORAGETYPE 2 +#define MPLSL3VPNIFCONFROWSTATUS 3 + +/* MPLSL3VPN VRF Table */ +#define MPLSL3VPNVRFVPNID 1 +#define MPLSL3VPNVRFDESC 2 +#define MPLSL3VPNVRFRD 3 +#define MPLSL3VPNVRFCREATIONTIME 4 +#define MPLSL3VPNVRFOPERSTATUS 5 +#define MPLSL3VPNVRFACTIVEINTERFACES 6 +#define MPLSL3VPNVRFASSOCIATEDINTERFACES 7 +#define MPLSL3VPNVRFCONFMIDRTETHRESH 8 +#define MPLSL3VPNVRFCONFHIGHRTETHRSH 9 +#define MPLSL3VPNVRFCONFMAXROUTES 10 +#define MPLSL3VPNVRFCONFLASTCHANGED 11 +#define MPLSL3VPNVRFCONFROWSTATUS 12 +#define MPLSL3VPNVRFCONFADMINSTATUS 13 +#define MPLSL3VPNVRFCONFSTORAGETYPE 14 + +/* MPLSL3VPN RT Table */ +#define MPLSL3VPNVRFRT 1 +#define MPLSL3VPNVRFRTDESCR 2 +#define MPLSL3VPNVRFRTROWSTATUS 3 +#define MPLSL3VPNVRFRTSTORAGETYPE 4 + +/* MPLSL3VPN PERF Table */ +#define MPLSL3VPNVRFPERFROUTESADDED 1 +#define MPLSL3VPNVRFPERFROUTESDELETED 2 +#define MPLSL3VPNVRFPERFCURRNUMROUTES 3 + +/* MPLSL3VPN RTE Table */ +#define MPLSL3VPNVRFRTEINETCIDRDESTTYPE 1 +#define MPLSL3VPNVRFRTEINETCIDRDEST 2 +#define MPLSL3VPNVRFRTEINETCIDRPFXLEN 3 +#define MPLSL3VPNVRFRTEINETCIDRPOLICY 4 +#define MPLSL3VPNVRFRTEINETCIDRNHOPTYPE 5 +#define MPLSL3VPNVRFRTEINETCIDRNEXTHOP 6 +#define MPLSL3VPNVRFRTEINETCIDRIFINDEX 7 +#define MPLSL3VPNVRFRTEINETCIDRTYPE 8 +#define MPLSL3VPNVRFRTEINETCIDRPROTO 9 +#define MPLSL3VPNVRFRTEINETCIDRAGE 10 +#define MPLSL3VPNVRFRTEINETCIDRNEXTHOPAS 11 +#define MPLSL3VPNVRFRTEINETCIDRMETRIC1 12 +#define MPLSL3VPNVRFRTEINETCIDRMETRIC2 13 +#define MPLSL3VPNVRFRTEINETCIDRMETRIC3 14 +#define MPLSL3VPNVRFRTEINETCIDRMETRIC4 15 +#define MPLSL3VPNVRFRTEINETCIDRMETRIC5 16 +#define MPLSL3VPNVRFRTEINETCIDRXCPOINTER 17 +#define MPLSL3VPNVRFRTEINETCIDRSTATUS 18 + +/* BGP Trap */ +#define MPLSL3VPNVRFUP 1 +#define MPLSL3VPNDOWN 2 + +/* SNMP value hack. */ +#define INTEGER ASN_INTEGER +#define INTEGER32 ASN_INTEGER +#define COUNTER32 ASN_COUNTER +#define OCTET_STRING ASN_OCTET_STR +#define IPADDRESS ASN_IPADDRESS +#define GAUGE32 ASN_UNSIGNED +#define TIMETICKS ASN_TIMETICKS +#define OID ASN_OBJECT_ID + +/* Declare static local variables for convenience. 
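/*
 * Every column constant above becomes the "magic" of one row in the
 * net-snmp variable table registered further below, paired with an ASN
 * type, an access mode, a handler and the OID suffix under MPLSL3VPNMIB;
 * the handler then switches on the magic to pick the column.  A stripped,
 * standalone sketch of that dispatch shape; mib_column, lookup_column and
 * the callback type are illustrative, not net-snmp or FRR types.
 */
#include <stddef.h>
#include <stdint.h>

typedef const uint8_t *(*column_handler_t)(int magic, size_t *val_len);

struct mib_column {
	int magic;		  /* e.g. MPLSL3VPNVRFRD */
	uint32_t oid_suffix[5];	  /* tail under the MIB root */
	size_t oid_suffix_len;
	column_handler_t handler; /* e.g. a per-table lookup routine */
};

static const uint8_t *lookup_column(const struct mib_column *table,
				    size_t nentries, int magic,
				    size_t *val_len)
{
	for (size_t i = 0; i < nentries; i++)
		if (table[i].magic == magic)
			return table[i].handler(magic, val_len);

	return NULL;	/* unknown column */
}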
*/ +SNMP_LOCAL_VARIABLES + +#define RT_PREAMBLE_SIZE 20 + +/* BGP-MPLS-MIB instances */ +static oid mpls_l3vpn_oid[] = {MPLSL3VPNMIB}; +static oid mpls_l3vpn_trap_oid[] = {MPLSL3VPNMIB, 0}; +static char rd_buf[RD_ADDRSTRLEN]; +/* Notifications enabled by default */ +static uint8_t bgp_mplsvpn_notif_enable = SNMP_TRUE; +static oid mpls_l3vpn_policy_oid[2] = {0, 0}; +static const char *empty_nhop = ""; +char rt_description[VRF_NAMSIZ + RT_PREAMBLE_SIZE]; + +static uint8_t *mplsL3vpnConfiguredVrfs(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnActiveVrfs(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnConnectedInterfaces(struct variable *, oid[], size_t *, + int, size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnNotificationEnable(struct variable *, oid[], size_t *, + int, size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnVrfConfMaxPossRts(struct variable *, oid[], size_t *, + int, size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnVrfConfRteMxThrshTime(struct variable *, oid[], + size_t *, int, size_t *, + WriteMethod **); + +static uint8_t *mplsL3vpnIllLblRcvThrsh(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnVrfTable(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnVrfRtTable(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnIfConfTable(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnPerfTable(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + +static uint8_t *mplsL3vpnRteTable(struct variable *, oid[], size_t *, int, + size_t *, WriteMethod **); + + +static struct variable mpls_l3vpn_variables[] = { + /* BGP version. 
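The table that follows uses the same struct variable layout as the other bgpd SNMP tables: { magic, ASN type, access, handler, suffix length, OID suffix }, with every suffix relative to mplsL3vpnMIB (1.3.6.1.2.1.10.166.11) and registered through REGISTER_MIB() at the bottom of the file. The first entry, for instance, pairs mplsL3vpnConfiguredVrfs with suffix {1, 1, 1}, i.e. the RFC 4382 scalar at 1.3.6.1.2.1.10.166.11.1.1.1.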
*/ + {MPLSL3VPNCONFIGUREDVRFS, + GAUGE32, + RONLY, + mplsL3vpnConfiguredVrfs, + 3, + {1, 1, 1} }, + {MPLSL3VPNACTIVEVRFS, + GAUGE32, + RONLY, + mplsL3vpnActiveVrfs, + 3, + {1, 1, 2} }, + {MPLSL3VPNCONNECTEDINTERFACES, + GAUGE32, + RONLY, + mplsL3vpnConnectedInterfaces, + 3, + {1, 1, 3} }, + {MPLSL3VPNNOTIFICATIONENABLE, + INTEGER, + RWRITE, + mplsL3vpnNotificationEnable, + 3, + {1, 1, 4} }, + {MPLSL3VPNCONFMAXPOSSRTS, + GAUGE32, + RONLY, + mplsL3vpnVrfConfMaxPossRts, + 3, + {1, 1, 5} }, + {MPLSL3VPNVRFCONFRTEMXTHRSHTIME, + GAUGE32, + RONLY, + mplsL3vpnVrfConfRteMxThrshTime, + 3, + {1, 1, 6} }, + {MPLSL3VPNILLLBLRCVTHRSH, + GAUGE32, + RONLY, + mplsL3vpnIllLblRcvThrsh, + 3, + {1, 1, 7} }, + + /* Ifconf Table */ + {MPLSL3VPNIFVPNCLASSIFICATION, + INTEGER, + RONLY, + mplsL3vpnIfConfTable, + 5, + {1, 2, 1, 1, 2} }, + {MPLSL3VPNIFCONFSTORAGETYPE, + INTEGER, + RONLY, + mplsL3vpnIfConfTable, + 5, + {1, 2, 1, 1, 4} }, + {MPLSL3VPNIFCONFROWSTATUS, + INTEGER, + RONLY, + mplsL3vpnIfConfTable, + 5, + {1, 2, 1, 1, 5} }, + + /* mplsL3VpnVrf Table */ + {MPLSL3VPNVRFVPNID, + OCTET_STRING, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 2} }, + {MPLSL3VPNVRFDESC, + OCTET_STRING, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 3} }, + {MPLSL3VPNVRFRD, + OCTET_STRING, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 4} }, + {MPLSL3VPNVRFCREATIONTIME, + TIMETICKS, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 5} }, + {MPLSL3VPNVRFOPERSTATUS, + INTEGER, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 6} }, + {MPLSL3VPNVRFACTIVEINTERFACES, + GAUGE32, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 7} }, + {MPLSL3VPNVRFASSOCIATEDINTERFACES, + GAUGE32, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 8} }, + {MPLSL3VPNVRFCONFMIDRTETHRESH, + GAUGE32, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 9} }, + {MPLSL3VPNVRFCONFHIGHRTETHRSH, + GAUGE32, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 10} }, + {MPLSL3VPNVRFCONFMAXROUTES, + GAUGE32, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 11} }, + {MPLSL3VPNVRFCONFLASTCHANGED, + TIMETICKS, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 12} }, + {MPLSL3VPNVRFCONFROWSTATUS, + INTEGER, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 13} }, + {MPLSL3VPNVRFCONFADMINSTATUS, + INTEGER, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 14} }, + {MPLSL3VPNVRFCONFSTORAGETYPE, + INTEGER, + RONLY, + mplsL3vpnVrfTable, + 5, + {1, 2, 2, 1, 15} }, + + /* mplsL3vpnVrfRt Table */ + {MPLSL3VPNVRFRT, + OCTET_STRING, + RONLY, + mplsL3vpnVrfRtTable, + 5, + {1, 2, 3, 1, 4} }, + {MPLSL3VPNVRFRTDESCR, + OCTET_STRING, + RONLY, + mplsL3vpnVrfRtTable, + 5, + {1, 2, 3, 1, 5} }, + {MPLSL3VPNVRFRTROWSTATUS, + INTEGER, + RONLY, + mplsL3vpnVrfRtTable, + 5, + {1, 2, 3, 1, 6} }, + {MPLSL3VPNVRFRTSTORAGETYPE, + INTEGER, + RONLY, + mplsL3vpnVrfRtTable, + 5, + {1, 2, 3, 1, 7} }, + + /* mplsL3VpnPerfTable */ + {MPLSL3VPNVRFPERFROUTESADDED, + COUNTER32, + RONLY, + mplsL3vpnPerfTable, + 5, + {1, 3, 1, 1, 1} }, + {MPLSL3VPNVRFPERFROUTESDELETED, + COUNTER32, + RONLY, + mplsL3vpnPerfTable, + 5, + {1, 3, 1, 1, 2} }, + {MPLSL3VPNVRFPERFCURRNUMROUTES, + GAUGE32, + RONLY, + mplsL3vpnPerfTable, + 5, + {1, 3, 1, 1, 3} }, + + /* mplsVpnRteTable */ + {MPLSL3VPNVRFRTEINETCIDRDESTTYPE, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 1} }, + {MPLSL3VPNVRFRTEINETCIDRDEST, + OCTET_STRING, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 2} }, + {MPLSL3VPNVRFRTEINETCIDRPFXLEN, + GAUGE32, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 3} }, + {MPLSL3VPNVRFRTEINETCIDRPOLICY, + OID, 
+ RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 4} }, + {MPLSL3VPNVRFRTEINETCIDRNHOPTYPE, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 5} }, + {MPLSL3VPNVRFRTEINETCIDRNEXTHOP, + OCTET_STRING, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 6} }, + {MPLSL3VPNVRFRTEINETCIDRIFINDEX, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 7} }, + {MPLSL3VPNVRFRTEINETCIDRTYPE, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 8} }, + {MPLSL3VPNVRFRTEINETCIDRPROTO, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 9} }, + {MPLSL3VPNVRFRTEINETCIDRAGE, + GAUGE32, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 10} }, + {MPLSL3VPNVRFRTEINETCIDRNEXTHOPAS, + GAUGE32, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 11} }, + {MPLSL3VPNVRFRTEINETCIDRMETRIC1, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 12} }, + {MPLSL3VPNVRFRTEINETCIDRMETRIC2, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 13} }, + {MPLSL3VPNVRFRTEINETCIDRMETRIC3, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 14} }, + {MPLSL3VPNVRFRTEINETCIDRMETRIC4, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 15} }, + {MPLSL3VPNVRFRTEINETCIDRMETRIC5, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 16} }, + {MPLSL3VPNVRFRTEINETCIDRXCPOINTER, + OCTET_STRING, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 17} }, + {MPLSL3VPNVRFRTEINETCIDRSTATUS, + INTEGER, + RONLY, + mplsL3vpnRteTable, + 5, + {1, 4, 1, 1, 18} }, +}; + +/* timeticks are in hundredths of a second */ +static void bgp_mpls_l3vpn_update_timeticks(time_t *counter) +{ + struct timeval tv; + + monotime(&tv); + *counter = (tv.tv_sec * 100) + (tv.tv_usec / 10000); +} + +static int bgp_mpls_l3vpn_update_last_changed(struct bgp *bgp) +{ + if (bgp->snmp_stats) + bgp_mpls_l3vpn_update_timeticks( + &(bgp->snmp_stats->modify_time)); + return 0; +} + +static uint32_t bgp_mpls_l3vpn_current_routes(struct bgp *l3vpn_bgp) +{ + uint32_t count = 0; + struct bgp_table *table; + struct bgp_dest *dest; + struct bgp_path_info *pi; + + table = l3vpn_bgp->rib[AFI_IP][SAFI_UNICAST]; + for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) { + pi = bgp_dest_get_bgp_path_info(dest); + for (; pi; pi = pi->next) + count++; + } + table = l3vpn_bgp->rib[AFI_IP6][SAFI_UNICAST]; + for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) { + pi = bgp_dest_get_bgp_path_info(dest); + for (; pi; pi = pi->next) + count++; + } + return count; +} + +static int bgp_init_snmp_stats(struct bgp *bgp) +{ + if (is_bgp_vrf_mplsvpn(bgp)) { + if (bgp->snmp_stats == NULL) { + bgp->snmp_stats = XCALLOC( + MTYPE_BGP, sizeof(struct bgp_snmp_stats)); + /* fix up added routes */ + if (bgp->snmp_stats) { + bgp->snmp_stats->routes_added = + bgp_mpls_l3vpn_current_routes(bgp); + bgp_mpls_l3vpn_update_timeticks( + &(bgp->snmp_stats->creation_time)); + } + } + } else { + if (bgp->snmp_stats) { + XFREE(MTYPE_BGP, bgp->snmp_stats); + bgp->snmp_stats = NULL; + } + } + /* Something changed - update the timestamp */ + bgp_mpls_l3vpn_update_last_changed(bgp); + return 0; +} + +static int bgp_snmp_update_route_stats(struct bgp_dest *dest, + struct bgp_path_info *pi, bool added) +{ + struct bgp_table *table; + + if (dest) { + table = bgp_dest_table(dest); + /* only update if we have a stats block - MPLSVPN vrfs for now*/ + if (table && table->bgp && table->bgp->snmp_stats) { + if (added) + table->bgp->snmp_stats->routes_added++; + else + table->bgp->snmp_stats->routes_deleted++; + } + } + return 0; +} + 
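The helpers above keep the per-VRF statistics in SNMP units: TimeTicks are hundredths of a second (hence the *100 and /10000 applied to the monotonic timestamp), routes_added is seeded with the current RIB count when a VRF first qualifies as MPLS VPN, and the added/deleted counters are then bumped from the route hook rather than recomputed. A minimal standalone sketch of the same TimeTicks conversion, using gettimeofday() purely as a stand-in for FRR's monotime():

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

/* SNMP TimeTicks: 1 tick == 1/100 of a second. */
static uint64_t tv_to_timeticks(const struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * 100 + tv->tv_usec / 10000;
}

int main(void)
{
	struct timeval now;

	gettimeofday(&now, NULL); /* monotime() in the daemon proper */
	printf("%llu ticks\n", (unsigned long long)tv_to_timeticks(&now));
	return 0;
}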
+static bool is_bgp_vrf_active(struct bgp *bgp) +{ + struct vrf *vrf; + struct interface *ifp; + + /* if there is one interface in the vrf which is up then it is deemed + * active + */ + vrf = vrf_lookup_by_id(bgp->vrf_id); + if (vrf == NULL) + return false; + RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) { + /* if we are in a vrf skip the l3mdev */ + if (bgp->name && strncmp(ifp->name, bgp->name, VRF_NAMSIZ) == 0) + continue; + + if (if_is_up(ifp)) + return true; + } + return false; +} + +/* BGP Traps. */ +static struct trap_object l3vpn_trap_list[] = {{5, {1, 2, 1, 1, 5} }, + {5, {1, 2, 2, 1, 6} } }; + +static int bgp_vrf_check_update_active(struct bgp *bgp, struct interface *ifp) +{ + bool new_active = false; + oid trap; + struct index_oid trap_index[2]; + + if (!is_bgp_vrf_mplsvpn(bgp) || bgp->snmp_stats == NULL + || !bgp_mplsvpn_notif_enable) + return 0; + new_active = is_bgp_vrf_active(bgp); + if (bgp->snmp_stats->active != new_active) { + /* add trap in here */ + bgp->snmp_stats->active = new_active; + + /* send relevent trap */ + if (bgp->snmp_stats->active) + trap = MPLSL3VPNVRFUP; + else + trap = MPLSL3VPNDOWN; + + /* + * first index vrf_name + ifindex + * second index vrf_name + */ + trap_index[1].indexlen = strnlen(bgp->name, VRF_NAMSIZ); + oid_copy_str(trap_index[0].indexname, bgp->name, + trap_index[1].indexlen); + oid_copy_str(trap_index[1].indexname, bgp->name, + trap_index[1].indexlen); + trap_index[0].indexlen = + trap_index[1].indexlen + sizeof(ifindex_t); + oid_copy_int(trap_index[0].indexname + trap_index[1].indexlen, + (int *)&(ifp->ifindex)); + + smux_trap_multi_index( + mpls_l3vpn_variables, array_size(mpls_l3vpn_variables), + mpls_l3vpn_trap_oid, array_size(mpls_l3vpn_trap_oid), + mpls_l3vpn_oid, sizeof(mpls_l3vpn_oid) / sizeof(oid), + trap_index, array_size(trap_index), l3vpn_trap_list, + array_size(l3vpn_trap_list), trap); + } + bgp_mpls_l3vpn_update_last_changed(bgp); + return 0; +} + +static uint8_t *mplsL3vpnConfiguredVrfs(struct variable *v, oid name[], + size_t *length, int exact, + size_t *var_len, + WriteMethod **write_method) +{ + struct listnode *node, *nnode; + struct bgp *bgp; + uint32_t count = 0; + + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { + if (is_bgp_vrf_mplsvpn(bgp)) + count++; + } + return SNMP_INTEGER(count); +} + +static uint8_t *mplsL3vpnActiveVrfs(struct variable *v, oid name[], + size_t *length, int exact, size_t *var_len, + WriteMethod **write_method) +{ + struct listnode *node, *nnode; + struct bgp *bgp; + uint32_t count = 0; + + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { + if (is_bgp_vrf_mplsvpn(bgp) && is_bgp_vrf_active(bgp)) + count++; + } + return SNMP_INTEGER(count); +} + +static uint8_t *mplsL3vpnConnectedInterfaces(struct variable *v, oid name[], + size_t *length, int exact, + size_t *var_len, + WriteMethod **write_method) +{ + struct listnode *node, *nnode; + struct bgp *bgp; + uint32_t count = 0; + struct vrf *vrf; + + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { + if (is_bgp_vrf_mplsvpn(bgp)) { + vrf = vrf_lookup_by_name(bgp->name); + if (vrf == NULL) + continue; + + count += vrf_interface_count(vrf); + } + } + + return SNMP_INTEGER(count); +} + +static int 
write_mplsL3vpnNotificationEnable(int action, uint8_t *var_val, + uint8_t var_val_type, + size_t var_val_len, uint8_t *statP, + oid *name, size_t length) +{ + uint32_t intval; + + if (var_val_type != ASN_INTEGER) + return SNMP_ERR_WRONGTYPE; + + if (var_val_len != sizeof(long)) + return SNMP_ERR_WRONGLENGTH; + + intval = *(long *)var_val; + bgp_mplsvpn_notif_enable = intval; + return SNMP_ERR_NOERROR; +} + +static uint8_t *mplsL3vpnNotificationEnable(struct variable *v, oid name[], + size_t *length, int exact, + size_t *var_len, + WriteMethod **write_method) +{ + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + *write_method = write_mplsL3vpnNotificationEnable; + return SNMP_INTEGER(bgp_mplsvpn_notif_enable); +} + +static uint8_t *mplsL3vpnVrfConfMaxPossRts(struct variable *v, oid name[], + size_t *length, int exact, + size_t *var_len, + WriteMethod **write_method) +{ + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + return SNMP_INTEGER(0); +} + +static uint8_t *mplsL3vpnVrfConfRteMxThrshTime(struct variable *v, oid name[], + size_t *length, int exact, + size_t *var_len, + WriteMethod **write_method) +{ + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + return SNMP_INTEGER(0); +} + +static uint8_t *mplsL3vpnIllLblRcvThrsh(struct variable *v, oid name[], + size_t *length, int exact, + size_t *var_len, + WriteMethod **write_method) +{ + if (smux_header_generic(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + return SNMP_INTEGER(0); +} + + +static struct bgp *bgp_lookup_by_name_next(char *vrf_name) +{ + struct bgp *bgp, *bgp_next = NULL; + struct listnode *node, *nnode; + bool first = false; + + /* + * the vrfs are not stored alphabetically but since we are using the + * vrf name as an index we need the getnext function to return them + * in a atrict order. Thus run through and find the best next one. + */ + for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp)) { + if (!is_bgp_vrf_mplsvpn(bgp)) + continue; + if (strnlen(vrf_name, VRF_NAMSIZ) == 0 && bgp_next == NULL) { + first = true; + bgp_next = bgp; + continue; + } + if (first || strncmp(bgp->name, vrf_name, VRF_NAMSIZ) > 0) { + if (bgp_next == NULL) + bgp_next = bgp; + else if (strncmp(bgp->name, bgp_next->name, VRF_NAMSIZ) + < 0) + bgp_next = bgp; + } + } + return bgp_next; +} + +/* 1.3.6.1.2.1.10.166.11.1.2.1.1.x = 14*/ +#define IFCONFTAB_NAMELEN 14 +static struct bgp *bgpL3vpnIfConf_lookup(struct variable *v, oid name[], + size_t *length, char *vrf_name, + ifindex_t *ifindex, int exact) +{ + struct bgp *bgp = NULL; + size_t namelen = v ? v->namelen : IFCONFTAB_NAMELEN; + struct interface *ifp; + int vrf_name_len, len; + + /* too long ? */ + if (*length - namelen > (VRF_NAMSIZ + sizeof(uint32_t))) + return NULL; + /* do we have index info in the oid ? */ + if (*length - namelen != 0 && *length - namelen >= sizeof(uint32_t)) { + /* copy the info from the oid */ + vrf_name_len = *length - (namelen + sizeof(ifindex_t)); + oid2string(name + namelen, vrf_name_len, vrf_name); + oid2int(name + namelen + vrf_name_len, ifindex); + } + + if (exact) { + /* Check the length. 
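For the interface-configuration table the instance index parsed above is the VRF name encoded one character per OID sub-identifier (oid2string()/oid_copy_str()), followed by sizeof(ifindex_t) sub-identifiers carrying the interface index (oid2int()/oid_copy_int()). The exact branch below resolves that pair to a BGP VRF instance and an interface, while the getnext branch walks VRFs in name order and interfaces in ifindex order, rewriting the index portion of name[] before returning the matching instance.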
*/ + bgp = bgp_lookup_by_name(vrf_name); + if (bgp && !is_bgp_vrf_mplsvpn(bgp)) + return NULL; + if (!bgp) + return NULL; + ifp = if_lookup_by_index(*ifindex, bgp->vrf_id); + if (!ifp) + return NULL; + } else { + if (strnlen(vrf_name, VRF_NAMSIZ) == 0) + bgp = bgp_lookup_by_name_next(vrf_name); + else + bgp = bgp_lookup_by_name(vrf_name); + + while (bgp) { + ifp = if_vrf_lookup_by_index_next(*ifindex, + bgp->vrf_id); + if (ifp) { + vrf_name_len = strnlen(bgp->name, VRF_NAMSIZ); + *ifindex = ifp->ifindex; + len = vrf_name_len + sizeof(ifindex_t); + oid_copy_str(name + namelen, bgp->name, + vrf_name_len); + oid_copy_int(name + namelen + vrf_name_len, + ifindex); + *length = len + namelen; + + return bgp; + } + *ifindex = 0; + bgp = bgp_lookup_by_name_next(bgp->name); + } + + return NULL; + } + return bgp; +} + +static uint8_t *mplsL3vpnIfConfTable(struct variable *v, oid name[], + size_t *length, int exact, size_t *var_len, + WriteMethod **write_method) +{ + char vrf_name[VRF_NAMSIZ]; + ifindex_t ifindex = 0; + struct bgp *l3vpn_bgp; + + if (smux_header_table(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + memset(vrf_name, 0, VRF_NAMSIZ); + l3vpn_bgp = bgpL3vpnIfConf_lookup(v, name, length, vrf_name, &ifindex, + exact); + if (!l3vpn_bgp) + return NULL; + + switch (v->magic) { + case MPLSL3VPNIFVPNCLASSIFICATION: + return SNMP_INTEGER(2); + case MPLSL3VPNIFCONFSTORAGETYPE: + return SNMP_INTEGER(2); + case MPLSL3VPNIFCONFROWSTATUS: + return SNMP_INTEGER(1); + } + return NULL; +} + +/* 1.3.6.1.2.1.10.166.11.1.2.2.1.x = 14*/ +#define VRFTAB_NAMELEN 14 + +static struct bgp *bgpL3vpnVrf_lookup(struct variable *v, oid name[], + size_t *length, char *vrf_name, int exact) +{ + struct bgp *bgp = NULL; + size_t namelen = v ? v->namelen : VRFTAB_NAMELEN; + int len; + + if (*length - namelen > VRF_NAMSIZ) + return NULL; + oid2string(name + namelen, *length - namelen, vrf_name); + if (exact) { + /* Check the length. */ + bgp = bgp_lookup_by_name(vrf_name); + if (bgp && !is_bgp_vrf_mplsvpn(bgp)) + return NULL; + } else { + bgp = bgp_lookup_by_name_next(vrf_name); + + if (bgp == NULL) + return NULL; + + len = strnlen(bgp->name, VRF_NAMSIZ); + oid_copy_str(name + namelen, bgp->name, len); + *length = len + namelen; + } + return bgp; +} + +static uint8_t *mplsL3vpnVrfTable(struct variable *v, oid name[], + size_t *length, int exact, size_t *var_len, + WriteMethod **write_method) +{ + char vrf_name[VRF_NAMSIZ]; + struct bgp *l3vpn_bgp; + + if (smux_header_table(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + memset(vrf_name, 0, VRF_NAMSIZ); + l3vpn_bgp = bgpL3vpnVrf_lookup(v, name, length, vrf_name, exact); + + if (!l3vpn_bgp) + return NULL; + + switch (v->magic) { + case MPLSL3VPNVRFVPNID: + *var_len = 0; + return NULL; + case MPLSL3VPNVRFDESC: + *var_len = strnlen(l3vpn_bgp->name, VRF_NAMSIZ); + return (uint8_t *)l3vpn_bgp->name; + case MPLSL3VPNVRFRD: + /* + * this is a horror show but the MIB dicates one RD per vrf + * and not one RD per AFI as we (FRR) have. 
So this little gem + * returns the V4 one if it's set OR the v6 one if it's set or + * zero-length string id neither are set + */ + memset(rd_buf, 0, RD_ADDRSTRLEN); + if (CHECK_FLAG(l3vpn_bgp->vpn_policy[AFI_IP].flags, + BGP_VPN_POLICY_TOVPN_RD_SET)) + prefix_rd2str(&l3vpn_bgp->vpn_policy[AFI_IP].tovpn_rd, + rd_buf, sizeof(rd_buf)); + else if (CHECK_FLAG(l3vpn_bgp->vpn_policy[AFI_IP6].flags, + BGP_VPN_POLICY_TOVPN_RD_SET)) + prefix_rd2str(&l3vpn_bgp->vpn_policy[AFI_IP6].tovpn_rd, + rd_buf, sizeof(rd_buf)); + + *var_len = strnlen(rd_buf, RD_ADDRSTRLEN); + return (uint8_t *)rd_buf; + case MPLSL3VPNVRFCREATIONTIME: + return SNMP_INTEGER( + (uint32_t)l3vpn_bgp->snmp_stats->creation_time); + case MPLSL3VPNVRFOPERSTATUS: + if (l3vpn_bgp->snmp_stats->active) + return SNMP_INTEGER(1); + else + return SNMP_INTEGER(2); + case MPLSL3VPNVRFACTIVEINTERFACES: + return SNMP_INTEGER(bgp_vrf_interfaces(l3vpn_bgp, true)); + case MPLSL3VPNVRFASSOCIATEDINTERFACES: + return SNMP_INTEGER(bgp_vrf_interfaces(l3vpn_bgp, false)); + case MPLSL3VPNVRFCONFMIDRTETHRESH: + return SNMP_INTEGER(0); + case MPLSL3VPNVRFCONFHIGHRTETHRSH: + return SNMP_INTEGER(0); + case MPLSL3VPNVRFCONFMAXROUTES: + return SNMP_INTEGER(0); + case MPLSL3VPNVRFCONFLASTCHANGED: + return SNMP_INTEGER( + (uint32_t)l3vpn_bgp->snmp_stats->modify_time); + case MPLSL3VPNVRFCONFROWSTATUS: + return SNMP_INTEGER(1); + case MPLSL3VPNVRFCONFADMINSTATUS: + return SNMP_INTEGER(1); + case MPLSL3VPNVRFCONFSTORAGETYPE: + return SNMP_INTEGER(2); + } + return NULL; +} + +/* 1.3.6.1.2.1.10.166.11.1.2.3.1.x = 14*/ +#define VRFRTTAB_NAMELEN 14 +static struct bgp *bgpL3vpnVrfRt_lookup(struct variable *v, oid name[], + size_t *length, char *vrf_name, + uint32_t *rt_index, uint8_t *rt_type, + int exact) +{ + uint32_t type_index_size; + struct bgp *l3vpn_bgp; + size_t namelen = v ? v->namelen : VRFRTTAB_NAMELEN; + int vrf_name_len, len; + + /* too long ? */ + if (*length - namelen + > (VRF_NAMSIZ + sizeof(uint32_t)) + sizeof(uint8_t)) + return NULL; + + type_index_size = sizeof(uint32_t) + sizeof(uint8_t); + /* do we have index info in the oid ? 
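In this table the index is the VRF name (again one character per sub-identifier), then a four-sub-identifier route-target index that this implementation keys by address family (AFI_IP for the IPv4 RT list, AFI_IP6 for IPv6), and finally one sub-identifier for the RT type using the MPLSVPNVRFRTTYPEIMPORT/EXPORT/BOTH values from bgp_mplsvpn_snmp.h; BOTH is reported when both the import and export lists are configured and compare equal.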
*/ + if (*length - namelen != 0 && *length - namelen >= type_index_size) { + /* copy the info from the oid */ + vrf_name_len = *length - (namelen + type_index_size); + oid2string(name + namelen, vrf_name_len, vrf_name); + oid2int(name + namelen + vrf_name_len, (int *)rt_index); + *rt_type = name[namelen + vrf_name_len + sizeof(uint32_t)]; + } + + if (exact) { + l3vpn_bgp = bgp_lookup_by_name(vrf_name); + if (l3vpn_bgp && !is_bgp_vrf_mplsvpn(l3vpn_bgp)) + return NULL; + if (!l3vpn_bgp) + return NULL; + /* check the index and type match up */ + if ((*rt_index != AFI_IP) || (*rt_index != AFI_IP6)) + return NULL; + /* do we have RT config */ + if (!(l3vpn_bgp->vpn_policy[*rt_index] + .rtlist[BGP_VPN_POLICY_DIR_FROMVPN] + || l3vpn_bgp->vpn_policy[*rt_index] + .rtlist[BGP_VPN_POLICY_DIR_TOVPN])) + return NULL; + return l3vpn_bgp; + } + if (strnlen(vrf_name, VRF_NAMSIZ) == 0) + l3vpn_bgp = bgp_lookup_by_name_next(vrf_name); + else + l3vpn_bgp = bgp_lookup_by_name(vrf_name); + while (l3vpn_bgp) { + switch (*rt_index) { + case 0: + *rt_index = AFI_IP; + break; + case AFI_IP: + *rt_index = AFI_IP6; + break; + case AFI_IP6: + *rt_index = 0; + continue; + } + if (*rt_index) { + switch (*rt_type) { + case 0: + *rt_type = MPLSVPNVRFRTTYPEIMPORT; + break; + case MPLSVPNVRFRTTYPEIMPORT: + *rt_type = MPLSVPNVRFRTTYPEEXPORT; + break; + case MPLSVPNVRFRTTYPEEXPORT: + case MPLSVPNVRFRTTYPEBOTH: + *rt_type = 0; + break; + } + if (*rt_type) { + bool import, export; + + import = + (!!l3vpn_bgp->vpn_policy[*rt_index].rtlist + [BGP_VPN_POLICY_DIR_FROMVPN]); + export = + (!!l3vpn_bgp->vpn_policy[*rt_index].rtlist + [BGP_VPN_POLICY_DIR_TOVPN]); + if (*rt_type == MPLSVPNVRFRTTYPEIMPORT + && !import) + continue; + if (*rt_type == MPLSVPNVRFRTTYPEEXPORT + && !export) + continue; + /* ckeck for both */ + if (*rt_type == MPLSVPNVRFRTTYPEIMPORT && import + && export + && ecommunity_cmp( + l3vpn_bgp->vpn_policy[*rt_index].rtlist + [BGP_VPN_POLICY_DIR_FROMVPN], + l3vpn_bgp->vpn_policy[*rt_index].rtlist + [BGP_VPN_POLICY_DIR_TOVPN])) + *rt_type = MPLSVPNVRFRTTYPEBOTH; + + /* we have a match copy the oid info */ + vrf_name_len = + strnlen(l3vpn_bgp->name, VRF_NAMSIZ); + len = vrf_name_len + sizeof(uint32_t) + + sizeof(uint8_t); + oid_copy_str(name + namelen, l3vpn_bgp->name, + vrf_name_len); + oid_copy_int(name + namelen + vrf_name_len, + (int *)rt_index); + name[(namelen + len) - 1] = *rt_type; + *length = len + namelen; + return l3vpn_bgp; + } + l3vpn_bgp = bgp_lookup_by_name_next(l3vpn_bgp->name); + } + } + return NULL; +} + +static const char *rt_type2str(uint8_t rt_type) +{ + switch (rt_type) { + case MPLSVPNVRFRTTYPEIMPORT: + return "import"; + case MPLSVPNVRFRTTYPEEXPORT: + return "export"; + case MPLSVPNVRFRTTYPEBOTH: + return "both"; + default: + return "unknown"; + } +} +static uint8_t *mplsL3vpnVrfRtTable(struct variable *v, oid name[], + size_t *length, int exact, size_t *var_len, + WriteMethod **write_method) +{ + char vrf_name[VRF_NAMSIZ]; + struct bgp *l3vpn_bgp; + uint32_t rt_index = 0; + uint8_t rt_type = 0; + char *rt_b; + + if (smux_header_table(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + memset(vrf_name, 0, VRF_NAMSIZ); + l3vpn_bgp = bgpL3vpnVrfRt_lookup(v, name, length, vrf_name, &rt_index, + &rt_type, exact); + + if (!l3vpn_bgp) + return NULL; + + switch (v->magic) { + case MPLSL3VPNVRFRT: + switch (rt_type) { + case MPLSVPNVRFRTTYPEIMPORT: + rt_b = ecommunity_ecom2str( + l3vpn_bgp->vpn_policy[rt_index] + .rtlist[BGP_VPN_POLICY_DIR_FROMVPN], + 
ECOMMUNITY_FORMAT_ROUTE_MAP, + ECOMMUNITY_ROUTE_TARGET); + break; + case MPLSVPNVRFRTTYPEEXPORT: + case MPLSVPNVRFRTTYPEBOTH: + rt_b = ecommunity_ecom2str( + l3vpn_bgp->vpn_policy[rt_index] + .rtlist[BGP_VPN_POLICY_DIR_TOVPN], + ECOMMUNITY_FORMAT_ROUTE_MAP, + ECOMMUNITY_ROUTE_TARGET); + break; + default: + rt_b = NULL; + break; + } + if (rt_b) + *var_len = strnlen(rt_b, ECOMMUNITY_STRLEN); + else + *var_len = 0; + return (uint8_t *)rt_b; + case MPLSL3VPNVRFRTDESCR: + /* since we dont have a description generate one */ + memset(rt_description, 0, VRF_NAMSIZ + RT_PREAMBLE_SIZE); + snprintf(rt_description, VRF_NAMSIZ + RT_PREAMBLE_SIZE, + "RT %s for VRF %s", rt_type2str(rt_type), + l3vpn_bgp->name); + *var_len = + strnlen(rt_description, VRF_NAMSIZ + RT_PREAMBLE_SIZE); + return (uint8_t *)rt_description; + case MPLSL3VPNVRFRTROWSTATUS: + return SNMP_INTEGER(1); + case MPLSL3VPNVRFRTSTORAGETYPE: + return SNMP_INTEGER(2); + } + return NULL; +} + +/* 1.3.6.1.2.1.10.166.11.1.3.1.1.x = 14*/ +#define PERFTAB_NAMELEN 14 + +static uint8_t *mplsL3vpnPerfTable(struct variable *v, oid name[], + size_t *length, int exact, size_t *var_len, + WriteMethod **write_method) +{ + char vrf_name[VRF_NAMSIZ]; + struct bgp *l3vpn_bgp; + + if (smux_header_table(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + memset(vrf_name, 0, VRF_NAMSIZ); + l3vpn_bgp = bgpL3vpnVrf_lookup(v, name, length, vrf_name, exact); + + if (!l3vpn_bgp) + return NULL; + + switch (v->magic) { + case MPLSL3VPNVRFPERFROUTESADDED: + return SNMP_INTEGER(l3vpn_bgp->snmp_stats->routes_added); + case MPLSL3VPNVRFPERFROUTESDELETED: + return SNMP_INTEGER(l3vpn_bgp->snmp_stats->routes_deleted); + case MPLSL3VPNVRFPERFCURRNUMROUTES: + return SNMP_INTEGER(bgp_mpls_l3vpn_current_routes(l3vpn_bgp)); + } + return NULL; +} + +static struct bgp_path_info * +bgp_lookup_route(struct bgp *l3vpn_bgp, struct bgp_dest **dest, + struct prefix *prefix, uint16_t policy, struct ipaddr *nexthop) +{ + struct bgp_path_info *pi = NULL; + struct bgp_table *table; + + switch (prefix->family) { + case AF_INET: + table = l3vpn_bgp->rib[AFI_IP][SAFI_UNICAST]; + break; + case AF_INET6: + table = l3vpn_bgp->rib[AFI_IP6][SAFI_UNICAST]; + break; + default: + return NULL; + } + + /*get the prefix */ + *dest = bgp_node_lookup(table, prefix); + if (*dest == NULL) + return NULL; + + /* now find the right path */ + pi = bgp_dest_get_bgp_path_info(*dest); + for (; pi; pi = pi->next) { + switch (nexthop->ipa_type) { + case IPADDR_V4: + if (nexthop->ip._v4_addr.s_addr + == pi->attr->nexthop.s_addr) + return pi; + break; + case IPADDR_V6: + if (memcmp(&nexthop->ip._v6_addr, + &pi->attr->mp_nexthop_global, + sizeof(struct in6_addr)) + == 0) + return pi; + break; + default: + return pi; + } + } + return NULL; +} + +static struct bgp_path_info *bgp_lookup_route_next(struct bgp **l3vpn_bgp, + struct bgp_dest **dest, + struct prefix *prefix, + uint16_t *policy, + struct ipaddr *nexthop) +{ + struct bgp_path_info *pi = NULL; + struct bgp_table *table; + const struct prefix *p; + uint8_t family; + + /* First route?*/ + if (prefix->prefixlen == 0) { + /* try V4 table */ + table = (*l3vpn_bgp)->rib[AFI_IP][SAFI_UNICAST]; + for (*dest = bgp_table_top(table); *dest; + *dest = bgp_route_next(*dest)) { + pi = bgp_dest_get_bgp_path_info(*dest); + if (pi) + break; + } + + if (!pi) { + /* try V6 table */ + table = (*l3vpn_bgp)->rib[AFI_IP6][SAFI_UNICAST]; + for (*dest = bgp_table_top(table); *dest; + *dest = bgp_route_next(*dest)) { + pi = 
bgp_dest_get_bgp_path_info(*dest); + if (pi) + break; + } + } + return pi; + } + /* real next search for the entry first use exact lookup */ + pi = bgp_lookup_route(*l3vpn_bgp, dest, prefix, *policy, nexthop); + + if (pi == NULL) + return NULL; + + p = bgp_dest_get_prefix(*dest); + family = p->family; + + /* We have found the input path let's find the next one in the list */ + if (pi->next) { + /* ensure OID is always higher for multipath routes by + * incrementing opaque policy oid + */ + *policy += 1; + return pi->next; + } + + /* No more paths in the input route so find the next route */ + for (; *l3vpn_bgp; + *l3vpn_bgp = bgp_lookup_by_name_next((*l3vpn_bgp)->name)) { + *policy = 0; + if (!*dest) { + table = (*l3vpn_bgp)->rib[AFI_IP][SAFI_UNICAST]; + *dest = bgp_table_top(table); + family = AF_INET; + } else + *dest = bgp_route_next(*dest); + + while (true) { + for (; *dest; *dest = bgp_route_next(*dest)) { + pi = bgp_dest_get_bgp_path_info(*dest); + + if (pi) + return pi; + } + if (family == AF_INET) { + table = (*l3vpn_bgp) + ->rib[AFI_IP6][SAFI_UNICAST]; + *dest = bgp_table_top(table); + family = AF_INET6; + continue; + } + break; + } + } + + return NULL; +} + +static bool is_addr_type(oid id) +{ + switch (id) { + case INETADDRESSTYPEUNKNOWN: + case INETADDRESSTYPEIPV4: + case INETADDRESSTYPEIPV6: + return true; + } + return false; +} + +/* 1.3.6.1.2.1.10.166.11.1.4.1.1.x = 14*/ +#define PERFTAB_NAMELEN 14 + +static struct bgp_path_info *bgpL3vpnRte_lookup(struct variable *v, oid name[], + size_t *length, char *vrf_name, + struct bgp **l3vpn_bgp, + struct bgp_dest **dest, + uint16_t *policy, int exact) +{ + uint8_t i; + uint8_t vrf_name_len = 0; + struct bgp_path_info *pi = NULL; + size_t namelen = v ? v->namelen : IFCONFTAB_NAMELEN; + struct prefix prefix = {0}; + struct ipaddr nexthop = {0}; + uint8_t prefix_type; + uint8_t nexthop_type; + + if ((uint32_t)(*length - namelen) > (VRF_NAMSIZ + 37)) + return NULL; + + if (*length - namelen != 0) { + /* parse incoming OID */ + for (i = namelen; i < (*length); i++) { + if (is_addr_type(name[i])) + break; + vrf_name_len++; + } + if (vrf_name_len > VRF_NAMSIZ) + return NULL; + + oid2string(name + namelen, vrf_name_len, vrf_name); + prefix_type = name[i++]; + switch (prefix_type) { + case INETADDRESSTYPEUNKNOWN: + prefix.family = AF_UNSPEC; + break; + case INETADDRESSTYPEIPV4: + prefix.family = AF_INET; + oid2in_addr(&name[i], sizeof(struct in_addr), + &prefix.u.prefix4); + i += sizeof(struct in_addr); + break; + case INETADDRESSTYPEIPV6: + prefix.family = AF_INET6; + oid2in_addr(&name[i], sizeof(struct in6_addr), + &prefix.u.prefix4); /* sic */ + i += sizeof(struct in6_addr); + break; + } + prefix.prefixlen = (uint8_t)name[i++]; + *policy |= name[i++] << 8; + *policy |= name[i++]; + nexthop_type = name[i++]; + switch (nexthop_type) { + case INETADDRESSTYPEUNKNOWN: + nexthop.ipa_type = (prefix.family == AF_INET) + ? 
IPADDR_V4 + : IPADDR_V6; + break; + case INETADDRESSTYPEIPV4: + nexthop.ipa_type = IPADDR_V4; + oid2in_addr(&name[i], sizeof(struct in_addr), + &nexthop.ip._v4_addr); + /* i += sizeof(struct in_addr); */ + break; + case INETADDRESSTYPEIPV6: + nexthop.ipa_type = IPADDR_V6; + oid2in_addr(&name[i], sizeof(struct in6_addr), + &nexthop.ip._v4_addr); /* sic */ + /* i += sizeof(struct in6_addr); */ + break; + } + } + + if (exact) { + *l3vpn_bgp = bgp_lookup_by_name(vrf_name); + if (*l3vpn_bgp && !is_bgp_vrf_mplsvpn(*l3vpn_bgp)) + return NULL; + if (*l3vpn_bgp == NULL) + return NULL; + + /* now lookup the route in this bgp table */ + pi = bgp_lookup_route(*l3vpn_bgp, dest, &prefix, *policy, + &nexthop); + } else { + int str_len; + + str_len = strnlen(vrf_name, VRF_NAMSIZ); + if (str_len == 0) { + *l3vpn_bgp = bgp_lookup_by_name_next(vrf_name); + } else + /* otherwise lookup the one we have */ + *l3vpn_bgp = bgp_lookup_by_name(vrf_name); + + if (l3vpn_bgp == NULL) + return NULL; + + pi = bgp_lookup_route_next(l3vpn_bgp, dest, &prefix, policy, + &nexthop); + if (pi) { + uint8_t vrf_name_len = + strnlen((*l3vpn_bgp)->name, VRF_NAMSIZ); + const struct prefix *p = bgp_dest_get_prefix(*dest); + uint8_t oid_index; + bool v4 = (p->family == AF_INET); + uint8_t addr_len = v4 ? sizeof(struct in_addr) + : sizeof(struct in6_addr); + struct attr *attr = pi->attr; + + /* copy the index parameters */ + oid_copy_str(&name[namelen], (*l3vpn_bgp)->name, + vrf_name_len); + oid_index = namelen + vrf_name_len; + name[oid_index++] = + v4 ? INETADDRESSTYPEIPV4 : INETADDRESSTYPEIPV6; + oid_copy_addr(&name[oid_index], &p->u.prefix4, + addr_len); + oid_index += addr_len; + name[oid_index++] = p->prefixlen; + name[oid_index++] = *policy >> 8; + name[oid_index++] = *policy & 0xff; + + if (!BGP_ATTR_NEXTHOP_AFI_IP6(attr)) { + if (attr->nexthop.s_addr == INADDR_ANY) + name[oid_index++] = + INETADDRESSTYPEUNKNOWN; + else { + name[oid_index++] = INETADDRESSTYPEIPV4; + oid_copy_addr(&name[oid_index], + &attr->nexthop, + sizeof(struct in_addr)); + oid_index += sizeof(struct in_addr); + } + } else { + if (IN6_IS_ADDR_UNSPECIFIED( + &attr->mp_nexthop_global)) + name[oid_index++] = + INETADDRESSTYPEUNKNOWN; + else { + name[oid_index++] = INETADDRESSTYPEIPV6; + oid_copy_addr( + &name[oid_index], + (struct in_addr *)&attr + ->mp_nexthop_global, + sizeof(struct in6_addr)); + oid_index += sizeof(struct in6_addr); + } + } + *length = oid_index; + } + } + return pi; +} + +static uint8_t *mplsL3vpnRteTable(struct variable *v, oid name[], + size_t *length, int exact, size_t *var_len, + WriteMethod **write_method) +{ + char vrf_name[VRF_NAMSIZ]; + struct bgp *l3vpn_bgp; + struct bgp_dest *dest; + struct bgp_path_info *pi; + const struct prefix *p; + uint16_t policy = 0; + + if (smux_header_table(v, name, length, exact, var_len, write_method) + == MATCH_FAILED) + return NULL; + + memset(vrf_name, 0, VRF_NAMSIZ); + pi = bgpL3vpnRte_lookup(v, name, length, vrf_name, &l3vpn_bgp, &dest, + &policy, exact); + + + if (!pi) + return NULL; + + p = bgp_dest_get_prefix(dest); + + if (!p) + return NULL; + + switch (v->magic) { + case MPLSL3VPNVRFRTEINETCIDRDESTTYPE: + switch (p->family) { + case AF_INET: + return SNMP_INTEGER(INETADDRESSTYPEIPV4); + case AF_INET6: + return SNMP_INTEGER(INETADDRESSTYPEIPV6); + default: + return SNMP_INTEGER(INETADDRESSTYPEUNKNOWN); + } + case MPLSL3VPNVRFRTEINETCIDRDEST: + switch (p->family) { + case AF_INET: + return SNMP_IPADDRESS(p->u.prefix4); + case AF_INET6: + return SNMP_IP6ADDRESS(p->u.prefix6); + default: + 
*var_len = 0; + return NULL; + } + case MPLSL3VPNVRFRTEINETCIDRPFXLEN: + return SNMP_INTEGER(p->prefixlen); + case MPLSL3VPNVRFRTEINETCIDRPOLICY: + *var_len = sizeof(mpls_l3vpn_policy_oid); + mpls_l3vpn_policy_oid[0] = policy >> 8; + mpls_l3vpn_policy_oid[1] = policy & 0xff; + return (uint8_t *)mpls_l3vpn_policy_oid; + case MPLSL3VPNVRFRTEINETCIDRNHOPTYPE: + if (!BGP_ATTR_NEXTHOP_AFI_IP6(pi->attr)) { + if (pi->attr->nexthop.s_addr == INADDR_ANY) + return SNMP_INTEGER(INETADDRESSTYPEUNKNOWN); + else + return SNMP_INTEGER(INETADDRESSTYPEIPV4); + } else if (IN6_IS_ADDR_UNSPECIFIED( + &pi->attr->mp_nexthop_global)) + return SNMP_INTEGER(INETADDRESSTYPEUNKNOWN); + else + return SNMP_INTEGER(INETADDRESSTYPEIPV6); + + case MPLSL3VPNVRFRTEINETCIDRNEXTHOP: + if (!BGP_ATTR_NEXTHOP_AFI_IP6(pi->attr)) + if (pi->attr->nexthop.s_addr == INADDR_ANY) { + *var_len = 0; + return (uint8_t *)empty_nhop; + } else + return SNMP_IPADDRESS(pi->attr->nexthop); + else if (IN6_IS_ADDR_UNSPECIFIED( + &pi->attr->mp_nexthop_global)) { + *var_len = 0; + return (uint8_t *)empty_nhop; + } else + return SNMP_IP6ADDRESS(pi->attr->mp_nexthop_global); + + case MPLSL3VPNVRFRTEINETCIDRIFINDEX: + if (pi->nexthop && pi->nexthop->nexthop) + return SNMP_INTEGER(pi->nexthop->nexthop->ifindex); + else + return SNMP_INTEGER(0); + case MPLSL3VPNVRFRTEINETCIDRTYPE: + if (pi->nexthop && pi->nexthop->nexthop) { + switch (pi->nexthop->nexthop->type) { + case NEXTHOP_TYPE_IFINDEX: + return SNMP_INTEGER( + MPLSL3VPNVRFRTECIDRTYPELOCAL); + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + return SNMP_INTEGER( + MPLSL3VPNVRFRTECIDRTYPEREMOTE); + case NEXTHOP_TYPE_BLACKHOLE: + switch (pi->nexthop->nexthop->bh_type) { + case BLACKHOLE_REJECT: + return SNMP_INTEGER( + MPLSL3VPNVRFRTECIDRTYPEREJECT); + default: + return SNMP_INTEGER( + MPLSL3VPNVRFRTECIDRTYPEBLACKHOLE); + } + default: + return SNMP_INTEGER( + MPLSL3VPNVRFRTECIDRTYPEOTHER); + } + } else + return SNMP_INTEGER(MPLSL3VPNVRFRTECIDRTYPEOTHER); + case MPLSL3VPNVRFRTEINETCIDRPROTO: + switch (pi->type) { + case ZEBRA_ROUTE_CONNECT: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLLOCAL); + case ZEBRA_ROUTE_STATIC: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLNETMGMT); + case ZEBRA_ROUTE_RIP: + case ZEBRA_ROUTE_RIPNG: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLRIP); + case ZEBRA_ROUTE_OSPF: + case ZEBRA_ROUTE_OSPF6: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLOSPF); + case ZEBRA_ROUTE_ISIS: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLISIS); + case ZEBRA_ROUTE_BGP: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLBGP); + case ZEBRA_ROUTE_EIGRP: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLCISCOEIGRP); + default: + return SNMP_INTEGER(IANAIPROUTEPROTOCOLOTHER); + } + case MPLSL3VPNVRFRTEINETCIDRAGE: + return SNMP_INTEGER(pi->uptime); + case MPLSL3VPNVRFRTEINETCIDRNEXTHOPAS: + return SNMP_INTEGER(pi->peer ? 
pi->peer->as : 0); + case MPLSL3VPNVRFRTEINETCIDRMETRIC1: + if (pi->extra) + return SNMP_INTEGER(pi->extra->igpmetric); + else + return SNMP_INTEGER(0); + case MPLSL3VPNVRFRTEINETCIDRMETRIC2: + return SNMP_INTEGER(-1); + case MPLSL3VPNVRFRTEINETCIDRMETRIC3: + return SNMP_INTEGER(-1); + case MPLSL3VPNVRFRTEINETCIDRMETRIC4: + return SNMP_INTEGER(-1); + case MPLSL3VPNVRFRTEINETCIDRMETRIC5: + return SNMP_INTEGER(-1); + case MPLSL3VPNVRFRTEINETCIDRXCPOINTER: + return SNMP_OCTET(0); + case MPLSL3VPNVRFRTEINETCIDRSTATUS: + return SNMP_INTEGER(1); + } + return NULL; +} + +void bgp_mpls_l3vpn_module_init(void) +{ + hook_register(bgp_vrf_status_changed, bgp_vrf_check_update_active); + hook_register(bgp_snmp_init_stats, bgp_init_snmp_stats); + hook_register(bgp_snmp_update_last_changed, + bgp_mpls_l3vpn_update_last_changed); + hook_register(bgp_snmp_update_stats, bgp_snmp_update_route_stats); + REGISTER_MIB("mplsL3VpnMIB", mpls_l3vpn_variables, variable, + mpls_l3vpn_oid); +} diff --git a/bgpd/bgp_mplsvpn_snmp.h b/bgpd/bgp_mplsvpn_snmp.h new file mode 100644 index 0000000000..781d5e98f6 --- /dev/null +++ b/bgpd/bgp_mplsvpn_snmp.h @@ -0,0 +1,31 @@ +/* MPLS/BGP L3VPN MIB + * Copyright (C) 2020 Volta Networks Inc + * + * This file is part of FRR. + * + * FRRouting is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRRouting is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +void bgp_mpls_l3vpn_module_init(void); + +#define MPLSL3VPNVRFRTECIDRTYPEOTHER 1 +#define MPLSL3VPNVRFRTECIDRTYPEREJECT 2 +#define MPLSL3VPNVRFRTECIDRTYPELOCAL 3 +#define MPLSL3VPNVRFRTECIDRTYPEREMOTE 4 +#define MPLSL3VPNVRFRTECIDRTYPEBLACKHOLE 5 + +#define MPLSVPNVRFRTTYPEIMPORT 1 +#define MPLSVPNVRFRTTYPEEXPORT 2 +#define MPLSVPNVRFRTTYPEBOTH 3 diff --git a/bgpd/bgp_nb.c b/bgpd/bgp_nb.c index f098332b29..f65a4be677 100644 --- a/bgpd/bgp_nb.c +++ b/bgpd/bgp_nb.c @@ -352,6 +352,13 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/global/suppress-duplicates", + .cbs = { + .cli_show = cli_show_router_bgp_suppress_duplicates, + .modify = bgp_global_suppress_duplicates_modify, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/global/show-hostname", .cbs = { .cli_show = cli_show_router_bgp_show_hostname, @@ -3085,6 +3092,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify, @@ -3317,6 +3394,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify, @@ -3549,6 +3696,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import", + .cbs = { + .modify = 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify, @@ -3781,6 +3998,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify, + .destroy = 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify, @@ -4013,6 +4300,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify, @@ -4205,6 +4562,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify, @@ -4397,6 +4824,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify, @@ -4465,6 +4962,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_modify, + .destroy = 
bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/route-reflector/route-reflector-client", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_reflector_route_reflector_client_modify, @@ -4483,6 +5050,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client", .cbs = { .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify, @@ -4501,6 +5138,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export", + 
.cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify, + .destroy = 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify, @@ -5041,6 +5748,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify, @@ -5272,6 +6049,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify, + .destroy = 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify, @@ -5503,6 +6350,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import", + .cbs = { + .modify = 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify, @@ -5735,6 +6652,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify, + .destroy = 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type", .cbs = { .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify, @@ -6437,6 +7424,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify, + .destroy = 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client", .cbs = { .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify, @@ -6455,6 +7512,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify, + .destroy = 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify, + .destroy = bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify, @@ -6995,6 +8122,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify, + .destroy = 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify, @@ -7233,6 +8430,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify, @@ -7471,6 +8738,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export", + .cbs = { + 
.modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify, @@ -7703,6 +9040,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import", + .cbs = { + .modify = 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify, @@ -7935,6 +9342,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify, @@ -8127,6 +9604,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify, @@ -8319,6 +9866,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy, + } + }, + { + 
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify, + .destroy = 
bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify, @@ -8405,6 +10022,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client", .cbs = { .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify, @@ -8423,6 +10110,76 @@ const struct frr_yang_module_info frr_bgp_info = { } }, { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export", + .cbs = { + .modify = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify, + .destroy = bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy, + } + }, + { .xpath = NULL, }, } diff --git a/bgpd/bgp_nb.h b/bgpd/bgp_nb.h index f608d4f8c1..eb7725d3dd 100644 --- a/bgpd/bgp_nb.h +++ b/bgpd/bgp_nb.h @@ -127,6 +127,7 @@ int bgp_global_fast_external_failover_modify(struct nb_cb_modify_args *args); int bgp_global_local_pref_modify(struct nb_cb_modify_args *args); int bgp_global_default_shutdown_modify(struct nb_cb_modify_args *args); int bgp_global_ebgp_requires_policy_modify(struct nb_cb_modify_args *args); +int 
bgp_global_suppress_duplicates_modify(struct nb_cb_modify_args *args); int bgp_global_show_hostname_modify(struct nb_cb_modify_args *args); int bgp_global_show_nexthop_hostname_modify(struct nb_cb_modify_args *args); int bgp_global_import_check_modify(struct nb_cb_modify_args *args); @@ -1328,6 +1329,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_soft_reconfiguration_ struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_modify( struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify( @@ -1430,6 +1471,46 @@ int 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attri struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_as_path_options_allow_own_as_modify( @@ -1530,6 +1611,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attri struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_as_path_options_allow_own_as_modify( @@ -1630,6 +1751,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_as_path_options_allow_own_as_modify( @@ -1730,6 +1891,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_as_path_options_allow_own_as_modify( @@ -1812,6 +2013,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_a struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_as_path_options_allow_own_as_modify( @@ -1894,6 +2135,46 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_a struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_destroy( @@ -1920,18 +2201,138 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_route_server_route_serv struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_reflector_route_reflector_client_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_server_route_server_client_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_server_route_server_client_modify( struct nb_cb_modify_args *args); int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify( + struct 
nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_as_path_options_allow_own_as_modify( @@ -2174,6 +2575,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_wei struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct 
nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_as_path_options_allow_own_as_modify( @@ -2274,6 +2715,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_w struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_as_path_options_allow_own_as_modify( @@ -2374,6 +2855,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_w struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_as_path_options_allow_own_as_modify( @@ -2474,6 +2995,46 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_we struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_as_path_options_allow_own_as_modify( @@ -2770,12 +3331,92 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_ser struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_server_route_server_client_modify( struct nb_cb_modify_args *args); int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); 
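(Aside: the prototypes above and below all follow one pattern — a _modify and a _destroy callback per per-AFI/SAFI filter-config leaf, each taking the generic northbound argument struct. The sketch below is illustrative only and is not the actual bgpd implementation; it shows the shape such a callback pair typically takes against lib/northbound.h. The example_* function names and the apply_neighbor_filter_rmap()/clear_neighbor_filter_rmap() helpers are hypothetical stand-ins for the real bgpd-side plumbing.)

#include "northbound.h"

/* Hypothetical helpers standing in for the real bgpd filter plumbing. */
static void apply_neighbor_filter_rmap(const struct lyd_node *dnode,
				       const char *rmap_name);
static void clear_neighbor_filter_rmap(const struct lyd_node *dnode);

/* Sketch of a _modify callback for a filter-config route-map leaf. */
int example_filter_config_rmap_import_modify(struct nb_cb_modify_args *args)
{
	switch (args->event) {
	case NB_EV_VALIDATE:
	case NB_EV_PREPARE:
	case NB_EV_ABORT:
		/* Nothing to apply in these transaction phases. */
		return NB_OK;
	case NB_EV_APPLY:
		/* The leaf value is the route-map name to attach. */
		apply_neighbor_filter_rmap(
			args->dnode,
			yang_dnode_get_string(args->dnode, NULL));
		return NB_OK;
	}

	return NB_OK;
}

/* The matching _destroy callback detaches the filter on NB_EV_APPLY. */
int example_filter_config_rmap_import_destroy(struct nb_cb_destroy_args *args)
{
	if (args->event != NB_EV_APPLY)
		return NB_OK;

	clear_neighbor_filter_rmap(args->dnode);
	return NB_OK;
}

(The _destroy variant mirrors _modify with struct nb_cb_destroy_args; every declaration in this hunk pairs the two in exactly this way.)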
+int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_as_path_options_allow_own_as_modify( @@ -3018,6 +3659,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_weight_weight_att struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_as_path_options_allow_own_as_modify( @@ -3118,6 +3799,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_weight_weight_a struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_as_path_options_allow_own_as_modify( @@ -3218,6 +3939,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_weight_weight_a struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_as_path_options_allow_own_as_modify( @@ -3318,6 +4079,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_weight_we struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_as_path_options_allow_own_as_modify( @@ -3418,6 +4219,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_weight_we struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_as_path_options_allow_own_as_modify( @@ -3500,6 +4341,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weig struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_as_path_options_allow_own_as_modify( @@ -3582,6 +4463,46 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weig struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_attribute_destroy( struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + 
struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_destroy( @@ -3614,12 +4535,92 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_route_server_rou struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_server_route_server_client_modify( struct nb_cb_modify_args *args); int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration_modify( struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args); +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args); /* * Callback registered with routing_nb lib to validate only @@ -3639,6 +4640,9 @@ void cli_show_router_bgp_route_selection(struct vty *vty, void 
cli_show_router_bgp_ebgp_requires_policy(struct vty *vty, struct lyd_node *dnode, bool show_defaults); +void cli_show_router_bgp_suppress_duplicates(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); void cli_show_router_bgp_default_shutdown(struct vty *vty, struct lyd_node *dnode, bool show_defaults); diff --git a/bgpd/bgp_nb_config.c b/bgpd/bgp_nb_config.c index 6ee8b25b82..f2443bd164 100644 --- a/bgpd/bgp_nb_config.c +++ b/bgpd/bgp_nb_config.c @@ -32,6 +32,8 @@ #include "bgpd/bgp_io.h" #include "bgpd/bgp_damp.h" +DEFINE_HOOK(bgp_snmp_init_stats, (struct bgp *bgp), (bgp)) + FRR_CFG_DEFAULT_ULONG(BGP_CONNECT_RETRY, { .val_ulong = 10, .match_profile = "datacenter", }, { .val_ulong = 120 }, @@ -894,6 +896,9 @@ int bgp_global_route_selection_options_allow_multiple_as_modify( "../multi-path-as-set")) { SET_FLAG(bgp->flags, BGP_FLAG_MULTIPATH_RELAX_AS_SET); + } else { + UNSET_FLAG(bgp->flags, + BGP_FLAG_MULTIPATH_RELAX_AS_SET); } } else { UNSET_FLAG(bgp->flags, BGP_FLAG_ASPATH_MULTIPATH_RELAX); @@ -923,15 +928,10 @@ int bgp_global_route_selection_options_multi_path_as_set_modify( return NB_OK; case NB_EV_APPLY: bgp = nb_running_get_entry(args->dnode, NULL, true); - - if (!CHECK_FLAG(bgp->flags, BGP_FLAG_MULTIPATH_RELAX_AS_SET)) { + if (yang_dnode_get_bool(args->dnode, NULL)) SET_FLAG(bgp->flags, BGP_FLAG_MULTIPATH_RELAX_AS_SET); - - } else - zlog_debug( - "%s multi-path-as-set as part of allow-multiple-as modify cb.", - __func__); - + else + UNSET_FLAG(bgp->flags, BGP_FLAG_MULTIPATH_RELAX_AS_SET); break; } @@ -1599,6 +1599,27 @@ int bgp_global_ebgp_requires_policy_modify(struct nb_cb_modify_args *args) /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/global/suppress-duplicates + */ +int bgp_global_suppress_duplicates_modify(struct nb_cb_modify_args *args) +{ + if (args->event != NB_EV_APPLY) + return NB_OK; + + struct bgp *bgp; + + bgp = nb_running_get_entry(args->dnode, NULL, true); + + if (yang_dnode_get_bool(args->dnode, NULL)) + SET_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_DUPLICATES); + else + UNSET_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_DUPLICATES); + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/global/show-hostname */ int bgp_global_show_hostname_modify(struct nb_cb_modify_args *args) @@ -6292,7 +6313,11 @@ static struct peer *bgp_peer_group_peer_lookup(struct bgp *bgp, struct peer_group *group = NULL; group = peer_group_lookup(bgp, peer_str); - return group->conf; + + if (group) + return group->conf; + + return NULL; } /* @@ -9531,7 +9556,7 @@ static int bgp_global_afi_safi_ip_unicast_vpn_config_rd_destroy( bgp = nb_running_get_entry(af_dnode, NULL, true); rd_str = yang_dnode_get_string(args->dnode, NULL); - if (str2prefix_rd(rd_str, &prd)) { + if (!str2prefix_rd(rd_str, &prd)) { snprintf(args->errmsg, args->errmsg_len, "Malformed rd %s \n", rd_str); return NB_ERR_INCONSISTENCY; @@ -9839,6 +9864,7 @@ static int bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify( UNSET_FLAG(bgp->af_flags[afi][safi], flag); } + hook_call(bgp_snmp_init_stats, bgp); return NB_OK; } @@ -15101,6 +15127,66 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_orf_capability_orf_bo return NB_OK; } +static int bgp_neighbor_afi_safi_rmap_modify(struct nb_cb_modify_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + 
safi_t safi; + const char *name_str; + struct route_map *route_map; + int ret; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address"); + peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + name_str = yang_dnode_get_string(args->dnode, NULL); + route_map = route_map_lookup_by_name(name_str); + ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map); + + return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret); +} + +static int bgp_neighbor_afi_safi_rmap_destroy(struct nb_cb_destroy_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + int ret; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address"); + peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + ret = peer_route_map_unset(peer, afi, safi, direct); + + return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret); +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/rmap-import @@ -15112,9 +15198,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_im case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); } return NB_OK; @@ -15127,9 +15213,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_im case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); } return NB_OK; @@ -15146,9 +15232,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_ex case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); } return NB_OK; @@ -15161,14 +15247,72 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_rmap_ex case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); } return NB_OK; } +static int bgp_neighbor_afi_safi_plist_modify(struct nb_cb_modify_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + const char *name_str; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address"); + peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + name_str = yang_dnode_get_string(args->dnode, NULL); + if (peer_prefix_list_set(peer, afi, safi, direct, name_str) < 0) + return NB_ERR_INCONSISTENCY; + + return NB_OK; +} + +static int bgp_neighbor_afi_safi_plist_destroy(struct nb_cb_destroy_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address"); + peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + if (peer_prefix_list_unset(peer, afi, safi, direct) < 0) + return NB_ERR_INCONSISTENCY; + + return NB_OK; +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/plist-import @@ -15180,9 +15324,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_i case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); } return NB_OK; @@ -15195,9 +15339,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_i case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); } return NB_OK; @@ -15214,9 +15358,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_e case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); } return NB_OK; @@ -15229,9 +15373,9 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_config_plist_e case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); } return NB_OK; @@ -16420,6 +16564,347 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_weight_attribu /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + 
case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type */ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify( @@ -17378,6 +17863,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_weight_attri /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy( + struct 
nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type */ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify( @@ -18336,6 +19161,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_weight_attri /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export + */ +int 
bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type */ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify( @@ -19294,6 +20459,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_weight_weight /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return 
bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type */ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify( @@ -20252,6 +21757,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_weight_weight /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type */ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify( @@ -21055,6 +22900,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weight_a /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type */ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify( @@ -21858,6 +24043,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weight_a /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + return NB_OK; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, 
FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as */ int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify( @@ -22133,6 +24658,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_soft_reconfiguration_mo /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + 
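All of the per-AFI/SAFI filter-config wrappers in this hunk share one shape: the VALIDATE/PREPARE/ABORT phases are no-ops and only NB_EV_APPLY does work, delegating to a shared neighbor-level helper with the direction (RMAP_IN/RMAP_OUT or FILTER_IN/FILTER_OUT) baked in. The body of bgp_neighbor_afi_safi_rmap_modify() itself is not visible in this hunk; the sketch below shows what it plausibly looks like, modeled on the bgp_unnumbered_neighbor_afi_safi_rmap_modify() helper added later in this diff. The bgp_neighbor_peer_lookup() call and the "neighbor"/"./remote-address" schema names are assumptions, not taken from this patch.

/*
 * Sketch only, not part of the patch: plausible body of the shared
 * neighbor-level route-map helper, mirroring the unnumbered-neighbor
 * variant added further down.  bgp_neighbor_peer_lookup() and the
 * "neighbor" / "./remote-address" node names are assumed.
 */
static int bgp_neighbor_afi_safi_rmap_modify(struct nb_cb_modify_args *args,
					     int direct)
{
	const struct lyd_node *nbr_af_dnode, *nbr_dnode;
	const char *af_name, *peer_str, *name_str;
	struct route_map *route_map;
	struct peer *peer;
	struct bgp *bgp;
	afi_t afi;
	safi_t safi;
	int ret;

	/* Resolve afi/safi from the enclosing afi-safi list entry. */
	nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi");
	af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name");
	yang_afi_safi_identity2value(af_name, &afi, &safi);

	/* Resolve the BGP instance and peer from the enclosing neighbor. */
	nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "neighbor");
	bgp = nb_running_get_entry(nbr_dnode, NULL, true);
	peer_str = yang_dnode_get_string(nbr_dnode, "./remote-address");
	peer = bgp_neighbor_peer_lookup(bgp, peer_str, args->errmsg,
					args->errmsg_len);

	/* Apply the route-map in the requested direction. */
	name_str = yang_dnode_get_string(args->dnode, NULL);
	route_map = route_map_lookup_by_name(name_str);
	ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map);

	return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret);
}

+int 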
bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_l2vpn_evpn_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/route-reflector/route-reflector-client */ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_route_reflector_route_reflector_client_modify( @@ -22202,6 +25067,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reconfiguration /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + 
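For the flowspec address families only the route-map and prefix-list leaves are wired up; the access-list, as-path filter-list, and unsuppress-map callbacks further below remain TODO stubs. When one of them is completed it will presumably reuse the same four-phase shape, as in the hypothetical completion sketched here. The helper bgp_neighbor_afi_safi_access_list_modify() does not exist in this patch; it is assumed to be a future helper that resolves the peer from the dnode the same way the prefix-list helper does and attaches the named access list via bgpd's existing distribute-list handling.

/*
 * Hypothetical completion of the access-list-import stub below, shown only
 * to illustrate the pattern.  bgp_neighbor_afi_safi_access_list_modify()
 * is assumed and is not part of this patch.
 */
int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify(
	struct nb_cb_modify_args *args)
{
	switch (args->event) {
	case NB_EV_VALIDATE:
	case NB_EV_PREPARE:
	case NB_EV_ABORT:
		break;
	case NB_EV_APPLY:
		/* FILTER_IN selects the inbound direction, as for plist-import. */
		return bgp_neighbor_afi_safi_access_list_modify(args, FILTER_IN);
	}

	return NB_OK;
}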
+int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client */ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify( @@ -22269,6 +25474,346 @@ int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reconfiguration return NB_OK; } +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_modify(args, FILTER_OUT); + } + + 
return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_neighbor_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export + */ +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + static int bgp_unnumbered_neighbor_afi_safi_flag_modify(struct nb_cb_modify_args *args, uint32_t flags, bool set) @@ -23419,6 +26964,68 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_orf_capabi return NB_OK; } +static int +bgp_unnumbered_neighbor_afi_safi_rmap_modify(struct nb_cb_modify_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + const char *name_str; + struct route_map *route_map; + int ret; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./interface"); + peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + name_str = yang_dnode_get_string(args->dnode, NULL); + route_map = route_map_lookup_by_name(name_str); + ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map); + + return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret); +} + +static int +bgp_unnumbered_neighbor_afi_safi_rmap_destroy(struct nb_cb_destroy_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + int ret; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./interface"); + peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + ret = peer_route_map_unset(peer, afi, safi, direct); + + return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret); +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/rmap-import @@ -23430,9 +27037,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); } return NB_OK; @@ -23445,9 +27053,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); } return NB_OK; @@ -23464,9 +27073,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); } return NB_OK; @@ -23479,14 +27089,74 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); } return NB_OK; } +static int +bgp_unnumbered_neighbor_afi_safi_plist_modify(struct nb_cb_modify_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + const char *name_str; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./interface"); + peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + name_str = yang_dnode_get_string(args->dnode, NULL); + if (peer_prefix_list_set(peer, afi, safi, direct, name_str) < 0) + return NB_ERR_INCONSISTENCY; + + return NB_OK; +} + +static int +bgp_unnumbered_neighbor_afi_safi_plist_destroy(struct nb_cb_destroy_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "unnumbered-neighbor"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./interface"); + peer = bgp_unnumbered_neighbor_peer_lookup(bgp, peer_str, args->errmsg, + args->errmsg_len); + + if (peer_prefix_list_unset(peer, afi, safi, direct) < 0) + return NB_ERR_INCONSISTENCY; + + return NB_OK; +} /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast/filter-config/plist-import @@ -23498,9 +27168,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); } return NB_OK; @@ -23513,9 +27184,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); } return NB_OK; @@ -23532,9 +27204,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); } return NB_OK; @@ -23547,9 +27220,10 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_unicast_filter_con case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); } return NB_OK; @@ -24739,6 +28413,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_weight_wei /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export + */ +int 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type */ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify( @@ -25698,6 +29720,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_weight_w /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch 
(args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type */ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify( @@ -26657,6 +31027,355 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_weight_w /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch 
(args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type */ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify( @@ -27616,6 +32335,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_we /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export + */ +int 
bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type */ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify( @@ -30527,6 +35594,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_soft_reco /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch 
(args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client */ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify( @@ -30594,6 +36009,354 @@ int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_soft_reco return NB_OK; } +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_modify(args, + RMAP_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_rmap_destroy(args, + RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify(args, + FILTER_IN); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify( + 
struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_modify( + args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_unnumbered_neighbor_afi_safi_plist_destroy( + args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export + */ +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_neighbors_unnumbered_neighbor_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + static int bgp_peer_group_afi_safi_flag_modify(struct nb_cb_modify_args *args, uint32_t flags, bool set) { @@ -31732,6 +37495,62 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_orf_capability_or return NB_OK; } +static int bgp_peer_group_afi_safi_rmap_modify(struct nb_cb_modify_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + const char *name_str; + struct route_map *route_map; + int ret; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name"); + peer = bgp_peer_group_peer_lookup(bgp, peer_str); + + name_str = yang_dnode_get_string(args->dnode, NULL); + route_map = route_map_lookup_by_name(name_str); + ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map); + + return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret); +} + +static int bgp_peer_group_afi_safi_rmap_destroy(struct nb_cb_destroy_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + int ret; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name"); + peer = bgp_peer_group_peer_lookup(bgp, peer_str); + + ret = peer_route_map_unset(peer, afi, safi, direct); + + return bgp_nb_errmsg_return(args->errmsg, args->errmsg_len, ret); +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast/filter-config/rmap-import @@ -31743,9 +37562,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); } return NB_OK; @@ -31758,9 +37577,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); } return NB_OK; @@ -31777,9 +37596,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); } return NB_OK; @@ -31792,14 +37611,69 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_rma case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); } return NB_OK; } +static int bgp_peer_group_afi_safi_plist_modify(struct nb_cb_modify_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + const char *name_str; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name"); + peer = bgp_peer_group_peer_lookup(bgp, peer_str); + + name_str = yang_dnode_get_string(args->dnode, NULL); + if (peer_prefix_list_set(peer, afi, safi, direct, name_str) < 0) + return NB_ERR_INCONSISTENCY; + + return NB_OK; +} + +static int +bgp_peer_group_afi_safi_plist_destroy(struct nb_cb_destroy_args *args, + int direct) +{ + struct bgp *bgp; + const char *peer_str; + struct peer *peer; + const struct lyd_node *nbr_dnode; + const struct lyd_node *nbr_af_dnode; + const char *af_name; + afi_t afi; + safi_t safi; + + nbr_af_dnode = yang_dnode_get_parent(args->dnode, "afi-safi"); + af_name = yang_dnode_get_string(nbr_af_dnode, "./afi-safi-name"); + yang_afi_safi_identity2value(af_name, &afi, &safi); + nbr_dnode = yang_dnode_get_parent(nbr_af_dnode, "peer-group"); + bgp = nb_running_get_entry(nbr_dnode, NULL, true); + peer_str = yang_dnode_get_string(nbr_dnode, "./peer-group-name"); + peer = bgp_peer_group_peer_lookup(bgp, peer_str); + + if (peer_prefix_list_unset(peer, afi, safi, direct) < 0) + return NB_ERR_INCONSISTENCY; + + return NB_OK; +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast/filter-config/plist-import @@ -31811,9 +37685,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); } return NB_OK; @@ -31826,9 +37700,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); } return NB_OK; @@ -31845,9 +37719,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. */ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); } return NB_OK; @@ -31860,9 +37734,9 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_unicast_filter_config_pli case NB_EV_VALIDATE: case NB_EV_PREPARE: case NB_EV_ABORT: - case NB_EV_APPLY: - /* TODO: implement me. 
*/ break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); } return NB_OK; @@ -33051,6 +38925,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_weight_weight_att /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case 
NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-unicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/add-paths/path-type */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_add_paths_path_type_modify( @@ -34009,6 +40223,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_weight_weight_a /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int 
bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/add-paths/path-type */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_add_paths_path_type_modify( @@ -34967,6 +41521,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_weight_weight_a /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * 
/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_multicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/add-paths/path-type */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_add_paths_path_type_modify( @@ -35925,6 +42819,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_weight_we /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + 
break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/add-paths/path-type */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_add_paths_path_type_modify( @@ -36883,6 +44117,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_weight_we /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + 
break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_labeled_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/add-paths/path-type */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_add_paths_path_type_modify( @@ -37686,6 +45260,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_weight_weig /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: 
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv4_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/add-paths/path-type */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_add_paths_path_type_modify( @@ -38489,6 +46403,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_weight_weig /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: 
+ return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_l3vpn_ipv6_unicast_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn/as-path-options/allow-own-as */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_l2vpn_evpn_as_path_options_allow_own_as_modify( @@ -38833,6 +47087,346 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_soft_reconfigura /* * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, 
FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-flowspec/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv4_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +/* + * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/route-reflector/route-reflector-client */ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_route_reflector_route_reflector_client_modify( @@ -38899,3 +47493,343 @@ int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_soft_reconfigura return NB_OK; } + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/rmap-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_modify(args, RMAP_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_rmap_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_IN); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_modify(args, FILTER_IN); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_rmap_destroy(args, RMAP_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/plist-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + 
return bgp_peer_group_afi_safi_plist_modify(args, FILTER_OUT); + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_plist_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return bgp_peer_group_afi_safi_plist_destroy(args, FILTER_OUT); + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/access-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_access_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/as-path-filter-list-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. 
*/ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_as_path_filter_list_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-import + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_import_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-bgp:bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec/filter-config/unsuppress-map-export + */ +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_modify( + struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} + +int bgp_peer_groups_peer_group_afi_safis_afi_safi_ipv6_flowspec_filter_config_unsuppress_map_export_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + /* TODO: implement me. */ + break; + } + + return NB_OK; +} diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index 1a9f59db64..b7f62ec0a1 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -33,6 +33,7 @@ #include "nexthop.h" #include "queue.h" #include "filter.h" +#include "printfrr.h" #include "bgpd/bgpd.h" #include "bgpd/bgp_route.h" @@ -1020,3 +1021,50 @@ void bgp_scan_finish(struct bgp *bgp) bgp->connected_table[afi] = NULL; } } + +char *bgp_nexthop_dump_bnc_flags(struct bgp_nexthop_cache *bnc, char *buf, + size_t len) +{ + if (bnc->flags == 0) { + snprintfrr(buf, len, "None "); + return buf; + } + + snprintfrr(buf, len, "%s%s%s%s%s%s%s", + CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID) ? "Valid " : "", + CHECK_FLAG(bnc->flags, BGP_NEXTHOP_REGISTERED) ? "Reg " : "", + CHECK_FLAG(bnc->flags, BGP_NEXTHOP_CONNECTED) ? "Conn " : "", + CHECK_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED) ? "Notify " + : "", + CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE) ? "Static " : "", + CHECK_FLAG(bnc->flags, BGP_STATIC_ROUTE_EXACT_MATCH) + ? "Static Exact " + : "", + CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID) + ? "Label Valid " + : ""); + + return buf; +} + +char *bgp_nexthop_dump_bnc_change_flags(struct bgp_nexthop_cache *bnc, + char *buf, size_t len) +{ + if (bnc->flags == 0) { + snprintfrr(buf, len, "None "); + return buf; + } + + snprintfrr(buf, len, "%s%s%s", + CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CHANGED) + ? "Changed " + : "", + CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_METRIC_CHANGED) + ? 
"Metric " + : "", + CHECK_FLAG(bnc->change_flags, BGP_NEXTHOP_CONNECTED_CHANGED) + ? "Connected " + : ""); + + return buf; +} diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h index c4b913faf4..a223ff4133 100644 --- a/bgpd/bgp_nexthop.h +++ b/bgpd/bgp_nexthop.h @@ -100,6 +100,11 @@ struct update_subgroup; struct bgp_dest; struct attr; +#define BNC_FLAG_DUMP_SIZE 180 +extern char *bgp_nexthop_dump_bnc_flags(struct bgp_nexthop_cache *bnc, + char *buf, size_t len); +extern char *bgp_nexthop_dump_bnc_change_flags(struct bgp_nexthop_cache *bnc, + char *buf, size_t len); extern void bgp_connected_add(struct bgp *bgp, struct connected *c); extern void bgp_connected_delete(struct bgp *bgp, struct connected *c); extern bool bgp_subgrp_multiaccess_check_v4(struct in_addr nexthop, diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index 29ab3d9c6c..bc5da0ee21 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -121,7 +121,7 @@ void bgp_unlink_nexthop_by_peer(struct peer *peer) * we need both the bgp_route and bgp_nexthop pointers. */ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, - afi_t afi, struct bgp_path_info *pi, + afi_t afi, safi_t safi, struct bgp_path_info *pi, struct peer *peer, int connected) { struct bgp_nexthop_cache_head *tree = NULL; @@ -257,7 +257,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, */ if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW) return 1; - else + else if (safi == SAFI_UNICAST && pi + && pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra + && pi->extra->num_labels) { + return bgp_isvalid_labeled_nexthop(bnc); + } else return (bgp_isvalid_nexthop(bnc)); } @@ -316,12 +320,17 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc, bnc->change_flags = 0; /* debug print the input */ - if (BGP_DEBUG(nht, NHT)) + if (BGP_DEBUG(nht, NHT)) { + char bnc_buf[BNC_FLAG_DUMP_SIZE]; + zlog_debug( - "%s(%u): Rcvd NH update %pFX(%u) - metric %d/%d #nhops %d/%d flags 0x%x", + "%s(%u): Rcvd NH update %pFX(%u) - metric %d/%d #nhops %d/%d flags %s", bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix, bnc->srte_color, nhr->metric, bnc->metric, - nhr->nexthop_num, bnc->nexthop_num, bnc->flags); + nhr->nexthop_num, bnc->nexthop_num, + bgp_nexthop_dump_bnc_flags(bnc, bnc_buf, + sizeof(bnc_buf))); + } if (nhr->metric != bnc->metric) bnc->change_flags |= BGP_NEXTHOP_METRIC_CHANGED; @@ -698,11 +707,17 @@ static void evaluate_paths(struct bgp_nexthop_cache *bnc) if (BGP_DEBUG(nht, NHT)) { char buf[PREFIX2STR_BUFFER]; + char bnc_buf[BNC_FLAG_DUMP_SIZE]; + char chg_buf[BNC_FLAG_DUMP_SIZE]; + bnc_str(bnc, buf, PREFIX2STR_BUFFER); zlog_debug( - "NH update for %s(%u)(%s) - flags 0x%x chgflags 0x%x - evaluate paths", - buf, bnc->srte_color, bnc->bgp->name_pretty, bnc->flags, - bnc->change_flags); + "NH update for %s(%u)(%s) - flags %s chgflags %s- evaluate paths", + buf, bnc->srte_color, bnc->bgp->name_pretty, + bgp_nexthop_dump_bnc_flags(bnc, bnc_buf, + sizeof(bnc_buf)), + bgp_nexthop_dump_bnc_change_flags(bnc, chg_buf, + sizeof(bnc_buf))); } LIST_FOREACH (path, &(bnc->paths), nh_thread) { diff --git a/bgpd/bgp_nht.h b/bgpd/bgp_nht.h index 8451f0689d..f374e8dfa5 100644 --- a/bgpd/bgp_nht.h +++ b/bgpd/bgp_nht.h @@ -34,14 +34,15 @@ extern void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id); * bgp_route - BGP instance of route * bgp_nexthop - BGP instance of nexthop * a - afi: AFI_IP or AF_IP6 + * safi - safi: to check which table nhs are being imported to * p - path for which the nexthop object is being looked up * 
peer - The BGP peer associated with this NHT * connected - True if NH MUST be a connected route */ extern int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, afi_t a, - struct bgp_path_info *p, struct peer *peer, - int connected); + safi_t safi, struct bgp_path_info *p, + struct peer *peer, int connected); /** * bgp_unlink_nexthop() - Unlink the nexthop object from the path structure. diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c index 6cfcb9cc3d..533518cf93 100644 --- a/bgpd/bgp_open.c +++ b/bgpd/bgp_open.c @@ -760,6 +760,7 @@ static const struct message capcode_str[] = { {CAPABILITY_CODE_REFRESH_OLD, "Route Refresh (Old)"}, {CAPABILITY_CODE_ORF_OLD, "ORF (Old)"}, {CAPABILITY_CODE_FQDN, "FQDN"}, + {CAPABILITY_CODE_ENHANCED_RR, "Enhanced Route Refresh"}, {0}}; /* Minimum sizes for length field of each cap (so not inc. the header) */ @@ -776,6 +777,7 @@ static const size_t cap_minsizes[] = { [CAPABILITY_CODE_REFRESH_OLD] = CAPABILITY_CODE_REFRESH_LEN, [CAPABILITY_CODE_ORF_OLD] = CAPABILITY_CODE_ORF_LEN, [CAPABILITY_CODE_FQDN] = CAPABILITY_CODE_MIN_FQDN_LEN, + [CAPABILITY_CODE_ENHANCED_RR] = CAPABILITY_CODE_ENHANCED_LEN, }; /* value the capability must be a multiple of. @@ -796,6 +798,7 @@ static const size_t cap_modsizes[] = { [CAPABILITY_CODE_REFRESH_OLD] = 1, [CAPABILITY_CODE_ORF_OLD] = 1, [CAPABILITY_CODE_FQDN] = 1, + [CAPABILITY_CODE_ENHANCED_RR] = 1, }; /** @@ -863,6 +866,7 @@ static int bgp_capability_parse(struct peer *peer, size_t length, case CAPABILITY_CODE_DYNAMIC_OLD: case CAPABILITY_CODE_ENHE: case CAPABILITY_CODE_FQDN: + case CAPABILITY_CODE_ENHANCED_RR: /* Check length. */ if (caphdr.length < cap_minsizes[caphdr.code]) { zlog_info( @@ -913,10 +917,13 @@ static int bgp_capability_parse(struct peer *peer, size_t length, ret = 0; /* Don't return error for this */ } } break; + case CAPABILITY_CODE_ENHANCED_RR: case CAPABILITY_CODE_REFRESH: case CAPABILITY_CODE_REFRESH_OLD: { /* BGP refresh capability */ - if (caphdr.code == CAPABILITY_CODE_REFRESH_OLD) + if (caphdr.code == CAPABILITY_CODE_ENHANCED_RR) + SET_FLAG(peer->cap, PEER_CAP_ENHANCED_RR_RCV); + else if (caphdr.code == CAPABILITY_CODE_REFRESH_OLD) SET_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV); else SET_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV); @@ -1450,6 +1457,13 @@ void bgp_open_capability(struct stream *s, struct peer *peer) stream_putc(s, CAPABILITY_CODE_REFRESH); stream_putc(s, CAPABILITY_CODE_REFRESH_LEN); + /* Enhanced Route Refresh. 
*/ + SET_FLAG(peer->cap, PEER_CAP_ENHANCED_RR_ADV); + stream_putc(s, BGP_OPEN_OPT_CAP); + stream_putc(s, CAPABILITY_CODE_ENHANCED_LEN + 2); + stream_putc(s, CAPABILITY_CODE_ENHANCED_RR); + stream_putc(s, CAPABILITY_CODE_ENHANCED_LEN); + /* AS4 */ SET_FLAG(peer->cap, PEER_CAP_AS4_ADV); stream_putc(s, BGP_OPEN_OPT_CAP); diff --git a/bgpd/bgp_open.h b/bgpd/bgp_open.h index 5250a68581..471ac05c7c 100644 --- a/bgpd/bgp_open.h +++ b/bgpd/bgp_open.h @@ -49,6 +49,7 @@ struct graceful_restart_af { #define CAPABILITY_CODE_DYNAMIC_OLD 66 /* Dynamic Capability, deprecated since 2003 */ #define CAPABILITY_CODE_DYNAMIC 67 /* Dynamic Capability */ #define CAPABILITY_CODE_ADDPATH 69 /* Addpath Capability */ +#define CAPABILITY_CODE_ENHANCED_RR 70 /* Enhanced Route Refresh capability */ #define CAPABILITY_CODE_FQDN 73 /* Advertise hostname capability */ #define CAPABILITY_CODE_ENHE 5 /* Extended Next Hop Encoding */ #define CAPABILITY_CODE_REFRESH_OLD 128 /* Route Refresh Capability(cisco) */ @@ -63,6 +64,7 @@ struct graceful_restart_af { #define CAPABILITY_CODE_ADDPATH_LEN 4 #define CAPABILITY_CODE_ENHE_LEN 6 /* NRLI AFI = 2, SAFI = 2, Nexthop AFI = 2 */ #define CAPABILITY_CODE_MIN_FQDN_LEN 2 +#define CAPABILITY_CODE_ENHANCED_LEN 0 #define CAPABILITY_CODE_ORF_LEN 5 /* Cooperative Route Filtering Capability. */ diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index dfc64c043b..c2e2de1c73 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -444,6 +444,45 @@ int bgp_generate_updgrp_packets(struct thread *thread) * yet. */ if (!next_pkt || !next_pkt->buffer) { + if (!paf->t_announce_route) { + /* Make sure we supress BGP UPDATES + * for normal processing later again. + */ + UNSET_FLAG(paf->subgroup->sflags, + SUBGRP_STATUS_FORCE_UPDATES); + + /* If route-refresh BoRR message was + * already sent and we are done with + * re-announcing tables for a decent + * afi/safi, we ready to send + * EoRR request. + */ + if (CHECK_FLAG( + peer->af_sflags[afi][safi], + PEER_STATUS_BORR_SEND)) { + bgp_route_refresh_send( + peer, afi, safi, 0, 0, + 0, + BGP_ROUTE_REFRESH_EORR); + + SET_FLAG(peer->af_sflags[afi] + [safi], + PEER_STATUS_EORR_SEND); + UNSET_FLAG( + peer->af_sflags[afi] + [safi], + PEER_STATUS_BORR_SEND); + + if (bgp_debug_neighbor_events( + peer)) + zlog_debug( + "%s sending route-refresh (EoRR) for %s/%s", + peer->host, + afi2str(afi), + safi2str(safi)); + } + } + if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_RCV)) { if (!(PAF_SUBGRP(paf))->t_coalesce @@ -809,7 +848,7 @@ void bgp_notify_send(struct peer *peer, uint8_t code, uint8_t sub_code) */ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi, uint8_t orf_type, uint8_t when_to_refresh, - int remove) + int remove, uint8_t subtype) { struct stream *s; struct bgp_filter *filter; @@ -835,7 +874,10 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi, /* Encode Route Refresh message. 
*/ stream_putw(s, pkt_afi); - stream_putc(s, 0); + if (subtype) + stream_putc(s, subtype); + else + stream_putc(s, 0); stream_putc(s, pkt_safi); if (orf_type == ORF_TYPE_PREFIX || orf_type == ORF_TYPE_PREFIX_OLD) @@ -1453,6 +1495,29 @@ static int bgp_keepalive_receive(struct peer *peer, bgp_size_t size) return Receive_KEEPALIVE_message; } +static int bgp_refresh_stalepath_timer_expire(struct thread *thread) +{ + struct peer_af *paf; + + paf = THREAD_ARG(thread); + + afi_t afi = paf->afi; + safi_t safi = paf->safi; + struct peer *peer = paf->peer; + + peer->t_refresh_stalepath = NULL; + + if (peer->nsf[afi][safi]) + bgp_clear_stale_route(peer, afi, safi); + + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s: route-refresh (BoRR) timer for %s/%s expired", + peer->host, afi2str(afi), safi2str(safi)); + + bgp_timer_set(peer); + + return 0; +} /** * Process BGP UPDATE message for peer. @@ -1798,6 +1863,7 @@ static int bgp_notify_receive(struct peer *peer, bgp_size_t size) bgp_notify.subcode = stream_getc(peer->curr); bgp_notify.length = size - 2; bgp_notify.data = NULL; + bgp_notify.raw_data = NULL; /* Preserv notify code and sub code. */ peer->notify.code = bgp_notify.code; @@ -1881,6 +1947,9 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size) struct peer_af *paf; struct update_group *updgrp; struct peer *updgrp_peer; + uint8_t subtype; + bgp_size_t msg_length = + size - (BGP_MSG_ROUTE_REFRESH_MIN_SIZE - BGP_HEADER_SIZE); /* If peer does not have the capability, send notification. */ if (!CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_ADV)) { @@ -1908,14 +1977,9 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size) /* Parse packet. */ pkt_afi = stream_getw(s); - (void)stream_getc(s); + subtype = stream_getc(s); pkt_safi = stream_getc(s); - if (bgp_debug_update(peer, NULL, NULL, 0)) - zlog_debug("%s rcvd REFRESH_REQ for afi/safi: %s/%s", - peer->host, iana_afi2str(pkt_afi), - iana_safi2str(pkt_safi)); - /* Convert AFI, SAFI to internal values and check. */ if (bgp_map_afi_safi_iana2int(pkt_afi, pkt_safi, &afi, &safi)) { zlog_info( @@ -1931,8 +1995,34 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size) uint8_t orf_type; uint16_t orf_len; - if (size - (BGP_MSG_ROUTE_REFRESH_MIN_SIZE - BGP_HEADER_SIZE) - < 5) { + if (subtype) { + /* If the length, excluding the fixed-size message + * header, of the received ROUTE-REFRESH message with + * Message Subtype 1 and 2 is not 4, then the BGP + * speaker MUST send a NOTIFICATION message with the + * Error Code of "ROUTE-REFRESH Message Error" and the + * subcode of "Invalid Message Length". + */ + if (msg_length != 4) { + zlog_err( + "%s Enhanced Route Refresh message length error", + peer->host); + bgp_notify_send( + peer, BGP_NOTIFY_ROUTE_REFRESH_ERR, + BGP_NOTIFY_ROUTE_REFRESH_INVALID_MSG_LEN); + } + + /* When the BGP speaker receives a ROUTE-REFRESH message + * with a "Message Subtype" field other than 0, 1, or 2, + * it MUST ignore the received ROUTE-REFRESH message. + */ + if (subtype > 2) + zlog_err( + "%s Enhanced Route Refresh invalid subtype", + peer->host); + } + + if (msg_length < 5) { zlog_info("%s ORF route refresh length error", peer->host); bgp_notify_send(peer, BGP_NOTIFY_CEASE, @@ -2116,6 +2206,11 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size) peer->orf_plist[afi][safi]; } + /* Avoid supressing duplicate routes later + * when processing in subgroup_announce_table(). 
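With RFC 7313 the former reserved byte of the ROUTE-REFRESH body becomes a message subtype (0 = normal request, 1 = BoRR, 2 = EoRR), and for subtypes 1 and 2 the body must be exactly 4 bytes, otherwise a "ROUTE-REFRESH Message Error / Invalid Message Length" notification is sent; unknown subtypes are ignored. A standalone parser sketch of that layout and the checks the handler above performs (types and names here are illustrative):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* RFC 7313 route-refresh subtypes carried in the former reserved byte. */
enum rr_subtype { RR_REQUEST = 0, RR_BORR = 1, RR_EORR = 2 };

struct route_refresh {
	uint16_t afi;
	uint8_t subtype;
	uint8_t safi;
};

/* Parse the 4-byte ROUTE-REFRESH body (AFI, subtype, SAFI).  BoRR and
 * EoRR must not carry extra data; subtypes above 2 are ignored. */
static int parse_route_refresh(const uint8_t *body, size_t body_len,
			       struct route_refresh *rr)
{
	if (body_len < 4)
		return -1;

	rr->afi = (uint16_t)(body[0] << 8 | body[1]);
	rr->subtype = body[2];
	rr->safi = body[3];

	if (rr->subtype > RR_EORR)
		return -1; /* unknown subtype: ignore the message */
	if (rr->subtype != RR_REQUEST && body_len != 4)
		return -1; /* BoRR/EoRR must be exactly 4 bytes */
	return 0;
}

int main(void)
{
	/* AFI 1 (IPv4), subtype BoRR, SAFI 1 (unicast). */
	const uint8_t borr[4] = { 0x00, 0x01, RR_BORR, 0x01 };
	struct route_refresh rr;

	if (parse_route_refresh(borr, sizeof(borr), &rr) == 0)
		printf("afi %u safi %u subtype %u\n", rr.afi, rr.safi,
		       rr.subtype);
	return 0;
}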
+ */ + SET_FLAG(paf->subgroup->sflags, SUBGRP_STATUS_FORCE_UPDATES); + /* If the peer is configured for default-originate clear the * SUBGRP_STATUS_DEFAULT_ORIGINATE flag so that we will * re-advertise the @@ -2127,6 +2222,124 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size) SUBGRP_STATUS_DEFAULT_ORIGINATE); } + if (subtype == BGP_ROUTE_REFRESH_BORR) { + /* A BGP speaker that has received the Graceful Restart + * Capability from its neighbor MUST ignore any BoRRs for + * an <AFI, SAFI> from the neighbor before the speaker + * receives the EoR for the given <AFI, SAFI> from the + * neighbor. + */ + if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_RCV) + && !CHECK_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_EOR_RECEIVED)) { + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s rcvd route-refresh (BoRR) for %s/%s before EoR", + peer->host, afi2str(afi), + safi2str(safi)); + return BGP_PACKET_NOOP; + } + + if (peer->t_refresh_stalepath) { + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s rcvd route-refresh (BoRR) for %s/%s, whereas BoRR already received", + peer->host, afi2str(afi), + safi2str(safi)); + return BGP_PACKET_NOOP; + } + + SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_BORR_RECEIVED); + UNSET_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_EORR_RECEIVED); + + /* When a BGP speaker receives a BoRR message from + * a peer, it MUST mark all the routes with the given + * Address Family Identifier and Subsequent Address + * Family Identifier, <AFI, SAFI> [RFC2918], from + * that peer as stale. + */ + if (peer_active_nego(peer)) { + SET_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_ENHANCED_REFRESH); + bgp_set_stale_route(peer, afi, safi); + } + + if (peer->status == Established) + thread_add_timer(bm->master, + bgp_refresh_stalepath_timer_expire, + paf, peer->bgp->stalepath_time, + &peer->t_refresh_stalepath); + + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s rcvd route-refresh (BoRR) for %s/%s, triggering timer for %u seconds", + peer->host, afi2str(afi), safi2str(safi), + peer->bgp->stalepath_time); + } else if (subtype == BGP_ROUTE_REFRESH_EORR) { + if (!peer->t_refresh_stalepath) { + zlog_err( + "%s rcvd route-refresh (EoRR) for %s/%s, whereas no BoRR received", + peer->host, afi2str(afi), safi2str(safi)); + return BGP_PACKET_NOOP; + } + + BGP_TIMER_OFF(peer->t_refresh_stalepath); + + SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_EORR_RECEIVED); + UNSET_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_BORR_RECEIVED); + + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s rcvd route-refresh (EoRR) for %s/%s, stopping BoRR timer", + peer->host, afi2str(afi), safi2str(safi)); + + if (peer->nsf[afi][safi]) + bgp_clear_stale_route(peer, afi, safi); + } else { + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s rcvd route-refresh (REQUEST) for %s/%s", + peer->host, afi2str(afi), safi2str(safi)); + + /* In response to a "normal route refresh request" from the + * peer, the speaker MUST send a BoRR message. + */ + if (CHECK_FLAG(peer->cap, PEER_CAP_ENHANCED_RR_RCV)) { + /* For a BGP speaker that supports the BGP Graceful + * Restart, it MUST NOT send a BoRR for an <AFI, SAFI> + * to a neighbor before it sends the EoR for the + * <AFI, SAFI> to the neighbor. 
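Taken together, the BoRR and EoRR branches implement a simple receive-side state machine: BoRR marks everything learned from the peer as stale and arms the stalepath timer, re-advertised routes clear their stale flag, and EoRR (or the timer expiring) sweeps whatever is still stale. A toy model of that flow, with plain structs standing in for the RIB:

#include <stdbool.h>
#include <stdio.h>

struct demo_path {
	const char *prefix;
	bool stale;
	bool withdrawn;
};

static void on_borr(struct demo_path *paths, int n)
{
	/* Mark everything from the peer stale; a real implementation
	 * would also start the stalepath timer here. */
	for (int i = 0; i < n; i++)
		paths[i].stale = true;
}

static void on_update(struct demo_path *p)
{
	p->stale = false; /* a refreshed route is no longer stale */
}

static void on_eorr(struct demo_path *paths, int n)
{
	/* Sweep whatever was not re-advertised between BoRR and EoRR. */
	for (int i = 0; i < n; i++)
		if (paths[i].stale)
			paths[i].withdrawn = true;
}

int main(void)
{
	struct demo_path paths[] = {
		{ "10.0.0.0/24", false, false },
		{ "10.0.1.0/24", false, false },
	};

	on_borr(paths, 2);
	on_update(&paths[0]);	/* only the first prefix is re-sent */
	on_eorr(paths, 2);

	for (int i = 0; i < 2; i++)
		printf("%s %s\n", paths[i].prefix,
		       paths[i].withdrawn ? "removed" : "kept");
	return 0;
}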
+ */ + if (!CHECK_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_EOR_SEND)) { + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s rcvd route-refresh (REQUEST) for %s/%s before EoR", + peer->host, afi2str(afi), + safi2str(safi)); + return BGP_PACKET_NOOP; + } + + bgp_route_refresh_send(peer, afi, safi, 0, 0, 0, + BGP_ROUTE_REFRESH_BORR); + + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s sending route-refresh (BoRR) for %s/%s", + peer->host, afi2str(afi), + safi2str(safi)); + + /* Set flag Ready-To-Send to know when we can send EoRR + * message. + */ + SET_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_BORR_SEND); + UNSET_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_EORR_SEND); + } + } + /* Perform route refreshment to the peer */ bgp_announce_route(peer, afi, safi); diff --git a/bgpd/bgp_packet.h b/bgpd/bgp_packet.h index e83f7d950c..525859a2da 100644 --- a/bgpd/bgp_packet.h +++ b/bgpd/bgp_packet.h @@ -62,8 +62,9 @@ extern void bgp_open_send(struct peer *); extern void bgp_notify_send(struct peer *, uint8_t, uint8_t); extern void bgp_notify_send_with_data(struct peer *, uint8_t, uint8_t, uint8_t *, size_t); -extern void bgp_route_refresh_send(struct peer *, afi_t, safi_t, uint8_t, - uint8_t, int); +extern void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi, + uint8_t orf_type, uint8_t when_to_refresh, + int remove, uint8_t subtype); extern void bgp_capability_send(struct peer *, afi_t, safi_t, int, int); extern int bgp_capability_receive(struct peer *, bgp_size_t); diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c index a3f1eb8401..171522f8ae 100644 --- a/bgpd/bgp_pbr.c +++ b/bgpd/bgp_pbr.c @@ -754,7 +754,7 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, struct bgp_pbr_entry_main *api) { int ret; - int i, action_count = 0; + uint32_t i, action_count = 0; struct ecommunity *ecom; struct ecommunity_val *ecom_eval; struct bgp_pbr_entry_action *api_action; @@ -2845,19 +2845,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path, zlog_warn("PBR: Sample action Ignored"); } } -#if 0 - if (api->actions[i].u.za.filter - & TRAFFIC_ACTION_DISTRIBUTE) { - if (BGP_DEBUG(pbr, PBR)) { - bgp_pbr_print_policy_route(api); - zlog_warn("PBR: Distribute action Applies"); - } - continue_loop = 0; - /* continue forwarding entry as before - * no action - */ - } -#endif /* XXX to confirm behaviour of traffic action. for now , ignore */ /* terminate action: run other filters */ break; diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 87e03d205f..f9e655b4e7 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -93,10 +93,14 @@ #include "bgpd/bgp_route_clippy.c" #endif +DEFINE_HOOK(bgp_snmp_update_stats, + (struct bgp_node *rn, struct bgp_path_info *pi, bool added), + (rn, pi, added)) + /* Extern from bgp_dump.c */ extern const char *bgp_origin_str[]; extern const char *bgp_origin_long_str[]; -const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json); + /* PMSI strings. 
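The DEFINE_HOOK(bgp_snmp_update_stats, ...) added to bgp_route.c lets interested modules (the name suggests the SNMP code) get notified whenever a path is added to or reaped from the RIB, without bgp_route.c calling them directly. A minimal standalone sketch of that hook/observer idea, using a fixed callback array rather than FRR's hook macros:

#include <stdio.h>

/* Minimal observer list: modules register a callback and the core
 * notifies them on add/remove, without knowing who is listening. */
typedef void (*stats_hook_fn)(const char *prefix, int added);

#define MAX_HOOKS 4
static stats_hook_fn hooks[MAX_HOOKS];
static int nhooks;

static void hook_register(stats_hook_fn fn)
{
	if (nhooks < MAX_HOOKS)
		hooks[nhooks++] = fn;
}

static void hook_call_update_stats(const char *prefix, int added)
{
	for (int i = 0; i < nhooks; i++)
		hooks[i](prefix, added);
}

/* A would-be statistics module just counts what it is told about. */
static int route_count;

static void snmp_update_stats(const char *prefix, int added)
{
	route_count += added ? 1 : -1;
	printf("%s %s, %d routes\n", prefix, added ? "added" : "removed",
	       route_count);
}

int main(void)
{
	hook_register(snmp_update_stats);
	hook_call_update_stats("192.0.2.0/24", 1);
	hook_call_update_stats("192.0.2.0/24", 0);
	return 0;
}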
*/ #define PMSI_TNLTYPE_STR_NO_INFO "No info" #define PMSI_TNLTYPE_STR_DEFAULT PMSI_TNLTYPE_STR_NO_INFO @@ -208,9 +212,6 @@ void bgp_path_info_extra_free(struct bgp_path_info_extra **extra) return; e = *extra; - if (e->damp_info) - bgp_damp_info_free(e->damp_info, 0, e->damp_info->afi, - e->damp_info->safi); e->damp_info = NULL; if (e->parent) { @@ -405,6 +406,7 @@ void bgp_path_info_add(struct bgp_dest *dest, struct bgp_path_info *pi) bgp_dest_lock_node(dest); peer_lock(pi->peer); /* bgp_path_info peer reference */ bgp_dest_set_defer_flag(dest, false); + hook_call(bgp_snmp_update_stats, dest, pi, true); } /* Do the actual removal of info from RIB, for use by bgp_process @@ -420,6 +422,7 @@ void bgp_path_info_reap(struct bgp_dest *dest, struct bgp_path_info *pi) bgp_path_info_mpath_dequeue(pi); bgp_path_info_unlock(pi); + hook_call(bgp_snmp_update_stats, dest, pi, false); bgp_dest_unlock_node(dest); } @@ -1839,7 +1842,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, /* If community is not disabled check the no-export and local. */ if (!transparent && bgp_community_filter(peer, piattr)) { if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) - zlog_debug("%s: community filter check fail", __func__); + zlog_debug("%s: community filter check fail for %pFX", + __func__, p); return false; } @@ -2755,7 +2759,10 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest, == BGP_ROUTE_REDISTRIBUTE) { if (CHECK_FLAG( dest->flags, - BGP_NODE_REGISTERED_FOR_LABEL)) + BGP_NODE_REGISTERED_FOR_LABEL) + || CHECK_FLAG( + dest->flags, + BGP_NODE_LABEL_REQUESTED)) bgp_unregister_for_label(dest); label_ntop(MPLS_LABEL_IMPLICIT_NULL, 1, &dest->local_label); @@ -2765,10 +2772,13 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest, new_select); } } else if (CHECK_FLAG(dest->flags, - BGP_NODE_REGISTERED_FOR_LABEL)) { + BGP_NODE_REGISTERED_FOR_LABEL) + || CHECK_FLAG(dest->flags, + BGP_NODE_LABEL_REQUESTED)) { bgp_unregister_for_label(dest); } - } else if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)) { + } else if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL) + || CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) { bgp_unregister_for_label(dest); } @@ -3325,14 +3335,16 @@ static void bgp_rib_withdraw(struct bgp_dest *dest, struct bgp_path_info *pi, /* apply dampening, if result is suppressed, we'll be retaining * the bgp_path_info in the RIB for historical reference. 
*/ - if (CHECK_FLAG(peer->bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING) - && peer->sort == BGP_PEER_EBGP) - if ((bgp_damp_withdraw(pi, dest, afi, safi, 0)) - == BGP_DAMP_SUPPRESSED) { - bgp_aggregate_decrement(peer->bgp, p, pi, afi, - safi); - return; + if (peer->sort == BGP_PEER_EBGP) { + if (get_active_bdc_from_pi(pi, afi, safi)) { + if (bgp_damp_withdraw(pi, dest, afi, safi, 0) + == BGP_DAMP_SUPPRESSED) { + bgp_aggregate_decrement(peer->bgp, p, pi, afi, + safi); + return; + } } + } #ifdef ENABLE_BGP_VNC if (safi == SAFI_MPLS_VPN) { @@ -3500,6 +3512,36 @@ bool bgp_update_martian_nexthop(struct bgp *bgp, afi_t afi, safi_t safi, return ret; } +static void bgp_attr_add_no_export_community(struct attr *attr) +{ + struct community *old; + struct community *new; + struct community *merge; + struct community *no_export; + + old = attr->community; + no_export = community_str2com("no-export"); + + assert(no_export); + + if (old) { + merge = community_merge(community_dup(old), no_export); + + if (!old->refcnt) + community_free(&old); + + new = community_uniq_sort(merge); + community_free(&merge); + } else { + new = community_dup(no_export); + } + + community_free(&no_export); + + attr->community = new; + attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES); +} + int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, struct attr *attr, afi_t afi, safi_t safi, int type, int sub_type, struct prefix_rd *prd, mpls_label_t *label, @@ -3692,6 +3734,20 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, if (peer->sort == BGP_PEER_EBGP) { + /* rfc7999: + * A BGP speaker receiving an announcement tagged with the + * BLACKHOLE community SHOULD add the NO_ADVERTISE or + * NO_EXPORT community as defined in RFC1997, or a + * similar community, to prevent propagation of the + * prefix outside the local AS. The community to prevent + * propagation SHOULD be chosen according to the operator's + * routing policy. + */ + if (new_attr.community + && community_include(new_attr.community, + COMMUNITY_BLACKHOLE)) + bgp_attr_add_no_export_community(&new_attr); + /* If we receive the graceful-shutdown community from an eBGP * peer we must lower local-preference */ if (new_attr.community @@ -3749,7 +3805,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, /* Same attribute comes in. */ if (!CHECK_FLAG(pi->flags, BGP_PATH_REMOVED) - && attrhash_cmp(pi->attr, attr_new) + && same_attr && (!has_valid_label || memcmp(&(bgp_path_info_extra_get(pi))->label, label, num_labels * sizeof(mpls_label_t)) @@ -3757,8 +3813,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, && (overlay_index_equal( afi, pi, evpn == NULL ? NULL : &evpn->gw_ip))) { - if (CHECK_FLAG(bgp->af_flags[afi][safi], - BGP_CONFIG_DAMPENING) + if (get_active_bdc_from_pi(pi, afi, safi) && peer->sort == BGP_PEER_EBGP && CHECK_FLAG(pi->flags, BGP_PATH_HISTORY)) { if (bgp_debug_update(peer, p, NULL, 1)) { @@ -3852,11 +3907,11 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, bgp_aggregate_decrement(bgp, p, pi, afi, safi); /* Update bgp route dampening information. */ - if (CHECK_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING) + if (get_active_bdc_from_pi(pi, afi, safi) && peer->sort == BGP_PEER_EBGP) { /* This is implicit withdraw so we should update - dampening - information. */ + * dampening information. 
+ */ if (!CHECK_FLAG(pi->flags, BGP_PATH_HISTORY)) bgp_damp_withdraw(pi, dest, afi, safi, 1); } @@ -3979,7 +4034,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, #endif /* Update bgp route dampening information. */ - if (CHECK_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING) + if (get_active_bdc_from_pi(pi, afi, safi) && peer->sort == BGP_PEER_EBGP) { /* Now we do normal update dampening. */ ret = bgp_damp_update(pi, dest, afi, safi); @@ -4013,7 +4068,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, nh_afi = BGP_ATTR_NH_AFI(afi, pi->attr); if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, nh_afi, - pi, NULL, connected) + safi, pi, NULL, connected) || CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)) bgp_path_info_set_flag(dest, pi, BGP_PATH_VALID); @@ -4158,7 +4213,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, nh_afi = BGP_ATTR_NH_AFI(afi, new->attr); - if (bgp_find_or_add_nexthop(bgp, bgp, nh_afi, new, NULL, + if (bgp_find_or_add_nexthop(bgp, bgp, nh_afi, safi, new, NULL, connected) || CHECK_FLAG(peer->flags, PEER_FLAG_IS_RFAPI_HD)) bgp_path_info_set_flag(dest, new, BGP_PATH_VALID); @@ -4595,8 +4650,10 @@ static wq_item_status bgp_clear_route_node(struct work_queue *wq, void *data) continue; /* graceful restart STALE flag set. */ - if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT) - && peer->nsf[afi][safi] + if (((CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT) + && peer->nsf[afi][safi]) + || CHECK_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_ENHANCED_REFRESH)) && !CHECK_FLAG(pi->flags, BGP_PATH_STALE) && !CHECK_FLAG(pi->flags, BGP_PATH_UNUSEABLE)) bgp_path_info_set_flag(dest, pi, BGP_PATH_STALE); @@ -4847,7 +4904,7 @@ void bgp_clear_stale_route(struct peer *peer, afi_t afi, safi_t safi) struct bgp_path_info *pi; struct bgp_table *table; - if (safi == SAFI_MPLS_VPN) { + if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || safi == SAFI_EVPN) { for (dest = bgp_table_top(peer->bgp->rib[afi][safi]); dest; dest = bgp_route_next(dest)) { struct bgp_dest *rm; @@ -4886,6 +4943,81 @@ void bgp_clear_stale_route(struct peer *peer, afi_t afi, safi_t safi) } } +void bgp_set_stale_route(struct peer *peer, afi_t afi, safi_t safi) +{ + struct bgp_dest *dest, *ndest; + struct bgp_path_info *pi; + struct bgp_table *table; + + if (safi == SAFI_MPLS_VPN || safi == SAFI_ENCAP || safi == SAFI_EVPN) { + for (dest = bgp_table_top(peer->bgp->rib[afi][safi]); dest; + dest = bgp_route_next(dest)) { + table = bgp_dest_get_bgp_table_info(dest); + if (!table) + continue; + + for (ndest = bgp_table_top(table); ndest; + ndest = bgp_route_next(ndest)) { + for (pi = bgp_dest_get_bgp_path_info(ndest); pi; + pi = pi->next) { + if (pi->peer != peer) + continue; + + if ((CHECK_FLAG( + peer->af_sflags[afi][safi], + PEER_STATUS_ENHANCED_REFRESH)) + && !CHECK_FLAG(pi->flags, + BGP_PATH_STALE) + && !CHECK_FLAG( + pi->flags, + BGP_PATH_UNUSEABLE)) { + if (bgp_debug_neighbor_events( + peer)) + zlog_debug( + "%s: route-refresh for %s/%s, marking prefix %pFX as stale", + peer->host, + afi2str(afi), + safi2str(safi), + bgp_dest_get_prefix( + ndest)); + + bgp_path_info_set_flag( + ndest, pi, + BGP_PATH_STALE); + } + } + } + } + } else { + for (dest = bgp_table_top(peer->bgp->rib[afi][safi]); dest; + dest = bgp_route_next(dest)) { + for (pi = bgp_dest_get_bgp_path_info(dest); pi; + pi = pi->next) { + if (pi->peer != peer) + continue; + + if ((CHECK_FLAG(peer->af_sflags[afi][safi], + PEER_STATUS_ENHANCED_REFRESH)) + && 
!CHECK_FLAG(pi->flags, BGP_PATH_STALE) + && !CHECK_FLAG(pi->flags, + BGP_PATH_UNUSEABLE)) { + if (bgp_debug_neighbor_events(peer)) + zlog_debug( + "%s: route-refresh for %s/%s, marking prefix %pFX as stale", + peer->host, + afi2str(afi), + safi2str(safi), + bgp_dest_get_prefix( + dest)); + + bgp_path_info_set_flag(dest, pi, + BGP_PATH_STALE); + } + } + } + } +} + bool bgp_outbound_policy_exists(struct peer *peer, struct bgp_filter *filter) { if (peer->sort == BGP_PEER_IBGP) @@ -5319,7 +5451,8 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, bgp_nexthop = pi->extra->bgp_orig; if (bgp_find_or_add_nexthop(bgp, bgp_nexthop, - afi, pi, NULL, 0)) + afi, safi, pi, NULL, + 0)) bgp_path_info_set_flag(dest, pi, BGP_PATH_VALID); else { @@ -5371,7 +5504,7 @@ void bgp_static_update(struct bgp *bgp, const struct prefix *p, /* Nexthop reachability check. */ if (CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK) && (safi == SAFI_UNICAST || safi == SAFI_LABELED_UNICAST)) { - if (bgp_find_or_add_nexthop(bgp, bgp, afi, new, NULL, 0)) + if (bgp_find_or_add_nexthop(bgp, bgp, afi, safi, new, NULL, 0)) bgp_path_info_set_flag(dest, new, BGP_PATH_VALID); else { if (BGP_DEBUG(nht, NHT)) { @@ -6576,6 +6709,9 @@ static void bgp_aggregate_install( if (!attr) { bgp_aggregate_delete(bgp, p, afi, safi, aggregate); + if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) + zlog_debug("%s: %pFX null attribute", __func__, + p); return; } @@ -7094,6 +7230,13 @@ static void bgp_add_route_to_aggregate(struct bgp *bgp, struct ecommunity *ecommunity = NULL; struct lcommunity *lcommunity = NULL; + /* If the bgp instance is being deleted or self peer is deleted + * then do not create aggregate route + */ + if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS) + || (bgp->peer_self == NULL)) + return; + /* ORIGIN attribute: If at least one route among routes that are * aggregated has ORIGIN with the value INCOMPLETE, then the * aggregated route must have the ORIGIN attribute with the value @@ -7210,6 +7353,13 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi, struct lcommunity *lcommunity = NULL; unsigned long match = 0; + /* If the bgp instance is being deleted or self peer is deleted + * then do not create aggregate route + */ + if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS) + || (bgp->peer_self == NULL)) + return; + if (BGP_PATH_HOLDDOWN(pi)) return; @@ -7406,6 +7556,13 @@ int bgp_aggregate_unset(struct bgp *bgp, struct prefix *prefix, afi_t afi, struct bgp_dest *dest; struct bgp_aggregate *aggregate; + /* If the bgp instance is being deleted or self peer is deleted + * then do not create aggregate route + */ + if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS) + || (bgp->peer_self == NULL)) + return 0; + apply_mask(prefix); /* Old configuration check. 
*/ dest = bgp_node_lookup(bgp->aggregate[afi][safi], prefix); @@ -8115,7 +8272,7 @@ bgp_path_selection_reason2str(enum bgp_path_selection_reason reason) case bgp_path_selection_router_id: return "Router ID"; case bgp_path_selection_cluster_length: - return "Cluser length"; + return "Cluster length"; case bgp_path_selection_stale: return "Path Staleness"; case bgp_path_selection_local_configured: @@ -9508,22 +9665,9 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, inet_ntop(AF_INET, &attr->aggregator_addr, buf, sizeof(buf))); - if (attr->aggregator_as == BGP_AS_ZERO) - json_object_boolean_true_add( - json_path, "aggregatorAsMalformed"); - else - json_object_boolean_false_add( - json_path, "aggregatorAsMalformed"); } else { - if (attr->aggregator_as == BGP_AS_ZERO) - vty_out(vty, - ", (aggregated by %u(malformed) %pI4)", - attr->aggregator_as, - &attr->aggregator_addr); - else - vty_out(vty, ", (aggregated by %u %pI4)", - attr->aggregator_as, - &attr->aggregator_addr); + vty_out(vty, ", (aggregated by %u %pI4)", + attr->aggregator_as, &attr->aggregator_addr); } } @@ -10129,7 +10273,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, } if (path->extra && path->extra->damp_info) - bgp_damp_info_vty(vty, path, afi, safi, json_path); + bgp_damp_info_vty(vty, bgp, path, afi, safi, json_path); /* Remote Label */ if (path->extra && bgp_is_valid_label(&path->extra->label[0]) @@ -10261,6 +10405,24 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, str, label2vni(&attr->label)); } + /* Output some debug about internal state of the dest flags */ + if (json_paths) { + if (CHECK_FLAG(bn->flags, BGP_NODE_PROCESS_SCHEDULED)) + json_object_boolean_true_add(json_path, "processScheduled"); + if (CHECK_FLAG(bn->flags, BGP_NODE_USER_CLEAR)) + json_object_boolean_true_add(json_path, "userCleared"); + if (CHECK_FLAG(bn->flags, BGP_NODE_LABEL_CHANGED)) + json_object_boolean_true_add(json_path, "labelChanged"); + if (CHECK_FLAG(bn->flags, BGP_NODE_REGISTERED_FOR_LABEL)) + json_object_boolean_true_add(json_path, "registeredForLabel"); + if (CHECK_FLAG(bn->flags, BGP_NODE_SELECT_DEFER)) + json_object_boolean_true_add(json_path, "selectDefered"); + if (CHECK_FLAG(bn->flags, BGP_NODE_FIB_INSTALLED)) + json_object_boolean_true_add(json_path, "fibInstalled"); + if (CHECK_FLAG(bn->flags, BGP_NODE_FIB_INSTALL_PENDING)) + json_object_boolean_true_add(json_path, "fibPending"); + } + /* We've constructed the json object for this path, add it to the json * array of paths */ @@ -11163,8 +11325,13 @@ static int bgp_show_route_in_table(struct vty *vty, struct bgp *bgp, vty, use_json, json_paths); - if (use_json && display) - json_object_object_add(json, "paths", json_paths); + if (use_json) { + if (display) + json_object_object_add(json, "paths", + json_paths); + else + json_object_free(json_paths); + } } else { if ((dest = bgp_node_match(rib, &match)) != NULL) { const struct prefix *dest_p = bgp_dest_get_prefix(dest); @@ -11739,10 +11906,6 @@ DEFPY (show_ip_bgp_json, ? 
AFI_IP : AFI_IP6; FOREACH_SAFI (safi) { - if (strmatch(get_afi_safi_str(afi, safi, true), - "Unknown")) - continue; - if (!bgp_afi_safi_peer_exists(bgp, afi, safi)) continue; @@ -11773,10 +11936,6 @@ DEFPY (show_ip_bgp_json, } else { /* show <ip> bgp all: for each AFI and SAFI*/ FOREACH_AFI_SAFI (afi, safi) { - if (strmatch(get_afi_safi_str(afi, safi, true), - "Unknown")) - continue; - if (!bgp_afi_safi_peer_exists(bgp, afi, safi)) continue; @@ -12148,22 +12307,6 @@ struct bgp_table_stats { double total_space; }; -#if 0 -#define TALLY_SIGFIG 100000 -static unsigned long -ravg_tally (unsigned long count, unsigned long oldavg, unsigned long newval) -{ - unsigned long newtot = (count-1) * oldavg + (newval * TALLY_SIGFIG); - unsigned long res = (newtot * TALLY_SIGFIG) / count; - unsigned long ret = newtot / count; - - if ((res % TALLY_SIGFIG) > (TALLY_SIGFIG/2)) - return ret + 1; - else - return ret; -} -#endif - static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top, struct bgp_table_stats *ts, unsigned int space) { @@ -12178,13 +12321,6 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top, ts->counts[BGP_STATS_PREFIXES]++; ts->counts[BGP_STATS_TOTPLEN] += rn_p->prefixlen; -#if 0 - ts->counts[BGP_STATS_AVGPLEN] - = ravg_tally (ts->counts[BGP_STATS_PREFIXES], - ts->counts[BGP_STATS_AVGPLEN], - rn_p->prefixlen); -#endif - /* check if the prefix is included by any other announcements */ while (pdest && !bgp_dest_has_bgp_path_info_data(pdest)) pdest = bgp_dest_parent_nolock(pdest); @@ -12221,16 +12357,6 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top, ts->counts[BGP_STATS_ASPATH_TOTHOPS] += hops; ts->counts[BGP_STATS_ASPATH_TOTSIZE] += size; -#if 0 - ts->counts[BGP_STATS_ASPATH_AVGHOPS] - = ravg_tally (ts->counts[BGP_STATS_ASPATH_COUNT], - ts->counts[BGP_STATS_ASPATH_AVGHOPS], - hops); - ts->counts[BGP_STATS_ASPATH_AVGSIZE] - = ravg_tally (ts->counts[BGP_STATS_ASPATH_COUNT], - ts->counts[BGP_STATS_ASPATH_AVGSIZE], - size); -#endif if (highest > ts->counts[BGP_STATS_ASN_HIGHEST]) ts->counts[BGP_STATS_ASN_HIGHEST] = highest; } @@ -12340,15 +12466,6 @@ static int bgp_table_stats_single(struct vty *vty, struct bgp *bgp, afi_t afi, continue; switch (i) { -#if 0 - case BGP_STATS_ASPATH_AVGHOPS: - case BGP_STATS_ASPATH_AVGSIZE: - case BGP_STATS_AVGPLEN: - vty_out (vty, "%-30s: ", table_stats_strs[i]); - vty_out (vty, "%12.2f", - (float)ts.counts[i] / (float)TALLY_SIGFIG); - break; -#endif case BGP_STATS_ASPATH_TOTHOPS: case BGP_STATS_ASPATH_TOTSIZE: if (!json) { @@ -12651,6 +12768,7 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi, "No such neighbor or address family"); vty_out(vty, "%s\n", json_object_to_json_string(json)); json_object_free(json); + json_object_free(json_loop); } else vty_out(vty, "%% No such neighbor or address family\n"); @@ -12903,51 +13021,29 @@ static void show_adj_route_header(struct vty *vty, struct bgp *bgp, } } -static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, - safi_t safi, enum bgp_show_adj_route_type type, - const char *rmap_name, json_object *json, - uint8_t show_flags) +static void +show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table, + afi_t afi, safi_t safi, enum bgp_show_adj_route_type type, + const char *rmap_name, json_object *json, json_object *json_ar, + json_object *json_scode, json_object *json_ocode, + uint8_t show_flags, int *header1, int *header2, char *rd_str, + unsigned long *output_count, unsigned long 
*filtered_count) { - struct bgp_table *table; struct bgp_adj_in *ain; struct bgp_adj_out *adj; - unsigned long output_count = 0; - unsigned long filtered_count = 0; struct bgp_dest *dest; - int header1 = 1; struct bgp *bgp; - int header2 = 1; struct attr attr; int ret; struct update_subgroup *subgrp; - json_object *json_scode = NULL; - json_object *json_ocode = NULL; - json_object *json_ar = NULL; struct peer_af *paf; bool route_filtered; bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); bool wide = CHECK_FLAG(show_flags, BGP_SHOW_OPT_WIDE); - - if (use_json) { - json_scode = json_object_new_object(); - json_ocode = json_object_new_object(); - json_ar = json_object_new_object(); - - json_object_string_add(json_scode, "suppressed", "s"); - json_object_string_add(json_scode, "damped", "d"); - json_object_string_add(json_scode, "history", "h"); - json_object_string_add(json_scode, "valid", "*"); - json_object_string_add(json_scode, "best", ">"); - json_object_string_add(json_scode, "multipath", "="); - json_object_string_add(json_scode, "internal", "i"); - json_object_string_add(json_scode, "ribFailure", "r"); - json_object_string_add(json_scode, "stale", "S"); - json_object_string_add(json_scode, "removed", "R"); - - json_object_string_add(json_ocode, "igp", "i"); - json_object_string_add(json_ocode, "egp", "e"); - json_object_string_add(json_ocode, "incomplete", "?"); - } + bool show_rd = ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP) + || (safi == SAFI_EVPN)) + ? true + : false; bgp = peer->bgp; @@ -12961,13 +13057,6 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, return; } - /* labeled-unicast routes live in the unicast table */ - if (safi == SAFI_LABELED_UNICAST) - table = bgp->rib[afi][SAFI_UNICAST]; - else - table = bgp->rib[afi][safi]; - - output_count = filtered_count = 0; subgrp = peer_subgroup(peer, afi, safi); if (type == bgp_show_adj_route_advertised && subgrp @@ -13011,7 +13100,7 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, vty_out(vty, "Originating default network %s\n\n", (afi == AFI_IP) ? 
"0.0.0.0/0" : "::/0"); } - header1 = 0; + *header1 = 0; } for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) { @@ -13021,9 +13110,23 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, if (ain->peer != peer) continue; - show_adj_route_header( - vty, bgp, table, &header1, &header2, - json, json_scode, json_ocode, wide); + show_adj_route_header(vty, bgp, table, header1, + header2, json, json_scode, + json_ocode, wide); + + if ((safi == SAFI_MPLS_VPN) + || (safi == SAFI_ENCAP) + || (safi == SAFI_EVPN)) { + if (use_json) + json_object_string_add( + json_ar, "rd", rd_str); + else if (show_rd && rd_str) { + vty_out(vty, + "Route Distinguisher: %s\n", + rd_str); + show_rd = false; + } + } attr = *ain->attr; route_filtered = false; @@ -13049,14 +13152,14 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, continue; } - if (type == bgp_show_adj_route_received && - (route_filtered || ret == RMAP_DENY)) - filtered_count++; + if (type == bgp_show_adj_route_received + && (route_filtered || ret == RMAP_DENY)) + (*filtered_count)++; route_vty_out_tmp(vty, rn_p, &attr, safi, use_json, json_ar, wide); bgp_attr_undup(&attr, ain->attr); - output_count++; + (*output_count)++; } } else if (type == bgp_show_adj_route_advertised) { RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out) @@ -13064,10 +13167,10 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, if (paf->peer != peer || !adj->attr) continue; - show_adj_route_header( - vty, bgp, table, &header1, - &header2, json, json_scode, - json_ocode, wide); + show_adj_route_header(vty, bgp, table, + header1, header2, + json, json_scode, + json_ocode, wide); const struct prefix *rn_p = bgp_dest_get_prefix(dest); @@ -13078,13 +13181,29 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, rmap_name); if (ret != RMAP_DENY) { + if ((safi == SAFI_MPLS_VPN) + || (safi == SAFI_ENCAP) + || (safi == SAFI_EVPN)) { + if (use_json) + json_object_string_add( + json_ar, + "rd", + rd_str); + else if (show_rd + && rd_str) { + vty_out(vty, + "Route Distinguisher: %s\n", + rd_str); + show_rd = false; + } + } route_vty_out_tmp( vty, rn_p, &attr, safi, use_json, json_ar, wide); - output_count++; + (*output_count)++; } else { - filtered_count++; + (*filtered_count)++; } bgp_attr_undup(&attr, adj->attr); @@ -13092,9 +13211,9 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, } else if (type == bgp_show_adj_route_bestpath) { struct bgp_path_info *pi; - show_adj_route_header(vty, bgp, table, &header1, - &header2, json, json_scode, - json_ocode, wide); + show_adj_route_header(vty, bgp, table, header1, header2, + json, json_scode, json_ocode, + wide); for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) { @@ -13108,46 +13227,67 @@ static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi, bgp_dest_get_prefix(dest), pi->attr, safi, use_json, json_ar, wide); - output_count++; + (*output_count)++; } } } - - if (use_json) { - json_object_object_add(json, "advertisedRoutes", json_ar); - json_object_int_add(json, "totalPrefixCounter", output_count); - json_object_int_add(json, "filteredPrefixCounter", - filtered_count); - - vty_out(vty, "%s\n", json_object_to_json_string_ext( - json, JSON_C_TO_STRING_PRETTY)); - - if (!output_count && !filtered_count) { - json_object_free(json_scode); - json_object_free(json_ocode); - } - - json_object_free(json); - } else if (output_count > 0) { - if (filtered_count > 0) - vty_out(vty, - "\nTotal 
number of prefixes %ld (%ld filtered)\n", - output_count, filtered_count); - else - vty_out(vty, "\nTotal number of prefixes %ld\n", - output_count); - } } static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi, safi_t safi, enum bgp_show_adj_route_type type, const char *rmap_name, uint8_t show_flags) { + struct bgp *bgp; + struct bgp_table *table; json_object *json = NULL; + json_object *json_scode = NULL; + json_object *json_ocode = NULL; + json_object *json_ar = NULL; bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); - if (use_json) + /* Init BGP headers here so they're only displayed once + * even if 'table' is 2-tier (MPLS_VPN, ENCAP, EVPN). + */ + int header1 = 1; + int header2 = 1; + + /* + * Initialize variables for each RD + * All prefixes under an RD is aggregated within "json_routes" + */ + char rd_str[BUFSIZ] = {0}; + json_object *json_routes = NULL; + + + /* For 2-tier tables, prefix counts need to be + * maintained across multiple runs of show_adj_route() + */ + unsigned long output_count_per_rd; + unsigned long filtered_count_per_rd; + unsigned long output_count = 0; + unsigned long filtered_count = 0; + + if (use_json) { json = json_object_new_object(); + json_ar = json_object_new_object(); + json_scode = json_object_new_object(); + json_ocode = json_object_new_object(); + + json_object_string_add(json_scode, "suppressed", "s"); + json_object_string_add(json_scode, "damped", "d"); + json_object_string_add(json_scode, "history", "h"); + json_object_string_add(json_scode, "valid", "*"); + json_object_string_add(json_scode, "best", ">"); + json_object_string_add(json_scode, "multipath", "="); + json_object_string_add(json_scode, "internal", "i"); + json_object_string_add(json_scode, "ribFailure", "r"); + json_object_string_add(json_scode, "stale", "S"); + json_object_string_add(json_scode, "removed", "R"); + + json_object_string_add(json_ocode, "igp", "i"); + json_object_string_add(json_ocode, "egp", "e"); + json_object_string_add(json_ocode, "incomplete", "?"); + } if (!peer || !peer->afc[afi][safi]) { if (use_json) { @@ -13179,7 +13319,84 @@ static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi, return CMD_WARNING; } - show_adj_route(vty, peer, afi, safi, type, rmap_name, json, show_flags); + bgp = peer->bgp; + + /* labeled-unicast routes live in the unicast table */ + if (safi == SAFI_LABELED_UNICAST) + table = bgp->rib[afi][SAFI_UNICAST]; + else + table = bgp->rib[afi][safi]; + + if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP) + || (safi == SAFI_EVPN)) { + + struct bgp_dest *dest; + + for (dest = bgp_table_top(table); dest; + dest = bgp_route_next(dest)) { + table = bgp_dest_get_bgp_table_info(dest); + if (!table) + continue; + + output_count_per_rd = 0; + filtered_count_per_rd = 0; + + if (use_json) + json_routes = json_object_new_object(); + + const struct prefix_rd *prd; + prd = (const struct prefix_rd *)bgp_dest_get_prefix( + dest); + + prefix_rd2str(prd, rd_str, sizeof(rd_str)); + + show_adj_route(vty, peer, table, afi, safi, type, + rmap_name, json, json_routes, json_scode, + json_ocode, show_flags, &header1, + &header2, rd_str, &output_count_per_rd, + &filtered_count_per_rd); + + /* Don't include an empty RD in the output! 
*/ + if (json_routes && (output_count_per_rd > 0)) + json_object_object_add(json_ar, rd_str, + json_routes); + + output_count += output_count_per_rd; + filtered_count += filtered_count_per_rd; + } + } else + show_adj_route(vty, peer, table, afi, safi, type, rmap_name, + json, json_ar, json_scode, json_ocode, + show_flags, &header1, &header2, rd_str, + &output_count, &filtered_count); + + if (use_json) { + json_object_object_add(json, "advertisedRoutes", json_ar); + json_object_int_add(json, "totalPrefixCounter", output_count); + json_object_int_add(json, "filteredPrefixCounter", + filtered_count); + + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + + if (!output_count && !filtered_count) { + json_object_free(json_scode); + json_object_free(json_ocode); + } + + if (json) + json_object_free(json); + + } else if (output_count > 0) { + if (filtered_count > 0) + vty_out(vty, + "\nTotal number of prefixes %ld (%ld filtered)\n", + output_count, filtered_count); + else + vty_out(vty, "\nTotal number of prefixes %ld\n", + output_count); + } return CMD_SUCCESS; } @@ -13318,10 +13535,6 @@ DEFPY (show_ip_bgp_instance_neighbor_advertised_route, afi = CHECK_FLAG(show_flags, BGP_SHOW_OPT_AFI_IP) ? AFI_IP : AFI_IP6; FOREACH_SAFI (safi) { - if (strmatch(get_afi_safi_str(afi, safi, true), - "Unknown")) - continue; - if (!bgp_afi_safi_peer_exists(bgp, afi, safi)) continue; @@ -13341,10 +13554,6 @@ DEFPY (show_ip_bgp_instance_neighbor_advertised_route, } } else { FOREACH_AFI_SAFI (afi, safi) { - if (strmatch(get_afi_safi_str(afi, safi, true), - "Unknown")) - continue; - if (!bgp_afi_safi_peer_exists(bgp, afi, safi)) continue; @@ -14077,7 +14286,8 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name, if (pi->extra && pi->extra->damp_info) { pi_temp = pi->next; bgp_damp_info_free( - pi->extra->damp_info, + &pi->extra->damp_info, + &bgp->damp[afi][safi], 1, afi, safi); pi = pi_temp; } else @@ -14099,7 +14309,8 @@ static int bgp_clear_damp_route(struct vty *vty, const char *view_name, if (pi->extra && pi->extra->damp_info) { pi_temp = pi->next; bgp_damp_info_free( - pi->extra->damp_info, + &pi->extra->damp_info, + &bgp->damp[afi][safi], 1, afi, safi); pi = pi_temp; } else @@ -14122,7 +14333,9 @@ DEFUN (clear_ip_bgp_dampening, BGP_STR "Clear route flap dampening information\n") { - bgp_damp_info_clean(AFI_IP, SAFI_UNICAST); + VTY_DECLVAR_CONTEXT(bgp, bgp); + bgp_damp_info_clean(&bgp->damp[AFI_IP][SAFI_UNICAST], AFI_IP, + SAFI_UNICAST); return CMD_SUCCESS; } diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 17ca3f8b38..1060d2e60d 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -102,7 +102,7 @@ enum bgp_show_adj_route_type { #define BGP_NLRI_PARSE_ERROR_EVPN_TYPE1_SIZE -15 #define BGP_NLRI_PARSE_ERROR -32 -/* MAC-IP/type-2 path_info in the global routing table is linked to the +/* MAC-IP/type-2 path_info in the VNI routing table is linked to the * destination ES */ struct bgp_path_es_info { @@ -558,6 +558,8 @@ DECLARE_HOOK(bgp_process, #define BGP_SHOW_OPT_AFI_ALL (1 << 2) #define BGP_SHOW_OPT_AFI_IP (1 << 3) #define BGP_SHOW_OPT_AFI_IP6 (1 << 4) +#define BGP_SHOW_OPT_ESTABLISHED (1 << 5) +#define BGP_SHOW_OPT_FAILED (1 << 6) /* Prototypes. 
*/ extern void bgp_rib_remove(struct bgp_dest *dest, struct bgp_path_info *pi, @@ -575,6 +577,7 @@ extern void bgp_clear_route(struct peer *, afi_t, safi_t); extern void bgp_clear_route_all(struct peer *); extern void bgp_clear_adj_in(struct peer *, afi_t, safi_t); extern void bgp_clear_stale_route(struct peer *, afi_t, safi_t); +extern void bgp_set_stale_route(struct peer *peer, afi_t afi, safi_t safi); extern bool bgp_outbound_policy_exists(struct peer *, struct bgp_filter *); extern bool bgp_inbound_policy_exists(struct peer *, struct bgp_filter *); diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 637eaca397..3dc2cfbd5c 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -65,6 +65,7 @@ #include "bgpd/bgp_flowspec_util.h" #include "bgpd/bgp_encap_types.h" #include "bgpd/bgp_mpath.h" +#include "bgpd/bgp_script.h" #ifdef ENABLE_BGP_VNC #include "bgpd/rfapi/bgp_rfapi_cfg.h" @@ -337,99 +338,138 @@ static const struct route_map_rule_cmd route_match_peer_cmd = { route_match_peer_free }; -#if defined(HAVE_LUA) -static enum route_map_cmd_result_t -route_match_command(void *rule, const struct prefix *prefix, void *object) -{ - int status = RMAP_NOMATCH; - u_int32_t locpref = 0; - u_int32_t newlocpref = 0; - enum lua_rm_status lrm_status; - struct bgp_path_info *path = (struct bgp_path_info *)object; - lua_State *L = lua_initialize("/etc/frr/lua.scr"); - - if (L == NULL) - return status; +#ifdef HAVE_SCRIPTING +enum frrlua_rm_status { /* - * Setup the prefix information to pass in + * Script function run failure. This will translate into a deny */ - lua_setup_prefix_table(L, prefix); - - zlog_debug("Set up prefix table"); + LUA_RM_FAILURE = 0, /* - * Setup the bgp_path_info information + * No Match was found for the route map function */ - lua_newtable(L); - lua_pushinteger(L, path->attr->med); - lua_setfield(L, -2, "metric"); - lua_pushinteger(L, path->attr->nh_ifindex); - lua_setfield(L, -2, "ifindex"); - lua_pushstring(L, path->attr->aspath->str); - lua_setfield(L, -2, "aspath"); - lua_pushinteger(L, path->attr->local_pref); - lua_setfield(L, -2, "localpref"); - zlog_debug("%s %d", path->attr->aspath->str, path->attr->nh_ifindex); - lua_setglobal(L, "nexthop"); - - zlog_debug("Set up nexthop information"); + LUA_RM_NOMATCH, /* - * Run the rule + * Match was found but no changes were made to the incoming data. 
*/ - lrm_status = lua_run_rm_rule(L, rule); - switch (lrm_status) { + LUA_RM_MATCH, + /* + * Match was found and data was modified, so figure out what changed + */ + LUA_RM_MATCH_AND_CHANGE, +}; + +static enum route_map_cmd_result_t +route_match_script(void *rule, const struct prefix *prefix, void *object) +{ + const char *scriptname = rule; + struct bgp_path_info *path = (struct bgp_path_info *)object; + + struct frrscript *fs = frrscript_load(scriptname, NULL); + + if (!fs) { + zlog_err("Issue loading script rule; defaulting to no match"); + return RMAP_NOMATCH; + } + + enum frrlua_rm_status status_failure = LUA_RM_FAILURE, + status_nomatch = LUA_RM_NOMATCH, + status_match = LUA_RM_MATCH, + status_match_and_change = LUA_RM_MATCH_AND_CHANGE; + + /* Make result values available */ + struct frrscript_env env[] = { + {"integer", "RM_FAILURE", &status_failure}, + {"integer", "RM_NOMATCH", &status_nomatch}, + {"integer", "RM_MATCH", &status_match}, + {"integer", "RM_MATCH_AND_CHANGE", &status_match_and_change}, + {"integer", "action", &status_failure}, + {"prefix", "prefix", prefix}, + {"attr", "attributes", path->attr}, + {"peer", "peer", path->peer}, + {}}; + + struct frrscript_env results[] = { + {"integer", "action"}, + {"attr", "attributes"}, + {}, + }; + + int result = frrscript_call(fs, env); + + if (result) { + zlog_err("Issue running script rule; defaulting to no match"); + return RMAP_NOMATCH; + } + + enum frrlua_rm_status *lrm_status = + frrscript_get_result(fs, &results[0]); + + int status = RMAP_NOMATCH; + + switch (*lrm_status) { case LUA_RM_FAILURE: - zlog_debug("RM_FAILURE"); + zlog_err( + "Executing route-map match script '%s' failed; defaulting to no match", + scriptname); + status = RMAP_NOMATCH; break; case LUA_RM_NOMATCH: - zlog_debug("RM_NOMATCH"); + status = RMAP_NOMATCH; break; case LUA_RM_MATCH_AND_CHANGE: - zlog_debug("MATCH AND CHANGE"); - lua_getglobal(L, "nexthop"); - path->attr->med = get_integer(L, "metric"); - /* - * This needs to be abstraced with the set function - */ + status = RMAP_MATCH; + zlog_debug("Updating attribute based on script's values"); + + uint32_t locpref = 0; + struct attr *newattr = frrscript_get_result(fs, &results[1]); + + path->attr->med = newattr->med; + if (path->attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) locpref = path->attr->local_pref; - newlocpref = get_integer(L, "localpref"); - if (newlocpref != locpref) { - path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF); - path->attr->local_pref = newlocpref; + if (locpref != newattr->local_pref) { + SET_FLAG(path->attr->flag, + ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)); + path->attr->local_pref = newattr->local_pref; } - status = RMAP_MATCH; + + aspath_free(newattr->aspath); + XFREE(MTYPE_TMP, newattr); break; case LUA_RM_MATCH: - zlog_debug("MATCH ONLY"); status = RMAP_MATCH; break; } - lua_close(L); + + XFREE(MTYPE_TMP, lrm_status); + frrscript_unload(fs); + return status; } -static void *route_match_command_compile(const char *arg) +static void *route_match_script_compile(const char *arg) { - char *command; + char *scriptname; + + scriptname = XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg); - command = XSTRDUP(MTYPE_ROUTE_MAP_COMPILED, arg); - return command; + return scriptname; } -static void -route_match_command_free(void *rule) +static void route_match_script_free(void *rule) { XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); } -static const struct route_map_rule_cmd route_match_command_cmd = { - "command", - route_match_command, - route_match_command_compile, - route_match_command_free +static 
const struct route_map_rule_cmd route_match_script_cmd = { + "script", + route_match_script, + route_match_script_compile, + route_match_script_free }; -#endif + +#endif /* HAVE_SCRIPTING */ /* `match ip address IP_ACCESS_LIST' */ @@ -3491,8 +3531,9 @@ static void bgp_route_map_process_peer(const char *rmap_name, PEER_FLAG_SOFT_RECONFIG)) { if (bgp_debug_update(peer, NULL, NULL, 1)) zlog_debug( - "Processing route_map %s update on peer %s (inbound, soft-reconfig)", - rmap_name, peer->host); + "Processing route_map %s(%s:%s) update on peer %s (inbound, soft-reconfig)", + rmap_name, afi2str(afi), + safi2str(safi), peer->host); bgp_soft_reconfig_in(peer, afi, safi); } else if (CHECK_FLAG(peer->cap, @@ -3501,10 +3542,12 @@ static void bgp_route_map_process_peer(const char *rmap_name, PEER_CAP_REFRESH_NEW_RCV)) { if (bgp_debug_update(peer, NULL, NULL, 1)) zlog_debug( - "Processing route_map %s update on peer %s (inbound, route-refresh)", - rmap_name, peer->host); - bgp_route_refresh_send(peer, afi, safi, 0, 0, - 0); + "Processing route_map %s(%s:%s) update on peer %s (inbound, route-refresh)", + rmap_name, afi2str(afi), + safi2str(safi), peer->host); + bgp_route_refresh_send( + peer, afi, safi, 0, 0, 0, + BGP_ROUTE_REFRESH_NORMAL); } } } @@ -3640,8 +3683,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, if (BGP_DEBUG(zebra, ZEBRA)) zlog_debug( - "Processing route_map %s update on table map", - rmap_name); + "Processing route_map %s(%s:%s) update on table map", + rmap_name, afi2str(afi), + safi2str(safi)); if (route_update) bgp_zebra_announce_table(bgp, afi, safi); } @@ -3668,8 +3712,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, if (bgp_debug_zebra(bn_p)) zlog_debug( - "Processing route_map %s update on static route %s", - rmap_name, + "Processing route_map %s(%s:%s) update on static route %s", + rmap_name, afi2str(afi), + safi2str(safi), inet_ntop(bn_p->family, &bn_p->u.prefix, buf, INET6_ADDRSTRLEN)); @@ -3719,8 +3764,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, if (bgp_debug_zebra(bn_p)) zlog_debug( - "Processing route_map %s update on aggregate-address route %s", - rmap_name, + "Processing route_map %s(%s:%s) update on aggregate-address route %s", + rmap_name, afi2str(afi), + safi2str(safi), inet_ntop(bn_p->family, &bn_p->u.prefix, buf, INET6_ADDRSTRLEN)); @@ -3755,8 +3801,9 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, if (BGP_DEBUG(zebra, ZEBRA)) zlog_debug( - "Processing route_map %s update on redistributed routes", - rmap_name); + "Processing route_map %s(%s:%s) update on redistributed routes", + rmap_name, afi2str(afi), + safi2str(safi)); bgp_redistribute_resend(bgp, afi, i, red->instance); @@ -3775,8 +3822,8 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name, if (BGP_DEBUG(zebra, ZEBRA)) zlog_debug( - "Processing route_map %s update on advertise type5 route command", - rmap_name); + "Processing route_map %s(%s:%s) update on advertise type5 route command", + rmap_name, afi2str(afi), safi2str(safi)); if (route_update && advertise_type5_routes(bgp, afi)) { bgp_evpn_withdraw_type5_routes(bgp, afi, safi); @@ -4095,30 +4142,29 @@ DEFUN (no_match_peer, RMAP_EVENT_MATCH_DELETED); } -#if defined(HAVE_LUA) -DEFUN (match_command, - match_command_cmd, - "match command WORD", - MATCH_STR - "Run a command to match\n" - "The command to run\n") -{ - return bgp_route_match_add(vty, "command", argv[2]->arg, - 
RMAP_EVENT_FILTER_ADDED); -} - -DEFUN (no_match_command, - no_match_command_cmd, - "no match command WORD", +#ifdef HAVE_SCRIPTING +DEFUN (match_script, + match_script_cmd, + "[no] match script WORD", NO_STR MATCH_STR - "Run a command to match\n" - "The command to run\n") + "Execute script to determine match\n" + "The script name to run, without .lua; e.g. 'myroutemap' to run myroutemap.lua\n") { - return bgp_route_match_delete(vty, "command", argv[3]->arg, - RMAP_EVENT_FILTER_DELETED); + bool no = strmatch(argv[0]->text, "no"); + int i = 0; + argv_find(argv, argc, "WORD", &i); + const char *script = argv[i]->arg; + + if (no) { + return bgp_route_match_delete(vty, "script", script, + RMAP_EVENT_FILTER_DELETED); + } else { + return bgp_route_match_add(vty, "script", script, + RMAP_EVENT_FILTER_ADDED); + } } -#endif +#endif /* HAVE_SCRIPTING */ /* match probability */ DEFUN (match_probability, @@ -4782,6 +4828,11 @@ DEFUN (set_community, buffer_putstr(b, "no-export"); continue; } + if (strncmp(argv[i]->arg, "blackhole", strlen(argv[i]->arg)) + == 0) { + buffer_putstr(b, "blackhole"); + continue; + } if (strncmp(argv[i]->arg, "graceful-shutdown", strlen(argv[i]->arg)) == 0) { @@ -5632,8 +5683,8 @@ void bgp_route_map_init(void) route_map_install_match(&route_match_peer_cmd); route_map_install_match(&route_match_local_pref_cmd); -#if defined(HAVE_LUA) - route_map_install_match(&route_match_command_cmd); +#ifdef HAVE_SCRIPTING + route_map_install_match(&route_match_script_cmd); #endif route_map_install_match(&route_match_ip_address_cmd); route_map_install_match(&route_match_ip_next_hop_cmd); @@ -5797,9 +5848,8 @@ void bgp_route_map_init(void) install_element(RMAP_NODE, &no_set_ipv6_nexthop_prefer_global_cmd); install_element(RMAP_NODE, &set_ipv6_nexthop_peer_cmd); install_element(RMAP_NODE, &no_set_ipv6_nexthop_peer_cmd); -#if defined(HAVE_LUA) - install_element(RMAP_NODE, &match_command_cmd); - install_element(RMAP_NODE, &no_match_command_cmd); +#ifdef HAVE_SCRIPTING + install_element(RMAP_NODE, &match_script_cmd); #endif } diff --git a/bgpd/bgp_script.c b/bgpd/bgp_script.c new file mode 100644 index 0000000000..0cda1927f8 --- /dev/null +++ b/bgpd/bgp_script.c @@ -0,0 +1,192 @@ +/* BGP scripting foo + * Copyright (C) 2020 NVIDIA Corporation + * Quentin Young + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + */ + +#include <zebra.h> + +#ifdef HAVE_SCRIPTING + +#include "bgpd.h" +#include "bgp_script.h" +#include "bgp_debug.h" +#include "bgp_aspath.h" +#include "frratomic.h" +#include "frrscript.h" +#include "frrlua.h" + +static void lua_pushpeer(lua_State *L, const struct peer *peer) +{ + lua_newtable(L); + lua_pushinteger(L, peer->as); + lua_setfield(L, -2, "remote_as"); + lua_pushinteger(L, peer->local_as); + lua_setfield(L, -2, "local_as"); + lua_pushinaddr(L, &peer->remote_id); + lua_setfield(L, -2, "remote_id"); + lua_pushinaddr(L, &peer->local_id); + lua_setfield(L, -2, "local_id"); + lua_pushstring(L, lookup_msg(bgp_status_msg, peer->status, NULL)); + lua_setfield(L, -2, "state"); + lua_pushstring(L, peer->desc ? peer->desc : ""); + lua_setfield(L, -2, "description"); + lua_pushtimet(L, &peer->uptime); + lua_setfield(L, -2, "uptime"); + lua_pushtimet(L, &peer->readtime); + lua_setfield(L, -2, "last_readtime"); + lua_pushtimet(L, &peer->resettime); + lua_setfield(L, -2, "last_resettime"); + lua_pushsockunion(L, peer->su_local); + lua_setfield(L, -2, "local_address"); + lua_pushsockunion(L, peer->su_remote); + lua_setfield(L, -2, "remote_address"); + lua_pushinteger(L, peer->cap); + lua_setfield(L, -2, "capabilities"); + lua_pushinteger(L, peer->flags); + lua_setfield(L, -2, "flags"); + lua_pushstring(L, peer->password ? peer->password : ""); + lua_setfield(L, -2, "password"); + + /* Nested tables here */ + lua_newtable(L); + { + lua_newtable(L); + { + lua_pushinteger(L, peer->holdtime); + lua_setfield(L, -2, "hold"); + lua_pushinteger(L, peer->keepalive); + lua_setfield(L, -2, "keepalive"); + lua_pushinteger(L, peer->connect); + lua_setfield(L, -2, "connect"); + lua_pushinteger(L, peer->routeadv); + lua_setfield(L, -2, "route_advertisement"); + } + lua_setfield(L, -2, "configured"); + + lua_newtable(L); + { + lua_pushinteger(L, peer->v_holdtime); + lua_setfield(L, -2, "hold"); + lua_pushinteger(L, peer->v_keepalive); + lua_setfield(L, -2, "keepalive"); + lua_pushinteger(L, peer->v_connect); + lua_setfield(L, -2, "connect"); + lua_pushinteger(L, peer->v_routeadv); + lua_setfield(L, -2, "route_advertisement"); + } + lua_setfield(L, -2, "negotiated"); + } + lua_setfield(L, -2, "timers"); + + lua_newtable(L); + { + lua_pushinteger(L, atomic_load_explicit(&peer->open_in, + memory_order_relaxed)); + lua_setfield(L, -2, "open_in"); + lua_pushinteger(L, atomic_load_explicit(&peer->open_out, + memory_order_relaxed)); + lua_setfield(L, -2, "open_out"); + lua_pushinteger(L, atomic_load_explicit(&peer->update_in, + memory_order_relaxed)); + lua_setfield(L, -2, "update_in"); + lua_pushinteger(L, atomic_load_explicit(&peer->update_out, + memory_order_relaxed)); + lua_setfield(L, -2, "update_out"); + lua_pushinteger(L, atomic_load_explicit(&peer->update_time, + memory_order_relaxed)); + lua_setfield(L, -2, "update_time"); + lua_pushinteger(L, atomic_load_explicit(&peer->keepalive_in, + memory_order_relaxed)); + lua_setfield(L, -2, "keepalive_in"); + lua_pushinteger(L, atomic_load_explicit(&peer->keepalive_out, + memory_order_relaxed)); + lua_setfield(L, -2, "keepalive_out"); + lua_pushinteger(L, atomic_load_explicit(&peer->notify_in, + memory_order_relaxed)); + lua_setfield(L, -2, "notify_in"); + lua_pushinteger(L, atomic_load_explicit(&peer->notify_out, + 
memory_order_relaxed)); + lua_setfield(L, -2, "notify_out"); + lua_pushinteger(L, atomic_load_explicit(&peer->refresh_in, + memory_order_relaxed)); + lua_setfield(L, -2, "refresh_in"); + lua_pushinteger(L, atomic_load_explicit(&peer->refresh_out, + memory_order_relaxed)); + lua_setfield(L, -2, "refresh_out"); + lua_pushinteger(L, atomic_load_explicit(&peer->dynamic_cap_in, + memory_order_relaxed)); + lua_setfield(L, -2, "dynamic_cap_in"); + lua_pushinteger(L, atomic_load_explicit(&peer->dynamic_cap_out, + memory_order_relaxed)); + lua_setfield(L, -2, "dynamic_cap_out"); + lua_pushinteger(L, peer->established); + lua_setfield(L, -2, "times_established"); + lua_pushinteger(L, peer->dropped); + lua_setfield(L, -2, "times_dropped"); + } + lua_setfield(L, -2, "stats"); +} + +static void lua_pushattr(lua_State *L, const struct attr *attr) +{ + lua_newtable(L); + lua_pushinteger(L, attr->med); + lua_setfield(L, -2, "metric"); + lua_pushinteger(L, attr->nh_ifindex); + lua_setfield(L, -2, "ifindex"); + lua_pushstring(L, attr->aspath->str); + lua_setfield(L, -2, "aspath"); + lua_pushinteger(L, attr->local_pref); + lua_setfield(L, -2, "localpref"); +} + +static void *lua_toattr(lua_State *L, int idx) +{ + struct attr *attr = XCALLOC(MTYPE_TMP, sizeof(struct attr)); + + lua_getfield(L, -1, "metric"); + attr->med = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, -1, "ifindex"); + attr->nh_ifindex = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, -1, "aspath"); + attr->aspath = aspath_str2aspath(lua_tostring(L, -1)); + lua_pop(L, 1); + lua_getfield(L, -1, "localpref"); + attr->local_pref = lua_tointeger(L, -1); + lua_pop(L, 1); + + return attr; +} + +struct frrscript_codec frrscript_codecs_bgpd[] = { + {.typename = "peer", + .encoder = (encoder_func)lua_pushpeer, + .decoder = NULL}, + {.typename = "attr", + .encoder = (encoder_func)lua_pushattr, + .decoder = lua_toattr}, + {}}; + +void bgp_script_init(void) +{ + frrscript_register_type_codecs(frrscript_codecs_bgpd); +} + +#endif /* HAVE_SCRIPTING */ diff --git a/bgpd/bgp_script.h b/bgpd/bgp_script.h new file mode 100644 index 0000000000..6682c2eebd --- /dev/null +++ b/bgpd/bgp_script.h @@ -0,0 +1,34 @@ +/* BGP scripting foo + * Copyright (C) 2020 NVIDIA Corporation + * Quentin Young + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING; if not, write to the + * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + */ +#ifndef __BGP_SCRIPT__ +#define __BGP_SCRIPT__ + +#include <zebra.h> + +#ifdef HAVE_SCRIPTING + +/* + * Initialize scripting stuff. 
+ */ +void bgp_script_init(void); + +#endif /* HAVE_SCRIPTING */ + +#endif /* __BGP_SCRIPT__ */ diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c index 303f4ca56e..71868abc5f 100644 --- a/bgpd/bgp_snmp.c +++ b/bgpd/bgp_snmp.c @@ -40,6 +40,7 @@ #include "bgpd/bgp_attr.h" #include "bgpd/bgp_route.h" #include "bgpd/bgp_fsm.h" +#include "bgpd/bgp_mplsvpn_snmp.h" /* BGP4-MIB described in RFC1657. */ #define BGP4MIB 1,3,6,1,2,1,15 @@ -849,8 +850,9 @@ static uint8_t *bgp4PathAttrTable(struct variable *v, oid name[], } /* BGP Traps. */ -static struct trap_object bgpTrapList[] = {{3, {3, 1, BGPPEERLASTERROR}}, - {3, {3, 1, BGPPEERSTATE}}}; +static struct trap_object bgpTrapList[] = {{3, {3, 1, BGPPEERREMOTEADDR} }, + {3, {3, 1, BGPPEERLASTERROR} }, + {3, {3, 1, BGPPEERSTATE} } }; static int bgpTrapEstablished(struct peer *peer) { @@ -898,6 +900,7 @@ static int bgp_snmp_init(struct thread_master *tm) { smux_init(tm); REGISTER_MIB("mibII/bgp", bgp_variables, variable, bgp_oid); + bgp_mpls_l3vpn_module_init(); return 0; } diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h index 738d41ee6d..68b460149c 100644 --- a/bgpd/bgp_table.h +++ b/bgpd/bgp_table.h @@ -104,6 +104,7 @@ struct bgp_node { #define BGP_NODE_SELECT_DEFER (1 << 4) #define BGP_NODE_FIB_INSTALL_PENDING (1 << 5) #define BGP_NODE_FIB_INSTALLED (1 << 6) +#define BGP_NODE_LABEL_REQUESTED (1 << 7) struct bgp_addpath_node_data tx_addpath; diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index 2788a8ea4f..621a14014f 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -909,7 +909,6 @@ static void update_subgroup_add_peer(struct update_subgroup *subgrp, bpacket_add_peer(pkt, paf); - bpacket_queue_sanity_check(SUBGRP_PKTQ(subgrp)); if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) zlog_debug("peer %s added to subgroup s%" PRIu64, paf->peer->host, subgrp->id); @@ -1229,8 +1228,6 @@ static int update_subgroup_copy_packets(struct update_subgroup *dest, pkt = bpacket_next(pkt); } - bpacket_queue_sanity_check(SUBGRP_PKTQ(dest)); - return count; } @@ -1387,6 +1384,11 @@ static int updgrp_policy_update_walkcb(struct update_group *updgrp, void *arg) } UPDGRP_FOREACH_SUBGRP (updgrp, subgrp) { + /* Avoid supressing duplicate routes later + * when processing in subgroup_announce_table(). + */ + SET_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES); + if (changed) { if (bgp_debug_update(NULL, NULL, updgrp, 0)) zlog_debug( diff --git a/bgpd/bgp_updgrp.h b/bgpd/bgp_updgrp.h index 9cad78c26d..694636ef3d 100644 --- a/bgpd/bgp_updgrp.h +++ b/bgpd/bgp_updgrp.h @@ -109,12 +109,6 @@ struct bpacket { struct bpacket_queue { TAILQ_HEAD(pkt_queue, bpacket) pkts; -#if 0 - /* A dummy packet that is used to thread all peers that have - completed their work */ - struct bpacket sentinel; -#endif - unsigned int conf_max_count; unsigned int curr_count; unsigned int hwm_count; @@ -252,20 +246,14 @@ struct update_subgroup { uint64_t id; uint16_t sflags; +#define SUBGRP_STATUS_DEFAULT_ORIGINATE (1 << 0) +#define SUBGRP_STATUS_FORCE_UPDATES (1 << 1) - /* Subgroup flags, see below */ uint16_t flags; +#define SUBGRP_FLAG_NEEDS_REFRESH (1 << 0) }; /* - * We need to do an outbound refresh to get this subgroup into a - * consistent state. - */ -#define SUBGRP_FLAG_NEEDS_REFRESH (1 << 0) - -#define SUBGRP_STATUS_DEFAULT_ORIGINATE (1 << 0) - -/* * Add the given value to the specified counter on a subgroup and its * parent structures. 
*/ diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c index da9e1f28ae..fb64f010f9 100644 --- a/bgpd/bgp_updgrp_adv.c +++ b/bgpd/bgp_updgrp_adv.c @@ -95,6 +95,10 @@ static void adj_free(struct bgp_adj_out *adj) { TAILQ_REMOVE(&(adj->subgroup->adjq), adj, subgrp_adj_train); SUBGRP_DECR_STAT(adj->subgroup, adj_count); + + RB_REMOVE(bgp_adj_out_rb, &adj->dest->adj_out, adj); + bgp_dest_unlock_node(adj->dest); + XFREE(MTYPE_BGP_ADJ_OUT, adj); } @@ -402,11 +406,9 @@ struct bgp_adj_out *bgp_adj_out_alloc(struct update_subgroup *subgrp, adj->subgroup = subgrp; adj->addpath_tx_id = addpath_tx_id; - if (dest) { - RB_INSERT(bgp_adj_out_rb, &dest->adj_out, adj); - bgp_dest_lock_node(dest); - adj->dest = dest; - } + RB_INSERT(bgp_adj_out_rb, &dest->adj_out, adj); + bgp_dest_lock_node(dest); + adj->dest = dest; TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train); SUBGRP_INCR_STAT(subgrp, adj_count); @@ -464,6 +466,7 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest, struct peer *adv_peer; struct peer_af *paf; struct bgp *bgp; + uint32_t attr_hash = attrhash_key_make(attr); peer = SUBGRP_PEER(subgrp); afi = SUBGRP_AFI(subgrp); @@ -487,6 +490,26 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest, return; } + /* Check if we are sending the same route. This is needed to + * avoid duplicate UPDATES. For instance, filtering communities + * at egress, neighbors will see duplicate UPDATES despite + * the route wasn't changed actually. + * Do not suppress BGP UPDATES for route-refresh. + */ + if (CHECK_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_DUPLICATES) + && !CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_FORCE_UPDATES) + && adj->attr_hash == attr_hash) { + if (BGP_DEBUG(update, UPDATE_OUT)) { + char attr_str[BUFSIZ] = {0}; + + bgp_dump_attr(attr, attr_str, sizeof(attr_str)); + + zlog_debug("%s suppress UPDATE w/ attr: %s", peer->host, + attr_str); + } + return; + } + if (adj->adv) bgp_advertise_clean_subgroup(subgrp, adj); adj->adv = bgp_advertise_new(); @@ -497,11 +520,9 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest, /* bgp_path_info adj_out reference */ adv->pathi = bgp_path_info_lock(path); - if (attr) - adv->baa = bgp_advertise_intern(subgrp->hash, attr); - else - adv->baa = baa_new(); + adv->baa = bgp_advertise_intern(subgrp->hash, attr); adv->adj = adj; + adj->attr_hash = attr_hash; /* Add new advertisement to advertisement attribute list. */ bgp_advertise_add(adv->baa, adv); @@ -582,13 +603,8 @@ void bgp_adj_out_unset_subgroup(struct bgp_dest *dest, if (trigger_write) subgroup_trigger_write(subgrp); } else { - /* Remove myself from adjacency. */ - RB_REMOVE(bgp_adj_out_rb, &dest->adj_out, adj); - /* Free allocated information. */ adj_free(adj); - - bgp_dest_unlock_node(dest); } } @@ -604,7 +620,6 @@ void bgp_adj_out_remove_subgroup(struct bgp_dest *dest, struct bgp_adj_out *adj, if (adj->adv) bgp_advertise_clean_subgroup(subgrp, adj); - RB_REMOVE(bgp_adj_out_rb, &dest->adj_out, adj); adj_free(adj); } @@ -616,11 +631,8 @@ void subgroup_clear_table(struct update_subgroup *subgrp) { struct bgp_adj_out *aout, *taout; - SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) { - struct bgp_dest *dest = aout->dest; - bgp_adj_out_remove_subgroup(dest, aout, subgrp); - bgp_dest_unlock_node(dest); - } + SUBGRP_FOREACH_ADJ_SAFE (subgrp, aout, taout) + bgp_adj_out_remove_subgroup(aout->dest, aout, subgrp); } /* @@ -908,14 +920,8 @@ void subgroup_default_originate(struct update_subgroup *subgrp, int withdraw) bgp_advertise_clean_subgroup( subgrp, adj); - /* Remove from adjacency. 
*/ - RB_REMOVE(bgp_adj_out_rb, - &dest->adj_out, adj); - /* Free allocated information. */ adj_free(adj); - - bgp_dest_unlock_node(dest); } } diff --git a/bgpd/bgp_updgrp_packet.c b/bgpd/bgp_updgrp_packet.c index 866bf8178a..a13a5395b4 100644 --- a/bgpd/bgp_updgrp_packet.c +++ b/bgpd/bgp_updgrp_packet.c @@ -88,39 +88,6 @@ void bpacket_queue_init(struct bpacket_queue *q) } /* - * bpacket_queue_sanity_check - */ -void bpacket_queue_sanity_check(struct bpacket_queue __attribute__((__unused__)) - * q) -{ -#if 0 - struct bpacket *pkt; - - pkt = bpacket_queue_last (q); - assert (pkt); - assert (!pkt->buffer); - - /* - * Make sure the count of packets is correct. - */ - int num_pkts = 0; - - pkt = bpacket_queue_first (q); - while (pkt) - { - num_pkts++; - - if (num_pkts > q->curr_count) - assert (0); - - pkt = TAILQ_NEXT (pkt, pkt_train); - } - - assert (num_pkts == q->curr_count); -#endif -} - -/* * bpacket_queue_add_packet * * Internal function of bpacket_queue - and adds a @@ -168,7 +135,6 @@ struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s, else bpacket_attr_vec_arr_reset(&pkt->arr); bpacket_queue_add_packet(q, pkt); - bpacket_queue_sanity_check(q); return pkt; } @@ -176,7 +142,6 @@ struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s, * Fill in the new information into the current sentinel and create a * new sentinel. */ - bpacket_queue_sanity_check(q); last_pkt = bpacket_queue_last(q); assert(last_pkt->buffer == NULL); last_pkt->buffer = s; @@ -190,7 +155,6 @@ struct bpacket *bpacket_queue_add(struct bpacket_queue *q, struct stream *s, pkt->ver++; bpacket_queue_add_packet(q, pkt); - bpacket_queue_sanity_check(q); return last_pkt; } @@ -290,7 +254,6 @@ static int bpacket_queue_compact(struct bpacket_queue *q) num_deleted++; } - bpacket_queue_sanity_check(q); return num_deleted; } @@ -1078,7 +1041,6 @@ struct bpacket *subgroup_withdraw_packet(struct update_subgroup *subgrp) subgrp->scount--; bgp_adj_out_remove_subgroup(dest, adj, subgrp); - bgp_dest_unlock_node(dest); } if (!stream_empty(s)) { diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index b77c4b2297..f8ef5e2aa2 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -119,10 +119,15 @@ FRR_CFG_DEFAULT_BOOL(BGP_EBGP_REQUIRES_POLICY, { .val_bool = false, .match_version = "< 7.4", }, { .val_bool = true }, ) +FRR_CFG_DEFAULT_BOOL(BGP_SUPPRESS_DUPLICATES, + { .val_bool = false, .match_version = "< 7.6", }, + { .val_bool = true }, +) DEFINE_HOOK(bgp_inst_config_write, (struct bgp *bgp, struct vty *vty), (bgp, vty)) +DEFINE_HOOK(bgp_snmp_update_last_changed, (struct bgp *bgp), (bgp)) #define GR_NO_OPER \ "The Graceful Restart No Operation was executed as cmd same as previous one." 
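The bgp_updgrp_adv.c hunk above is the core of the new suppress-duplicates behaviour: each adj-out entry remembers a hash of the attributes last advertised (attrhash_key_make()), and a re-advertisement whose hash is unchanged is skipped unless the subgroup carries SUBGRP_STATUS_FORCE_UPDATES (set on clear and route-refresh). A minimal standalone sketch of that control flow, with toy types and an FNV-1a hash standing in for FRR's struct attr and attrhash_key_make(), might look like this; only the skip/force decision mirrors the diff, everything else is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for FRR's struct attr and struct bgp_adj_out. */
struct toy_attr {
        uint32_t med;
        uint32_t local_pref;
        char aspath[64];
};

struct toy_adj_out {
        uint32_t attr_hash; /* hash of the attributes last advertised */
        bool valid;
};

/* Simple FNV-1a, standing in for attrhash_key_make(). Hashing the whole
 * struct is safe here because toy_attr has no padding and is
 * zero-initialized before use. */
static uint32_t fnv1a32(const void *data, size_t len)
{
        const uint8_t *p = data;
        uint32_t h = 2166136261u;

        for (size_t i = 0; i < len; i++) {
                h ^= p[i];
                h *= 16777619u;
        }
        return h;
}

/* Returns true when an UPDATE should actually be queued. */
static bool adj_out_should_send(struct toy_adj_out *adj,
                                const struct toy_attr *attr,
                                bool suppress_duplicates, bool force_updates)
{
        uint32_t hash = fnv1a32(attr, sizeof(*attr));

        if (suppress_duplicates && !force_updates && adj->valid
            && adj->attr_hash == hash)
                return false; /* unchanged attributes: suppress duplicate */

        adj->attr_hash = hash;
        adj->valid = true;
        return true;
}

int main(void)
{
        struct toy_adj_out adj = {0};
        struct toy_attr attr = {.med = 100, .local_pref = 200};

        strcpy(attr.aspath, "65001 65002");
        printf("first advertisement:    %d\n",
               adj_out_should_send(&adj, &attr, true, false)); /* 1 */
        printf("unchanged attributes:   %d\n",
               adj_out_should_send(&adj, &attr, true, false)); /* 0 */
        printf("forced (clear/refresh): %d\n",
               adj_out_should_send(&adj, &attr, true, true));  /* 1 */
        attr.med = 50;
        printf("changed attributes:     %d\n",
               adj_out_should_send(&adj, &attr, true, false)); /* 1 */
        return 0;
}
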
@@ -209,34 +214,38 @@ static enum node_type bgp_node_type(afi_t afi, safi_t safi) static const char *get_afi_safi_vty_str(afi_t afi, safi_t safi) { - if (afi == AFI_IP && safi == SAFI_UNICAST) - return "IPv4 Unicast"; - else if (afi == AFI_IP && safi == SAFI_MULTICAST) - return "IPv4 Multicast"; - else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST) - return "IPv4 Labeled Unicast"; - else if (afi == AFI_IP && safi == SAFI_MPLS_VPN) - return "IPv4 VPN"; - else if (afi == AFI_IP && safi == SAFI_ENCAP) - return "IPv4 Encap"; - else if (afi == AFI_IP && safi == SAFI_FLOWSPEC) - return "IPv4 Flowspec"; - else if (afi == AFI_IP6 && safi == SAFI_UNICAST) - return "IPv6 Unicast"; - else if (afi == AFI_IP6 && safi == SAFI_MULTICAST) - return "IPv6 Multicast"; - else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST) - return "IPv6 Labeled Unicast"; - else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN) - return "IPv6 VPN"; - else if (afi == AFI_IP6 && safi == SAFI_ENCAP) - return "IPv6 Encap"; - else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC) - return "IPv6 Flowspec"; - else if (afi == AFI_L2VPN && safi == SAFI_EVPN) - return "L2VPN EVPN"; - else - return "Unknown"; + if (afi == AFI_IP) { + if (safi == SAFI_UNICAST) + return "IPv4 Unicast"; + if (safi == SAFI_MULTICAST) + return "IPv4 Multicast"; + if (safi == SAFI_LABELED_UNICAST) + return "IPv4 Labeled Unicast"; + if (safi == SAFI_MPLS_VPN) + return "IPv4 VPN"; + if (safi == SAFI_ENCAP) + return "IPv4 Encap"; + if (safi == SAFI_FLOWSPEC) + return "IPv4 Flowspec"; + } else if (afi == AFI_IP6) { + if (safi == SAFI_UNICAST) + return "IPv6 Unicast"; + if (safi == SAFI_MULTICAST) + return "IPv6 Multicast"; + if (safi == SAFI_LABELED_UNICAST) + return "IPv6 Labeled Unicast"; + if (safi == SAFI_MPLS_VPN) + return "IPv6 VPN"; + if (safi == SAFI_ENCAP) + return "IPv6 Encap"; + if (safi == SAFI_FLOWSPEC) + return "IPv6 Flowspec"; + } else if (afi == AFI_L2VPN) { + if (safi == SAFI_EVPN) + return "L2VPN EVPN"; + } + + return "Unknown"; } /* @@ -247,34 +256,38 @@ static const char *get_afi_safi_vty_str(afi_t afi, safi_t safi) */ static const char *get_afi_safi_json_str(afi_t afi, safi_t safi) { - if (afi == AFI_IP && safi == SAFI_UNICAST) - return "ipv4Unicast"; - else if (afi == AFI_IP && safi == SAFI_MULTICAST) - return "ipv4Multicast"; - else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST) - return "ipv4LabeledUnicast"; - else if (afi == AFI_IP && safi == SAFI_MPLS_VPN) - return "ipv4Vpn"; - else if (afi == AFI_IP && safi == SAFI_ENCAP) - return "ipv4Encap"; - else if (afi == AFI_IP && safi == SAFI_FLOWSPEC) - return "ipv4Flowspec"; - else if (afi == AFI_IP6 && safi == SAFI_UNICAST) - return "ipv6Unicast"; - else if (afi == AFI_IP6 && safi == SAFI_MULTICAST) - return "ipv6Multicast"; - else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST) - return "ipv6LabeledUnicast"; - else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN) - return "ipv6Vpn"; - else if (afi == AFI_IP6 && safi == SAFI_ENCAP) - return "ipv6Encap"; - else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC) - return "ipv6Flowspec"; - else if (afi == AFI_L2VPN && safi == SAFI_EVPN) - return "l2VpnEvpn"; - else - return "Unknown"; + if (afi == AFI_IP) { + if (safi == SAFI_UNICAST) + return "ipv4Unicast"; + if (safi == SAFI_MULTICAST) + return "ipv4Multicast"; + if (safi == SAFI_LABELED_UNICAST) + return "ipv4LabeledUnicast"; + if (safi == SAFI_MPLS_VPN) + return "ipv4Vpn"; + if (safi == SAFI_ENCAP) + return "ipv4Encap"; + if (safi == SAFI_FLOWSPEC) + return "ipv4Flowspec"; + } else if (afi == 
AFI_IP6) { + if (safi == SAFI_UNICAST) + return "ipv6Unicast"; + if (safi == SAFI_MULTICAST) + return "ipv6Multicast"; + if (safi == SAFI_LABELED_UNICAST) + return "ipv6LabeledUnicast"; + if (safi == SAFI_MPLS_VPN) + return "ipv6Vpn"; + if (safi == SAFI_ENCAP) + return "ipv6Encap"; + if (safi == SAFI_FLOWSPEC) + return "ipv6Flowspec"; + } else if (afi == AFI_L2VPN) { + if (safi == SAFI_EVPN) + return "l2VpnEvpn"; + } + + return "Unknown"; } /* return string maps to afi-safi specific container names @@ -282,30 +295,34 @@ static const char *get_afi_safi_json_str(afi_t afi, safi_t safi) */ const char *bgp_afi_safi_get_container_str(afi_t afi, safi_t safi) { - if (afi == AFI_IP && safi == SAFI_UNICAST) - return "ipv4-unicast"; - else if (afi == AFI_IP && safi == SAFI_MULTICAST) - return "ipv4-multicast"; - else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST) - return "ipv4-labeled-unicast"; - else if (afi == AFI_IP && safi == SAFI_MPLS_VPN) - return "l3vpn-ipv4-unicast"; - else if (afi == AFI_IP && safi == SAFI_FLOWSPEC) - return "ipv4-flowspec"; - else if (afi == AFI_IP6 && safi == SAFI_UNICAST) - return "ipv6-unicast"; - else if (afi == AFI_IP6 && safi == SAFI_MULTICAST) - return "ipv6-multicast"; - else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST) - return "ipv6-labeled-unicast"; - else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN) - return "l3vpn-ipv6-unicast"; - else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC) - return "ipv6-flowspec"; - else if (afi == AFI_L2VPN && safi == SAFI_EVPN) - return "l2vpn-evpn"; - else - return "Unknown"; + if (afi == AFI_IP) { + if (safi == SAFI_UNICAST) + return "ipv4-unicast"; + if (safi == SAFI_MULTICAST) + return "ipv4-multicast"; + if (safi == SAFI_LABELED_UNICAST) + return "ipv4-labeled-unicast"; + if (safi == SAFI_MPLS_VPN) + return "l3vpn-ipv4-unicast"; + if (safi == SAFI_FLOWSPEC) + return "ipv4-flowspec"; + } else if (afi == AFI_IP6) { + if (safi == SAFI_UNICAST) + return "ipv6-unicast"; + if (safi == SAFI_MULTICAST) + return "ipv6-multicast"; + if (safi == SAFI_LABELED_UNICAST) + return "ipv6-labeled-unicast"; + if (safi == SAFI_MPLS_VPN) + return "l3vpn-ipv6-unicast"; + if (safi == SAFI_FLOWSPEC) + return "ipv6-flowspec"; + } else if (afi == AFI_L2VPN) { + if (safi == SAFI_EVPN) + return "l2vpn-evpn"; + } + + return "Unknown"; } /* Utility function to get address family from current node. 
*/ @@ -475,6 +492,8 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name, SET_FLAG((*bgp)->flags, BGP_FLAG_DETERMINISTIC_MED); if (DFLT_BGP_EBGP_REQUIRES_POLICY) SET_FLAG((*bgp)->flags, BGP_FLAG_EBGP_REQUIRES_POLICY); + if (DFLT_BGP_SUPPRESS_DUPLICATES) + SET_FLAG((*bgp)->flags, BGP_FLAG_SUPPRESS_DUPLICATES); ret = BGP_SUCCESS; } @@ -864,6 +883,7 @@ static int bgp_peer_clear(struct peer *peer, afi_t afi, safi_t safi, struct listnode **nnode, enum bgp_clear_type stype) { int ret = 0; + struct peer_af *paf; /* if afi/.safi not specified, spin thru all of them */ if ((afi == AFI_UNSPEC) && (safi == SAFI_UNSPEC)) { @@ -871,6 +891,11 @@ static int bgp_peer_clear(struct peer *peer, afi_t afi, safi_t safi, safi_t tmp_safi; FOREACH_AFI_SAFI (tmp_afi, tmp_safi) { + paf = peer_af_find(peer, tmp_afi, tmp_safi); + if (paf && paf->subgroup) + SET_FLAG(paf->subgroup->sflags, + SUBGRP_STATUS_FORCE_UPDATES); + if (!peer->afc[tmp_afi][tmp_safi]) continue; @@ -889,6 +914,11 @@ static int bgp_peer_clear(struct peer *peer, afi_t afi, safi_t safi, if (!peer->afc[afi][tmp_safi]) continue; + paf = peer_af_find(peer, afi, tmp_safi); + if (paf && paf->subgroup) + SET_FLAG(paf->subgroup->sflags, + SUBGRP_STATUS_FORCE_UPDATES); + if (stype == BGP_CLEAR_SOFT_NONE) ret = peer_clear(peer, nnode); else @@ -900,6 +930,11 @@ static int bgp_peer_clear(struct peer *peer, afi_t afi, safi_t safi, if (!peer->afc[afi][safi]) return 1; + paf = peer_af_find(peer, afi, safi); + if (paf && paf->subgroup) + SET_FLAG(paf->subgroup->sflags, + SUBGRP_STATUS_FORCE_UPDATES); + if (stype == BGP_CLEAR_SOFT_NONE) ret = peer_clear(peer, nnode); else @@ -2543,6 +2578,37 @@ DEFUN_YANG(no_bgp_always_compare_med, return nb_cli_apply_changes(vty, NULL); } +DEFUN_YANG(bgp_suppress_duplicates, + bgp_suppress_duplicates_cmd, + "bgp suppress-duplicates", + "BGP specific commands\n" + "Suppress duplicate updates if the route actually not changed\n") +{ + nb_cli_enqueue_change(vty, "./global/suppress-duplicates", + NB_OP_MODIFY, "true"); + return nb_cli_apply_changes(vty, NULL); +} + +DEFUN_YANG(no_bgp_suppress_duplicates, + no_bgp_suppress_duplicates_cmd, + "no bgp suppress-duplicates", + NO_STR + "BGP specific commands\n" + "Suppress duplicate updates if the route actually not changed\n") +{ + nb_cli_enqueue_change(vty, "./global/suppress-duplicates", + NB_OP_MODIFY, "false"); + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_router_bgp_suppress_duplicates(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults) +{ + if (yang_dnode_get_bool(dnode, NULL) != SAVE_BGP_SUPPRESS_DUPLICATES) + vty_out(vty, " bgp suppress-duplicates\n"); +} + DEFUN_YANG(bgp_ebgp_requires_policy, bgp_ebgp_requires_policy_cmd, "bgp ebgp-requires-policy", @@ -4887,27 +4953,63 @@ ALIAS_HIDDEN(no_neighbor_activate, no_neighbor_activate_hidden_cmd, NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "Enable the Address Family for this Neighbor\n") -DEFUN_YANG (neighbor_set_peer_group, - neighbor_set_peer_group_cmd, - "neighbor <A.B.C.D|X:X::X:X|WORD> peer-group PGNAME", - NEIGHBOR_STR - NEIGHBOR_ADDR_STR2 - "Member of the peer-group\n" - "Peer-group name\n") +DEFUN (neighbor_set_peer_group, + neighbor_set_peer_group_cmd, + "neighbor <A.B.C.D|X:X::X:X|WORD> peer-group PGNAME", + NEIGHBOR_STR + NEIGHBOR_ADDR_STR2 + "Member of the peer-group\n" + "Peer-group name\n") { + VTY_DECLVAR_CONTEXT(bgp, bgp); int idx_peer = 1; int idx_word = 3; - char base_xpath[XPATH_MAXLEN]; + int ret; + as_t as; + union sockunion su; + struct peer *peer; + struct peer_group *group; - if 
(peer_and_group_lookup_nb(vty, argv[idx_peer]->arg, base_xpath, - sizeof(base_xpath), NULL) - < 0) + ret = str2sockunion(argv[idx_peer]->arg, &su); + if (ret < 0) { + peer = peer_lookup_by_conf_if(bgp, argv[idx_peer]->arg); + if (!peer) { + vty_out(vty, "%% Malformed address or name: %s\n", + argv[idx_peer]->arg); + return CMD_WARNING_CONFIG_FAILED; + } + } else { + if (peer_address_self_check(bgp, &su)) { + vty_out(vty, + "%% Can not configure the local system as neighbor\n"); + return CMD_WARNING_CONFIG_FAILED; + } + + /* Disallow for dynamic neighbor. */ + peer = peer_lookup(bgp, &su); + if (peer && peer_dynamic_neighbor(peer)) { + vty_out(vty, + "%% Operation not allowed on a dynamic neighbor\n"); + return CMD_WARNING_CONFIG_FAILED; + } + } + + group = peer_group_lookup(bgp, argv[idx_word]->arg); + if (!group) { + vty_out(vty, "%% Configure the peer-group first\n"); return CMD_WARNING_CONFIG_FAILED; + } - nb_cli_enqueue_change(vty, "./peer-group", NB_OP_MODIFY, - argv[idx_word]->arg); + ret = peer_group_bind(bgp, &su, peer, group, &as); - return nb_cli_apply_changes(vty, base_xpath); + if (ret == BGP_ERR_PEER_GROUP_PEER_TYPE_DIFFERENT) { + vty_out(vty, + "%% Peer with AS %u cannot be in this peer-group, members must be all internal or all external\n", + as); + return CMD_WARNING_CONFIG_FAILED; + } + + return bgp_vty_return(vty, ret); } ALIAS_HIDDEN(neighbor_set_peer_group, neighbor_set_peer_group_hidden_cmd, @@ -4918,7 +5020,7 @@ ALIAS_HIDDEN(neighbor_set_peer_group, neighbor_set_peer_group_hidden_cmd, DEFUN_YANG (no_neighbor_set_peer_group, no_neighbor_set_peer_group_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group PGNAME", + "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group [PGNAME]", NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 @@ -4939,7 +5041,7 @@ DEFUN_YANG (no_neighbor_set_peer_group, } ALIAS_HIDDEN(no_neighbor_set_peer_group, no_neighbor_set_peer_group_hidden_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group PGNAME", + "no neighbor <A.B.C.D|X:X::X:X|WORD> peer-group [PGNAME]", NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "Member of the peer-group\n" "Peer-group name\n") @@ -5463,7 +5565,6 @@ DEFUN_YANG (neighbor_nexthop_self, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5502,7 +5603,6 @@ DEFUN_YANG(neighbor_nexthop_self_force, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5587,7 +5687,6 @@ DEFUN_YANG (no_neighbor_nexthop_self_force, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5745,7 +5844,6 @@ DEFUN_YANG (neighbor_remove_private_as_all, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5785,7 +5883,6 @@ DEFUN_YANG (neighbor_remove_private_as_replace_as, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5826,7 +5923,6 @@ DEFUN_YANG (neighbor_remove_private_as_all_replace_as, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5907,7 +6003,6 @@ DEFUN_YANG 
(no_neighbor_remove_private_as_all, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5948,7 +6043,6 @@ DEFUN_YANG (no_neighbor_remove_private_as_replace_as, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -5990,7 +6084,6 @@ DEFUN_YANG (no_neighbor_remove_private_as_all_replace_as, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -6280,7 +6373,6 @@ DEFUN_YANG (neighbor_soft_reconfiguration, afi_t afi = bgp_node_afi(vty); safi_t safi = bgp_node_safi(vty); - snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, yang_afi_safi_value2identity(afi, safi)); @@ -7667,69 +7759,44 @@ ALIAS_HIDDEN( "Filter outgoing updates\n") /* Set prefix list to the peer. */ -static int peer_prefix_list_set_vty(struct vty *vty, const char *ip_str, - afi_t afi, safi_t safi, - const char *name_str, - const char *direct_str) -{ - int ret; - int direct = FILTER_IN; - struct peer *peer; - - peer = peer_and_group_lookup_vty(vty, ip_str); - if (!peer) - return CMD_WARNING_CONFIG_FAILED; - - /* Check filter direction. */ - if (strncmp(direct_str, "i", 1) == 0) - direct = FILTER_IN; - else if (strncmp(direct_str, "o", 1) == 0) - direct = FILTER_OUT; - - ret = peer_prefix_list_set(peer, afi, safi, direct, name_str); - - return bgp_vty_return(vty, ret); -} - -static int peer_prefix_list_unset_vty(struct vty *vty, const char *ip_str, - afi_t afi, safi_t safi, - const char *direct_str) +DEFPY_YANG( + neighbor_prefix_list, neighbor_prefix_list_cmd, + "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor_str prefix-list WORD$prefix_str <in|out>$direction", + NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 + "Filter updates to/from this neighbor\n" + "Name of a prefix list\n" + "Filter incoming updates\n" + "Filter outgoing updates\n") { - int ret; - struct peer *peer; - int direct = FILTER_IN; + char base_xpath[XPATH_MAXLEN]; + char af_xpath[XPATH_MAXLEN]; + char plist_xpath[XPATH_MAXLEN]; + afi_t afi = bgp_node_afi(vty); + safi_t safi = bgp_node_safi(vty); - peer = peer_and_group_lookup_vty(vty, ip_str); - if (!peer) + snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, + yang_afi_safi_value2identity(afi, safi)); + if (peer_and_group_lookup_nb(vty, neighbor_str, base_xpath, + sizeof(base_xpath), af_xpath) + < 0) return CMD_WARNING_CONFIG_FAILED; - /* Check filter direction. 
*/ - if (strncmp(direct_str, "i", 1) == 0) - direct = FILTER_IN; - else if (strncmp(direct_str, "o", 1) == 0) - direct = FILTER_OUT; - - ret = peer_prefix_list_unset(peer, afi, safi, direct); + if (strmatch(direction, "in")) + snprintf(plist_xpath, sizeof(plist_xpath), + "./%s/filter-config/plist-import", + bgp_afi_safi_get_container_str(afi, safi)); + else if (strmatch(direction, "out")) + snprintf(plist_xpath, sizeof(plist_xpath), + "./%s/filter-config/plist-export", + bgp_afi_safi_get_container_str(afi, safi)); - return bgp_vty_return(vty, ret); -} + if (!no) + nb_cli_enqueue_change(vty, plist_xpath, NB_OP_MODIFY, + prefix_str); + else + nb_cli_enqueue_change(vty, plist_xpath, NB_OP_DESTROY, NULL); -DEFUN (neighbor_prefix_list, - neighbor_prefix_list_cmd, - "neighbor <A.B.C.D|X:X::X:X|WORD> prefix-list WORD <in|out>", - NEIGHBOR_STR - NEIGHBOR_ADDR_STR2 - "Filter updates to/from this neighbor\n" - "Name of a prefix list\n" - "Filter incoming updates\n" - "Filter outgoing updates\n") -{ - int idx_peer = 1; - int idx_word = 3; - int idx_in_out = 4; - return peer_prefix_list_set_vty( - vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_node_safi(vty), - argv[idx_word]->arg, argv[idx_in_out]->arg); + return nb_cli_apply_changes(vty, base_xpath); } ALIAS_HIDDEN(neighbor_prefix_list, neighbor_prefix_list_hidden_cmd, @@ -7740,32 +7807,6 @@ ALIAS_HIDDEN(neighbor_prefix_list, neighbor_prefix_list_hidden_cmd, "Filter incoming updates\n" "Filter outgoing updates\n") -DEFUN (no_neighbor_prefix_list, - no_neighbor_prefix_list_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> prefix-list WORD <in|out>", - NO_STR - NEIGHBOR_STR - NEIGHBOR_ADDR_STR2 - "Filter updates to/from this neighbor\n" - "Name of a prefix list\n" - "Filter incoming updates\n" - "Filter outgoing updates\n") -{ - int idx_peer = 2; - int idx_in_out = 5; - return peer_prefix_list_unset_vty(vty, argv[idx_peer]->arg, - bgp_node_afi(vty), bgp_node_safi(vty), - argv[idx_in_out]->arg); -} - -ALIAS_HIDDEN(no_neighbor_prefix_list, no_neighbor_prefix_list_hidden_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> prefix-list WORD <in|out>", - NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 - "Filter updates to/from this neighbor\n" - "Name of a prefix list\n" - "Filter incoming updates\n" - "Filter outgoing updates\n") - static int peer_aslist_set_vty(struct vty *vty, const char *ip_str, afi_t afi, safi_t safi, const char *name_str, const char *direct_str) @@ -7926,106 +7967,54 @@ ALIAS_HIDDEN(neighbor_advertise_map, neighbor_advertise_map_hidden_cmd, "Name of the exist or non exist map\n") /* Set route-map to the peer. */ -static int peer_route_map_set_vty(struct vty *vty, const char *ip_str, - afi_t afi, safi_t safi, const char *name_str, - const char *direct_str) -{ - int ret; - struct peer *peer; - int direct = RMAP_IN; - struct route_map *route_map; - - peer = peer_and_group_lookup_vty(vty, ip_str); - if (!peer) - return CMD_WARNING_CONFIG_FAILED; - - /* Check filter direction. 
*/ - if (strncmp(direct_str, "in", 2) == 0) - direct = RMAP_IN; - else if (strncmp(direct_str, "o", 1) == 0) - direct = RMAP_OUT; - - route_map = route_map_lookup_warn_noexist(vty, name_str); - ret = peer_route_map_set(peer, afi, safi, direct, name_str, route_map); - - return bgp_vty_return(vty, ret); -} - -static int peer_route_map_unset_vty(struct vty *vty, const char *ip_str, - afi_t afi, safi_t safi, - const char *direct_str) +DEFPY_YANG( + neighbor_route_map, neighbor_route_map_cmd, + "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor_str route-map WORD$rmap_str <in|out>$direction", + NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 + "Apply route map to neighbor\n" + "Name of route map\n" + "Apply map to incoming routes\n" + "Apply map to outbound routes\n") { - int ret; - struct peer *peer; - int direct = RMAP_IN; + char base_xpath[XPATH_MAXLEN]; + char af_xpath[XPATH_MAXLEN]; + char rmap_xpath[XPATH_MAXLEN]; + afi_t afi = bgp_node_afi(vty); + safi_t safi = bgp_node_safi(vty); - peer = peer_and_group_lookup_vty(vty, ip_str); - if (!peer) + snprintf(af_xpath, sizeof(af_xpath), FRR_BGP_AF_XPATH, + yang_afi_safi_value2identity(afi, safi)); + if (peer_and_group_lookup_nb(vty, neighbor_str, base_xpath, + sizeof(base_xpath), af_xpath) + < 0) return CMD_WARNING_CONFIG_FAILED; - /* Check filter direction. */ - if (strncmp(direct_str, "in", 2) == 0) - direct = RMAP_IN; - else if (strncmp(direct_str, "o", 1) == 0) - direct = RMAP_OUT; - - ret = peer_route_map_unset(peer, afi, safi, direct); - - return bgp_vty_return(vty, ret); -} + if (strmatch(direction, "in")) + snprintf(rmap_xpath, sizeof(rmap_xpath), + "./%s/filter-config/rmap-import", + bgp_afi_safi_get_container_str(afi, safi)); + else if (strmatch(direction, "out")) + snprintf(rmap_xpath, sizeof(rmap_xpath), + "./%s/filter-config/rmap-export", + bgp_afi_safi_get_container_str(afi, safi)); -DEFUN (neighbor_route_map, - neighbor_route_map_cmd, - "neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>", - NEIGHBOR_STR - NEIGHBOR_ADDR_STR2 - "Apply route map to neighbor\n" - "Name of route map\n" - "Apply map to incoming routes\n" - "Apply map to outbound routes\n") -{ - int idx_peer = 1; - int idx_word = 3; - int idx_in_out = 4; - return peer_route_map_set_vty( - vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_node_safi(vty), - argv[idx_word]->arg, argv[idx_in_out]->arg); -} + if (!no) { + if (!yang_dnode_exists( + vty->candidate_config->dnode, + "/frr-route-map:lib/route-map[name='%s']", + rmap_str)) { + if (vty_shell_serv(vty)) + vty_out(vty, + "The route-map '%s' does not exist.\n", + rmap_str); + } + nb_cli_enqueue_change(vty, rmap_xpath, NB_OP_MODIFY, rmap_str); + } else + nb_cli_enqueue_change(vty, rmap_xpath, NB_OP_DESTROY, NULL); -ALIAS_HIDDEN(neighbor_route_map, neighbor_route_map_hidden_cmd, - "neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>", - NEIGHBOR_STR NEIGHBOR_ADDR_STR2 - "Apply route map to neighbor\n" - "Name of route map\n" - "Apply map to incoming routes\n" - "Apply map to outbound routes\n") - -DEFUN (no_neighbor_route_map, - no_neighbor_route_map_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>", - NO_STR - NEIGHBOR_STR - NEIGHBOR_ADDR_STR2 - "Apply route map to neighbor\n" - "Name of route map\n" - "Apply map to incoming routes\n" - "Apply map to outbound routes\n") -{ - int idx_peer = 2; - int idx_in_out = 5; - return peer_route_map_unset_vty(vty, argv[idx_peer]->arg, - bgp_node_afi(vty), bgp_node_safi(vty), - argv[idx_in_out]->arg); + return nb_cli_apply_changes(vty, base_xpath); } 
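The hunk above folds the old peer_route_map_set_vty()/peer_route_map_unset_vty() helpers into a single DEFPY_YANG handler that pushes the configuration change through the northbound layer (nb_cli_enqueue_change + nb_cli_apply_changes) instead of calling peer_route_map_set() directly. A minimal sketch of that conversion pattern follows; the command name, help strings and the "./example-leaf" xpath are invented for illustration only and do not correspond to anything defined by this patch or by FRR's YANG models.

/*
 * Illustrative sketch of the DEFPY_YANG conversion pattern used above.
 * "example-toggle" and "./example-leaf" are hypothetical placeholders.
 */
DEFPY_YANG(example_toggle, example_toggle_cmd,
	   "[no$no] example-toggle WORD$value",
	   NO_STR
	   "Example command converted to the northbound model\n"
	   "Value stored in the hypothetical leaf\n")
{
	/* Queue the change against the candidate configuration. */
	if (!no)
		nb_cli_enqueue_change(vty, "./example-leaf", NB_OP_MODIFY,
				      value);
	else
		nb_cli_enqueue_change(vty, "./example-leaf", NB_OP_DESTROY,
				      NULL);

	/* Commit relative to the current CLI node's base xpath. */
	return nb_cli_apply_changes(vty, NULL);
}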
-ALIAS_HIDDEN(no_neighbor_route_map, no_neighbor_route_map_hidden_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> route-map WORD <in|out>", - NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 - "Apply route map to neighbor\n" - "Name of route map\n" - "Apply map to incoming routes\n" - "Apply map to outbound routes\n") - /* Set unsuppress-map to the peer. */ static int peer_unsuppress_map_set_vty(struct vty *vty, const char *ip_str, afi_t afi, safi_t safi, @@ -8831,6 +8820,93 @@ DEFPY( return CMD_SUCCESS; } +DEFPY(neighbor_damp, + neighbor_damp_cmd, + "neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor dampening [(1-45)$half [(1-20000)$reuse (1-20000)$suppress (1-255)$max]]", + NEIGHBOR_STR + NEIGHBOR_ADDR_STR2 + "Enable neighbor route-flap dampening\n" + "Half-life time for the penalty\n" + "Value to start reusing a route\n" + "Value to start suppressing a route\n" + "Maximum duration to suppress a stable route\n") +{ + struct peer *peer = peer_and_group_lookup_vty(vty, neighbor); + + if (!peer) + return CMD_WARNING_CONFIG_FAILED; + if (!half) + half = DEFAULT_HALF_LIFE; + if (!reuse) { + reuse = DEFAULT_REUSE; + suppress = DEFAULT_SUPPRESS; + max = half * 4; + } + if (suppress < reuse) { + vty_out(vty, + "Suppress value cannot be less than reuse value\n"); + return CMD_WARNING_CONFIG_FAILED; + } + bgp_peer_damp_enable(peer, bgp_node_afi(vty), bgp_node_safi(vty), + half * 60, reuse, suppress, max * 60); + return CMD_SUCCESS; +} + +DEFPY(no_neighbor_damp, + no_neighbor_damp_cmd, + "no neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor dampening [HALF [REUSE SUPPRESS MAX]]", + NO_STR + NEIGHBOR_STR + NEIGHBOR_ADDR_STR2 + "Enable neighbor route-flap dampening\n" + "Half-life time for the penalty\n" + "Value to start reusing a route\n" + "Value to start suppressing a route\n" + "Maximum duration to suppress a stable route\n") +{ + struct peer *peer = peer_and_group_lookup_vty(vty, neighbor); + + if (!peer) + return CMD_WARNING_CONFIG_FAILED; + bgp_peer_damp_disable(peer, bgp_node_afi(vty), bgp_node_safi(vty)); + return CMD_SUCCESS; +} + +DEFPY (show_ip_bgp_neighbor_damp_param, + show_ip_bgp_neighbor_damp_param_cmd, + "show [ip] bgp [<ipv4|ipv6> [unicast]] neighbors <A.B.C.D|X:X::X:X|WORD>$neighbor dampening parameters [json]$json", + SHOW_STR + IP_STR + BGP_STR + BGP_AFI_HELP_STR + "Address Family modifier\n" + NEIGHBOR_STR + NEIGHBOR_ADDR_STR2 + "Neighbor route-flap dampening information\n" + "Display detail of configured dampening parameters\n" + JSON_STR) +{ + bool use_json = false; + int idx = 0; + afi_t afi = AFI_IP; + safi_t safi = SAFI_UNICAST; + struct peer *peer; + + if (argv_find(argv, argc, "ip", &idx)) + afi = AFI_IP; + if (argv_find(argv, argc, "ipv4", &idx)) + afi = AFI_IP; + if (argv_find(argv, argc, "ipv6", &idx)) + afi = AFI_IP6; + peer = peer_and_group_lookup_vty(vty, neighbor); + if (!peer) + return CMD_WARNING; + if (json) + use_json = true; + bgp_show_peer_dampening_parameters(vty, peer, afi, safi, use_json); + return CMD_SUCCESS; +} + static int set_ecom_list(struct vty *vty, int argc, struct cmd_token **argv, struct ecommunity **list, bool is_rt6) { @@ -9066,6 +9142,7 @@ DEFPY (af_label_vpn_export, vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi, bgp_get_default(), bgp); + hook_call(bgp_snmp_update_last_changed, bgp); return CMD_SUCCESS; } @@ -10499,11 +10576,29 @@ static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp, } } +/* If the peer's description includes whitespaces + * then return the first occurrence. Also strip description + * to the given size if needed. 
+ */ +static char *bgp_peer_description_stripped(char *desc, uint32_t size) +{ + static char stripped[BUFSIZ]; + char *pnt; + uint32_t len = size > strlen(desc) ? strlen(desc) : size; + + pnt = strchr(desc, ' '); + if (pnt) + len = size > (uint32_t)(pnt - desc) ? (uint32_t)(pnt - desc) + : size; + + strlcpy(stripped, desc, len + 1); + + return stripped; +} /* Show BGP peer's summary information. */ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, - bool show_failed, bool show_established, - bool use_json) + uint8_t show_flags) { struct peer *peer; struct listnode *node, *nnode; @@ -10519,6 +10614,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, json_object *json_peers = NULL; struct peer_af *paf; struct bgp_filter *filter; + bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); + bool show_failed = CHECK_FLAG(show_flags, BGP_SHOW_OPT_FAILED); + bool show_established = + CHECK_FLAG(show_flags, BGP_SHOW_OPT_ESTABLISHED); + bool show_wide = CHECK_FLAG(show_flags, BGP_SHOW_OPT_WIDE); /* labeled-unicast routes are installed in the unicast table so in order * to @@ -10815,10 +10915,13 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, vty_out(vty, "%*s", max_neighbor_width - 8, " "); if (show_failed) - vty_out(vty, "EstdCnt DropCnt ResetTime Reason\n"); + vty_out(vty, + BGP_SHOW_SUMMARY_HEADER_FAILED); else vty_out(vty, - "V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n"); + show_wide + ? BGP_SHOW_SUMMARY_HEADER_ALL_WIDE + : BGP_SHOW_SUMMARY_HEADER_ALL); } } @@ -10858,6 +10961,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, peer->domainname); json_object_int_add(json_peer, "remoteAs", peer->as); + json_object_int_add( + json_peer, "localAs", + peer->change_local_as + ? peer->change_local_as + : peer->local_as); json_object_int_add(json_peer, "version", 4); json_object_int_add(json_peer, "msgRcvd", PEER_TOTAL_RX(peer)); @@ -11014,14 +11122,33 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, &peer->ibuf->count, memory_order_relaxed); - vty_out(vty, - "4 %10u %9u %9u %8" PRIu64" %4zu %4zu %8s", - peer->as, PEER_TOTAL_RX(peer), - PEER_TOTAL_TX(peer), - peer->version[afi][safi], inq_count, - outq_count, - peer_uptime(peer->uptime, timebuf, - BGP_UPTIME_LEN, 0, NULL)); + if (show_wide) + vty_out(vty, + "4 %10u %10u %9u %9u %8" PRIu64 + " %4zu %4zu %8s", + peer->as, + peer->change_local_as + ? 
peer->change_local_as + : peer->local_as, + PEER_TOTAL_RX(peer), + PEER_TOTAL_TX(peer), + peer->version[afi][safi], + inq_count, outq_count, + peer_uptime(peer->uptime, + timebuf, + BGP_UPTIME_LEN, 0, + NULL)); + else + vty_out(vty, "4 %10u %9u %9u %8" PRIu64 + " %4zu %4zu %8s", + peer->as, PEER_TOTAL_RX(peer), + PEER_TOTAL_TX(peer), + peer->version[afi][safi], + inq_count, outq_count, + peer_uptime(peer->uptime, + timebuf, + BGP_UPTIME_LEN, 0, + NULL)); if (peer->status == Established) { if (peer->afc_recv[afi][safi]) { @@ -11039,7 +11166,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, [afi] [pfx_rcd_safi]); } else { - vty_out(vty, " NoNeg"); + vty_out(vty, " NoNeg"); } if (paf && PAF_SUBGRP(paf)) { @@ -11056,6 +11183,8 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, (PAF_SUBGRP( paf)) ->scount); + } else { + vty_out(vty, " NoNeg"); } } else { if (CHECK_FLAG(peer->flags, @@ -11075,7 +11204,10 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, vty_out(vty, " %8u", 0); } if (peer->desc) - vty_out(vty, " %s", peer->desc); + vty_out(vty, " %s", + bgp_peer_description_stripped( + peer->desc, + show_wide ? 64 : 20)); else vty_out(vty, " N/A"); vty_out(vty, "\n"); @@ -11115,14 +11247,14 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi, } static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi, - int safi, bool show_failed, - bool show_established, bool use_json) + int safi, uint8_t show_flags) { int is_first = 1; int afi_wildcard = (afi == AFI_MAX); int safi_wildcard = (safi == SAFI_MAX); int is_wildcard = (afi_wildcard || safi_wildcard); bool nbr_output = false; + bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); if (use_json && is_wildcard) vty_out(vty, "{\n"); @@ -11160,8 +11292,7 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi, } } bgp_show_summary(vty, bgp, afi, safi, - show_failed, show_established, - use_json); + show_flags); } safi++; if (!safi_wildcard) @@ -11183,14 +11314,13 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi, } static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi, - safi_t safi, bool show_failed, - bool show_established, - bool use_json) + safi_t safi, uint8_t show_flags) { struct listnode *node, *nnode; struct bgp *bgp; int is_first = 1; bool nbr_output = false; + bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); if (use_json) vty_out(vty, "{\n"); @@ -11213,8 +11343,7 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi, ? 
VRF_DEFAULT_NAME : bgp->name); } - bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed, - show_established, use_json); + bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_flags); } if (use_json) @@ -11224,16 +11353,15 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi, } int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, - safi_t safi, bool show_failed, bool show_established, - bool use_json) + safi_t safi, uint8_t show_flags) { struct bgp *bgp; + bool use_json = CHECK_FLAG(show_flags, BGP_SHOW_OPT_JSON); if (name) { if (strmatch(name, "all")) { - bgp_show_all_instances_summary_vty( - vty, afi, safi, show_failed, show_established, - use_json); + bgp_show_all_instances_summary_vty(vty, afi, safi, + show_flags); return CMD_SUCCESS; } else { bgp = bgp_lookup_by_name(name); @@ -11248,8 +11376,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, } bgp_show_summary_afi_safi(vty, bgp, afi, safi, - show_failed, show_established, - use_json); + show_flags); return CMD_SUCCESS; } } @@ -11257,8 +11384,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, bgp = bgp_get_default(); if (bgp) - bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed, - show_established, use_json); + bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_flags); else { if (use_json) vty_out(vty, "{}\n"); @@ -11273,7 +11399,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, /* `show [ip] bgp summary' commands. */ DEFPY (show_ip_bgp_summary, show_ip_bgp_summary_cmd, - "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] [all$all] summary [established|failed] [json$uj]", + "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] [all$all] summary [established|failed] [wide] [json$uj]", SHOW_STR IP_STR BGP_STR @@ -11284,13 +11410,13 @@ DEFPY (show_ip_bgp_summary, "Summary of BGP neighbor status\n" "Show only sessions in Established state\n" "Show only sessions not in Established state\n" + "Increase table width for longer output\n" JSON_STR) { char *vrf = NULL; afi_t afi = AFI_MAX; safi_t safi = SAFI_MAX; - bool show_failed = false; - bool show_established = false; + uint8_t show_flags = 0; int idx = 0; @@ -11311,12 +11437,18 @@ DEFPY (show_ip_bgp_summary, } if (argv_find(argv, argc, "failed", &idx)) - show_failed = true; + SET_FLAG(show_flags, BGP_SHOW_OPT_FAILED); + if (argv_find(argv, argc, "established", &idx)) - show_established = true; + SET_FLAG(show_flags, BGP_SHOW_OPT_ESTABLISHED); - return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed, - show_established, uj); + if (argv_find(argv, argc, "wide", &idx)) + SET_FLAG(show_flags, BGP_SHOW_OPT_WIDE); + + if (argv_find(argv, argc, "json", &idx)) + SET_FLAG(show_flags, BGP_SHOW_OPT_JSON); + + return bgp_show_summary_vty(vty, vrf, afi, safi, show_flags); } const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json) @@ -13088,6 +13220,37 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, "received"); } + /* Enhanced Route Refresh */ + if (CHECK_FLAG(p->cap, PEER_CAP_ENHANCED_RR_ADV) + || CHECK_FLAG(p->cap, + PEER_CAP_ENHANCED_RR_RCV)) { + if (CHECK_FLAG(p->cap, + PEER_CAP_ENHANCED_RR_ADV) + && CHECK_FLAG( + p->cap, + PEER_CAP_ENHANCED_RR_RCV)) + json_object_string_add( + json_cap, + "enhancedRouteRefresh", + "advertisedAndReceived"); + else if ( + CHECK_FLAG( + p->cap, + PEER_CAP_ENHANCED_RR_ADV)) + json_object_string_add( + json_cap, + 
"enhancedRouteRefresh", + "advertised"); + else if ( + CHECK_FLAG( + p->cap, + PEER_CAP_ENHANCED_RR_RCV)) + json_object_string_add( + json_cap, + "enhancedRouteRefresh", + "received"); + } + /* Multiprotocol Extensions */ json_object *json_multi = NULL; json_multi = json_object_new_object(); @@ -13460,6 +13623,28 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, vty_out(vty, "\n"); } + /* Enhanced Route Refresh */ + if (CHECK_FLAG(p->cap, PEER_CAP_ENHANCED_RR_ADV) + || CHECK_FLAG(p->cap, + PEER_CAP_ENHANCED_RR_RCV)) { + vty_out(vty, + " Enhanced Route Refresh:"); + if (CHECK_FLAG( + p->cap, + PEER_CAP_ENHANCED_RR_ADV)) + vty_out(vty, " advertised"); + if (CHECK_FLAG( + p->cap, + PEER_CAP_ENHANCED_RR_RCV)) + vty_out(vty, " %sreceived", + CHECK_FLAG( + p->cap, + PEER_CAP_REFRESH_ADV) + ? "and " + : ""); + vty_out(vty, "\n"); + } + /* Multiprotocol Extensions */ FOREACH_AFI_SAFI (afi, safi) if (p->afc_adv[afi][safi] @@ -16882,7 +17067,15 @@ static void bgp_config_write_family(struct vty *vty, struct bgp *bgp, afi_t afi, /* BGP flag dampening. */ if (CHECK_FLAG(bgp->af_flags[afi][safi], BGP_CONFIG_DAMPENING)) - bgp_config_write_damp(vty, afi, safi); + bgp_config_write_damp(vty, bgp, afi, safi); + for (ALL_LIST_ELEMENTS_RO(bgp->group, node, group)) + if (peer_af_flag_check(group->conf, afi, safi, + PEER_FLAG_CONFIG_DAMPENING)) + bgp_config_write_peer_damp(vty, group->conf, afi, safi); + for (ALL_LIST_ELEMENTS_RO(bgp->peer, node, peer)) + if (peer_af_flag_check(peer, afi, safi, + PEER_FLAG_CONFIG_DAMPENING)) + bgp_config_write_peer_damp(vty, peer, afi, safi); for (ALL_LIST_ELEMENTS(bgp->group, node, nnode, group)) bgp_config_write_peer_af(vty, bgp, group->conf, afi, safi); @@ -17019,6 +17212,16 @@ int bgp_config_write(struct vty *vty) if (bgp->reject_as_sets) vty_out(vty, " bgp reject-as-sets\n"); + /* Suppress duplicate updates if the route actually not changed + */ + if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_DUPLICATES) + != SAVE_BGP_SUPPRESS_DUPLICATES) + vty_out(vty, " %sbgp suppress-duplicates\n", + CHECK_FLAG(bgp->flags, + BGP_FLAG_SUPPRESS_DUPLICATES) + ? "" + : "no "); + /* BGP default ipv4-unicast. */ if (CHECK_FLAG(bgp->flags, BGP_FLAG_NO_DEFAULT_IPV4)) vty_out(vty, " no bgp default ipv4-unicast\n"); @@ -17602,6 +17805,10 @@ void bgp_vty_init(void) install_element(BGP_NODE, &bgp_ebgp_requires_policy_cmd); install_element(BGP_NODE, &no_bgp_ebgp_requires_policy_cmd); + /* bgp suppress-duplicates */ + install_element(BGP_NODE, &bgp_suppress_duplicates_cmd); + install_element(BGP_NODE, &no_bgp_suppress_duplicates_cmd); + /* bgp reject-as-sets */ install_element(BGP_NODE, &bgp_reject_as_sets_cmd); install_element(BGP_NODE, &no_bgp_reject_as_sets_cmd); @@ -18404,27 +18611,16 @@ void bgp_vty_init(void) /* "neighbor prefix-list" commands. 
*/ install_element(BGP_NODE, &neighbor_prefix_list_hidden_cmd); - install_element(BGP_NODE, &no_neighbor_prefix_list_hidden_cmd); install_element(BGP_IPV4_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_IPV4_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_IPV4M_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_IPV4M_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_IPV4L_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_IPV4L_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_IPV6_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_IPV6_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_IPV6M_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_IPV6M_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_IPV6L_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_IPV6L_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_VPNV4_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_VPNV4_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_VPNV6_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_VPNV6_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_FLOWSPECV4_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_FLOWSPECV4_NODE, &no_neighbor_prefix_list_cmd); install_element(BGP_FLOWSPECV6_NODE, &neighbor_prefix_list_cmd); - install_element(BGP_FLOWSPECV6_NODE, &no_neighbor_prefix_list_cmd); /* "neighbor filter-list" commands. */ install_element(BGP_NODE, &neighbor_filter_list_hidden_cmd); @@ -18451,30 +18647,17 @@ void bgp_vty_init(void) install_element(BGP_FLOWSPECV6_NODE, &no_neighbor_filter_list_cmd); /* "neighbor route-map" commands. */ - install_element(BGP_NODE, &neighbor_route_map_hidden_cmd); - install_element(BGP_NODE, &no_neighbor_route_map_hidden_cmd); install_element(BGP_IPV4_NODE, &neighbor_route_map_cmd); - install_element(BGP_IPV4_NODE, &no_neighbor_route_map_cmd); install_element(BGP_IPV4M_NODE, &neighbor_route_map_cmd); - install_element(BGP_IPV4M_NODE, &no_neighbor_route_map_cmd); install_element(BGP_IPV4L_NODE, &neighbor_route_map_cmd); - install_element(BGP_IPV4L_NODE, &no_neighbor_route_map_cmd); install_element(BGP_IPV6_NODE, &neighbor_route_map_cmd); - install_element(BGP_IPV6_NODE, &no_neighbor_route_map_cmd); install_element(BGP_IPV6M_NODE, &neighbor_route_map_cmd); - install_element(BGP_IPV6M_NODE, &no_neighbor_route_map_cmd); install_element(BGP_IPV6L_NODE, &neighbor_route_map_cmd); - install_element(BGP_IPV6L_NODE, &no_neighbor_route_map_cmd); install_element(BGP_VPNV4_NODE, &neighbor_route_map_cmd); - install_element(BGP_VPNV4_NODE, &no_neighbor_route_map_cmd); install_element(BGP_VPNV6_NODE, &neighbor_route_map_cmd); - install_element(BGP_VPNV6_NODE, &no_neighbor_route_map_cmd); install_element(BGP_FLOWSPECV4_NODE, &neighbor_route_map_cmd); - install_element(BGP_FLOWSPECV4_NODE, &no_neighbor_route_map_cmd); install_element(BGP_FLOWSPECV6_NODE, &neighbor_route_map_cmd); - install_element(BGP_FLOWSPECV6_NODE, &no_neighbor_route_map_cmd); install_element(BGP_EVPN_NODE, &neighbor_route_map_cmd); - install_element(BGP_EVPN_NODE, &no_neighbor_route_map_cmd); /* "neighbor unsuppress-map" commands. */ install_element(BGP_NODE, &neighbor_unsuppress_map_hidden_cmd); @@ -18633,6 +18816,23 @@ void bgp_vty_init(void) install_element(BGP_EVPN_NODE, &neighbor_allowas_in_cmd); install_element(BGP_EVPN_NODE, &no_neighbor_allowas_in_cmd); + /* "neighbor dampening" commands. 
*/ + install_element(BGP_NODE, &neighbor_damp_cmd); + install_element(BGP_NODE, &no_neighbor_damp_cmd); + install_element(BGP_IPV4_NODE, &neighbor_damp_cmd); + install_element(BGP_IPV4_NODE, &no_neighbor_damp_cmd); + install_element(BGP_IPV4M_NODE, &neighbor_damp_cmd); + install_element(BGP_IPV4M_NODE, &no_neighbor_damp_cmd); + install_element(BGP_IPV4L_NODE, &neighbor_damp_cmd); + install_element(BGP_IPV4L_NODE, &no_neighbor_damp_cmd); + install_element(BGP_IPV6_NODE, &neighbor_damp_cmd); + install_element(BGP_IPV6_NODE, &no_neighbor_damp_cmd); + install_element(BGP_IPV6M_NODE, &neighbor_damp_cmd); + install_element(BGP_IPV6M_NODE, &no_neighbor_damp_cmd); + install_element(BGP_IPV6L_NODE, &neighbor_damp_cmd); + install_element(BGP_IPV6L_NODE, &no_neighbor_damp_cmd); + install_element(VIEW_NODE, &show_ip_bgp_neighbor_damp_param_cmd); + /* address-family commands. */ install_element(BGP_NODE, &address_family_ipv4_safi_cmd); install_element(BGP_NODE, &address_family_ipv6_safi_cmd); diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h index 07f61ab0ea..85619dd074 100644 --- a/bgpd/bgp_vty.h +++ b/bgpd/bgp_vty.h @@ -53,6 +53,12 @@ struct bgp; " Helper - GR Mode-Helper,\n" \ " Disable - GR Mode-Disable.\n\n" +#define BGP_SHOW_SUMMARY_HEADER_ALL \ + "V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n" +#define BGP_SHOW_SUMMARY_HEADER_ALL_WIDE \ + "V AS LocalAS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n" +#define BGP_SHOW_SUMMARY_HEADER_FAILED "EstdCnt DropCnt ResetTime Reason\n" + #define BGP_SHOW_PEER_GR_CAPABILITY( \ vty, p, use_json, json) \ do { \ @@ -178,8 +184,7 @@ extern int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty, int bgp_vty_find_and_parse_bgp(struct vty *vty, struct cmd_token **argv, int argc, struct bgp **bgp, bool use_json); extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi, - safi_t safi, bool show_failed, - bool show_established, bool use_json); + safi_t safi, uint8_t show_flags); extern int bgp_clear_star_soft_in(const char *name, char *errmsg, size_t errmsg_len); extern int bgp_clear_star_soft_out(const char *name, char *errmsg, diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index db5c877759..627e996937 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -67,6 +67,10 @@ /* All information about zebra. */ struct zclient *zclient = NULL; +/* hook to indicate vrf status change for SNMP */ +DEFINE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp), + (bgp, ifp)) + /* Can we install into zebra? 
*/ static inline bool bgp_install_info_to_zebra(struct bgp *bgp) { @@ -212,8 +216,10 @@ static int bgp_ifp_destroy(struct interface *ifp) if (BGP_DEBUG(zebra, ZEBRA)) zlog_debug("Rx Intf del VRF %u IF %s", ifp->vrf_id, ifp->name); - if (bgp) + if (bgp) { bgp_update_interface_nbrs(bgp, ifp, NULL); + hook_call(bgp_vrf_status_changed, bgp, ifp); + } bgp_mac_del_mac_entry(ifp); @@ -243,6 +249,7 @@ static int bgp_ifp_up(struct interface *ifp) for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc)) bgp_nbr_connected_add(bgp, nc); + hook_call(bgp_vrf_status_changed, bgp, ifp); return 0; } @@ -297,6 +304,7 @@ static int bgp_ifp_down(struct interface *ifp) } } + hook_call(bgp_vrf_status_changed, bgp, ifp); return 0; } @@ -461,6 +469,8 @@ static int bgp_interface_vrf_update(ZAPI_CALLBACK_ARGS) for (ALL_LIST_ELEMENTS(ifp->nbr_connected, node, nnode, nc)) bgp_nbr_connected_add(bgp, nc); + + hook_call(bgp_vrf_status_changed, bgp, ifp); return 0; } @@ -896,6 +906,7 @@ bgp_path_info_to_ipv6_nexthop(struct bgp_path_info *path, ifindex_t *ifindex) /* Workaround for Cisco's nexthop bug. */ if (IN6_IS_ADDR_UNSPECIFIED( &path->attr->mp_nexthop_global) + && path->peer->su_remote && path->peer->su_remote->sa.sa_family == AF_INET6) { nexthop = @@ -1305,43 +1316,36 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p, ATTR_FLAG_BIT(BGP_ATTR_SRTE_COLOR))) api_nh->srte_color = info->attr->srte_color; - if (nh_family == AF_INET) { - if (bgp_debug_zebra(&api.prefix)) { - if (mpinfo->extra) { - zlog_debug( - "%s: p=%s, bgp_is_valid_label: %d", - __func__, buf_prefix, - bgp_is_valid_label( - &mpinfo->extra - ->label[0])); - } else { - zlog_debug( - "%s: p=%s, extra is NULL, no label", - __func__, buf_prefix); - } - } - - if (bgp->table_map[afi][safi].name) { - /* Copy info and attributes, so the route-map - apply doesn't modify the BGP route info. */ - local_attr = *mpinfo->attr; - mpinfo_cp->attr = &local_attr; + if (bgp_debug_zebra(&api.prefix)) { + if (mpinfo->extra) { + zlog_debug("%s: p=%s, bgp_is_valid_label: %d", + __func__, buf_prefix, + bgp_is_valid_label( + &mpinfo->extra->label[0])); + } else { + zlog_debug("%s: p=%s, extra is NULL, no label", + __func__, buf_prefix); } + } - if (bgp->table_map[afi][safi].name) { - if (!bgp_table_map_apply( - bgp->table_map[afi][safi].map, p, - mpinfo_cp)) - continue; + if (bgp->table_map[afi][safi].name) { + /* Copy info and attributes, so the route-map + apply doesn't modify the BGP route info. */ + local_attr = *mpinfo->attr; + mpinfo_cp->attr = &local_attr; + if (!bgp_table_map_apply(bgp->table_map[afi][safi].map, + p, mpinfo_cp)) + continue; - /* metric/tag is only allowed to be - * overridden on 1st nexthop */ - if (mpinfo == info) { - metric = mpinfo_cp->attr->med; - tag = mpinfo_cp->attr->tag; - } + /* metric/tag is only allowed to be + * overridden on 1st nexthop */ + if (mpinfo == info) { + metric = mpinfo_cp->attr->med; + tag = mpinfo_cp->attr->tag; } + } + if (nh_family == AF_INET) { nh_updated = update_ipv4nh_for_route_install( nh_othervrf, nh_othervrf ? @@ -1352,38 +1356,23 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p, ifindex_t ifindex = IFINDEX_INTERNAL; struct in6_addr *nexthop; - if (bgp->table_map[afi][safi].name) { - /* Copy info and attributes, so the route-map - apply doesn't modify the BGP route info. */ - local_attr = *mpinfo->attr; - mpinfo_cp->attr = &local_attr; - } - - if (bgp->table_map[afi][safi].name) { - /* Copy info and attributes, so the route-map - apply doesn't modify the BGP route info. 
*/ - local_attr = *mpinfo->attr; - mpinfo_cp->attr = &local_attr; - - if (!bgp_table_map_apply( - bgp->table_map[afi][safi].map, p, - mpinfo_cp)) - continue; - - /* metric/tag is only allowed to be - * overridden on 1st nexthop */ - if (mpinfo == info) { - metric = mpinfo_cp->attr->med; - tag = mpinfo_cp->attr->tag; - } - } nexthop = bgp_path_info_to_ipv6_nexthop(mpinfo_cp, &ifindex); - nh_updated = update_ipv6nh_for_route_install( - nh_othervrf, nh_othervrf ? - info->extra->bgp_orig : bgp, - nexthop, ifindex, - mpinfo, info, is_evpn, api_nh); + + if (!nexthop) + nh_updated = update_ipv4nh_for_route_install( + nh_othervrf, + nh_othervrf ? info->extra->bgp_orig + : bgp, + &mpinfo_cp->attr->nexthop, + mpinfo_cp->attr, is_evpn, api_nh); + else + nh_updated = update_ipv6nh_for_route_install( + nh_othervrf, + nh_othervrf ? info->extra->bgp_orig + : bgp, + nexthop, ifindex, mpinfo, info, is_evpn, + api_nh); } /* Did we get proper nexthop info to update zebra? */ @@ -1499,9 +1488,7 @@ void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p, api_nh->vrf_id, api_nh->weight, label_buf, eth_buf); } - } - if (bgp_debug_zebra(p)) { int recursion_flag = 0; if (CHECK_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION)) @@ -2409,7 +2396,6 @@ static int bgp_zebra_route_notify_owner(int command, struct zclient *zclient, struct prefix p; enum zapi_route_notify_owner note; uint32_t table_id; - char buf[PREFIX_STRLEN]; afi_t afi; safi_t safi; struct bgp_dest *dest; @@ -2431,9 +2417,6 @@ static int bgp_zebra_route_notify_owner(int command, struct zclient *zclient, return -1; } - if (BGP_DEBUG(zebra, ZEBRA)) - prefix2str(&p, buf, sizeof(buf)); - /* Find the bgp route node */ dest = bgp_afi_node_lookup(bgp->rib[afi][safi], afi, safi, &p, &bgp->vrf_prd); @@ -2452,7 +2435,7 @@ static int bgp_zebra_route_notify_owner(int command, struct zclient *zclient, BGP_NODE_FIB_INSTALL_PENDING); SET_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED); if (BGP_DEBUG(zebra, ZEBRA)) - zlog_debug("route %s : INSTALLED", buf); + zlog_debug("route %pRN : INSTALLED", dest); /* Find the best route */ for (pi = dest->info; pi; pi = pi->next) { /* Process aggregate route */ @@ -2468,8 +2451,8 @@ static int bgp_zebra_route_notify_owner(int command, struct zclient *zclient, dest, new_select); else { flog_err(EC_BGP_INVALID_ROUTE, - "selected route %s not found", - buf); + "selected route %pRN not found", + dest); return -1; } } @@ -2480,16 +2463,24 @@ static int bgp_zebra_route_notify_owner(int command, struct zclient *zclient, * route add later */ UNSET_FLAG(dest->flags, BGP_NODE_FIB_INSTALLED); + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("route %pRN: Removed from Fib", dest); break; case ZAPI_ROUTE_FAIL_INSTALL: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("route: %pRN Failed to Install into Fib", + dest); /* Error will be logged by zebra module */ break; case ZAPI_ROUTE_BETTER_ADMIN_WON: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("route: %pRN removed due to better admin won", + dest); /* No action required */ break; case ZAPI_ROUTE_REMOVE_FAIL: - zlog_warn("%s: Route %s failure to remove", - __func__, buf); + zlog_warn("%s: Route %pRN failure to remove", + __func__, dest); break; } return 0; @@ -2983,6 +2974,7 @@ static int bgp_ifp_create(struct interface *ifp) bgp_mac_add_mac_entry(ifp); bgp_update_interface_nbrs(bgp, ifp, ifp); + hook_call(bgp_vrf_status_changed, bgp, ifp); return 0; } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index b6afa391f3..b11fd5288a 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -121,12 +121,20 @@ extern struct zclient 
*zclient; static int bgp_check_main_socket(bool create, struct bgp *bgp) { static int bgp_server_main_created; + struct listnode *node; + char *address; if (create) { if (bgp_server_main_created) return 0; - if (bgp_socket(bgp, bm->port, bm->address) < 0) - return BGP_ERR_INVALID_VALUE; + if (list_isempty(bm->addresses)) { + if (bgp_socket(bgp, bm->port, NULL) < 0) + return BGP_ERR_INVALID_VALUE; + } else { + for (ALL_LIST_ELEMENTS_RO(bm->addresses, node, address)) + if (bgp_socket(bgp, bm->port, address) < 0) + return BGP_ERR_INVALID_VALUE; + } bgp_server_main_created = 1; return 0; } @@ -2379,6 +2387,14 @@ int peer_delete(struct peer *peer) bgp_bfd_deregister_peer(peer); + /* Delete peer route flap dampening configuration. This needs to happen + * before removing the peer from peer groups. + */ + FOREACH_AFI_SAFI (afi, safi) + if (peer_af_flag_check(peer, afi, safi, + PEER_FLAG_CONFIG_DAMPENING)) + bgp_peer_damp_disable(peer, afi, safi); + /* If this peer belongs to peer group, clear up the relationship. */ if (peer->group) { @@ -3280,7 +3296,8 @@ struct bgp *bgp_get_evpn(void) int bgp_handle_socket(struct bgp *bgp, struct vrf *vrf, vrf_id_t old_vrf_id, bool create) { - int ret = 0; + struct listnode *node; + char *address; /* Create BGP server socket, if listen mode not disabled */ if (!bgp || bgp_option_check(BGP_OPT_NO_LISTEN)) @@ -3309,9 +3326,14 @@ int bgp_handle_socket(struct bgp *bgp, struct vrf *vrf, vrf_id_t old_vrf_id, */ if (vrf->vrf_id == VRF_UNKNOWN) return 0; - ret = bgp_socket(bgp, bm->port, bm->address); - if (ret < 0) - return BGP_ERR_INVALID_VALUE; + if (list_isempty(bm->addresses)) { + if (bgp_socket(bgp, bm->port, NULL) < 0) + return BGP_ERR_INVALID_VALUE; + } else { + for (ALL_LIST_ELEMENTS_RO(bm->addresses, node, address)) + if (bgp_socket(bgp, bm->port, address) < 0) + return BGP_ERR_INVALID_VALUE; + } return 0; } else return bgp_check_main_socket(create, bgp); @@ -3500,6 +3522,11 @@ int bgp_delete(struct bgp *bgp) BGP_TIMER_OFF(gr_info->t_route_select); } + /* Delete route flap dampening configuration */ + FOREACH_AFI_SAFI (afi, safi) { + bgp_damp_disable(bgp, afi, safi); + } + if (BGP_DEBUG(zebra, ZEBRA)) { if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) zlog_debug("Deleting Default VRF"); @@ -3677,6 +3704,7 @@ void bgp_free(struct bgp *bgp) XFREE(MTYPE_BGP, bgp->name); XFREE(MTYPE_BGP, bgp->name_pretty); + XFREE(MTYPE_BGP, bgp->snmp_stats); XFREE(MTYPE_BGP, bgp); } @@ -3990,6 +4018,8 @@ bool peer_active_nego(struct peer *peer) void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, enum peer_change_type type) { + struct peer_af *paf; + if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) return; @@ -4010,7 +4040,8 @@ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, } else if (type == peer_change_reset_in) { if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) || CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) - bgp_route_refresh_send(peer, afi, safi, 0, 0, 0); + bgp_route_refresh_send(peer, afi, safi, 0, 0, 0, + BGP_ROUTE_REFRESH_NORMAL); else { if ((peer->doppelganger) && (peer->doppelganger->status != Deleted) @@ -4022,7 +4053,12 @@ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, BGP_NOTIFY_CEASE_CONFIG_CHANGE); } } else if (type == peer_change_reset_out) { - update_group_adjust_peer(peer_af_find(peer, afi, safi)); + paf = peer_af_find(peer, afi, safi); + if (paf && paf->subgroup) + SET_FLAG(paf->subgroup->sflags, + SUBGRP_STATUS_FORCE_UPDATES); + + update_group_adjust_peer(paf); bgp_announce_route(peer, afi, safi); 
} } @@ -5032,20 +5068,20 @@ int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi) continue; /* Remove flag and configuration on peer-group member. */ - UNSET_FLAG(peer->af_flags[afi][safi], + UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE); - if (peer->default_rmap[afi][safi].name) + if (member->default_rmap[afi][safi].name) XFREE(MTYPE_ROUTE_MAP_NAME, - peer->default_rmap[afi][safi].name); - route_map_counter_decrement(peer->default_rmap[afi][safi].map); - peer->default_rmap[afi][safi].name = NULL; - peer->default_rmap[afi][safi].map = NULL; + member->default_rmap[afi][safi].name); + route_map_counter_decrement(member->default_rmap[afi][safi].map); + member->default_rmap[afi][safi].name = NULL; + member->default_rmap[afi][safi].map = NULL; /* Update peer route announcements. */ - if (peer->status == Established && peer->afc_nego[afi][safi]) { - update_group_adjust_peer(peer_af_find(peer, afi, safi)); - bgp_default_originate(peer, afi, safi, 1); - bgp_announce_route(peer, afi, safi); + if (member->status == Established && member->afc_nego[afi][safi]) { + update_group_adjust_peer(peer_af_find(member, afi, safi)); + bgp_default_originate(member, afi, safi, 1); + bgp_announce_route(member, afi, safi); } } @@ -5083,7 +5119,8 @@ static void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi, bgp_soft_reconfig_in(peer, afi, safi); else if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) || CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) - bgp_route_refresh_send(peer, afi, safi, 0, 0, 0); + bgp_route_refresh_send(peer, afi, safi, 0, 0, 0, + BGP_ROUTE_REFRESH_NORMAL); } } @@ -5180,10 +5217,10 @@ int peer_timers_set(struct peer *peer, uint32_t keepalive, uint32_t holdtime) struct peer *member; struct listnode *node, *nnode; - if (keepalive > 65535) + if (keepalive > UINT16_MAX) return BGP_ERR_INVALID_VALUE; - if (holdtime > 65535) + if (holdtime > UINT16_MAX) return BGP_ERR_INVALID_VALUE; if (holdtime < 3 && holdtime != 0) @@ -5260,7 +5297,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect) struct peer *member; struct listnode *node, *nnode; - if (connect > 65535) + if (connect > UINT16_MAX) return BGP_ERR_INVALID_VALUE; /* Set flag and configuration on peer. */ @@ -5269,9 +5306,14 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect) peer->v_connect = connect; /* Skip peer-group mechanics for regular peers. */ - if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) + if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { + if (peer->status != Established) { + if (peer_active(peer)) + BGP_EVENT_ADD(peer, BGP_Stop); + BGP_EVENT_ADD(peer, BGP_Start); + } return 0; - + } /* * Set flag and configuration on all peer-group members, unless they are * explicitely overriding peer-group configuration. @@ -5285,6 +5327,12 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect) SET_FLAG(member->flags, PEER_FLAG_TIMER_CONNECT); member->connect = connect; member->v_connect = connect; + + if (member->status != Established) { + if (peer_active(member)) + BGP_EVENT_ADD(member, BGP_Stop); + BGP_EVENT_ADD(member, BGP_Start); + } } return 0; @@ -5312,9 +5360,14 @@ int peer_timers_connect_unset(struct peer *peer) peer->v_connect = peer->bgp->default_connect_retry; /* Skip peer-group mechanics for regular peers. 
*/ - if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) + if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { + if (peer->status != Established) { + if (peer_active(peer)) + BGP_EVENT_ADD(peer, BGP_Stop); + BGP_EVENT_ADD(peer, BGP_Start); + } return 0; - + } /* * Remove flag and configuration from all peer-group members, unless * they are explicitely overriding peer-group configuration. @@ -5328,6 +5381,12 @@ int peer_timers_connect_unset(struct peer *peer) UNSET_FLAG(member->flags, PEER_FLAG_TIMER_CONNECT); member->connect = 0; member->v_connect = peer->bgp->default_connect_retry; + + if (member->status != Established) { + if (peer_active(member)) + BGP_EVENT_ADD(member, BGP_Stop); + BGP_EVENT_ADD(member, BGP_Start); + } } return 0; @@ -5648,11 +5707,6 @@ int peer_allowas_in_unset(struct peer *peer, afi_t afi, safi_t safi) PEER_FLAG_ALLOWAS_IN)) continue; - /* Skip peers where flag is already disabled. */ - if (!CHECK_FLAG(member->af_flags[afi][safi], - PEER_FLAG_ALLOWAS_IN)) - continue; - /* Remove flags and configuration on peer-group member. */ UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN); UNSET_FLAG(member->af_flags[afi][safi], @@ -6938,6 +6992,22 @@ int peer_advertise_map_unset(struct peer *peer, afi_t afi, safi_t safi, return 0; } +static bool peer_maximum_prefix_clear_overflow(struct peer *peer) +{ + if (!CHECK_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW)) + return false; + + UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW); + if (peer->t_pmax_restart) { + BGP_TIMER_OFF(peer->t_pmax_restart); + if (bgp_debug_neighbor_events(peer)) + zlog_debug("%s Maximum-prefix restart timer cancelled", + peer->host); + } + BGP_EVENT_ADD(peer, BGP_Start); + return true; +} + int peer_maximum_prefix_set(struct peer *peer, afi_t afi, safi_t safi, uint32_t max, uint8_t threshold, int warning, uint16_t restart, bool force) @@ -7059,7 +7129,11 @@ int peer_maximum_prefix_unset(struct peer *peer, afi_t afi, safi_t safi) member->pmax[afi][safi] = 0; member->pmax_threshold[afi][safi] = 0; member->pmax_restart[afi][safi] = 0; + + peer_maximum_prefix_clear_overflow(member); } + } else { + peer_maximum_prefix_clear_overflow(peer); } return 0; @@ -7094,6 +7168,7 @@ int is_ebgp_multihop_configured(struct peer *peer) int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops) { struct peer_group *group; + struct peer *gpeer; struct listnode *node, *nnode; int ret; @@ -7130,9 +7205,10 @@ int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops) return ret; } else { group = peer->group; + group->conf->gtsm_hops = gtsm_hops; for (ALL_LIST_ELEMENTS(group->peer, node, nnode, - peer)) { - peer->gtsm_hops = group->conf->gtsm_hops; + gpeer)) { + gpeer->gtsm_hops = group->conf->gtsm_hops; /* Calling ebgp multihop also resets the * session. @@ -7142,7 +7218,7 @@ int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops) * value is * irrelevant. 
*/ - peer_ebgp_multihop_set(peer, MAXTTL); + peer_ebgp_multihop_set(gpeer, MAXTTL); } } } else { @@ -7163,9 +7239,10 @@ int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops) MAXTTL + 1 - gtsm_hops); } else { group = peer->group; + group->conf->gtsm_hops = gtsm_hops; for (ALL_LIST_ELEMENTS(group->peer, node, nnode, - peer)) { - peer->gtsm_hops = group->conf->gtsm_hops; + gpeer)) { + gpeer->gtsm_hops = group->conf->gtsm_hops; /* Change setting of existing peer * established then change value (may break @@ -7175,17 +7252,18 @@ int peer_ttl_security_hops_set(struct peer *peer, int gtsm_hops) * no session then do nothing (will get * handled by next connection) */ - if (peer->fd >= 0 - && peer->gtsm_hops + if (gpeer->fd >= 0 + && gpeer->gtsm_hops != BGP_GTSM_HOPS_DISABLED) sockopt_minttl( - peer->su.sa.sa_family, peer->fd, - MAXTTL + 1 - peer->gtsm_hops); - if ((peer->status < Established) - && peer->doppelganger - && (peer->doppelganger->fd >= 0)) - sockopt_minttl(peer->su.sa.sa_family, - peer->doppelganger->fd, + gpeer->su.sa.sa_family, + gpeer->fd, + MAXTTL + 1 - gpeer->gtsm_hops); + if ((gpeer->status < Established) + && gpeer->doppelganger + && (gpeer->doppelganger->fd >= 0)) + sockopt_minttl(gpeer->su.sa.sa_family, + gpeer->doppelganger->fd, MAXTTL + 1 - gtsm_hops); } } @@ -7262,18 +7340,8 @@ int peer_clear(struct peer *peer, struct listnode **nnode) { if (!CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN) || !CHECK_FLAG(peer->bgp->flags, BGP_FLAG_SHUTDOWN)) { - if (CHECK_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW)) { - UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW); - if (peer->t_pmax_restart) { - BGP_TIMER_OFF(peer->t_pmax_restart); - if (bgp_debug_neighbor_events(peer)) - zlog_debug( - "%s Maximum-prefix restart timer canceled", - peer->host); - } - BGP_EVENT_ADD(peer, BGP_Start); + if (peer_maximum_prefix_clear_overflow(peer)) return 0; - } peer->v_start = BGP_INIT_START_TIMER; if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->status)) @@ -7331,19 +7399,23 @@ int peer_clear_soft(struct peer *peer, afi_t afi, safi_t safi, PEER_STATUS_ORF_PREFIX_SEND)) bgp_route_refresh_send( peer, afi, safi, prefix_type, - REFRESH_DEFER, 1); - bgp_route_refresh_send(peer, afi, safi, - prefix_type, - REFRESH_IMMEDIATE, 0); + REFRESH_DEFER, 1, + BGP_ROUTE_REFRESH_NORMAL); + bgp_route_refresh_send( + peer, afi, safi, prefix_type, + REFRESH_IMMEDIATE, 0, + BGP_ROUTE_REFRESH_NORMAL); } else { if (CHECK_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_ORF_PREFIX_SEND)) bgp_route_refresh_send( peer, afi, safi, prefix_type, - REFRESH_IMMEDIATE, 1); + REFRESH_IMMEDIATE, 1, + BGP_ROUTE_REFRESH_NORMAL); else - bgp_route_refresh_send(peer, afi, safi, - 0, 0, 0); + bgp_route_refresh_send( + peer, afi, safi, 0, 0, 0, + BGP_ROUTE_REFRESH_NORMAL); } return 0; } @@ -7362,8 +7434,9 @@ int peer_clear_soft(struct peer *peer, afi_t afi, safi_t safi, message to the peer. 
*/ if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_OLD_RCV) || CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_NEW_RCV)) - bgp_route_refresh_send(peer, afi, safi, 0, 0, - 0); + bgp_route_refresh_send( + peer, afi, safi, 0, 0, 0, + BGP_ROUTE_REFRESH_NORMAL); else return BGP_ERR_SOFT_RECONFIG_UNCONFIGURED; } @@ -7418,7 +7491,8 @@ char *peer_uptime(time_t uptime2, char *buf, size_t len, bool use_json, return buf; } -void bgp_master_init(struct thread_master *master, const int buffer_size) +void bgp_master_init(struct thread_master *master, const int buffer_size, + struct list *addresses) { qobj_init(); @@ -7428,6 +7502,7 @@ void bgp_master_init(struct thread_master *master, const int buffer_size) bm->bgp = list_new(); bm->listen_sockets = list_new(); bm->port = BGP_PORT_DEFAULT; + bm->addresses = addresses; bm->master = master; bm->start_time = bgp_clock(); bm->t_rmap_update = NULL; @@ -7613,6 +7688,8 @@ void bgp_init(unsigned short instance) /* BFD init */ bgp_bfd_init(); + bgp_lp_vty_init(); + cmd_variable_handler_register(bgp_viewvrf_var_handlers); } diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 775be7980e..7a8f99163e 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -43,6 +43,7 @@ #include "bgp_labelpool.h" #include "bgp_addpath_types.h" #include "bgp_nexthop.h" +#include "bgp_damp.h" #define BGP_MAX_HOSTNAME 64 /* Linux max, is larger than most other sys */ #define BGP_PEER_MAX_HASH_SIZE 16384 @@ -124,8 +125,8 @@ struct bgp_master { /* BGP port number. */ uint16_t port; - /* Listener address */ - char *address; + /* Listener addresses */ + struct list *addresses; /* The Mac table */ struct hash *self_mac_hash; @@ -309,6 +310,15 @@ enum bgp_link_bw_handling { RB_HEAD(bgp_es_vrf_rb_head, bgp_evpn_es_vrf); RB_PROTOTYPE(bgp_es_vrf_rb_head, bgp_evpn_es_vrf, rb_node, bgp_es_vrf_rb_cmp); +struct bgp_snmp_stats { + /* SNMP variables for mplsL3Vpn*/ + time_t creation_time; + time_t modify_time; + bool active; + uint32_t routes_added; + uint32_t routes_deleted; +}; + /* BGP instance structure. */ struct bgp { /* AS number of this BGP instance. */ @@ -362,6 +372,8 @@ struct bgp { uint32_t subgrps_deleted; } update_group_stats; + struct bgp_snmp_stats *snmp_stats; + /* BGP configuration. */ uint16_t config; #define BGP_CONFIG_CLUSTER_ID (1 << 0) @@ -464,6 +476,7 @@ struct bgp { /* This flag is set if the instance is in administrative shutdown */ #define BGP_FLAG_SHUTDOWN (1 << 27) #define BGP_FLAG_SUPPRESS_FIB_PENDING (1 << 28) +#define BGP_FLAG_SUPPRESS_DUPLICATES (1 << 29) enum global_mode GLOBAL_GR_FSM[BGP_GLOBAL_GR_MODE] [BGP_GLOBAL_GR_EVENT_CMD]; @@ -694,6 +707,9 @@ struct bgp { uint32_t condition_filter_count; struct thread *t_condition_check; + /* BGP route flap dampening configuration */ + struct bgp_damp_config damp[AFI_MAX][SAFI_MAX]; + QOBJ_FIELDS }; DECLARE_QOBJ_TYPE(bgp) @@ -905,7 +921,47 @@ struct bgp_peer_gr { bgp_peer_gr_action_ptr action_fun; }; -/* BGP finite state machine events. */ +/* + * BGP FSM event codes, per RFC 4271 ss. 
8.1 + */ +enum bgp_fsm_rfc_codes { + BGP_FSM_ManualStart = 1, + BGP_FSM_ManualStop = 2, + BGP_FSM_AutomaticStart = 3, + BGP_FSM_ManualStart_with_PassiveTcpEstablishment = 4, + BGP_FSM_AutomaticStart_with_PassiveTcpEstablishment = 5, + BGP_FSM_AutomaticStart_with_DampPeerOscillations = 6, + BGP_FSM_AutomaticStart_with_DampPeerOscillations_and_PassiveTcpEstablishment = + 7, + BGP_FSM_AutomaticStop = 8, + BGP_FSM_ConnectRetryTimer_Expires = 9, + BGP_FSM_HoldTimer_Expires = 10, + BGP_FSM_KeepaliveTimer_Expires = 11, + BGP_FSM_DelayOpenTimer_Expires = 12, + BGP_FSM_IdleHoldTimer_Expires = 13, + BGP_FSM_TcpConnection_Valid = 14, + BGP_FSM_Tcp_CR_Invalid = 15, + BGP_FSM_Tcp_CR_Acked = 16, + BGP_FSM_TcpConnectionConfirmed = 17, + BGP_FSM_TcpConnectionFails = 18, + BGP_FSM_BGPOpen = 19, + BGP_FSM_BGPOpen_with_DelayOpenTimer_running = 20, + BGP_FSM_BGPHeaderErr = 21, + BGP_FSM_BGPOpenMsgErr = 22, + BGP_FSM_OpenCollisionDump = 23, + BGP_FSM_NotifMsgVerErr = 24, + BGP_FSM_NotifMsg = 25, + BGP_FSM_KeepAliveMsg = 26, + BGP_FSM_UpdateMsg = 27, + BGP_FSM_UpdateMsgErr = 28 +}; + +/* + * BGP finite state machine events + * + * Note: these do not correspond to RFC-defined event codes. Those are + * defined elsewhere. + */ enum bgp_fsm_events { BGP_Start = 1, BGP_Stop, @@ -1062,6 +1118,8 @@ struct peer { #define PEER_CAP_ENHE_RCV (1U << 14) /* Extended nexthop received */ #define PEER_CAP_HOSTNAME_ADV (1U << 15) /* hostname advertised */ #define PEER_CAP_HOSTNAME_RCV (1U << 16) /* hostname received */ +#define PEER_CAP_ENHANCED_RR_ADV (1U << 17) /* enhanced rr advertised */ +#define PEER_CAP_ENHANCED_RR_RCV (1U << 18) /* enhanced rr received */ /* Capability flags (reset in bgp_stop) */ uint32_t af_cap[AFI_MAX][SAFI_MAX]; @@ -1196,6 +1254,9 @@ struct peer { /* Last update packet sent time */ time_t pkt_stime[AFI_MAX][SAFI_MAX]; + /* Peer / peer group route flap dampening configuration */ + struct bgp_damp_config damp[AFI_MAX][SAFI_MAX]; + /* Peer Per AF flags */ /* * Please consult the comments for *flags_override*, *flags_invert* and @@ -1233,6 +1294,8 @@ struct peer { #define PEER_FLAG_SEND_LARGE_COMMUNITY (1U << 26) /* Send large Communities */ #define PEER_FLAG_MAX_PREFIX_OUT (1U << 27) /* outgoing maximum prefix */ #define PEER_FLAG_MAX_PREFIX_FORCE (1U << 28) /* maximum-prefix <num> force */ +#define PEER_FLAG_CONFIG_DAMPENING (1U << 29) /* route flap dampening */ + enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX]; @@ -1263,6 +1326,11 @@ struct peer { #define PEER_STATUS_PREFIX_LIMIT (1U << 3) /* exceed prefix-limit */ #define PEER_STATUS_EOR_SEND (1U << 4) /* end-of-rib send to peer */ #define PEER_STATUS_EOR_RECEIVED (1U << 5) /* end-of-rib received from peer */ +#define PEER_STATUS_ENHANCED_REFRESH (1U << 6) /* Enhanced Route Refresh */ +#define PEER_STATUS_BORR_SEND (1U << 7) /* BoRR send to peer */ +#define PEER_STATUS_BORR_RECEIVED (1U << 8) /* BoRR received from peer */ +#define PEER_STATUS_EORR_SEND (1U << 9) /* EoRR send to peer */ +#define PEER_STATUS_EORR_RECEIVED (1U << 10) /* EoRR received from peer */ /* Configured timer values. */ _Atomic uint32_t holdtime; @@ -1296,6 +1364,7 @@ struct peer { struct thread *t_gr_stale; struct thread *t_generate_updgrp_packets; struct thread *t_process_packet; + struct thread *t_refresh_stalepath; /* Thread flags. 
*/ _Atomic uint32_t thread_flags; @@ -1620,7 +1689,7 @@ struct bgp_nlri { #define BGP_NOTIFY_HOLD_ERR 4 #define BGP_NOTIFY_FSM_ERR 5 #define BGP_NOTIFY_CEASE 6 -#define BGP_NOTIFY_CAPABILITY_ERR 7 +#define BGP_NOTIFY_ROUTE_REFRESH_ERR 7 /* Subcodes for BGP Finite State Machine Error */ #define BGP_NOTIFY_FSM_ERR_SUBCODE_UNSPECIFIC 0 @@ -1668,10 +1737,13 @@ struct bgp_nlri { #define BGP_NOTIFY_CEASE_COLLISION_RESOLUTION 7 #define BGP_NOTIFY_CEASE_OUT_OF_RESOURCE 8 -/* BGP_NOTIFY_CAPABILITY_ERR sub codes (draft-ietf-idr-dynamic-cap-02). */ -#define BGP_NOTIFY_CAPABILITY_INVALID_ACTION 1 -#define BGP_NOTIFY_CAPABILITY_INVALID_LENGTH 2 -#define BGP_NOTIFY_CAPABILITY_MALFORMED_CODE 3 +/* BGP_NOTIFY_ROUTE_REFRESH_ERR sub codes (RFC 7313). */ +#define BGP_NOTIFY_ROUTE_REFRESH_INVALID_MSG_LEN 1 + +/* BGP route refresh optional subtypes. */ +#define BGP_ROUTE_REFRESH_NORMAL 0 +#define BGP_ROUTE_REFRESH_BORR 1 +#define BGP_ROUTE_REFRESH_EORR 2 /* BGP timers default value. */ #define BGP_INIT_START_TIMER 1 @@ -1851,8 +1923,8 @@ extern char *peer_uptime(time_t uptime2, char *buf, size_t len, bool use_json, extern int bgp_config_write(struct vty *); -extern void bgp_master_init(struct thread_master *master, - const int buffer_size); +extern void bgp_master_init(struct thread_master *master, const int buffer_size, + struct list *addresses); extern void bgp_init(unsigned short instance); extern void bgp_pthreads_run(void); @@ -2225,6 +2297,27 @@ static inline struct vrf *bgp_vrf_lookup_by_instance_type(struct bgp *bgp) return vrf; } +static inline uint32_t bgp_vrf_interfaces(struct bgp *bgp, bool active) +{ + struct vrf *vrf; + struct interface *ifp; + uint32_t count = 0; + + /* if there is one interface in the vrf which is up then it is deemed + * active + */ + vrf = bgp_vrf_lookup_by_instance_type(bgp); + if (vrf == NULL) + return 0; + RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) { + if (strncmp(ifp->name, bgp->name, VRF_NAMSIZ) == 0) + continue; + if (!active || if_is_up(ifp)) + count++; + } + return count; +} + /* Link BGP instance to VRF. */ static inline void bgp_vrf_link(struct bgp *bgp, struct vrf *vrf) { @@ -2262,7 +2355,14 @@ extern int bgp_lookup_by_as_name_type(struct bgp **bgp_val, as_t *as, enum bgp_instance_type inst_type); /* Hooks */ -DECLARE_HOOK(peer_status_changed, (struct peer * peer), (peer)) +DECLARE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp), + (bgp, ifp)) +DECLARE_HOOK(peer_status_changed, (struct peer *peer), (peer)) +DECLARE_HOOK(bgp_snmp_init_stats, (struct bgp *bgp), (bgp)) +DECLARE_HOOK(bgp_snmp_update_last_changed, (struct bgp *bgp), (bgp)) +DECLARE_HOOK(bgp_snmp_update_stats, + (struct bgp_node *rn, struct bgp_path_info *pi, bool added), + (rn, pi, added)) void peer_nsf_stop(struct peer *peer); #endif /* _QUAGGA_BGPD_H */ diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c index f7bbd44512..8c455c6ea5 100644 --- a/bgpd/rfapi/rfapi.c +++ b/bgpd/rfapi/rfapi.c @@ -578,9 +578,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */ struct bgp_attr_encap_subtlv *encaptlv; char buf[PREFIX_STRLEN]; char buf2[RD_ADDRSTRLEN]; -#if 0 /* unused? */ - struct prefix pfx_buf; -#endif struct rfapi_nexthop *lnh = NULL; /* local nexthop */ struct rfapi_vn_option *vo; @@ -603,20 +600,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */ return; } -#if 0 /* unused? 
*/ - if ((safi == SAFI_MPLS_VPN) && (flags & RFAPI_AHR_SET_PFX_TO_NEXTHOP)) - { - - if (rfapiRaddr2Qprefix (nexthop, &pfx_buf)) - { - vnc_zlog_debug_verbose - ("%s: can't set pfx to vn addr, not adding SAFI_MPLS_VPN route", - __func__); - return; - } - p = &pfx_buf; - } -#endif for (vo = options_vn; vo; vo = vo->next) { if (RFAPI_VN_OPTION_TYPE_L2ADDR == vo->type) { l2o = &vo->v.l2addr; diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c index 3d87b63542..b2732a40b4 100644 --- a/bgpd/rfapi/rfapi_import.c +++ b/bgpd/rfapi/rfapi_import.c @@ -984,7 +984,7 @@ static int rfapiEcommunitiesMatchBeec(struct ecommunity *ecom, int rfapiEcommunitiesIntersect(struct ecommunity *e1, struct ecommunity *e2) { - int i, j; + uint32_t i, j; if (!e1 || !e2) return 0; @@ -1014,7 +1014,8 @@ int rfapiEcommunitiesIntersect(struct ecommunity *e1, struct ecommunity *e2) int rfapiEcommunityGetLNI(struct ecommunity *ecom, uint32_t *lni) { if (ecom) { - int i; + uint32_t i; + for (i = 0; i < ecom->size; ++i) { uint8_t *p = ecom->val + (i * ECOMMUNITY_SIZE); @@ -1034,7 +1035,8 @@ int rfapiEcommunityGetEthernetTag(struct ecommunity *ecom, uint16_t *tag_id) struct bgp *bgp = bgp_get_default(); *tag_id = 0; /* default to untagged */ if (ecom) { - int i; + uint32_t i; + for (i = 0; i < ecom->size; ++i) { as_t as = 0; int encode = 0; diff --git a/bgpd/rfapi/rfapi_private.h b/bgpd/rfapi/rfapi_private.h index e24d62def4..af56367955 100644 --- a/bgpd/rfapi/rfapi_private.h +++ b/bgpd/rfapi/rfapi_private.h @@ -289,9 +289,6 @@ add_vnc_route(struct rfapi_descriptor *rfd, /* cookie + UN addr for VPN */ uint8_t type, uint8_t sub_type, int flags); #define RFAPI_AHR_NO_TUNNEL_SUBTLV 0x00000001 #define RFAPI_AHR_RFPOPT_IS_VNCTLV 0x00000002 /* hack! */ -#if 0 /* unused? */ -# define RFAPI_AHR_SET_PFX_TO_NEXTHOP 0x00000004 -#endif extern void del_vnc_route(struct rfapi_descriptor *rfd, struct peer *peer, struct bgp *bgp, safi_t safi, const struct prefix *p, diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c index 762cd2596f..c90fcf8d72 100644 --- a/bgpd/rfapi/vnc_export_bgp.c +++ b/bgpd/rfapi/vnc_export_bgp.c @@ -134,7 +134,7 @@ static void encap_attr_export_ce(struct attr *new, struct attr *orig, static int getce(struct bgp *bgp, struct attr *attr, struct prefix *pfx_ce) { uint8_t *ecp; - int i; + uint32_t i; uint16_t localadmin = bgp->rfapi_cfg->resolve_nve_roo_local_admin; for (ecp = attr->ecommunity->val, i = 0; i < attr->ecommunity->size; @@ -1177,19 +1177,19 @@ static void vnc_direct_add_rn_group_rd(struct bgp *bgp, if (!rfg->rt_export_list || !rfg->rfapi_import_table) { vnc_zlog_debug_verbose( - "%s: VRF \"%s\" is missing RT import/export configuration.\n", + "%s: VRF \"%s\" is missing RT import/export configuration.", __func__, rfg->name); return; } if (!rfg->rd.prefixlen) { vnc_zlog_debug_verbose( - "%s: VRF \"%s\" is missing RD configuration.\n", + "%s: VRF \"%s\" is missing RD configuration.", __func__, rfg->name); return; } if (rfg->label > MPLS_LABEL_MAX) { vnc_zlog_debug_verbose( - "%s: VRF \"%s\" is missing default label configuration.\n", + "%s: VRF \"%s\" is missing default label configuration.", __func__, rfg->name); return; } diff --git a/bgpd/subdir.am b/bgpd/subdir.am index ea60b921d1..4614363bf0 100644 --- a/bgpd/subdir.am +++ b/bgpd/subdir.am @@ -18,6 +18,7 @@ vtysh_scan += \ bgpd/bgp_evpn_mh.c \ bgpd/bgp_evpn_vty.c \ bgpd/bgp_filter.c \ + bgpd/bgp_labelpool.c \ bgpd/bgp_mplsvpn.c \ bgpd/bgp_nexthop.c \ bgpd/bgp_route.c \ @@ -95,6 +96,7 @@ bgpd_libbgp_a_SOURCES = \ 
bgpd/bgp_regex.c \ bgpd/bgp_route.c \ bgpd/bgp_routemap.c \ + bgpd/bgp_script.c \ bgpd/bgp_table.c \ bgpd/bgp_updgrp.c \ bgpd/bgp_updgrp_adv.c \ @@ -165,6 +167,7 @@ noinst_HEADERS += \ bgpd/bgp_memory.h \ bgpd/bgp_mpath.h \ bgpd/bgp_mplsvpn.h \ + bgpd/bgp_mplsvpn_snmp.h \ bgpd/bgp_network.h \ bgpd/bgp_nexthop.h \ bgpd/bgp_nht.h \ @@ -174,6 +177,7 @@ noinst_HEADERS += \ bgpd/bgp_rd.h \ bgpd/bgp_regex.h \ bgpd/bgp_route.h \ + bgpd/bgp_script.h \ bgpd/bgp_table.h \ bgpd/bgp_updgrp.h \ bgpd/bgp_vpn.h \ @@ -215,7 +219,7 @@ bgpd_bgp_btoa_CFLAGS = $(AM_CFLAGS) bgpd_bgpd_LDADD = bgpd/libbgp.a $(RFPLDADD) lib/libfrr.la $(LIBCAP) $(LIBM) $(UST_LIBS) bgpd_bgp_btoa_LDADD = bgpd/libbgp.a $(RFPLDADD) lib/libfrr.la $(LIBCAP) $(LIBM) $(UST_LIBS) -bgpd_bgpd_snmp_la_SOURCES = bgpd/bgp_snmp.c +bgpd_bgpd_snmp_la_SOURCES = bgpd/bgp_snmp.c bgpd/bgp_mplsvpn_snmp.c bgpd_bgpd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99 bgpd_bgpd_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic bgpd_bgpd_snmp_la_LIBADD = lib/libfrrsnmp.la diff --git a/configure.ac b/configure.ac index acedf7573d..266f37a112 100755 --- a/configure.ac +++ b/configure.ac @@ -7,7 +7,7 @@ ## AC_PREREQ([2.60]) -AC_INIT([frr], [7.6-dev], [https://github.com/frrouting/frr/issues]) +AC_INIT([frr], [7.7-dev], [https://github.com/frrouting/frr/issues]) PACKAGE_URL="https://frrouting.org/" AC_SUBST([PACKAGE_URL]) PACKAGE_FULLNAME="FRRouting" @@ -138,6 +138,12 @@ AC_ARG_WITH([moduledir], [AS_HELP_STRING([--with-moduledir=DIR], [module directo ]) AC_SUBST([moduledir], [$moduledir]) +AC_ARG_WITH([scriptdir], [AS_HELP_STRING([--with-scriptdir=DIR], [script directory (${sysconfdir}/scripts)])], [ + scriptdir="$withval" +], [ + scriptdir="\${sysconfdir}/scripts" +]) +AC_SUBST([scriptdir], [$scriptdir]) AC_ARG_WITH([yangmodelsdir], [AS_HELP_STRING([--with-yangmodelsdir=DIR], [yang models directory (${datarootdir}/yang)])], [ yangmodelsdir="$withval" @@ -274,24 +280,22 @@ if test "$enable_clang_coverage" = "yes"; then ]) fi +if test "$enable_scripting" = "yes"; then + AX_PROG_LUA([5.3]) + AX_LUA_HEADERS + AX_LUA_LIBS([ + AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting]) + LIBS="$LIBS $LUA_LIB" + ]) +fi + if test "$enable_dev_build" = "yes"; then AC_DEFINE([DEV_BUILD], [1], [Build for development]) if test "$orig_cflags" = ""; then AC_C_FLAG([-g3]) AC_C_FLAG([-O0]) fi - if test "$enable_lua" = "yes"; then - AX_PROG_LUA([5.3]) - AX_LUA_HEADERS - AX_LUA_LIBS([ - AC_DEFINE([HAVE_LUA], [1], [Have support for Lua interpreter]) - LIBS="$LIBS $LUA_LIB" - ]) - fi else - if test "$enable_lua" = "yes"; then - AC_MSG_ERROR([Lua is not meant to be built/used outside of development at this time]) - fi if test "$orig_cflags" = ""; then AC_C_FLAG([-g]) AC_C_FLAG([-O2]) @@ -407,6 +411,21 @@ else ]) fi +AC_MSG_CHECKING([whether linker supports __start/stop_section symbols]) +AC_LINK_IFELSE([AC_LANG_PROGRAM([[ +#include <stdio.h> +int __attribute__((section("secttest"))) var = 1; +extern int __start_secttest, __stop_secttest; +]], [[ + void *a = &var, *b = &__start_secttest, *c = &__stop_secttest; + printf("%p %p %p\n", a, b, c); +]])], [ + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_SECTION_SYMS, 1, [have __start/stop_section symbols]) +], [ + AC_MSG_RESULT(no) +]) + dnl ---------- dnl Essentials dnl ---------- @@ -560,6 +579,8 @@ AC_ARG_ENABLE([fabricd], AS_HELP_STRING([--disable-fabricd], [do not build fabricd])) AC_ARG_ENABLE([vrrpd], AS_HELP_STRING([--disable-vrrpd], [do not build vrrpd])) +AC_ARG_ENABLE([pathd], + AS_HELP_STRING([--disable-pathd], [do 
not build pathd])) AC_ARG_ENABLE([bgp-announce], AS_HELP_STRING([--disable-bgp-announce], [turn off BGP route announcement])) AC_ARG_ENABLE([bgp-vnc], @@ -625,6 +646,8 @@ AC_ARG_ENABLE([pcreposix], AS_HELP_STRING([--enable-pcreposix], [enable using PCRE Posix libs for regex functions])) AC_ARG_ENABLE([fpm], AS_HELP_STRING([--enable-fpm], [enable Forwarding Plane Manager support])) +AC_ARG_ENABLE([pcep], + AS_HELP_STRING([--enable-pcep], [enable PCEP support for pathd])) AC_ARG_ENABLE([systemd], AS_HELP_STRING([--enable-systemd], [enable Systemd support])) AC_ARG_ENABLE([werror], @@ -693,8 +716,8 @@ fi AC_ARG_ENABLE([dev_build], AS_HELP_STRING([--enable-dev-build], [build for development])) -AC_ARG_ENABLE([lua], - AS_HELP_STRING([--enable-lua], [Build Lua scripting])) +AC_ARG_ENABLE([scripting], + AS_HELP_STRING([--enable-scripting], [Build with scripting support])) AC_ARG_ENABLE([netlink-debug], AS_HELP_STRING([--disable-netlink-debug], [pretty print netlink debug messages])) @@ -1675,6 +1698,14 @@ else esac fi +AS_IF([test "$enable_pathd" != "no"], [ + AC_DEFINE([HAVE_PATHD], [1], [pathd]) +]) + +AS_IF([test "$enable_pcep" != "no"], [ + AC_DEFINE([HAVE_PATHD_PCEP], [1], [pathd-pcep]) +]) + if test "$ac_cv_lib_json_c_json_object_get" = "no" -a "$BFDD" = "bfdd"; then AC_MSG_ERROR(["you must use json-c library to use bfdd"]) fi @@ -1850,7 +1881,7 @@ dnl --------------- dnl sysrepo dnl --------------- if test "$enable_sysrepo" = "yes"; then - PKG_CHECK_MODULES([SYSREPO], [libsysrepo], + PKG_CHECK_MODULES([SYSREPO], [sysrepo], [AC_DEFINE([HAVE_SYSREPO], [1], [Enable sysrepo integration]) SYSREPO=true], [SYSREPO=false @@ -2335,6 +2366,29 @@ if test "$frr_cv_mallinfo" = "yes"; then AC_DEFINE([HAVE_MALLINFO], [1], [mallinfo]) fi +AC_CACHE_CHECK([whether mallinfo2 is available], [frr_cv_mallinfo2], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([FRR_INCLUDES [ +#ifdef HAVE_MALLOC_H +#include <malloc.h> +#endif +#ifdef HAVE_MALLOC_NP_H +#include <malloc_np.h> +#endif +#ifdef HAVE_MALLOC_MALLOC_H +#include <malloc/malloc.h> +#endif +]], [[ +struct mallinfo2 ac_x; ac_x = mallinfo2 (); +]])], [ + frr_cv_mallinfo2=yes + ], [ + frr_cv_mallinfo2=no + ]) +]) +if test "$frr_cv_mallinfo2" = "yes"; then + AC_DEFINE([HAVE_MALLINFO2], [1], [mallinfo2]) +fi + AC_MSG_CHECKING([whether malloc_usable_size is available]) AC_LINK_IFELSE([AC_LANG_PROGRAM([FRR_INCLUDES [ #ifdef HAVE_MALLOC_H @@ -2434,19 +2488,23 @@ CFG_SBIN="$sbindir" CFG_STATE="$frr_statedir" CFG_MODULE="$moduledir" CFG_YANGMODELS="$yangmodelsdir" +CFG_SCRIPT="$scriptdir" for I in 1 2 3 4 5 6 7 8 9 10; do eval CFG_SYSCONF="\"$CFG_SYSCONF\"" eval CFG_SBIN="\"$CFG_SBIN\"" eval CFG_STATE="\"$CFG_STATE\"" eval CFG_MODULE="\"$CFG_MODULE\"" eval CFG_YANGMODELS="\"$CFG_YANGMODELS\"" + eval CFG_SCRIPT="\"$CFG_SCRIPT\"" done AC_SUBST([CFG_SYSCONF]) AC_SUBST([CFG_SBIN]) AC_SUBST([CFG_STATE]) AC_SUBST([CFG_MODULE]) +AC_SUBST([CFG_SCRIPT]) AC_SUBST([CFG_YANGMODELS]) AC_DEFINE_UNQUOTED([MODULE_PATH], ["$CFG_MODULE"], [path to modules]) +AC_DEFINE_UNQUOTED([SCRIPT_PATH], ["$CFG_SCRIPT"], [path to scripts]) AC_DEFINE_UNQUOTED([YANG_MODELS_PATH], ["$CFG_YANGMODELS"], [path to YANG data models]) AC_DEFINE_UNQUOTED([WATCHFRR_SH_PATH], ["${CFG_SBIN%/}/watchfrr.sh"], [path to watchfrr.sh]) @@ -2467,6 +2525,18 @@ AM_CONDITIONAL([IRDP], [$IRDP]) AM_CONDITIONAL([FPM], [test "$enable_fpm" = "yes"]) AM_CONDITIONAL([HAVE_PROTOBUF], [test "$enable_protobuf" = "yes"]) AM_CONDITIONAL([HAVE_PROTOBUF3], [$PROTO3]) + +dnl PCEP plugin +AM_CONDITIONAL([HAVE_PATHD_PCEP], [test "$enable_pcep" = 
"yes"]) +AS_IF([test "$enable_pcep" = "yes"], [ + AC_CHECK_LIB([pcep_pcc], [initialize_pcc], [ + PATHD_PCEP_LIBS="-lpcep_pcc" + ],[ + AC_MSG_ERROR([PCEP library libpcep_pcc not found]) + ]) + AC_SUBST([PATHD_PCEP_LIBS]) +]) + dnl daemons AM_CONDITIONAL([VTYSH], [test "$VTYSH" = "vtysh"]) AM_CONDITIONAL([ZEBRA], [test "$enable_zebra" != "no"]) @@ -2489,6 +2559,7 @@ AM_CONDITIONAL([SHARPD], [test "$enable_sharpd" = "yes"]) AM_CONDITIONAL([STATICD], [test "$enable_staticd" != "no"]) AM_CONDITIONAL([FABRICD], [test "$enable_fabricd" != "no"]) AM_CONDITIONAL([VRRPD], [test "$enable_vrrpd" != "no"]) +AM_CONDITIONAL([PATHD], [test "$enable_pathd" != "no"]) AC_CONFIG_FILES([Makefile],[ test "$enable_dev_build" = "yes" && makefile_devbuild="--dev-build" @@ -2515,6 +2586,8 @@ AC_CONFIG_FILES([tools/frr], [chmod +x tools/frr]) AC_CONFIG_FILES([tools/watchfrr.sh], [chmod +x tools/watchfrr.sh]) AC_CONFIG_FILES([tools/frrinit.sh], [chmod +x tools/frrinit.sh]) AC_CONFIG_FILES([tools/frrcommon.sh]) +AC_CONFIG_FILES([tools/frr.service]) +AC_CONFIG_FILES([tools/frr@.service]) AS_IF([test "$with_pkg_git_version" = "yes"], [ AC_CONFIG_COMMANDS([lib/gitversion.h], [ @@ -2557,6 +2630,7 @@ state file directory : ${frr_statedir} config file directory : `eval echo \`echo ${sysconfdir}\`` example directory : `eval echo \`echo ${exampledir}\`` module directory : ${CFG_MODULE} +script directory : ${CFG_SCRIPT} user to run as : ${enable_user} group to run as : ${enable_group} group for vty sockets : ${enable_vty_group} diff --git a/debian/control b/debian/control index 4aaa9f21bf..b9e96b55d0 100644 --- a/debian/control +++ b/debian/control @@ -29,7 +29,8 @@ Build-Depends: bison, python3-dev, python3-pytest <!nocheck>, python3-sphinx, - texinfo (>= 4.7) + texinfo (>= 4.7), + liblua5.3-dev <pkg.frr.lua> Standards-Version: 4.5.0.3 Homepage: https://www.frrouting.org/ Vcs-Browser: https://github.com/FRRouting/frr/tree/debian/master diff --git a/debian/rules b/debian/rules index 6cc03c378a..25ae04261d 100755 --- a/debian/rules +++ b/debian/rules @@ -29,6 +29,12 @@ else CONF_SYSTEMD=--enable-systemd=no endif +ifeq ($(filter pkg.frr.lua,$(DEB_BUILD_PROFILES)),) + CONF_LUA=--disable-scripting +else + CONF_LUA=--enable-scripting +endif + export PYTHON=python3 %: @@ -49,6 +55,7 @@ override_dh_auto_configure: \ $(CONF_SYSTEMD) \ $(CONF_RPKI) \ + $(CONF_LUA) \ --with-libpam \ --enable-doc \ --enable-doc-html \ diff --git a/doc/developer/building.rst b/doc/developer/building.rst index a6cd545872..c687ba8dc8 100644 --- a/doc/developer/building.rst +++ b/doc/developer/building.rst @@ -29,3 +29,4 @@ Building FRR building-frr-for-ubuntu2004 building-frr-for-archlinux building-docker + cross-compiling diff --git a/doc/developer/cross-compiling.rst b/doc/developer/cross-compiling.rst new file mode 100644 index 0000000000..339e00c921 --- /dev/null +++ b/doc/developer/cross-compiling.rst @@ -0,0 +1,326 @@ +Cross-Compiling +=============== + +FRR is capable of being cross-compiled to a number of different architectures. +With an adequate toolchain this process is fairly straightforward, though one +must exercise caution to validate this toolchain's correctness before attempting +to compile FRR or its dependencies; small oversights in the construction of the +build tools may lead to problems which quickly become difficult to diagnose. + +Toolchain Preliminary +--------------------- + +The first step to cross-compiling any program is to identify the system which +the program (FRR) will run on. 
From here on this will be called the "host" +machine, following autotools' convention, while the machine building FRR will be +called the "build" machine. The toolchain will of course be installed onto the +build machine and be leveraged to build FRR for the host machine to run. + +.. note:: + + The build machine used while writing this guide was ``x86_64-pc-linux-gnu`` + and the target machine was ``arm-linux-gnueabihf`` (a Raspberry Pi 3B+). + Replace this with your targeted tuple below if you plan on running the + commands from this guide: + + .. code-block:: shell + + export HOST_ARCH="arm-linux-gnueabihf" + + For your given target, the build system's OS may have some support for + building cross compilers natively, or may even offer binary toolchains built + upstream for the target architecture. Check your package manager or OS + documentation before committing to building a toolchain from scratch. + +This guide will not detail *how* to build a cross-compiling toolchain but +will instead assume one already exists and is installed on the build system. +The methods for building the toolchain itself may differ between operating +systems so consult the OS documentation for any particulars regarding +cross-compilers. The OSDev wiki has a `pleasant tutorial`_ on cross-compiling in +the context of operating system development which bootstraps from only the +native GCC and binutils on the build machine. This may be useful if the build +machine's OS does not offer existing tools to build a cross-compiler targeting +the host. + +.. _pleasant tutorial: https://wiki.osdev.org/GCC_Cross-Compiler + +This guide will also not demonstrate how to build all of FRR's dependencies for the +target architecture. Instead, general instructions for using a cross-compiling +toolchain to compile packages using CMake, Autotools, and Makefiles are +provided; these three cases apply to almost all FRR dependencies. + +.. _glibc mismatch: + +.. warning:: + + Ensure the versions and implementations of the C standard library (glibc or + what have you) match on the host and the build toolchain. ``ldd --version`` + will help you here. Upgrade one or the other if the they do not match. + +Testing the Toolchain +--------------------- + +Before any cross-compilation begins it would be prudent to test the new +toolchain by writing, compiling and linking a simple program. + +.. code-block:: shell + + # A small program + cat > nothing.c <<EOF + int main() { return 0; } + EOF + + # Build and link with the cross-compiler + ${HOST_ARCH}-gcc -o nothing nothing.c + + # Inspect the resulting binary, results may vary + file ./nothing + + # nothing: ELF 32-bit LSB pie executable, ARM, EABI5 version 1 (SYSV), + # dynamically linked, interpreter /lib/ld-linux-armhf.so.3, + # for GNU/Linux 3.2.0, not stripped + +If this produced no errors then the installed toolchain is probably ready to +start compiling the build dependencies and eventually FRR itself. There still +may be lurking issues but fundamentally the toolchain can produce binaries and +that's good enough to start working with it. + +.. warning:: + + If any errors occurred during the previous functional test please look back + and address them before moving on; this indicates your cross-compiling + toolchain is *not* in a position to build FRR or its dependencies. Even if + everything was fine, keep in mind that many errors from here on *may still + be related* to your toolchain (e.g. 
libstdc++.so or other components) and this + small test is not a guarantee of complete toolchain coherence. + +Cross-compiling Dependencies +---------------------------- + +When compiling FRR it is necessary to compile some of its dependencies alongside +it on the build machine. This is so symbols from the shared libraries (which +will be loaded at run-time on the host machine) can be linked to the FRR +binaries at compile time; additionally, headers for these libraries are needed +during the compile stage for a successful build. + +Sysroot Overview +^^^^^^^^^^^^^^^^ + +All build dependencies should be installed into a "root" directory on the build +computer, hereafter called the "sysroot". This directory will be prefixed to +paths while searching for requisite libraries and headers during the build +process. Often this may be set via a ``--prefix`` flag when building the +dependent packages, meaning a ``make install`` will copy compiled libraries into +(e.g.) ``/usr/${HOST_ARCH}/usr``. + +If the toolchain was built on the build machine then there is likely already a +sysroot where those tools and standard libraries were installed; it may be +helpful to use that directory as the sysroot for this build as well. + +Basic Workflow +^^^^^^^^^^^^^^ + +Before compiling or building any dependencies, make note of which daemons are +being targeted and which libraries will be needed. Not all dependencies are +necessary if only building with a subset of the daemons. + +The following workflow will compile and install any libraries which can be built +with Autotools. The resultant library will be installed into the sysroot +``/usr/${HOST_ARCH}``. + +.. code-block:: shell + + ./configure \ + CC=${HOST_ARCH}-gcc \ + CXX=${HOST_ARCH}-g++ \ + --build=${HOST_ARCH} \ + --prefix=/usr/${HOST_ARCH} + make + make install + +Some libraries like ``json-c`` and ``libyang`` are packaged with CMake and can +be built and installed generally like: + +.. code-block:: shell + + mkdir build + cd build + CC=${HOST_ARCH}-gcc \ + CXX=${HOST_ARCH}-g++ \ + cmake \ + -DCMAKE_INSTALL_PREFIX=/usr/${HOST_ARCH} \ + .. + make + make install + +For programs with only a Makefile (e.g. ``libcap``) the process may look still a +little different: + +.. code-block:: shell + + CC=${HOST_ARCH}-gcc make + make install DESTDIR=/usr/${HOST_ARCH} + +These three workflows should handle the bulk of building and installing the +build-time dependencies for FRR. Verify that the installed files are being +placed correctly into the sysroot and were actually built using the +cross-compile toolchain, not by the native toolchain by accident. + +Dependency Notes +^^^^^^^^^^^^^^^^ + +There are a lot of things that can go wrong during a cross-compilation. Some of +the more common errors and a few special considerations are collected below for +reference. + +libyang +""""""" + +``-DENABLE_LYD_PRIV=ON`` should be provided during the CMake step. + +Ensure also that the version of ``libyang`` being installed corresponds to the +version required by the targeted FRR version. + +gRPC +"""" + +This piece is requisite only if the ``--enable-grpc`` flag will be passed +later on to FRR. One may get burned when compiling gRPC if the ``protoc`` +version on the build machine differs from the version of ``protoc`` being linked +to during a gRPC build. The error messages from this defect look like: + +.. 
code-block:: terminal
+
+   gens/src/proto/grpc/channelz/channelz.pb.h: In member function ‘void grpc::channelz::v1::ServerRef::set_name(const char*, size_t)’:
+   gens/src/proto/grpc/channelz/channelz.pb.h:9127:64: error: ‘EmptyDefault’ is not a member of ‘google::protobuf::internal::ArenaStringPtr’
+    9127 |   name_.Set(::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::EmptyDefault{}, ::std::string(
+
+This happens because protocol buffer code generation uses ``protoc`` to create
+classes with different getters and setters corresponding to the protobuf data
+defined by the source tree's ``.proto`` files. Clearly the cross-compiled
+``protoc`` cannot be used for this code generation because that binary is built
+for a different CPU.
+
+The solution is to install matching versions of native and cross-compiled
+protocol buffers; this way the native binary will generate code and the
+cross-compiled library will be linked to by gRPC and these versions will not
+disagree.
+
+----
+
+The ``-latomic`` linker flag may also be necessary here if using ``libstdc++``,
+since GCC's C++11 implementation makes library calls in certain cases for
+``<atomic>``, so ``-latomic`` cannot be assumed.
+
+Cross-compiling FRR Itself
+--------------------------
+
+With all the necessary libraries cross-compiled and installed into the sysroot,
+the last thing to actually build is FRR itself:
+
+.. code-block:: shell
+
+   # Clone and bootstrap the build
+   git clone 'https://github.com/FRRouting/frr.git'
+   # (e.g.) git checkout stable/7.5
+   ./bootstrap.sh
+
+   # Build clippy using the native toolchain
+   mkdir build-clippy
+   cd build-clippy
+   ../configure --enable-clippy-only
+   make clippy-only
+   cd ..
+
+   # Next, configure FRR and use the clippy we just built
+   ./configure \
+       CC=${HOST_ARCH}-gcc \
+       CXX=${HOST_ARCH}-g++ \
+       --host=${HOST_ARCH} \
+       --with-sysroot=/usr/${HOST_ARCH} \
+       --with-clippy=./build-clippy/lib/clippy \
+       --sysconfdir=/etc/frr \
+       --sbindir="\${prefix}/lib/frr" \
+       --localstatedir=/var/run/frr \
+       --prefix=/usr \
+       --enable-user=frr \
+       --enable-group=frr \
+       --enable-vty-group=frrvty \
+       --disable-doc \
+       --enable-grpc
+
+   # Send it
+   make
+
+Installation to Host Machine
+----------------------------
+
+If no errors were observed during the previous steps it is safe to ``make
+install`` FRR into its own directory.
+
+.. code-block:: shell
+
+   # Install FRR into its own "sysroot"
+   make install DESTDIR=/some/path/to/sysroot
+
+After running the above command, FRR binaries, modules and example configuration
+files will be installed into some path on the build machine. The directory
+will have folders like ``/usr`` and ``/etc``; this "root" should now be copied
+to the host and installed on top of the root directory there.
+
+.. code-block:: shell
+
+   # Tar this sysroot (preserving permissions)
+   tar -C /some/path/to/sysroot -cpvf frr-${HOST_ARCH}.tar .
+
+   # Transfer tar file to host machine
+   scp frr-${HOST_ARCH}.tar me@host-machine:
+
+   # Overlay the tarred sysroot on top of the host machine's root
+   ssh me@host-machine <<-EOF
+       # May need to elevate permissions here
+       tar -C / -xpvf frr-${HOST_ARCH}.tar .
+   EOF
+
+Now FRR should be installed just as if ``make install`` had been run on the host
+machine. Create configuration files and assign permissions as needed. Lastly,
+ensure the correct users and groups exist for FRR on the host machine.
+
+Troubleshooting
+---------------
+
+Even when every precaution has been taken some things may still go wrong! This
+section details some common runtime problems.
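+Before digging into the specific failure modes below, it is often worth
+confirming which shared objects an installed FRR binary actually requires and
+whether the host can resolve them. The commands below are only a suggested
+starting point and assume the example paths used earlier in this guide
+(``/usr/lib/frr/zebra`` on the host, the staging sysroot on the build machine);
+substitute any other installed binary as needed.
+
+.. code-block:: shell
+
+   # On the host: list the runtime dependencies and flag unresolved ones
+   ldd /usr/lib/frr/zebra | grep 'not found'
+
+   # On the build machine: inspect the NEEDED entries recorded in the binary
+   # without executing it, using the cross toolchain's readelf
+   ${HOST_ARCH}-readelf -d /some/path/to/sysroot/usr/lib/frr/zebra | grep NEEDED
+
+If either command reports a missing or unexpectedly old library, the
+subsections below cover the two most common causes.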
+
+Mismatched Libraries
+^^^^^^^^^^^^^^^^^^^^
+
+If you see something like this after installing on the host:
+
+.. code-block:: console
+
+   /usr/lib/frr/zebra: error while loading shared libraries: libyang.so.1: cannot open shared object file: No such file or directory
+
+... at least one of FRR's dependencies which was linked to the binary earlier is
+not available on the host OS. Even if it has been installed, the host
+repository's version may lag what is needed for more recent FRR builds (this is
+likely to happen with YANG at the moment).
+
+If the matching library is not available from the host OS package manager, it
+may be possible to compile it using the same toolchain used to compile FRR. The
+library may have already been built earlier when compiling FRR on the build
+machine, in which case it may be as simple as following the same workflow laid
+out in `Installation to Host Machine`_.
+
+Mismatched Glibc Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The version and implementation of the C standard library must match on both the
+host and build toolchain. The error corresponding to this misconfiguration will
+look like:
+
+.. code-block:: console
+
+   /usr/lib/frr/zebra: /lib/${HOST_ARCH}/libc.so.6: version `GLIBC_2.32' not found (required by /usr/lib/libfrr.so.0)
+
+See the earlier warning about preventing a `glibc mismatch`_.
diff --git a/doc/developer/index.rst b/doc/developer/index.rst
index 5a7da806ff..8e7913419f 100644
--- a/doc/developer/index.rst
+++ b/doc/developer/index.rst
@@ -18,3 +18,5 @@ FRRouting Developer's Guide
    ospf
    zebra
    vtysh
+   path
+   link-state
diff --git a/doc/developer/library.rst b/doc/developer/library.rst
index 3d5c6a2a15..2e36c253ea 100644
--- a/doc/developer/library.rst
+++ b/doc/developer/library.rst
@@ -11,10 +11,11 @@ Library Facilities (libfrr)
    rcu
    lists
    logging
+   xrefs
    locking
    hooks
    cli
    modules
-   lua
+   scripting
diff --git a/doc/developer/link-state.rst b/doc/developer/link-state.rst
new file mode 100644
index 0000000000..f1fc52966b
--- /dev/null
+++ b/doc/developer/link-state.rst
@@ -0,0 +1,314 @@
+Link State API Documentation
+============================
+
+Introduction
+------------
+
+The Link State (LS) API aims to provide a set of structures and functions to
+build and manage a Traffic Engineering Database for the various FRR daemons.
+This API has been designed for several use cases:
+
+- BGP Link State (BGP-LS): where the BGP protocol needs to collect the link
+  state information from the routing daemons (IS-IS and/or OSPF) to implement
+  RFC7752
+- Path Computation Element (PCE): where path computation algorithms are based
+  on the Traffic Engineering Database
+- ReSerVation Protocol (RSVP): where signaling needs to know the Traffic
+  Engineering topology of the network in order to determine the path of
+  RSVP tunnels
+
+Architecture
+------------
+
+The main requirements from the various use cases are as follows:
+
+- Provide a set of data models and functions to ease Link State information
+  manipulation (storage, serialize, parse ...)
+- Ease and normalize Link State information exchange between FRR daemons
+- Provide database structures for the Traffic Engineering Database (TED)
+
+To ease Link State understanding, FRR daemons have been classified into two
+categories:
+
+- **Consumer**: Daemons that consume Link State information, e.g. BGPd
+- **Producer**: Daemons that are able to collect Link State information and
+  send it to consumer daemons, e.g. OSPFd and IS-ISd
+
+The Zebra daemon, and more precisely the ZAPI message, is used to convey the
+Link State information between *producer* and *consumer*, but Zebra acts as a
+simple pass-through and does not store any Link State information. A new ZAPI
+**Opaque** message has been designed for that purpose.
+
+Each consumer and producer daemon is free to store Link State data or not, and
+to organise the information following the Traffic Engineering Database model
+provided by the API or any other data structure, e.g. hash, RB-tree ...
+
+Link State API
+--------------
+
+This is the low level API that allows any daemon to manipulate the Link State
+elements that are stored in the Link State Database.
+
+Data structures
+^^^^^^^^^^^^^^^
+
+Three types of Link State structures have been defined:
+
+.. c:type:: struct ls_node
+
+   that groups all information related to a node
+
+.. c:type:: struct ls_attributes
+
+   that groups all information related to a link
+
+.. c:type:: struct ls_prefix
+
+   that groups all information related to a prefix
+
+These 3 types of structures are those handled by BGP-LS (see RFC7752) and are
+suitable to describe a Traffic Engineering topology.
+
+Each structure, in addition to the specific parameters, embeds the node
+identifier which advertises the Link State and a bit mask used as flags to
+indicate which parameters are valid, i.e. for which the value is valid and
+corresponds to Link State information conveyed by the routing protocol.
+
+.. c:type:: struct ls_node_id
+
+   defines the Node identifier as the router ID IPv4 address plus the area ID
+   for OSPF, or the ISO System ID plus the IS-IS level for IS-IS.
+
+Functions
+^^^^^^^^^
+
+A set of functions is provided to create, delete and compare Link State Nodes:
+
+.. c:function:: struct ls_node *ls_node_new(struct ls_node_id adv, struct in_addr router_id, struct in6_addr router6_id)
+.. c:function:: void ls_node_del(struct ls_node *node)
+.. c:function:: int ls_node_same(struct ls_node *n1, struct ls_node *n2)
+
+and Link State Attributes:
+
+.. c:function:: struct ls_attributes *ls_attributes_new(struct ls_node_id adv, struct in_addr local, struct in6_addr local6, uint32_t local_id)
+.. c:function:: void ls_attributes_del(struct ls_attributes *attr)
+.. c:function:: int ls_attributes_same(struct ls_attributes *a1, struct ls_attributes *a2)
+
+The low level API doesn't provide any particular functions for the Link State
+Prefix structure as the latter is simpler to manipulate.
+
+Link State TED
+--------------
+
+This is the high level API that provides functions to create, update and delete
+a Link State Database in order to form a Traffic Engineering Database (TED).
+
+Data Structures
+^^^^^^^^^^^^^^^
+
+The Traffic Engineering Database is modeled as a Graph in order to ease Path
+Computation algorithm implementation. Denoted **G(V, E)**, a graph is composed
+of a list of **Vertices (V)** which represent the network Nodes and a list of
+**Edges (E)** which represent the Links. An additional list of **Prefixes (P)**
+is added, each attached to the *Vertex (V)* which advertises it.
+
+A *Vertex (V)* contains the list of outgoing *Edges (E)* that connect this
+Vertex with its direct neighbors and the list of incoming *Edges (E)* that
+connect the direct neighbors to this Vertex. The *Edge (E)* is unidirectional;
+thus, it is necessary to add 2 Edges to model a bidirectional relation between
+2 Vertices. Finally, the *Vertex (V)* contains a pointer to the corresponding
+Link State Node.
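+As a rough illustration of how these structures fit together, the sketch below
+builds a TED containing two vertices joined by a pair of unidirectional edges,
+using the constructors listed under the Link State API above and the TED
+helpers listed under Functions below. This is not code from the FRR tree: the
+header name, the ``struct ls_node_id`` values (assumed to be filled in from the
+routing protocol) and the attribute parameters are illustrative only, and all
+error handling is omitted.
+
+.. code-block:: c
+
+   #include <netinet/in.h>
+   #include "link_state.h" /* assumed header declaring the LS API */
+
+   /* Build a TED with two vertices A and B and one bidirectional link,
+    * modeled as two unidirectional edges. */
+   static struct ls_ted *sketch_two_node_ted(struct ls_node_id adv_a,
+                                             struct in_addr rtr_a,
+                                             struct in_addr link_a,
+                                             struct ls_node_id adv_b,
+                                             struct in_addr rtr_b,
+                                             struct in_addr link_b)
+   {
+           struct ls_ted *ted = ls_ted_new(1, "example-ted", 65000);
+
+           /* One Link State Node per vertex. */
+           struct ls_vertex *va =
+                   ls_vertex_add(ted, ls_node_new(adv_a, rtr_a, in6addr_any));
+           struct ls_vertex *vb =
+                   ls_vertex_add(ted, ls_node_new(adv_b, rtr_b, in6addr_any));
+
+           /* One Link State Attributes / Edge per direction. */
+           struct ls_edge *e_ab = ls_edge_add(
+                   ted, ls_attributes_new(adv_a, link_a, in6addr_any, 0));
+           struct ls_edge *e_ba = ls_edge_add(
+                   ted, ls_attributes_new(adv_b, link_b, in6addr_any, 0));
+
+           ls_connect_vertices(va, vb, e_ab);
+           ls_connect_vertices(vb, va, e_ba);
+
+           return ted;
+   }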
+ +*Edge (E)* contains the source and destination Vertex that this Edge +is connecting and a pointer to the corresponding Link State Attributes. + +A unique Key is used to identify both Vertices and Edges within the Graph. + + +:: + + -------------- --------------------------- -------------- + | Connected |---->| Connected Edge Va to Vb |--->| Connected | + --->| Vertex | --------------------------- | Vertex |----> + | | | | + | - Key (Va) | | - Key (Vb) | + <---| - Vertex | --------------------------- | - Vertex |<---- + | |<----| Connected Edge Vb to Va |<---| | + -------------- --------------------------- -------------- + + +4 data structures have been defined to implement the Graph model: + +.. c:type:: struct ls_vertex +.. c:type:: struct ls_edge +.. c:type:: struct ls_prefix +.. c:type:: struct ls_ted + + +Functions +^^^^^^^^^ + +.. c:function:: struct ls_vertex *ls_vertex_add(struct ls_ted *ted, struct ls_node *node) +.. c:function:: struct ls_vertex *ls_vertex_update(struct ls_ted *ted, struct ls_node *node) +.. c:function:: void ls_vertex_del(struct ls_ted *ted, struct ls_vertex *vertex) +.. c:function:: struct ls_vertex *ls_find_vertex_by_key(struct ls_ted *ted, const uint64_t key) +.. c:function:: struct ls_vertex *ls_find_vertex_by_id(struct ls_ted *ted, struct ls_node_id id) +.. c:function:: int ls_vertex_same(struct ls_vertex *v1, struct ls_vertex *v2) + +.. c:function:: struct ls_edge *ls_edge_add(struct ls_ted *ted, struct ls_attributes *attributes) +.. c:function:: struct ls_edge *ls_edge_update(struct ls_ted *ted, struct ls_attributes *attributes) +.. c:function:: void ls_edge_del(struct ls_ted *ted, struct ls_edge *edge) +.. c:function:: struct ls_edge *ls_find_edge_by_key(struct ls_ted *ted, const uint64_t key) +.. c:function:: struct ls_edge *ls_find_edge_by_source(struct ls_ted *ted, struct ls_attributes *attributes); +.. c:function:: struct ls_edge *ls_find_edge_by_destination(struct ls_ted *ted, struct ls_attributes *attributes); + +.. c:function:: struct ls_subnet *ls_subnet_add(struct ls_ted *ted, struct ls_prefix *pref) +.. c:function:: void ls_subnet_del(struct ls_ted *ted, struct ls_subnet *subnet) +.. c:function:: struct ls_subnet *ls_find_subnet(struct ls_ted *ted, const struct prefix prefix) + +.. c:function:: struct ls_ted *ls_ted_new(const uint32_t key, char *name, uint32_t asn) +.. c:function:: void ls_ted_del(struct ls_ted *ted) +.. c:function:: void ls_connect_vertices(struct ls_vertex *src, struct ls_vertex *dst, struct ls_edge *edge) +.. c:function:: void ls_connect(struct ls_vertex *vertex, struct ls_edge *edge, bool source) +.. c:function:: void ls_disconnect(struct ls_vertex *vertex, struct ls_edge *edge, bool source) +.. c:function:: void ls_disconnect_edge(struct ls_edge *edge) + + +Link State Messages +------------------- + +This part of the API provides functions and data structure to ease the +communication between the *Producer* and *Consumer* daemons. + +Communications principles +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Recent ZAPI Opaque Message is used to exchange Link State data between daemons. +For that purpose, Link State API provides new functions to serialize and parse +Link State information through the ZAPI Opaque message. 
A dedicated flag, named ZAPI_OPAQUE_FLAG_UNICAST, allows daemons to send a
+unicast or a multicast Opaque message and is used as follows for the Link
+State exchange:
+
+- Multicast: To send data updates to all daemons that have subscribed to the
+  Link State Update message
+- Unicast: To send initial Link State information from a particular daemon. All
+  data are sent only to the daemon that requested Link State Synchronisation
+
+Figure 1 below illustrates the ZAPI Opaque message exchange between a
+*Producer* (an IGP like OSPF or IS-IS) and a *Consumer* (e.g. BGP). The
+message sequences are as follows:
+
+- First, both *Producer* and *Consumer* must register to their respective ZAPI
+  Opaque Message: **Link State Sync** for the *Producer* in order to receive
+  Database synchronisation requests from a *Consumer*, and **Link State Update**
+  for the *Consumer* in order to receive any Link State update from a
+  *Producer*. These register messages are stored by Zebra to determine to which
+  daemon it should redistribute the ZAPI messages it receives.
+- Then, the *Consumer* sends a **Link State Synchronisation** request with the
+  Multicast method in order to receive the complete Link State Database from a
+  *Producer*. The ZEBRA daemon forwards this message to any *Producer* daemons
+  that previously registered to this message. If no *Producer* has yet
+  registered, the request is lost. Thus, if the *Consumer* receives no response
+  within a given time, it means that no *Producer* is available right now. So,
+  the *Consumer* must send the same request until it receives a Link State
+  Database Synchronisation message. This behaviour is necessary as we can't
+  control in which order daemons are started. It is up to the *Consumer* daemon
+  to fix the timeout and the number of retries.
+- When a *Producer* receives a **Link State Synchronisation** request, it
+  starts sending all elements of its own Link State Database through
+  **Link State Database Synchronisation** messages. These messages are sent
+  with the Unicast method to avoid flooding other daemons with these elements.
+  The ZEBRA layer ensures that the messages are forwarded to the right daemon.
+- When a *Producer* updates its Link State Database, it automatically sends a
+  **Link State Update** message with the Multicast method. In turn, the ZEBRA
+  daemon forwards the message to all *Consumer* daemons that previously
+  registered to this message. If no daemon is registered, the message is lost.
+- A daemon could unregister from the ZAPI Opaque message registry at any time.
+  In this case, the ZEBRA daemon stops forwarding any messages it receives to
+  this daemon, even if it was previously concerned.
+
+::
+
+        IGP                         ZEBRA                      Consumer
+   (OSPF/IS-IS)             (ZAPI Opaque Thread)               (e.g. BGP)
+        |                            |                             |        \
+        |                            |      Register LS Update     |        |
+        |                            |<----------------------------|        | Register Phase
+        |                            |                             |        |
+        |                            |      Request LS Sync        |        |
+        |                            |<----------------------------|        |
+        :                            :                             :   A    |
+        |      Register LS Sync      |                             |   |    |
+        |--------------------------->|                             |   |    /
+        :                            :                             :   |TimeOut
+        :                            :                             :   |
+        |                            |                             |   |
+        |                            |      Request LS Sync        |   v    \
+        |      Request LS Sync       |<----------------------------|        |
+        |<---------------------------|                             |        | Synchronisation
+        |        LS DB Sync          |                             |        | Phase
+        |--------------------------->|        LS DB Sync           |        |
+        |                            |---------------------------->|        |
+        |    LS DB Sync (cont'd)     |                             |        |
+        |--------------------------->|    LS DB Sync (cont'd)      |        |
+        |            .               |---------------------------->|        |
+        |            .               |             .               |        |
+        |            .               |             .               |        |
+        |     LS DB Sync (end)       |             .
| | + |----------------------------->| LS DB Sync (end) | | + | |---------------------------->| | + | | | / + : : : + : : : + | LS Update | | \ + |----------------------------->| LS Update | | + | |---------------------------->| Update Phase + | | | | + : : : / + : : : + | | | \ + | | Unregister LS Update | | + | |<----------------------------| Deregister Phase + | | | | + | LS Update | | | + |----------------------------->| | | + | | | / + | | | + + Figure 1: Link State messages exchange + + +Data Structures +^^^^^^^^^^^^^^^ + +The Link State Message is defined to convey Link State parameters from +the routing protocol (OSPF or IS-IS) to other daemons e.g. BGP. + +.. c:type:: struct ls_message + +The structure is composed of: + +- Event of the message: + + - Sync: Send the whole LS DB following a request + - Add: Send the a new Link State element + - Update: Send an update of an existing Link State element + - Delete: Indicate that the given Link State element is removed + +- Type of Link State element: Node, Attribute or Prefix +- Remote node id when known +- Data: Node, Attributes or Prefix + +A Link State Message can carry only one Link State Element (Node, Attributes +of Prefix) at once, and only one Link State Message is sent through ZAPI +Opaque Link State type at once. + +Functions +^^^^^^^^^ + +.. c:function:: struct ls_message *ls_parse_msg(struct stream *s) +.. c:function:: int ls_send_msg(struct zclient *zclient, struct ls_message *msg, struct zapi_opaque_reg_info *dst) +.. c:function:: struct ls_message *ls_vertex2msg(struct ls_message *msg, struct ls_vertex *vertex) +.. c:function:: struct ls_message *ls_edge2msg(struct ls_message *msg, struct ls_edge *edge) +.. c:function:: struct ls_message *ls_subnet2msg(struct ls_message *msg, struct ls_subnet *subnet) +.. c:function:: int ls_sync_ted(struct ls_ted *ted, struct zclient *zclient, struct zapi_opaque_reg_info *dst) + diff --git a/doc/developer/logging.rst b/doc/developer/logging.rst index 2f2444373c..cf3aa8d17f 100644 --- a/doc/developer/logging.rst +++ b/doc/developer/logging.rst @@ -83,7 +83,7 @@ Extensions +-----------+--------------------------+----------------------------------------------+ | ``%pNHs`` | ``struct nexthop *`` | ``1.2.3.4 if 15`` | +-----------+--------------------------+----------------------------------------------+ -| ``%pFX`` + ``struct bgp_dest *`` | ``fe80::1234/64`` available in BGP only | +| ``%pFX`` | ``struct bgp_dest *`` | ``fe80::1234/64`` (available in BGP only) | +-----------+--------------------------+----------------------------------------------+ Printf features like field lengths can be used normally with these extensions, diff --git a/doc/developer/lua.rst b/doc/developer/lua.rst deleted file mode 100644 index 3315c31ad7..0000000000 --- a/doc/developer/lua.rst +++ /dev/null @@ -1,65 +0,0 @@ -.. _lua: - -Lua -=== - -Lua is currently experimental within FRR and has very limited -support. If you would like to compile FRR with Lua you must -follow these steps: - -1. Installation of Relevant Libraries - - .. code-block:: shell - - apt-get install lua5.3 liblua5-3 liblua5.3-dev - - These are the Debian libraries that are needed. There should - be equivalent RPM's that can be found - -2. Compilation - - Configure needs these options - - .. code-block:: shell - - ./configure --enable-dev-build --enable-lua <all other interesting options> - - Typically you just include the two new enable lines to build with it. - -3. 
Using Lua - - * Copy tools/lua.scr into /etc/frr - - * Create a route-map match command - - .. code-block:: console - - ! - router bgp 55 - neighbor 10.50.11.116 remote-as external - address-family ipv4 unicast - neighbor 10.50.11.116 route-map TEST in - exit-address-family - ! - route-map TEST permit 10 - match command mooey - ! - - * In the lua.scr file make sure that you have a function named 'mooey' - - .. code-block:: console - - function mooey () - zlog_debug(string.format("afi: %d: %s %d ifdx: %d aspath: %s localpref: %d", - prefix.family, prefix.route, nexthop.metric, - nexthop.ifindex, nexthop.aspath, nexthop.localpref)) - - nexthop.metric = 33 - nexthop.localpref = 13 - return 3 - end - -4. General Comments - - Please be aware that this is extremely experimental and needs a ton of work - to get this up into a state that is usable. diff --git a/doc/developer/ospf-sr.rst b/doc/developer/ospf-sr.rst index efe9b1b12f..263db9dfb9 100644 --- a/doc/developer/ospf-sr.rst +++ b/doc/developer/ospf-sr.rst @@ -17,6 +17,7 @@ Supported Features * Automatic provisioning of MPLS table * Equal Cost Multi-Path (ECMP) * Static route configuration with label stack up to 32 labels +* TI-LFA (for P2P interfaces only) Interoperability ---------------- @@ -182,6 +183,71 @@ called. Once check the validity of labels, they are send to ZEBRA layer through command for deletion. This is completed by a new labelled route through `ZEBRA_ROUTE_ADD` command, respectively `ZEBRA_ROUTE_DELETE` command. +TI-LFA +^^^^^^ + +Experimental support for Topology Independent LFA (Loop-Free Alternate), see +for example 'draft-bashandy-rtgwg-segment-routing-ti-lfa-05'. The related +files are `ospf_ti_lfa.c/h`. + +The current implementation is rather naive and does not support the advanced +optimizations suggested in e.g. RFC7490 or RFC8102. It focuses on providing +the essential infrastructure which can also later be used to enhance the +algorithmic aspects. + +Supported features: + +* Link and node protection +* Intra-area support +* Proper use of Prefix- and Adjacency-SIDs in label stacks +* Asymmetric weights (using reverse SPF) +* Non-adjacent P/Q spaces +* Protection of Prefix-SIDs + +If configured for every SPF run the routing table is enriched with additional +backup paths for every prefix. The corresponding Prefix-SIDs are updated with +backup paths too within the OSPF SR update task. + +Informal High-Level Algorithm Description: + +:: + + p_spaces = empty_list() + + for every protected_resource (link or node): + p_space = generate_p_space(protected_resource) + p_space.q_spaces = empty_list() + + for every destination that is affected by the protected_resource: + q_space = generate_q_space(destination) + + # The label stack is stored in q_space + generate_label_stack(p_space, q_space) + + # The p_space collects all its q_spaces + p_spaces.q_spaces.add(q_space) + + p_spaces.add(p_space) + + adjust_routing_table(p_spaces) + +Possible Performance Improvements: + +* Improve overall datastructures, get away from linked lists for vertices +* Don't calculate a Q space for every destination, but for a minimum set of + backup paths that cover all destinations in the post-convergence SPF. The + thinking here is that once a backup path is known that it is also a backup + path for all nodes on the path themselves. This can be done by using the + leafs of a trimmed minimum spanning tree generated out of the post- + convergence SPF tree for that particular P space. 
+* For an alternative (maybe better) optimization, look at
+  https://tools.ietf.org/html/rfc7490#section-5.2.1.3 which describes using
+  the Q space of the node which is affected by e.g. a link failure. Note that
+  this optimization is topology dependent.
+
+It is highly recommended to read e.g. `Segment Routing I/II` by Filsfils to
+understand the basics of Ti-LFA.
+
 Configuration
 -------------
diff --git a/doc/developer/path-internals-daemon.rst b/doc/developer/path-internals-daemon.rst
new file mode 100644
index 0000000000..29f017284f
--- /dev/null
+++ b/doc/developer/path-internals-daemon.rst
@@ -0,0 +1,115 @@
+PATHD Internals
+===============
+
+Architecture
+------------
+
+Overview
+........
+
+The pathd daemon manages the segment routing policies; it owns the data
+structures representing them and can load modules that manipulate them, like
+the PCEP module. Its responsibility is to select a candidate path for each
+configured policy and to install it into Zebra.
+
+Zebra
+.....
+
+Zebra manages policies that are active or pending to be activated due to the
+next hop not being available yet. In Zebra, policy data structures and APIs are
+defined in `zebra_srte.[hc]`.
+
+The responsibilities of Zebra are:
+
+ - Store the policies' segment list.
+ - Install the policies when their next-hop is available.
+ - Notify other daemons of the status of the policies.
+
+Adding and removing policies is done using the commands `ZEBRA_SR_POLICY_SET`
+and `ZEBRA_SR_POLICY_DELETE` as parameters of the function
+`zebra_send_sr_policy`, all defined in `zclient.[hc]`.
+
+If the first segment of the policy is an unknown label, it is kept until
+notified by the MPLS hook `zebra_mpls_label_created`, and then it is installed.
+
+To get notified when a policy status changes, a client can implement the
+`sr_policy_notify_status` callback defined in `zclient.[hc]`.
+
+For encoding/decoding the various data structures used to communicate with
+Zebra, the following functions are available from `zclient.[hc]`:
+`zapi_sr_policy_encode`, `zapi_sr_policy_decode` and
+`zapi_sr_policy_notify_status_decode`.
+
+
+Pathd
+.....
+
+
+The pathd daemon manages all the possible candidate paths for the segment
+routing policies and selects the best one following the
+`segment routing policy draft <https://tools.ietf.org/html/draft-ietf-spring-segment-routing-policy-06#section-2.9>`_.
+It also supports loadable modules for handling dynamic candidate paths and the
+creation of new policies and candidate paths at runtime.
+
+The responsibilities of the pathd base daemon, not including any optional
+modules, are:
+
+ - Store the policies and all the possible candidate paths for them.
+ - Select the best candidate path for each policy and send it to Zebra.
+ - Provide VTYSH configuration to set up policies and candidate paths.
+ - Provide a Northbound API to manipulate **configured** policies and candidate paths.
+ - Handle loadable modules for extending the functionality.
+ - Provide an API to the loadable module to manipulate policies and candidate paths.
+
+
+Threading Model
+---------------
+
+The daemon runs completely inside the main thread using the FRR event model;
+there is no additional threading involved.
+
+
+Source Code
+-----------
+
+Internal Data Structures
+........................
+
+The main data structures for policies and candidate paths are defined in
+`pathd.h` and implemented in `pathd.c`.
+ +When modifying these structures, either directly or through the functions +exported by `pathd.h`, nothing should be deleted/freed right away. The deletion +or modification flags must be set and when all the changes are done, the +function `srte_apply_changes` must be called. When called, a new candidate path +may be elected and sent to Zebra, and all the structures flagged as deleted +will be freed. In addition, a hook will be called so dynamic modules can perform +any required action when the elected candidate path changes. + + +Northbound API +.............. + +The northbound API is defined in `path_nb.[ch]` and implemented in +`path_nb_config.c` for configuration data and `path_nb_state.c` for operational +data. + + +Command Line Client +................... + +The command-line client (VTYSH) is implemented in `path_cli.c`. + + +Interface with Zebra +.................... + +All the functions interfacing with Zebra are defined and implemented in +`path_zebra.[hc]`. + + +Loadable Module API +................... + +For the time being, the API the loadable module uses is defined by `pathd.h`, +but in the future, it should be moved to a dedicated include file. diff --git a/doc/developer/path-internals-pcep.rst b/doc/developer/path-internals-pcep.rst new file mode 100644 index 0000000000..ca318314f1 --- /dev/null +++ b/doc/developer/path-internals-pcep.rst @@ -0,0 +1,193 @@ +PCEP Module Internals +===================== + +Introduction +------------ + +The PCEP module for the pathd daemon implements the PCEP protocol described in +:rfc:`5440` to update the policies and candidate paths. + +The protocol encoding/decoding and the basic session management is handled by +the `pceplib external library 1.2 <https://github.com/volta-networks/pceplib/tree/devel-1.2>`_. + +Together with pceplib, this module supports at least partially: + + - :rfc:`5440` + + Most of the protocol defined in the RFC is implemented. + All the messages can be parsed, but this was only tested in the context + of segment routing. Only a very small subset of metric types can be + configured, and there is a known issue with some Cisco routers not + following the IANA numbers for metrics. + + - :rfc:`8231` + + Support delegation of candidate path after performing the initial + computation request. If the PCE does not respond or cannot compute + a path, an empty candidate path is delegated to the PCE. + Only tested in the context of segment routing. + + - :rfc:`8408` + + Only used to comunicate the support for segment routing to the PCE. + + - :rfc:`8664` + + All the NAI types are implemented, but only the MPLS NAI are supported. + If the PCE provide segments that are not MPLS labels, the PCC will + return an error. + +Note that pceplib supports more RFCs and drafts, see pceplib +`README <https://github.com/volta-networks/pceplib/blob/master/README.md>`_ +for more details. + + +Architecture +------------ + +Overview +........ + +The module is separated into multiple layers: + + - pathd interface + - command-line console + - controller + - PCC + - pceplib interface + +The pathd interface handles all the interactions with the daemon API. + +The command-line console handles all the VTYSH configuration commands. + +The controller manages the multiple PCC connections and the interaction between +them and the daemon interface. + +The PCC handles a single connection to a PCE through a pceplib session. + +The pceplib interface abstracts the API of the pceplib. + +.. 
figure:: ../figures/pcep_module_threading_overview.svg + + +Threading Model +--------------- + +The module requires multiple threads to cooperate: + + - The main thread used by the pathd daemon. + - The controller pthread used to isolate the PCC from the main thread. + - The possible threads started in the pceplib library. + +To ensure thread safety, all the controller and PCC state data structures can +only be read and modified in the controller thread, and all the global data +structures can only be read and modified in the main thread. Most of the +interactions between these threads are done through FRR timers and events. + +The controller is the bridge between the two threads, all the functions that +**MUST** be called from the main thread start with the prefix `pcep_ctrl_` and +all the functions that **MUST** be called from the controller thread start +with the prefix `pcep_thread_`. When an asynchronous action must be taken in +a different thread, an FRR event is sent to the thread. If some synchronous +operation is needed, the calling thread will block and run a callback in the +other thread, there the result is **COPIED** and returned to the calling thread. + +No function other than the controller functions defined for it should be called +from the main thread. The only exception being some utility functions from +`path_pcep_lib.[hc]`. + +All the calls to pathd API functions **MUST** be performed in the main thread, +for that, the controller sends FRR events handled in function +`path_pcep.c:pcep_main_event_handler`. + +For the same reason, the console client only runs in the main thread. It can +freely use the global variable, but **MUST** use controller's `pcep_ctrl_` +functions to interact with the PCCs. + + +Source Code +----------- + +Generic Data Structures +....................... + +The data structures are defined in multiple places, and where they are defined +dictates where they can be used. + +The data structures defined in `path_pcep.h` can be used anywhere in the module. + +Internally, throughout the module, the `struct path` data structure is used +to describe PCEP messages. It is a simplified flattened structure that can +represent multiple complex PCEP message types. The conversion from this +structure to the PCEP data structures used by pceplib is done in the pceplib +interface layer. + +The data structures defined in `path_pcep_controller.h` should only be used +in `path_pcep_controller.c`. Even if a structure pointer is passed as a parameter +to functions defined in `path_pcep_pcc.h`, these should consider it as an opaque +data structure only used to call back controller functions. + +The same applies to the structures defined in `path_pcep_pcc.h`, even if the +controller owns a reference to this data structure, it should never read or +modify it directly, it should be considered an opaque structure. + +The global data structure can be accessed from the pathd interface layer +`path_pcep.c` and the command line client code `path_pcep_cli.c`. + + +Interface With Pathd +.................... + +All the functions calling or called by the pathd daemon are implemented in +`path_pcep.c`. These functions **MUST** run in the main FRR thread, and +all the interactions with the controller and the PCCs **MUST** pass through +the controller's `pcep_ctrl_` prefixed functions. + +To handle asynchronous events from the PCCs, a callback is passed to +`pcep_ctrl_initialize` that is called in the FRR main thread context. + + +Command Line Client +................... 
+ +All the command line configuration commands (VTYSH) are implemented in +`path_pcep_cli.c`. All the functions there run in the main FRR thread and +can freely access the global variables. All the interaction with the +controller's and the PCCs **MUST** pass through the controller `pcep_ctrl_` +prefixed functions. + + +Debugging Helpers +................. + +All the functions formating data structures for debugging and logging purposes +are implemented in `path_pcep_debug.[hc]`. + + +Interface with pceplib +...................... + +All the functions calling the pceplib external library are defined in +`path_pcep_lib.[hc]`. Some functions are called from the main FRR thread, like +`pcep_lib_initialize`, `pcep_lib_finalize`; some can be called from either +thread, like `pcep_lib_free_counters`; some function must be called from the +controller thread, like `pcep_lib_connect`. This will probably be formalized +later on with function prefix like done in the controller. + + +Controller +.......... + +The controller is defined and implemented in `path_pcep_controller.[hc]`. +Part of the controller code runs in FRR main thread and part runs in its own +FRR pthread started to isolate the main thread from the PCCs' event loop. +To communicate between the threads it uses FRR events, timers and +`thread_execute` calls. + + +PCC +... + +Each PCC instance owns its state and runs in the controller thread. They are +defined and implemented in `path_pcep_pcc.[hc]`. All the interactions with +the daemon must pass through some controller's `pcep_thread_` prefixed function. diff --git a/doc/developer/path-internals.rst b/doc/developer/path-internals.rst new file mode 100644 index 0000000000..2c2df0f378 --- /dev/null +++ b/doc/developer/path-internals.rst @@ -0,0 +1,11 @@ +.. _path_internals: + +********* +Internals +********* + +.. toctree:: + :maxdepth: 2 + + path-internals-daemon + path-internals-pcep diff --git a/doc/developer/path.rst b/doc/developer/path.rst new file mode 100644 index 0000000000..b6d2438c58 --- /dev/null +++ b/doc/developer/path.rst @@ -0,0 +1,11 @@ +.. _path: + +***** +PATHD +***** + +.. toctree:: + :maxdepth: 2 + + path-internals + diff --git a/doc/developer/scripting.rst b/doc/developer/scripting.rst new file mode 100644 index 0000000000..b0413619ab --- /dev/null +++ b/doc/developer/scripting.rst @@ -0,0 +1,433 @@ +.. _scripting: + +Scripting +========= + +.. seealso:: User docs for scripting + +Overview +-------- + +FRR has the ability to call Lua scripts to perform calculations, make +decisions, or otherwise extend builtin behavior with arbitrary user code. This +is implemented using the standard Lua C bindings. The supported version of Lua +is 5.3. + +C objects may be passed into Lua and Lua objects may be retrieved by C code via +a marshalling system. In this way, arbitrary data from FRR may be passed to +scripts. It is possible to pass C functions as well. + +The Lua environment is isolated from the C environment; user scripts cannot +access FRR's address space unless explicitly allowed by FRR. + +For general information on how Lua is used to extend C, refer to Part IV of +"Programming in Lua". + +https://www.lua.org/pil/contents.html#24 + + +Design +------ + +Why Lua +^^^^^^^ + +Lua is designed to be embedded in C applications. It is very small; the +standard library is 220K. It is relatively fast. It has a simple, minimal +syntax that is relatively easy to learn and can be understood by someone with +little to no programming experience. 
Moreover, it is widely used to add
+scripting capabilities to applications. In short, it is designed for this
+task.
+
+Reasons against supporting multiple scripting languages:
+
+- Each language would require different FFI methods, and specifically
+  different object encoders; a lot of code
+- Languages have different capabilities that would have to be brought to
+  parity with each other; a lot of work
+- Languages have vastly different performance characteristics; this would
+  create a lot of basically unfixable issues, and result in a single de facto
+  standard scripting language (the fastest)
+- Each language would need a dedicated maintainer for the above reasons;
+  this is pragmatically difficult
+- Supporting multiple languages fractures the community and limits the audience
+  with which a given script can be shared
+
+General
+^^^^^^^
+
+FRR's concept of a script is somewhat abstracted away from the fact that it is
+Lua underneath. A script has two things:
+
+- name
+- state
+
+In code:
+
+.. code-block:: c
+
+   struct frrscript {
+           /* Script name */
+           char *name;
+
+           /* Lua state */
+           struct lua_State *L;
+   };
+
+
+``name`` is simply a string. Everything else is in ``state``, which is itself a
+Lua library object (``lua_State``). This is an opaque struct that is
+manipulated using ``lua_*`` functions. The basic ones are imported from
+``lua.h`` and the rest are implemented within FRR to fill our use cases. The
+thing to remember is that all operations beyond the initial loading of the
+script take place on this opaque state object.
+
+There are four basic actions that can be done on a script:
+
+- load
+- execute
+- query state
+- unload
+
+They are typically done in this order.
+
+
+Loading
+^^^^^^^
+
+A snippet of Lua code is referred to as a "chunk". These are simply text. FRR
+presently assumes chunks are located in individual files specific to one task.
+These files are stored in the scripts directory and must end in ``.lua``.
+
+A script object is created by loading a script. This is done with
+``frrscript_load()``. This function takes the name of the script and an
+optional callback function. The string ".lua" is appended to the script name,
+and the resultant filename is looked for in the scripts directory.
+
+For example, to load ``/etc/frr/scripts/bingus.lua``:
+
+.. code-block:: c
+
+   struct frrscript *fs = frrscript_load("bingus", NULL);
+
+During loading the script is validated for syntax and its initial environment
+is set up. By default this does not include the Lua standard library; there
+are security issues to consider, though for practical purposes untrusted users
+should not be able to write to the scripts directory anyway. If desired, the
+Lua standard library may be added to the script environment using
+``luaL_openlibs(fs->L)`` after loading the script. Further information on
+setting up the script environment is in the Lua manual.
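+
+Putting the pieces above together, a minimal loading sequence could look like
+the following sketch. It assumes that ``frrscript_load()`` returns ``NULL``
+when the chunk cannot be found or fails validation; the error handling shown
+is illustrative rather than prescriptive.
+
+.. code-block:: c
+
+   struct frrscript *fs = frrscript_load("bingus", NULL);
+
+   if (!fs) {
+           /* Missing file or syntax error in the chunk. */
+           zlog_warn("could not load script 'bingus'");
+           return;
+   }
+
+   /* Optionally expose the Lua standard library to the script. */
+   luaL_openlibs(fs->L);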
+
+
+Executing
+^^^^^^^^^
+
+After loading, scripts may be executed. A script may take input in the form of
+variable bindings set in its environment prior to being run, and may provide
+results by setting the value of variables. Arbitrary C values may be
+transferred into the script environment, including functions.
+
+A typical execution call looks something like this:
+
+.. code-block:: c
+
+   struct frrscript *fs = frrscript_load(...);
+
+   int status_ok = 0, status_fail = 1;
+   struct prefix p = ...;
+
+   struct frrscript_env env[] = {
+           {"integer", "STATUS_FAIL", &status_fail},
+           {"integer", "STATUS_OK", &status_ok},
+           {"prefix", "myprefix", &p},
+           {}};
+
+   int result = frrscript_call(fs, env);
+
+
+To execute a loaded script, we need to define the inputs. These inputs are
+passed by binding values to variable names that will be accessible within the
+Lua environment. Basically, all communication with the script takes place via
+global variables within the script, and to provide inputs we predefine globals
+before the script runs. This is done by passing ``frrscript_call()`` an array
+of ``struct frrscript_env``. Each struct has three fields. The first identifies
+the type of the value being passed; more on this later. The second defines the
+name of the global variable within the script environment to bind the third
+argument (the value) to.
+
+The script is then executed and returns a general status code. In the success
+case this will be 0; otherwise it will be nonzero. The script itself does not
+determine this code; it is provided by the Lua interpreter.
+
+
+Querying State
+^^^^^^^^^^^^^^
+
+When a chunk is executed, its state at exit is preserved and can be inspected.
+
+After running a script, results may be retrieved by querying the script's
+state. Again, this is done by retrieving the values of global variables, which
+are known to the script author to be "output" variables.
+
+A result is retrieved like so:
+
+.. code-block:: c
+
+   struct frrscript_env myresult_env = {"string", "myresult"};
+
+   char *myresult = frrscript_get_result(fs, &myresult_env);
+
+   ... do something ...
+
+   XFREE(MTYPE_TMP, myresult);
+
+
+As with arguments, results are retrieved by providing a ``struct
+frrscript_env`` specifying a type and a global name. No value is necessary, nor
+is it modified by ``frrscript_get_result()``. That function simply extracts the
+requested value from the script state and returns it.
+
+In most cases the returned value will be allocated with ``MTYPE_TMP`` and will
+need to be freed after use.
+
+
+Unloading
+^^^^^^^^^
+
+To destroy a script and its associated state:
+
+.. code-block:: c
+
+   frrscript_unload(fs);
+
+Values returned by ``frrscript_get_result`` are still valid after the script
+they were retrieved from is unloaded.
+
+Note that you must unload and then load the script if you want to reset its
+state, for example to run it again with different inputs. Otherwise the state
+from the previous run carries over into subsequent runs.
+
+
+.. _marshalling:
+
+Marshalling
+^^^^^^^^^^^
+
+Earlier sections glossed over the meaning of the type name field in ``struct
+frrscript_env`` and how data is passed between C and Lua. Lua, as a dynamically
+typed, garbage collected language, cannot directly use C values without some
+kind of marshalling / unmarshalling system to translate types between the two
+runtimes.
+
+Lua communicates with C code using a stack. C code wishing to provide data to
+Lua scripts must provide a function that marshalls the C data into a Lua
+representation and pushes it on the stack. C code wishing to retrieve data from
+Lua must provide a corresponding unmarshalling function that retrieves a Lua
+value from the stack and converts it to the corresponding C type. These two
+functions, together with a chosen name of the type they operate on, are
+referred to as ``codecs`` in FRR.
+
+A codec is defined as:
+
+.. code-block:: c
+
+   typedef void (*encoder_func)(lua_State *, const void *);
+   typedef void *(*decoder_func)(lua_State *, int);
+
+   struct frrscript_codec {
+           const char *typename;
+           encoder_func encoder;
+           decoder_func decoder;
+   };
+
+A typename string and two function pointers.
+
+``typename`` can be anything you want. For example, for the combined types of
+``struct prefix`` and its equivalent in Lua I have chosen the name ``prefix``.
+There is no restriction on naming here; it is just a human name used as a key
+and specified when passing and retrieving values.
+
+``encoder`` is a function that takes a ``lua_State *`` and a C type and pushes
+onto the Lua stack a value representing the C type. For C structs, the usual
+case, this will typically be a Lua table (tables are the only data structure
+Lua has). For example, here is the encoder function for ``struct prefix``:
+
+
+.. code-block:: c
+
+   void lua_pushprefix(lua_State *L, const struct prefix *prefix)
+   {
+           char buffer[PREFIX_STRLEN];
+
+           zlog_debug("frrlua: pushing prefix table");
+
+           lua_newtable(L);
+           lua_pushstring(L, prefix2str(prefix, buffer, PREFIX_STRLEN));
+           lua_setfield(L, -2, "network");
+           lua_pushinteger(L, prefix->prefixlen);
+           lua_setfield(L, -2, "length");
+           lua_pushinteger(L, prefix->family);
+           lua_setfield(L, -2, "family");
+   }
+
+This function pushes a single value onto the Lua stack. It is a table whose equivalent in Lua is:
+
+.. code-block::
+
+   { ["network"] = "1.2.3.4/24", ["length"] = 24, ["family"] = 2 }
+
+
+``decoder`` does the reverse; it takes a ``lua_State *`` and an index into the
+stack, and unmarshalls a Lua value there into the corresponding C type. Again
+for ``struct prefix``:
+
+
+.. code-block:: c
+
+   void *lua_toprefix(lua_State *L, int idx)
+   {
+           struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
+
+           lua_getfield(L, idx, "network");
+           str2prefix(lua_tostring(L, -1), p);
+           lua_pop(L, 1);
+
+           return p;
+   }
+
+By convention these functions should be called ``lua_to*``, as this is the
+naming convention used by the Lua C library for the basic types, e.g.
+``lua_tointeger`` and ``lua_tostring``.
+
+The returned data must always be copied off the stack and the copy must be
+allocated with ``MTYPE_TMP``. This way it is possible to unload the script
+(destroy the state) without invalidating any references to values stored in it.
+
+To register a new type with its corresponding encoding functions:
+
+.. code-block:: c
+
+   struct frrscript_codec frrscript_codecs_lib[] = {
+           {.typename = "prefix",
+            .encoder = (encoder_func)lua_pushprefix,
+            .decoder = lua_toprefix},
+           {.typename = "sockunion",
+            .encoder = (encoder_func)lua_pushsockunion,
+            .decoder = lua_tosockunion},
+           ...
+           {}};
+
+   frrscript_register_type_codecs(frrscript_codecs_lib);
+
+From this point on the type names are available to be used when calling any
+script and getting its results.
+
+.. note::
+
+   Marshalled types are not restricted to simple values like integers, strings
+   and tables. It is possible to marshall a type such that the resultant object
+   in Lua is an actual object-oriented object, complete with methods that call
+   back into defined C functions. See the Lua manual for how to do this; for a
+   code example, look at how zlog is exported into the script environment.
+
+
+Script Environment
+------------------
+
+Logging
+^^^^^^^
+
+For convenience, script environments are populated by default with a ``log``
+object which contains methods corresponding to each of the ``zlog`` levels:
+
+.. 
code-block:: lua + + log.info("info") + log.warn("warn") + log.error("error") + log.notice("notice") + log.debug("debug") + +The log messages will show up in the daemon's log output. + + +Examples +-------- + +For a complete code example involving passing custom types, retrieving results, +and doing complex calculations in Lua, look at the implementation of the +``match script SCRIPT`` command for BGP routemaps. This example calls into a +script with a route prefix and attributes received from a peer and expects the +script to return a match / no match / match and update result. + +An example script to use with this follows. This script matches, does not match +or updates a route depending on how many BGP UPDATE messages the peer has +received when the script is called, simply as a demonstration of what can be +accomplished with scripting. + +.. code-block:: lua + + + -- Example route map matching + -- author: qlyoung + -- + -- The following variables are available to us: + -- log + -- logging library, with the usual functions + -- prefix + -- the route under consideration + -- attributes + -- the route's attributes + -- peer + -- the peer which received this route + -- RM_FAILURE + -- status code in case of failure + -- RM_NOMATCH + -- status code for no match + -- RM_MATCH + -- status code for match + -- RM_MATCH_AND_CHANGE + -- status code for match-and-set + -- + -- We need to set the following out values: + -- action + -- Set to the appropriate status code to indicate what we did + -- attributes + -- Setting fields on here will propagate them back up to the caller if + -- 'action' is set to RM_MATCH_AND_CHANGE. + + + log.info("Evaluating route " .. prefix.network .. " from peer " .. peer.remote_id.string) + + function on_match (prefix, attrs) + log.info("Match") + action = RM_MATCH + end + + function on_nomatch (prefix, attrs) + log.info("No match") + action = RM_NOMATCH + end + + function on_match_and_change (prefix, attrs) + action = RM_MATCH_AND_CHANGE + log.info("Match and change") + attrs["metric"] = attrs["metric"] + 7 + end + + special_routes = { + ["172.16.10.4/24"] = on_match, + ["172.16.13.1/8"] = on_nomatch, + ["192.168.0.24/8"] = on_match_and_change, + } + + + if special_routes[prefix.network] then + special_routes[prefix.network](prefix, attributes) + elseif peer.stats.update_in % 3 == 0 then + on_match(prefix, attributes) + elseif peer.stats.update_in % 2 == 0 then + on_nomatch(prefix, attributes) + else + on_match_and_change(prefix, attributes) + end + diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am index 3c0d203007..f7e4486ef0 100644 --- a/doc/developer/subdir.am +++ b/doc/developer/subdir.am @@ -27,16 +27,17 @@ dev_RSTFILES = \ doc/developer/building.rst \ doc/developer/cli.rst \ doc/developer/conf.py \ + doc/developer/cross-compiling.rst \ doc/developer/frr-release-procedure.rst \ doc/developer/grpc.rst \ doc/developer/hooks.rst \ doc/developer/include-compile.rst \ doc/developer/index.rst \ doc/developer/library.rst \ + doc/developer/link-state.rst \ doc/developer/lists.rst \ doc/developer/locking.rst \ doc/developer/logging.rst \ - doc/developer/lua.rst \ doc/developer/memtypes.rst \ doc/developer/modules.rst \ doc/developer/next-hop-tracking.rst \ @@ -46,12 +47,19 @@ dev_RSTFILES = \ doc/developer/packaging-debian.rst \ doc/developer/packaging-redhat.rst \ doc/developer/packaging.rst \ + doc/developer/path-internals-daemon.rst \ + doc/developer/path-internals-pcep.rst \ + doc/developer/path-internals.rst \ + doc/developer/path.rst \ 
doc/developer/rcu.rst \ + doc/developer/scripting.rst \ doc/developer/static-linking.rst \ + doc/developer/tracing.rst \ doc/developer/testing.rst \ doc/developer/topotests-snippets.rst \ doc/developer/topotests.rst \ doc/developer/workflow.rst \ + doc/developer/xrefs.rst \ doc/developer/zebra.rst \ # end diff --git a/doc/developer/topotests-markers.rst b/doc/developer/topotests-markers.rst new file mode 100644 index 0000000000..9f92412595 --- /dev/null +++ b/doc/developer/topotests-markers.rst @@ -0,0 +1,114 @@ +.. _topotests-markers: + +Markers +-------- + +To allow for automated selective testing on large scale continuous integration +systems, all tests must be marked with at least one of the following markers: + +* babeld +* bfdd +* bgpd +* eigrpd +* isisd +* ldpd +* nhrpd +* ospf6d +* ospfd +* pathd +* pbrd +* pimd +* ripd +* ripngd +* sharpd +* staticd +* vrrpd + +The markers corespond to the daemon subdirectories in FRR's source code and have +to be added to tests on a module level depending on which daemons are used +during the test. + +The goal is to have continuous integration systems scan code submissions, detect +changes to files in a daemons subdirectory and select only tests using that +daemon to run to shorten developers waiting times for test results and save test +infrastructure resources. + +Newly written modules and code changes on tests, which do not contain any or +incorrect markers will be rejected by reviewers. + + +Registering markers +^^^^^^^^^^^^^^^^^^^ +The Registration of new markers takes place in the file +``tests/topotests/pytest.ini``: + +.. code:: python3 + + # tests/topotests/pytest.ini + [pytest] + ... + markers = + babeld: Tests that run against BABELD + bfdd: Tests that run against BFDD + ... + vrrpd: Tests that run against VRRPD + + +Adding markers to tests +^^^^^^^^^^^^^^^^^^^^^^^ +Markers are added to a test by placing a global variable in the test module. + +Adding a single marker: + +.. code:: python3 + + import pytest + ... + + # add after imports, before defining classes or functions: + pytestmark = pytest.mark.bfdd + + ... + + def test_using_bfdd(): + + +Adding multiple markers: + +.. code:: python3 + + import pytest + ... + + # add after imports, before defining classes or functions: + pytestmark = [ + pytest.mark.bgpd, + pytest.mark.ospfd, + pytest.mark.ospf6d + ] + + ... + + def test_using_bgpd_ospfd_ospf6d(): + + +Selecting marked modules for testing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Selecting by a single marker: + +.. code:: bash + + pytest -v -m isisd + +Selecting by multiple markers: + +.. code:: bash + + pytest -v -m "isisd or ldpd or nhrpd" + + +Further Information +^^^^^^^^^^^^^^^^^^^ +The `online pytest documentation <https://docs.pytest.org/en/stable/example/markers.html>`_ +provides further information and usage examples for pytest markers. + diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst index 5486fd826d..93d81548b2 100644 --- a/doc/developer/topotests.rst +++ b/doc/developer/topotests.rst @@ -21,8 +21,10 @@ Installing Mininet Infrastructure apt-get install mininet apt-get install python-pip apt-get install iproute + apt-get install iperf pip install ipaddr pip install "pytest<5" + pip install "scapy>=2.4.2" pip install exabgp==3.4.17 (Newer 4.0 version of exabgp is not yet supported) useradd -d /var/run/exabgp/ -s /bin/false exabgp @@ -49,6 +51,28 @@ Next, update security limits by changing :file:`/etc/security/limits.conf` to:: Reboot for options to take effect. 
+SNMP Utilities Installation +""""""""""""""""""""""""""" + +To run SNMP test you need to install SNMP utilities and MIBs. Unfortunately +there are some errors in the upstream MIBS which need to be patched up. The +following steps will get you there on Ubuntu 20.04. + +.. code:: shell + + apt install snmpd snmp + apt install snmp-mibs-downloader + download-mibs + wget http://www.iana.org/assignments/ianaippmmetricsregistry-mib/ianaippmmetricsregistry-mib -O /usr/share/snmp/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB + wget http://pastebin.com/raw.php?i=p3QyuXzZ -O /usr/share/snmp/mibs/ietf/SNMPv2-PDU + wget http://pastebin.com/raw.php?i=gG7j8nyk -O /usr/share/snmp/mibs/ietf/IPATM-IPMC-MIB + edit /etc/snmp/snmp.conf to look like this + # As the snmp packages come without MIB files due to license reasons, loading + # of MIBs is disabled by default. If you added the MIBs you can reenable + # loading them by commenting out the following line. + mibs +ALL + + FRR Installation ^^^^^^^^^^^^^^^^ @@ -84,6 +108,7 @@ If you prefer to manually build FRR, then use the following suggested config: --enable-user=frr \ --enable-group=frr \ --enable-vty-group=frrvty \ + --enable-snmp=agentx \ --with-pkg-extra-version=-my-manual-build And create ``frr`` user and ``frrvty`` group as follows: @@ -769,6 +794,8 @@ Requirements: conforms with this, run it without the :option:`-s` parameter. - Use `black <https://github.com/psf/black>`_ code formatter before creating a pull request. This ensures we have a unified code style. +- Mark test modules with pytest markers depending on the daemons used during the + tests (s. Markers) Tips: @@ -927,6 +954,8 @@ does what you need. If nothing is similar, then you may create a new topology, preferably, using the newest template (:file:`tests/topotests/example-test/test_template.py`). +.. include:: topotests-markers.rst + .. include:: topotests-snippets.rst License diff --git a/doc/developer/tracing.rst b/doc/developer/tracing.rst index ee0a6be008..c194ae1270 100644 --- a/doc/developer/tracing.rst +++ b/doc/developer/tracing.rst @@ -57,7 +57,7 @@ run the target in non-forking mode (no ``-d``) and use LTTng as usual (refer to LTTng user manual). When using USDT probes with LTTng, follow the example in `this article <https://lttng.org/blog/2019/10/15/new-dynamic-user-space-tracing-in-lttng/>`_. -To trace with dtrace or SystemTap, compile with :option:`--enable-usdt=yes` and +To trace with dtrace or SystemTap, compile with `--enable-usdt=yes` and use your tracer as usual. To see available USDT probes:: @@ -308,6 +308,31 @@ Limitations Tracers do not like ``fork()`` or ``dlopen()``. LTTng has some workarounds for this involving interceptor libraries using ``LD_PRELOAD``. +If you're running FRR in a typical daemonizing way (``-d`` to the daemons) +you'll need to run the daemons like so: + +.. code-block:: shell + + LD_PRELOAD=liblttng-ust-fork.so <daemon> + + +If you're using systemd this you can accomplish this for all daemons by +modifying ``frr.service`` like so: + +.. code-block:: diff + + --- a/frr.service + +++ b/frr.service + @@ -7,6 +7,7 @@ Before=network.target + OnFailure=heartbeat-failed@%n.service + + [Service] + +Environment="LD_PRELOAD=liblttng-ust-fork.so" + Nice=-5 + Type=forking + NotifyAccess=all + + USDT tracepoints are relatively high overhead and probably shouldn't be used for "flight recorder" functionality, i.e. enabling and passively recording all events for monitoring purposes. 
It's generally okay to use LTTng like this,
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 4183ac6480..861d87b998 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -1262,6 +1262,9 @@ When documented this way, CLI commands can be cross referenced with the
 This is very helpful for users who want to quickly remind themselves what a
 particular command does.
 
+When documenting a CLI command that has a ``no`` form, please do not include
+the ``no`` in the ``.. index::`` line.
+
 Configuration Snippets
 ^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/doc/developer/xrefs.rst b/doc/developer/xrefs.rst
new file mode 100644
index 0000000000..6a0794d41b
--- /dev/null
+++ b/doc/developer/xrefs.rst
@@ -0,0 +1,170 @@
+.. _xrefs:
+
+Introspection (xrefs)
+=====================
+
+The FRR library provides an introspection facility called "xrefs." The intent
+is to provide structured access to annotated entities in the compiled binary,
+such as log messages and thread scheduling calls.
+
+Enabling and use
+----------------
+
+Support for emitting an xref is included in the macros for the specific
+entities, e.g. :c:func:`zlog_info` contains the relevant statements. The only
+requirement for the system to work is a GNU-compatible linker that supports
+section start/end symbols. (The only known linker on any system FRR supports
+that does not do this is the Solaris linker.)
+
+To verify xrefs have been included in a binary or dynamic library, run
+``readelf -n binary``. For individual object files, it's
+``readelf -S object.o | grep xref_array`` instead.
+
+An extraction tool will be added in a future commit.
+
+Structure and contents
+----------------------
+
+As a slight improvement to security and fault detection, xrefs are divided into
+a ``const struct xref *`` and an optional ``struct xrefdata *``. The required
+const part contains:
+
+.. c:member:: enum xref_type xref.type
+
+   Identifies what kind of object the xref points to.
+
+.. c:member:: int xref.line
+.. c:member:: const char *xref.file
+.. c:member:: const char *xref.func
+
+   Source code location of the xref. ``func`` will be ``<global>`` for
+   xrefs outside of a function.
+
+.. c:member:: struct xrefdata *xref.xrefdata
+
+   The optional writable part of the xref. NULL if no non-const part exists.
+
+The optional non-const part has:
+
+.. c:member:: const struct xref *xrefdata.xref
+
+   Pointer back to the constant part. Since circular pointers are close to
+   impossible to emit from static variables inside a function body, this
+   is initialized at startup.
+
+.. c:member:: char xrefdata.uid[16]
+
+   Unique identifier, see below.
+
+.. c:member:: const char *xrefdata.hashstr
+.. c:member:: uint32_t xrefdata.hashu32[2]
+
+   Input to unique identifier calculation. These should encompass all
+   details needed to make an xref unique. If more than one string should
+   be considered, use string concatenation for the initializer.
+
+Both structures can be extended by embedding them in a larger type-specific
+struct, e.g. ``struct xref_logmsg *``.
+
+Unique identifiers
+------------------
+
+All xrefs that have a writable ``struct xrefdata *`` part are assigned a
+unique identifier, which is formed as base32 (Crockford) SHA256 over:
+
+- the source filename
+- the ``hashstr`` field
+- the ``hashu32`` fields
+
+.. note::
+
+   Function names and line numbers are intentionally not included to allow
+   moving items within a file without affecting the identifier.
+
+For running executables, this hash is calculated once at startup. When
+directly reading from an ELF file with external tooling, the value must be
+calculated when necessary.
+
+The identifiers have the form ``AXXXX-XXXXX`` where ``X`` is
+``0-9, A-Z except I,L,O,U`` and ``A`` is ``G-Z except I,L,O,U`` (i.e. the
+identifiers always start with a letter). When reading identifiers from user
+input, ``I`` and ``L`` should be replaced with ``1`` and ``O`` should be
+replaced with ``0``. There are 49 bits of entropy in this identifier.
+
+Underlying machinery
+--------------------
+
+Xrefs are nothing other than global variables with some extra glue that makes
+it possible to find them from the outside by looking at the binary. The first
+non-obvious part is that they can occur inside of functions, since they're
+defined as ``static``. They don't have a visible name -- they don't need one.
+
+To make finding these variables possible, another global variable, a pointer
+to the first one, is created in the same way. However, it is put in a special
+ELF section through ``__attribute__((section("xref_array")))``. This is the
+section you can see with readelf.
+
+Finally, on the level of a whole executable or library, the linker will place
+the individual pointers consecutively, since they're in the same
+section — hence the array. Start and end of this array are given by the
+linker-autogenerated ``__start_xref_array`` and ``__stop_xref_array`` symbols.
+Using these, both a constructor to run at startup and an ELF note are
+created.
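+
+Reduced to its essentials, the machinery looks like the sketch below. This is
+a generic illustration of the linker technique only; the type and variable
+names are made up and do not match FRR's actual macros::
+
+    /* stand-in for the real xref structure */
+    struct xref { int type; };
+
+    /* the annotated entity itself; in FRR this is a static inside a function */
+    static const struct xref my_xref = { 1 };
+
+    /* pointer to it, placed in the special section; "used" keeps the
+     * otherwise unreferenced variable from being discarded */
+    static const struct xref *my_xref_ptr
+            __attribute__((used, section("xref_array"))) = &my_xref;
+
+    /* provided automatically by a GNU-compatible linker for that section */
+    extern const struct xref *__start_xref_array[];
+    extern const struct xref *__stop_xref_array[];
+
+    /* runs at startup and can walk every xref linked into the binary */
+    __attribute__((constructor))
+    static void walk_xrefs(void)
+    {
+            for (const struct xref **iter = __start_xref_array;
+                 iter < __stop_xref_array; iter++) {
+                    /* *iter points at one xref, wherever it was defined */
+            }
+    }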
+
+The ELF note is the entrypoint for externally retrieving xrefs from a binary
+without having to run it. It can be found by walking through the ELF data
+structures even if the binary has been fully stripped of debug and section
+information. SystemTap's SDT probes and LTTng's trace points work in the same
+way (though they emit one note for each probe, while xrefs emit only one note
+in total, which refers to the array). Using xrefs does not impact SystemTap
+or LTTng; the notes have identifiers by which they can be distinguished.
+
+The ELF structure of a linked binary (library or executable) will look like
+this::
+
+    $ readelf --wide -l -n lib/.libs/libfrr.so
+
+    Elf file type is DYN (Shared object file)
+    Entry point 0x67d21
+    There are 12 program headers, starting at offset 64
+
+    Program Headers:
+      Type         Offset   VirtAddr           PhysAddr           FileSiz  MemSiz   Flg Align
+      PHDR         0x000040 0x0000000000000040 0x0000000000000040 0x0002a0 0x0002a0 R   0x8
+      INTERP       0x125560 0x0000000000125560 0x0000000000125560 0x00001c 0x00001c R   0x10
+          [Requesting program interpreter: /lib64/ld-linux-x86-64.so.2]
+      LOAD         0x000000 0x0000000000000000 0x0000000000000000 0x02aff0 0x02aff0 R   0x1000
+      LOAD         0x02b000 0x000000000002b000 0x000000000002b000 0x0b2889 0x0b2889 R E 0x1000
+      LOAD         0x0de000 0x00000000000de000 0x00000000000de000 0x070048 0x070048 R   0x1000
+      LOAD         0x14e428 0x000000000014f428 0x000000000014f428 0x00fb70 0x01a2b8 RW  0x1000
+      DYNAMIC      0x157a40 0x0000000000158a40 0x0000000000158a40 0x000270 0x000270 RW  0x8
+      NOTE         0x0002e0 0x00000000000002e0 0x00000000000002e0 0x00004c 0x00004c R   0x4
+      TLS          0x14e428 0x000000000014f428 0x000000000014f428 0x000000 0x000008 R   0x8
+      GNU_EH_FRAME 0x12557c 0x000000000012557c 0x000000000012557c 0x00819c 0x00819c R   0x4
+      GNU_STACK    0x000000 0x0000000000000000 0x0000000000000000 0x000000 0x000000 RW  0x10
+      GNU_RELRO    0x14e428 0x000000000014f428 0x000000000014f428 0x009bd8 0x009bd8 R   0x1
+
+    (...)
+ + Displaying notes found in: .note.gnu.build-id + Owner Data size Description + GNU 0x00000014 NT_GNU_BUILD_ID (unique build ID bitstring) Build ID: 6a1f66be38b523095ebd6ec13cc15820cede903d + + Displaying notes found in: .note.FRR + Owner Data size Description + FRRouting 0x00000010 Unknown note type: (0x46455258) description data: 6c eb 15 00 00 00 00 00 74 ec 15 00 00 00 00 00 + +Where 0x15eb6c…0x15ec74 are the offsets (relative to the note itself) where +the xref array is in the file. Also note the owner is clearly marked as +"FRRouting" and the type is "XREF" in hex. + +For SystemTap's use of ELF notes, refer to +https://libstapsdt.readthedocs.io/en/latest/how-it-works/internals.html as an +entry point. + +.. note:: + + Due to GCC bug 41091, the "xref_array" section is not correctly generated + for C++ code when compiled by GCC. A workaround is present for runtime + functionality, but to extract the xrefs from a C++ source file, it needs + to be built with clang (or a future fixed version of GCC) instead. diff --git a/doc/figures/pcep_module_threading_overview.svg b/doc/figures/pcep_module_threading_overview.svg new file mode 100644 index 0000000000..4d2d2a254a --- /dev/null +++ b/doc/figures/pcep_module_threading_overview.svg @@ -0,0 +1,481 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> +<svg version="1.2" width="210mm" height="297mm" viewBox="0 0 21000 29700" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xml:space="preserve"> + <defs class="ClipPathGroup"> + <clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse"> + <rect x="0" y="0" width="21000" height="29700"/> + </clipPath> + <clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse"> + <rect x="21" y="29" width="20958" height="29641"/> + </clipPath> + </defs> + <defs> + <font id="EmbeddedFont_1" horiz-adv-x="2048"> + <font-face font-family="Liberation Sans embedded" units-per-em="2048" font-weight="normal" font-style="normal" ascent="1852" descent="423"/> + <missing-glyph horiz-adv-x="2048" d="M 0,0 L 2047,0 2047,2047 0,2047 0,0 Z"/> + <glyph unicode="u" horiz-adv-x="874" d="M 314,1082 L 314,396 C 314,325 321,269 335,230 349,191 371,162 402,145 433,128 478,119 537,119 624,119 692,149 742,208 792,267 817,350 817,455 L 817,1082 997,1082 997,231 C 997,105 999,28 1003,0 L 833,0 C 832,3 832,12 831,27 830,42 830,59 829,78 828,97 826,132 825,185 L 822,185 C 781,110 733,58 679,27 624,-4 557,-20 476,-20 357,-20 271,10 216,69 161,128 133,225 133,361 L 133,1082 314,1082 Z"/> + <glyph unicode="t" horiz-adv-x="531" d="M 554,8 C 495,-8 434,-16 372,-16 228,-16 156,66 156,229 L 156,951 31,951 31,1082 163,1082 216,1324 336,1324 336,1082 536,1082 536,951 336,951 336,268 C 336,216 345,180 362,159 379,138 408,127 450,127 474,127 509,132 554,141 L 554,8 Z"/> + <glyph unicode="r" horiz-adv-x="530" d="M 142,0 L 142,830 C 142,906 140,990 136,1082 L 306,1082 C 311,959 314,886 314,861 L 318,861 C 347,954 380,1017 417,1051 454,1085 507,1102 575,1102 599,1102 623,1099 648,1092 L 648,927 C 624,934 592,937 552,937 477,937 420,905 381,841 342,776 322,684 322,564 L 322,0 142,0 Z"/> + 
<glyph unicode="p" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 488,-20 376,43 319,168 L 314,168 C 317,163 318,106 318,-2 L 318,-425 138,-425 138,861 C 138,972 136,1046 132,1082 L 306,1082 C 307,1079 308,1070 309,1054 310,1037 312,1012 314,978 315,944 316,921 316,908 L 320,908 C 352,975 394,1024 447,1055 500,1086 569,1101 655,1101 788,1101 888,1056 954,967 1020,878 1053,737 1053,546 Z M 864,542 C 864,693 844,800 803,865 762,930 698,962 609,962 538,962 482,947 442,917 401,887 371,840 350,777 329,713 318,630 318,528 318,386 341,281 386,214 431,147 505,113 607,113 696,113 762,146 803,212 844,277 864,387 864,542 Z"/> + <glyph unicode="o" horiz-adv-x="980" d="M 1053,542 C 1053,353 1011,212 928,119 845,26 724,-20 565,-20 407,-20 288,28 207,125 126,221 86,360 86,542 86,915 248,1102 571,1102 736,1102 858,1057 936,966 1014,875 1053,733 1053,542 Z M 864,542 C 864,691 842,800 798,868 753,935 679,969 574,969 469,969 393,935 346,866 299,797 275,689 275,542 275,399 298,292 345,221 391,149 464,113 563,113 671,113 748,148 795,217 841,286 864,395 864,542 Z"/> + <glyph unicode="n" horiz-adv-x="874" d="M 825,0 L 825,686 C 825,757 818,813 804,852 790,891 768,920 737,937 706,954 661,963 602,963 515,963 447,933 397,874 347,815 322,732 322,627 L 322,0 142,0 142,851 C 142,977 140,1054 136,1082 L 306,1082 C 307,1079 307,1070 308,1055 309,1040 310,1024 311,1005 312,986 313,950 314,897 L 317,897 C 358,972 406,1025 461,1056 515,1087 582,1102 663,1102 782,1102 869,1073 924,1014 979,955 1006,857 1006,721 L 1006,0 825,0 Z"/> + <glyph unicode="l" horiz-adv-x="187" d="M 138,0 L 138,1484 318,1484 318,0 138,0 Z"/> + <glyph unicode="i" horiz-adv-x="187" d="M 137,1312 L 137,1484 317,1484 317,1312 137,1312 Z M 137,0 L 137,1082 317,1082 317,0 137,0 Z"/> + <glyph unicode="h" horiz-adv-x="874" d="M 317,897 C 356,968 402,1020 457,1053 511,1086 580,1102 663,1102 780,1102 867,1073 923,1015 978,956 1006,858 1006,721 L 1006,0 825,0 825,686 C 825,762 818,819 804,856 790,893 767,920 735,937 703,954 659,963 602,963 517,963 450,934 399,875 348,816 322,737 322,638 L 322,0 142,0 142,1484 322,1484 322,1098 C 322,1057 321,1015 319,972 316,929 315,904 314,897 L 317,897 Z"/> + <glyph unicode="e" horiz-adv-x="980" d="M 276,503 C 276,379 302,283 353,216 404,149 479,115 578,115 656,115 719,131 766,162 813,193 844,233 861,281 L 1019,236 C 954,65 807,-20 578,-20 418,-20 296,28 213,123 129,218 87,360 87,548 87,727 129,864 213,959 296,1054 416,1102 571,1102 889,1102 1048,910 1048,527 L 1048,503 276,503 Z M 862,641 C 852,755 823,838 775,891 727,943 658,969 568,969 481,969 412,940 361,882 310,823 282,743 278,641 L 862,641 Z"/> + <glyph unicode="d" horiz-adv-x="927" d="M 821,174 C 788,105 744,55 689,25 634,-5 565,-20 484,-20 347,-20 247,26 183,118 118,210 86,349 86,536 86,913 219,1102 484,1102 566,1102 634,1087 689,1057 744,1027 788,979 821,914 L 823,914 821,1035 821,1484 1001,1484 1001,223 C 1001,110 1003,36 1007,0 L 835,0 C 833,11 831,35 829,74 826,113 825,146 825,174 L 821,174 Z M 275,542 C 275,391 295,282 335,217 375,152 440,119 530,119 632,119 706,154 752,225 798,296 821,405 821,554 821,697 798,802 752,869 706,936 633,969 532,969 441,969 376,936 336,869 295,802 275,693 275,542 Z"/> + <glyph unicode="c" horiz-adv-x="901" d="M 275,546 C 275,402 298,295 343,226 388,157 457,122 548,122 612,122 666,139 709,174 752,209 778,262 788,334 L 970,322 C 956,218 912,135 837,73 762,11 668,-20 553,-20 402,-20 286,28 207,124 127,219 87,359 87,542 87,724 127,863 207,959 287,1054 402,1102 551,1102 662,1102 754,1073 827,1016 900,959 945,880 964,779 L 
779,765 C 770,825 746,873 708,908 670,943 616,961 546,961 451,961 382,929 339,866 296,803 275,696 275,546 Z"/> + <glyph unicode="b" horiz-adv-x="953" d="M 1053,546 C 1053,169 920,-20 655,-20 573,-20 505,-5 451,25 396,54 352,102 318,168 L 316,168 C 316,147 315,116 312,74 309,31 307,7 306,0 L 132,0 C 136,36 138,110 138,223 L 138,1484 318,1484 318,1061 C 318,1018 317,967 314,908 L 318,908 C 351,977 396,1027 451,1057 506,1087 574,1102 655,1102 792,1102 892,1056 957,964 1021,872 1053,733 1053,546 Z M 864,540 C 864,691 844,800 804,865 764,930 699,963 609,963 508,963 434,928 388,859 341,790 318,680 318,529 318,387 341,282 386,215 431,147 505,113 607,113 698,113 763,147 804,214 844,281 864,389 864,540 Z"/> + <glyph unicode="a" horiz-adv-x="1060" d="M 414,-20 C 305,-20 224,9 169,66 114,123 87,202 87,302 87,414 124,500 198,560 271,620 390,652 554,656 L 797,660 797,719 C 797,807 778,870 741,908 704,946 645,965 565,965 484,965 426,951 389,924 352,897 330,853 323,793 L 135,810 C 166,1005 310,1102 569,1102 705,1102 807,1071 876,1009 945,946 979,856 979,738 L 979,272 C 979,219 986,179 1000,152 1014,125 1041,111 1080,111 1097,111 1117,113 1139,118 L 1139,6 C 1094,-5 1047,-10 1000,-10 933,-10 885,8 855,43 824,78 807,132 803,207 L 797,207 C 751,124 698,66 637,32 576,-3 501,-20 414,-20 Z M 455,115 C 521,115 580,130 631,160 682,190 723,231 753,284 782,336 797,390 797,445 L 797,534 600,530 C 515,529 451,520 408,504 364,488 330,463 307,430 284,397 272,353 272,299 272,240 288,195 320,163 351,131 396,115 455,115 Z"/> + <glyph unicode="_" horiz-adv-x="1218" d="M -31,-407 L -31,-277 1162,-277 1162,-407 -31,-407 Z"/> + <glyph unicode="Y" horiz-adv-x="1298" d="M 777,584 L 777,0 587,0 587,584 45,1409 255,1409 684,738 1111,1409 1321,1409 777,584 Z"/> + <glyph unicode="X" horiz-adv-x="1298" d="M 1112,0 L 689,616 257,0 46,0 582,732 87,1409 298,1409 690,856 1071,1409 1282,1409 800,739 1323,0 1112,0 Z"/> + <glyph unicode="V" horiz-adv-x="1377" d="M 782,0 L 584,0 9,1409 210,1409 600,417 684,168 768,417 1156,1409 1357,1409 782,0 Z"/> + <glyph unicode="T" horiz-adv-x="1192" d="M 720,1253 L 720,0 530,0 530,1253 46,1253 46,1409 1204,1409 1204,1253 720,1253 Z"/> + <glyph unicode="S" horiz-adv-x="1192" d="M 1272,389 C 1272,259 1221,158 1120,87 1018,16 875,-20 690,-20 347,-20 148,99 93,338 L 278,375 C 299,290 345,228 414,189 483,149 578,129 697,129 820,129 916,150 983,193 1050,235 1083,297 1083,379 1083,425 1073,462 1052,491 1031,520 1001,543 963,562 925,581 880,596 827,609 774,622 716,635 652,650 541,675 456,699 399,724 341,749 295,776 262,807 229,837 203,872 186,913 168,954 159,1000 159,1053 159,1174 205,1267 298,1332 390,1397 522,1430 694,1430 854,1430 976,1406 1061,1357 1146,1308 1205,1224 1239,1106 L 1051,1073 C 1030,1148 991,1202 933,1236 875,1269 795,1286 692,1286 579,1286 493,1267 434,1230 375,1193 345,1137 345,1063 345,1020 357,984 380,956 403,927 436,903 479,884 522,864 609,840 738,811 781,801 825,791 868,781 911,770 952,758 991,744 1030,729 1067,712 1102,693 1136,674 1166,650 1191,622 1216,594 1236,561 1251,523 1265,485 1272,440 1272,389 Z"/> + <glyph unicode="R" horiz-adv-x="1244" d="M 1164,0 L 798,585 359,585 359,0 168,0 168,1409 831,1409 C 990,1409 1112,1374 1199,1303 1285,1232 1328,1133 1328,1006 1328,901 1298,813 1237,742 1176,671 1091,626 984,607 L 1384,0 1164,0 Z M 1136,1004 C 1136,1086 1108,1149 1053,1192 997,1235 917,1256 812,1256 L 359,1256 359,736 820,736 C 921,736 999,760 1054,807 1109,854 1136,919 1136,1004 Z"/> + <glyph unicode="P" horiz-adv-x="1112" d="M 1258,985 C 1258,852 1215,746 1128,667 1041,588 
922,549 773,549 L 359,549 359,0 168,0 168,1409 761,1409 C 919,1409 1041,1372 1128,1298 1215,1224 1258,1120 1258,985 Z M 1066,983 C 1066,1165 957,1256 738,1256 L 359,1256 359,700 746,700 C 959,700 1066,794 1066,983 Z"/> + <glyph unicode="O" horiz-adv-x="1430" d="M 1495,711 C 1495,564 1467,435 1411,324 1354,213 1273,128 1168,69 1063,10 938,-20 795,-20 650,-20 526,9 421,68 316,127 235,212 180,323 125,434 97,563 97,711 97,936 159,1113 282,1240 405,1367 577,1430 797,1430 940,1430 1065,1402 1170,1345 1275,1288 1356,1205 1412,1096 1467,987 1495,859 1495,711 Z M 1300,711 C 1300,886 1256,1024 1169,1124 1081,1224 957,1274 797,1274 636,1274 511,1225 423,1126 335,1027 291,889 291,711 291,534 336,394 425,291 514,187 637,135 795,135 958,135 1083,185 1170,286 1257,386 1300,528 1300,711 Z"/> + <glyph unicode="N" horiz-adv-x="1165" d="M 1082,0 L 328,1200 333,1103 338,936 338,0 168,0 168,1409 390,1409 1152,201 C 1144,332 1140,426 1140,485 L 1140,1409 1312,1409 1312,0 1082,0 Z"/> + <glyph unicode="M" horiz-adv-x="1377" d="M 1366,0 L 1366,940 C 1366,1044 1369,1144 1375,1240 1342,1121 1313,1027 1287,960 L 923,0 789,0 420,960 364,1130 331,1240 334,1129 338,940 338,0 168,0 168,1409 419,1409 794,432 C 807,393 820,351 833,306 845,261 853,228 857,208 862,235 874,275 891,330 908,384 919,418 925,432 L 1293,1409 1538,1409 1538,0 1366,0 Z"/> + <glyph unicode="L" horiz-adv-x="927" d="M 168,0 L 168,1409 359,1409 359,156 1071,156 1071,0 168,0 Z"/> + <glyph unicode="I" horiz-adv-x="213" d="M 189,0 L 189,1409 380,1409 380,0 189,0 Z"/> + <glyph unicode="H" horiz-adv-x="1165" d="M 1121,0 L 1121,653 359,653 359,0 168,0 168,1409 359,1409 359,813 1121,813 1121,1409 1312,1409 1312,0 1121,0 Z"/> + <glyph unicode="F" horiz-adv-x="1006" d="M 359,1253 L 359,729 1145,729 1145,571 359,571 359,0 168,0 168,1409 1169,1409 1169,1253 359,1253 Z"/> + <glyph unicode="E" horiz-adv-x="1138" d="M 168,0 L 168,1409 1237,1409 1237,1253 359,1253 359,801 1177,801 1177,647 359,647 359,156 1278,156 1278,0 168,0 Z"/> + <glyph unicode="D" horiz-adv-x="1218" d="M 1381,719 C 1381,574 1353,447 1296,338 1239,229 1159,145 1055,87 951,29 831,0 695,0 L 168,0 168,1409 634,1409 C 873,1409 1057,1349 1187,1230 1316,1110 1381,940 1381,719 Z M 1189,719 C 1189,894 1141,1027 1046,1119 950,1210 811,1256 630,1256 L 359,1256 359,153 673,153 C 776,153 867,176 946,221 1024,266 1084,332 1126,417 1168,502 1189,603 1189,719 Z"/> + <glyph unicode="C" horiz-adv-x="1324" d="M 792,1274 C 636,1274 515,1224 428,1124 341,1023 298,886 298,711 298,538 343,400 434,295 524,190 646,137 800,137 997,137 1146,235 1245,430 L 1401,352 C 1343,231 1262,138 1157,75 1052,12 930,-20 791,-20 649,-20 526,10 423,69 319,128 240,212 186,322 131,431 104,561 104,711 104,936 165,1112 286,1239 407,1366 575,1430 790,1430 940,1430 1065,1401 1166,1342 1267,1283 1341,1196 1388,1081 L 1207,1021 C 1174,1103 1122,1166 1050,1209 977,1252 891,1274 792,1274 Z"/> + <glyph unicode="B" horiz-adv-x="1112" d="M 1258,397 C 1258,272 1212,174 1121,105 1030,35 903,0 740,0 L 168,0 168,1409 680,1409 C 1011,1409 1176,1295 1176,1067 1176,984 1153,914 1106,857 1059,800 993,762 908,743 1020,730 1106,692 1167,631 1228,569 1258,491 1258,397 Z M 984,1044 C 984,1120 958,1174 906,1207 854,1240 779,1256 680,1256 L 359,1256 359,810 680,810 C 782,810 858,829 909,868 959,906 984,965 984,1044 Z M 1065,412 C 1065,578 948,661 715,661 L 359,661 359,153 730,153 C 847,153 932,175 985,218 1038,261 1065,326 1065,412 Z"/> + <glyph unicode="A" horiz-adv-x="1377" d="M 1167,0 L 1006,412 364,412 202,0 4,0 579,1409 796,1409 1362,0 1167,0 Z M 685,1265 L 
676,1237 C 659,1182 635,1111 602,1024 L 422,561 949,561 768,1026 C 749,1072 731,1124 712,1182 L 685,1265 Z"/> + <glyph unicode=" " horiz-adv-x="556"/> + </font> + </defs> + <defs class="TextShapeIndex"> + <g ooo:slide="id1" ooo:id-list="id3 id4 id5 id6 id7 id8 id9 id10 id11 id12 id13 id14 id15 id16 id17 id18 id19 id20 id21 id22 id23 id24 id25 id26 id27 id28"/> + </defs> + <defs class="EmbeddedBulletChars"> + <g id="bullet-char-template-57356" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/> + </g> + <g id="bullet-char-template-57354" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/> + </g> + <g id="bullet-char-template-10146" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/> + </g> + <g id="bullet-char-template-10132" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/> + </g> + <g id="bullet-char-template-10007" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/> + </g> + <g id="bullet-char-template-10004" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/> + </g> + <g id="bullet-char-template-9679" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/> + </g> + <g id="bullet-char-template-8226" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/> + </g> + <g id="bullet-char-template-8211" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/> + </g> + <g id="bullet-char-template-61548" transform="scale(0.00048828125,-0.00048828125)"> + <path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/> + </g> + </defs> + <defs class="TextEmbeddedBitmaps"/> + <g> + <g id="id2" class="Master_Slide"> + <g id="bg-id2" class="Background"/> + <g id="bo-id2" class="BackgroundObjects"/> + </g> + </g> + <g class="SlideGroup"> + <g> + <g id="container-id1"> + <g id="id1" class="Slide" clip-path="url(#presentation_clip_path)"> + <g class="Page"> + <g 
class="com.sun.star.drawing.CustomShape"> + <g id="id3"> + <rect class="BoundingBox" stroke="none" fill="none" x="9227" y="2229" width="7040" height="8959"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12771,11160 L 12747,11160 12642,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12578,11160 L 12474,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12410,11160 L 12306,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12242,11160 L 12138,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12074,11160 L 11970,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11906,11160 L 11802,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11738,11160 L 11634,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11570,11160 L 11466,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11402,11160 L 11298,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11234,11160 L 11130,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11066,11160 L 10962,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10898,11160 L 10794,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10730,11160 L 10626,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10562,11160 L 10458,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10394,11160 L 10290,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10226,11160 L 10122,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10058,11160 L 9954,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9890,11160 L 9786,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9722,11160 L 9618,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9554,11160 L 9450,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9386,11160 L 9282,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,11124 L 9254,11020"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,10956 L 9254,10852"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,10788 L 9254,10684"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,10620 L 9254,10516"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,10452 L 9254,10348"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,10284 L 9254,10180"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,10116 L 9254,10012"/> + <path fill="none" 
stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,9948 L 9254,9844"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,9780 L 9254,9676"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,9612 L 9254,9508"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,9444 L 9254,9340"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,9276 L 9254,9172"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,9108 L 9254,9004"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,8940 L 9254,8836"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,8772 L 9254,8668"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,8604 L 9254,8500"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,8436 L 9254,8332"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,8268 L 9254,8164"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,8100 L 9254,7996"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,7932 L 9254,7828"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,7764 L 9254,7660"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,7596 L 9254,7492"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,7428 L 9254,7324"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,7260 L 9254,7156"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,7092 L 9254,6988"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,6924 L 9254,6820"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,6756 L 9254,6652"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,6588 L 9254,6484"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,6420 L 9254,6316"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,6252 L 9254,6148"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,6084 L 9254,5980"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,5916 L 9254,5812"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,5748 L 9254,5644"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,5580 L 9254,5476"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,5412 L 9254,5308"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,5244 L 9254,5140"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,5076 L 9254,4972"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" 
stroke-linejoin="round" d="M 9254,4908 L 9254,4804"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,4740 L 9254,4636"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,4572 L 9254,4467"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,4404 L 9254,4299"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,4236 L 9254,4131"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,4068 L 9254,3963"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,3900 L 9254,3795"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,3732 L 9254,3627"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,3564 L 9254,3459"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,3396 L 9254,3291"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,3228 L 9254,3123"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,3060 L 9254,2955"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,2892 L 9254,2787"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,2724 L 9254,2619"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,2556 L 9254,2451"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9254,2388 L 9254,2283"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9290,2256 L 9395,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9458,2256 L 9563,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9626,2256 L 9731,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9794,2256 L 9899,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 9962,2256 L 10067,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10130,2256 L 10235,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10298,2256 L 10403,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10466,2256 L 10571,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10634,2256 L 10739,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10802,2256 L 10907,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 10970,2256 L 11075,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11138,2256 L 11243,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11306,2256 L 11411,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11474,2256 L 11579,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" 
d="M 11642,2256 L 11747,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11810,2256 L 11915,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 11978,2256 L 12083,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12146,2256 L 12251,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12314,2256 L 12419,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12482,2256 L 12587,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12650,2256 L 12755,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12818,2256 L 12923,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12986,2256 L 13091,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13154,2256 L 13259,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13322,2256 L 13427,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13490,2256 L 13595,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13659,2256 L 13763,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13827,2256 L 13931,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13995,2256 L 14099,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14163,2256 L 14267,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14331,2256 L 14435,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14499,2256 L 14603,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14667,2256 L 14771,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14835,2256 L 14939,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15003,2256 L 15107,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15171,2256 L 15275,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15339,2256 L 15443,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15507,2256 L 15611,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15675,2256 L 15779,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15843,2256 L 15947,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16011,2256 L 16115,2256"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16179,2256 L 16239,2256 16239,2300"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,2364 L 16239,2468"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,2532 L 16239,2636"/> + <path fill="none" stroke="rgb(52,101,164)" 
stroke-width="53" stroke-linejoin="round" d="M 16239,2700 L 16239,2804"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,2868 L 16239,2972"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,3036 L 16239,3140"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,3204 L 16239,3308"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,3372 L 16239,3476"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,3540 L 16239,3644"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,3708 L 16239,3812"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,3876 L 16239,3980"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,4044 L 16239,4148"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,4212 L 16239,4316"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,4380 L 16239,4484"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,4548 L 16239,4652"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,4716 L 16239,4820"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,4884 L 16239,4988"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,5052 L 16239,5156"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,5220 L 16239,5324"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,5388 L 16239,5492"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,5556 L 16239,5660"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,5724 L 16239,5828"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,5892 L 16239,5996"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,6060 L 16239,6164"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,6228 L 16239,6332"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,6396 L 16239,6500"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,6564 L 16239,6668"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,6732 L 16239,6836"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,6900 L 16239,7004"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,7068 L 16239,7172"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,7236 L 16239,7340"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,7404 L 16239,7508"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,7572 L 16239,7676"/> + <path fill="none" 
stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,7740 L 16239,7844"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,7908 L 16239,8012"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,8076 L 16239,8180"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,8244 L 16239,8348"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,8412 L 16239,8516"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,8580 L 16239,8684"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,8748 L 16239,8852"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,8916 L 16239,9020"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,9084 L 16239,9188"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,9252 L 16239,9356"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,9420 L 16239,9524"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,9588 L 16239,9693"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,9756 L 16239,9861"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,9924 L 16239,10029"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,10092 L 16239,10197"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,10260 L 16239,10365"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,10428 L 16239,10533"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,10596 L 16239,10701"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,10764 L 16239,10869"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,10932 L 16239,11037"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16239,11100 L 16239,11160 16194,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 16131,11160 L 16026,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15963,11160 L 15858,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15795,11160 L 15690,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15627,11160 L 15522,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15459,11160 L 15354,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15291,11160 L 15186,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 15123,11160 L 15018,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14955,11160 L 14850,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" 
stroke-linejoin="round" d="M 14787,11160 L 14682,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14619,11160 L 14514,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14451,11160 L 14346,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14283,11160 L 14178,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 14115,11160 L 14010,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13947,11160 L 13842,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13779,11160 L 13674,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13611,11160 L 13506,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13443,11160 L 13338,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13275,11160 L 13170,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 13107,11160 L 13002,11160"/> + <path fill="none" stroke="rgb(52,101,164)" stroke-width="53" stroke-linejoin="round" d="M 12939,11160 L 12834,11160"/> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id4"> + <rect class="BoundingBox" stroke="none" fill="none" x="3128" y="2360" width="5723" height="840"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="529px" font-weight="400"><tspan class="TextPosition" x="3378" y="2963"><tspan fill="rgb(0,102,179)" stroke="none">MAIN PTHREAD</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id5"> + <rect class="BoundingBox" stroke="none" fill="none" x="9640" y="2360" width="8013" height="840"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="529px" font-weight="400"><tspan class="TextPosition" x="9890" y="2963"><tspan fill="rgb(0,102,179)" stroke="none">CONTROLLER PTHREAD</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.CustomShape"> + <g id="id6"> + <rect class="BoundingBox" stroke="none" fill="none" x="3315" y="5190" width="2797" height="1273"/> + <path fill="rgb(114,159,207)" stroke="none" d="M 4713,6461 L 3316,6461 3316,5191 6110,5191 6110,6461 4713,6461 Z"/> + <path fill="none" stroke="rgb(52,101,164)" d="M 4713,6461 L 3316,6461 3316,5191 6110,5191 6110,6461 4713,6461 Z"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="4358" y="5985"><tspan fill="rgb(0,0,0)" stroke="none">CLI</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.CustomShape"> + <g id="id7"> + <rect class="BoundingBox" stroke="none" fill="none" x="3315" y="7349" width="2797" height="1273"/> + <path fill="rgb(114,159,207)" stroke="none" d="M 4713,8620 L 3316,8620 3316,7350 6110,7350 6110,8620 4713,8620 Z"/> + <path fill="none" stroke="rgb(52,101,164)" d="M 4713,8620 L 3316,8620 3316,7350 6110,7350 6110,8620 4713,8620 Z"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="3972" y="7888"><tspan 
fill="rgb(0,0,0)" stroke="none">PATHD</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="3430" y="8400"><tspan fill="rgb(0,0,0)" stroke="none">INTERFACE</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.CustomShape"> + <g id="id8"> + <rect class="BoundingBox" stroke="none" fill="none" x="12459" y="6079" width="2797" height="1273"/> + <path fill="rgb(114,159,207)" stroke="none" d="M 13857,7350 L 12460,7350 12460,6080 15254,6080 15254,7350 13857,7350 Z"/> + <path fill="none" stroke="rgb(52,101,164)" d="M 13857,7350 L 12460,7350 12460,6080 15254,6080 15254,7350 13857,7350 Z"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="13375" y="6874"><tspan fill="rgb(0,0,0)" stroke="none">PCC</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id9"> + <rect class="BoundingBox" stroke="none" fill="none" x="1665" y="7578" width="2033" height="205"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 1956,7680 L 3406,7680"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 1665,7680 L 1970,7782 1970,7579 1665,7680 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 3697,7680 L 3393,7579 3393,7782 3697,7680 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id10"> + <rect class="BoundingBox" stroke="none" fill="none" x="1892" y="5137" width="1451" height="570"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="282px" font-weight="400"><tspan class="TextPosition" x="2142" y="5520"><tspan fill="rgb(0,0,0)" stroke="none">VTYSH</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id11"> + <rect class="BoundingBox" stroke="none" fill="none" x="1568" y="5738" width="1976" height="570"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="282px" font-weight="400"><tspan class="TextPosition" x="1818" y="6121"><tspan fill="rgb(0,0,0)" stroke="none">Northbound</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id12"> + <rect class="BoundingBox" stroke="none" fill="none" x="1738" y="7235" width="1735" height="570"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="282px" font-weight="400"><tspan class="TextPosition" x="1988" y="7618"><tspan fill="rgb(0,0,0)" stroke="none">pathd API</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.ClosedBezierShape"> + <g id="id13"> + <rect class="BoundingBox" stroke="none" fill="none" x="2299" y="2229" width="6957" height="8959"/> + <path fill="rgb(52,101,164)" stroke="none" d="M 2299,11134 L 2396,11134 2396,11160 2396,11186 2299,11186 2299,11134 Z M 2299,2230 L 2333,2230 2333,2256 2333,2283 2299,2283 2299,2230 Z M 2399,2283 L 2399,2256 2399,2230 2506,2230 2506,2256 2506,2283 2399,2283 Z M 2571,2283 L 2571,2256 2571,2230 2679,2230 2679,2256 2679,2283 2571,2283 Z M 2744,2283 L 2744,2256 2744,2230 2852,2230 2852,2256 2852,2283 2744,2283 Z M 2917,2283 L 2917,2256 2917,2230 3025,2230 3025,2256 3025,2283 2917,2283 Z M 3090,2283 L 3090,2256 3090,2230 3198,2230 3198,2256 3198,2283 3090,2283 Z M 3263,2283 L 3263,2256 3263,2230 3371,2230 
3371,2256 3371,2283 3263,2283 Z M 3436,2283 L 3436,2256 3436,2230 3543,2230 3543,2256 3543,2283 3436,2283 Z M 3609,2283 L 3609,2256 3609,2230 3716,2230 3716,2256 3716,2283 3609,2283 Z M 3782,2283 L 3782,2256 3782,2230 3889,2230 3889,2256 3889,2283 3782,2283 Z M 3955,2283 L 3955,2256 3955,2230 4062,2230 4062,2256 4062,2283 3955,2283 Z M 4128,2283 L 4128,2256 4128,2230 4235,2230 4235,2256 4235,2283 4128,2283 Z M 4301,2283 L 4301,2256 4301,2230 4408,2230 4408,2256 4408,2283 4301,2283 Z M 4473,2283 L 4473,2256 4473,2230 4581,2230 4581,2256 4581,2283 4473,2283 Z M 4646,2283 L 4646,2256 4646,2230 4754,2230 4754,2256 4754,2283 4646,2283 Z M 4819,2283 L 4819,2256 4819,2230 4927,2230 4927,2256 4927,2283 4819,2283 Z M 4992,2283 L 4992,2256 4992,2230 5100,2230 5100,2256 5100,2283 4992,2283 Z M 5165,2283 L 5165,2256 5165,2230 5273,2230 5273,2256 5273,2283 5165,2283 Z M 5338,2283 L 5338,2256 5338,2230 5445,2230 5445,2256 5445,2283 5338,2283 Z M 5511,2283 L 5511,2256 5511,2230 5618,2230 5618,2256 5618,2283 5511,2283 Z M 5684,2283 L 5684,2256 5684,2230 5791,2230 5791,2256 5791,2283 5684,2283 Z M 5857,2283 L 5857,2256 5857,2230 5964,2230 5964,2256 5964,2283 5857,2283 Z M 6030,2283 L 6030,2256 6030,2230 6137,2230 6137,2256 6137,2283 6030,2283 Z M 6203,2283 L 6203,2256 6203,2230 6310,2230 6310,2256 6310,2283 6203,2283 Z M 6375,2283 L 6375,2256 6375,2230 6483,2230 6483,2256 6483,2283 6375,2283 Z M 6548,2283 L 6548,2256 6548,2230 6656,2230 6656,2256 6656,2283 6548,2283 Z M 6721,2283 L 6721,2256 6721,2230 6829,2230 6829,2256 6829,2283 6721,2283 Z M 6894,2283 L 6894,2256 6894,2230 7002,2230 7002,2256 7002,2283 6894,2283 Z M 7067,2283 L 7067,2256 7067,2230 7175,2230 7175,2256 7175,2283 7067,2283 Z M 7240,2283 L 7240,2256 7240,2230 7347,2230 7347,2256 7347,2283 7240,2283 Z M 7413,2283 L 7413,2256 7413,2230 7520,2230 7520,2256 7520,2283 7413,2283 Z M 7586,2283 L 7586,2256 7586,2230 7693,2230 7693,2256 7693,2283 7586,2283 Z M 7759,2283 L 7759,2256 7759,2230 7866,2230 7866,2256 7866,2283 7759,2283 Z M 7932,2283 L 7932,2256 7932,2230 8039,2230 8039,2256 8039,2283 7932,2283 Z M 8105,2283 L 8105,2256 8105,2230 8212,2230 8212,2256 8212,2283 8105,2283 Z M 8277,2283 L 8277,2256 8277,2230 8385,2230 8385,2256 8385,2283 8277,2283 Z M 8450,2283 L 8450,2256 8450,2230 8558,2230 8558,2256 8558,2283 8450,2283 Z M 8623,2283 L 8623,2256 8623,2230 8731,2230 8731,2256 8731,2283 8623,2283 Z M 8796,2283 L 8796,2256 8796,2230 8904,2230 8904,2256 8904,2283 8796,2283 Z M 8969,2283 L 8969,2256 8969,2230 9077,2230 9077,2256 9077,2283 8969,2283 Z M 9142,2283 L 9142,2256 9142,2230 9227,2230 C 9232,2230 9237,2231 9241,2233 9245,2235 9248,2239 9251,2243 9253,2247 9254,2251 9254,2256 L 9254,2278 9227,2278 9227,2283 9142,2283 Z M 9200,2341 L 9227,2341 9254,2341 9254,2446 9227,2446 9200,2446 9200,2341 Z M 9200,2509 L 9227,2509 9254,2509 9254,2614 9227,2614 9200,2614 9200,2509 Z M 9200,2677 L 9227,2677 9254,2677 9254,2782 9227,2782 9200,2782 9200,2677 Z M 9200,2845 L 9227,2845 9254,2845 9254,2950 9227,2950 9200,2950 9200,2845 Z M 9200,3013 L 9227,3013 9254,3013 9254,3118 9227,3118 9200,3118 9200,3013 Z M 9200,3181 L 9227,3181 9254,3181 9254,3286 9227,3286 9200,3286 9200,3181 Z M 9200,3349 L 9227,3349 9254,3349 9254,3454 9227,3454 9200,3454 9200,3349 Z M 9200,3517 L 9227,3517 9254,3517 9254,3622 9227,3622 9200,3622 9200,3517 Z M 9200,3685 L 9227,3685 9254,3685 9254,3790 9227,3790 9200,3790 9200,3685 Z M 9200,3853 L 9227,3853 9254,3853 9254,3958 9227,3958 9200,3958 9200,3853 Z M 9200,4021 L 9227,4021 9254,4021 9254,4126 9227,4126 9200,4126 9200,4021 
Z M 9200,4189 L 9227,4189 9254,4189 9254,4294 9227,4294 9200,4294 9200,4189 Z M 9200,4357 L 9227,4357 9254,4357 9254,4462 9227,4462 9200,4462 9200,4357 Z M 9200,4525 L 9227,4525 9254,4525 9254,4630 9227,4630 9200,4630 9200,4525 Z M 9200,4693 L 9227,4693 9254,4693 9254,4798 9227,4798 9200,4798 9200,4693 Z M 9200,4861 L 9227,4861 9254,4861 9254,4966 9227,4966 9200,4966 9200,4861 Z M 9200,5029 L 9227,5029 9254,5029 9254,5134 9227,5134 9200,5134 9200,5029 Z M 9200,5197 L 9227,5197 9254,5197 9254,5302 9227,5302 9200,5302 9200,5197 Z M 9200,5365 L 9227,5365 9254,5365 9254,5470 9227,5470 9200,5470 9200,5365 Z M 9200,5533 L 9227,5533 9254,5533 9254,5638 9227,5638 9200,5638 9200,5533 Z M 9200,5701 L 9227,5701 9254,5701 9254,5806 9227,5806 9200,5806 9200,5701 Z M 9200,5869 L 9227,5869 9254,5869 9254,5974 9227,5974 9200,5974 9200,5869 Z M 9200,6037 L 9227,6037 9254,6037 9254,6142 9227,6142 9200,6142 9200,6037 Z M 9200,6205 L 9227,6205 9254,6205 9254,6310 9227,6310 9200,6310 9200,6205 Z M 9200,6373 L 9227,6373 9254,6373 9254,6478 9227,6478 9200,6478 9200,6373 Z M 9200,6541 L 9227,6541 9254,6541 9254,6646 9227,6646 9200,6646 9200,6541 Z M 9200,6709 L 9227,6709 9254,6709 9254,6814 9227,6814 9200,6814 9200,6709 Z M 9200,6877 L 9227,6877 9254,6877 9254,6982 9227,6982 9200,6982 9200,6877 Z M 9200,7045 L 9227,7045 9254,7045 9254,7150 9227,7150 9200,7150 9200,7045 Z M 9200,7213 L 9227,7213 9254,7213 9254,7318 9227,7318 9200,7318 9200,7213 Z M 9200,7381 L 9227,7381 9254,7381 9254,7486 9227,7486 9200,7486 9200,7381 Z M 9200,7549 L 9227,7549 9254,7549 9254,7654 9227,7654 9200,7654 9200,7549 Z M 9200,7717 L 9227,7717 9254,7717 9254,7822 9227,7822 9200,7822 9200,7717 Z M 9200,7886 L 9227,7886 9254,7886 9254,7990 9227,7990 9200,7990 9200,7886 Z M 9200,8054 L 9227,8054 9254,8054 9254,8158 9227,8158 9200,8158 9200,8054 Z M 9200,8222 L 9227,8222 9254,8222 9254,8326 9227,8326 9200,8326 9200,8222 Z M 9200,8390 L 9227,8390 9254,8390 9254,8494 9227,8494 9200,8494 9200,8390 Z M 9200,8558 L 9227,8558 9254,8558 9254,8662 9227,8662 9200,8662 9200,8558 Z M 9200,8726 L 9227,8726 9254,8726 9254,8830 9227,8830 9200,8830 9200,8726 Z M 9200,8894 L 9227,8894 9254,8894 9254,8998 9227,8998 9200,8998 9200,8894 Z M 9200,9062 L 9227,9062 9254,9062 9254,9166 9227,9166 9200,9166 9200,9062 Z M 9200,9230 L 9227,9230 9254,9230 9254,9334 9227,9334 9200,9334 9200,9230 Z M 9200,9398 L 9227,9398 9254,9398 9254,9502 9227,9502 9200,9502 9200,9398 Z M 9200,9566 L 9227,9566 9254,9566 9254,9670 9227,9670 9200,9670 9200,9566 Z M 9200,9734 L 9227,9734 9254,9734 9254,9838 9227,9838 9200,9838 9200,9734 Z M 9200,9902 L 9227,9902 9254,9902 9254,10006 9227,10006 9200,10006 9200,9902 Z M 9200,10070 L 9227,10070 9254,10070 9254,10174 9227,10174 9200,10174 9200,10070 Z M 9200,10238 L 9227,10238 9254,10238 9254,10342 9227,10342 9200,10342 9200,10238 Z M 9200,10406 L 9227,10406 9254,10406 9254,10510 9227,10510 9200,10510 9200,10406 Z M 9200,10574 L 9227,10574 9254,10574 9254,10678 9227,10678 9200,10678 9200,10574 Z M 9200,10742 L 9227,10742 9254,10742 9254,10846 9227,10846 9200,10846 9200,10742 Z M 9200,10910 L 9227,10910 9254,10910 9254,11014 9227,11014 9200,11014 9200,10910 Z M 9200,11078 L 9227,11078 9254,11078 9254,11160 C 9254,11165 9253,11169 9251,11173 9248,11177 9245,11181 9241,11183 9237,11185 9232,11186 9227,11186 L 9204,11186 9204,11160 9200,11160 9200,11078 Z M 9139,11134 L 9139,11160 9139,11186 9032,11186 9032,11160 9032,11134 9139,11134 Z M 8966,11134 L 8966,11160 8966,11186 8859,11186 8859,11160 8859,11134 8966,11134 Z M 8793,11134 L 8793,11160 
8793,11186 8686,11186 8686,11160 8686,11134 8793,11134 Z M 8620,11134 L 8620,11160 8620,11186 8513,11186 8513,11160 8513,11134 8620,11134 Z M 8447,11134 L 8447,11160 8447,11186 8340,11186 8340,11160 8340,11134 8447,11134 Z M 8274,11134 L 8274,11160 8274,11186 8167,11186 8167,11160 8167,11134 8274,11134 Z M 8102,11134 L 8102,11160 8102,11186 7994,11186 7994,11160 7994,11134 8102,11134 Z M 7929,11134 L 7929,11160 7929,11186 7821,11186 7821,11160 7821,11134 7929,11134 Z M 7756,11134 L 7756,11160 7756,11186 7648,11186 7648,11160 7648,11134 7756,11134 Z M 7583,11134 L 7583,11160 7583,11186 7475,11186 7475,11160 7475,11134 7583,11134 Z M 7410,11134 L 7410,11160 7410,11186 7302,11186 7302,11160 7302,11134 7410,11134 Z M 7237,11134 L 7237,11160 7237,11186 7130,11186 7130,11160 7130,11134 7237,11134 Z M 7064,11134 L 7064,11160 7064,11186 6957,11186 6957,11160 6957,11134 7064,11134 Z M 6891,11134 L 6891,11160 6891,11186 6784,11186 6784,11160 6784,11134 6891,11134 Z M 6718,11134 L 6718,11160 6718,11186 6611,11186 6611,11160 6611,11134 6718,11134 Z M 6545,11134 L 6545,11160 6545,11186 6438,11186 6438,11160 6438,11134 6545,11134 Z M 6373,11134 L 6373,11160 6373,11186 6265,11186 6265,11160 6265,11134 6373,11134 Z M 6200,11134 L 6200,11160 6200,11186 6092,11186 6092,11160 6092,11134 6200,11134 Z M 6027,11134 L 6027,11160 6027,11186 5919,11186 5919,11160 5919,11134 6027,11134 Z M 5854,11134 L 5854,11160 5854,11186 5746,11186 5746,11160 5746,11134 5854,11134 Z M 5681,11134 L 5681,11160 5681,11186 5573,11186 5573,11160 5573,11134 5681,11134 Z M 5508,11134 L 5508,11160 5508,11186 5401,11186 5401,11160 5401,11134 5508,11134 Z M 5335,11134 L 5335,11160 5335,11186 5228,11186 5228,11160 5228,11134 5335,11134 Z M 5162,11134 L 5162,11160 5162,11186 5055,11186 5055,11160 5055,11134 5162,11134 Z M 4989,11134 L 4989,11160 4989,11186 4882,11186 4882,11160 4882,11134 4989,11134 Z M 4816,11134 L 4816,11160 4816,11186 4709,11186 4709,11160 4709,11134 4816,11134 Z M 4643,11134 L 4643,11160 4643,11186 4536,11186 4536,11160 4536,11134 4643,11134 Z M 4471,11134 L 4471,11160 4471,11186 4363,11186 4363,11160 4363,11134 4471,11134 Z M 4298,11134 L 4298,11160 4298,11186 4190,11186 4190,11160 4190,11134 4298,11134 Z M 4125,11134 L 4125,11160 4125,11186 4017,11186 4017,11160 4017,11134 4125,11134 Z M 3952,11134 L 3952,11160 3952,11186 3844,11186 3844,11160 3844,11134 3952,11134 Z M 3779,11134 L 3779,11160 3779,11186 3671,11186 3671,11160 3671,11134 3779,11134 Z M 3606,11134 L 3606,11160 3606,11186 3499,11186 3499,11160 3499,11134 3606,11134 Z M 3433,11134 L 3433,11160 3433,11186 3326,11186 3326,11160 3326,11134 3433,11134 Z M 3260,11134 L 3260,11160 3260,11186 3153,11186 3153,11160 3153,11134 3260,11134 Z M 3087,11134 L 3087,11160 3087,11186 2980,11186 2980,11160 2980,11134 3087,11134 Z M 2914,11134 L 2914,11160 2914,11186 2807,11186 2807,11160 2807,11134 2914,11134 Z M 2741,11134 L 2741,11160 2741,11186 2634,11186 2634,11160 2634,11134 2741,11134 Z M 2569,11134 L 2569,11160 2569,11186 2461,11186 2461,11160 2461,11134 2569,11134 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.CustomShape"> + <g id="id14"> + <rect class="BoundingBox" stroke="none" fill="none" x="7086" y="3539" width="4194" height="1273"/> + <path fill="rgb(114,159,207)" stroke="none" d="M 9183,4810 L 7087,4810 7087,3540 11278,3540 11278,4810 9183,4810 Z"/> + <path fill="none" stroke="rgb(52,101,164)" d="M 9183,4810 L 7087,4810 7087,3540 11278,3540 11278,4810 9183,4810 Z"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, 
sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="8218" y="4078"><tspan fill="rgb(0,0,0)" stroke="none">PCEPLIB</tspan></tspan></tspan><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="7900" y="4590"><tspan fill="rgb(0,0,0)" stroke="none">INTERFACE</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id15"> + <rect class="BoundingBox" stroke="none" fill="none" x="1665" y="6105" width="2033" height="205"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 1956,6207 L 3406,6207"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 1665,6207 L 1970,6309 1970,6106 1665,6207 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 3697,6207 L 3393,6106 3393,6309 3697,6207 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id16"> + <rect class="BoundingBox" stroke="none" fill="none" x="1665" y="5470" width="2033" height="205"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 1956,5572 L 3406,5572"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 1665,5572 L 1970,5674 1970,5471 1665,5572 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 3697,5572 L 3393,5471 3393,5674 3697,5572 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.CustomShape"> + <g id="id17"> + <rect class="BoundingBox" stroke="none" fill="none" x="4584" y="9553" width="9528" height="1146"/> + <path fill="rgb(114,159,207)" stroke="none" d="M 9348,10697 L 4585,10697 4585,9554 14110,9554 14110,10697 9348,10697 Z"/> + <path fill="none" stroke="rgb(52,101,164)" d="M 9348,10697 L 4585,10697 4585,9554 14110,9554 14110,10697 9348,10697 Z"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="459px" font-weight="400"><tspan class="TextPosition" x="7786" y="10285"><tspan fill="rgb(0,0,0)" stroke="none">CONTROLLER</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id18"> + <rect class="BoundingBox" stroke="none" fill="none" x="6491" y="9448" width="2383" height="570"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="282px" font-weight="400"><tspan class="TextPosition" x="6741" y="9831"><tspan fill="rgb(0,0,0)" stroke="none">pcep_ctrl_XXX</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id19"> + <rect class="BoundingBox" stroke="none" fill="none" x="9920" y="9448" width="2802" height="570"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="282px" font-weight="400"><tspan class="TextPosition" x="10170" y="9831"><tspan fill="rgb(0,0,0)" stroke="none">pcep_thread_XXX</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id20"> + <rect class="BoundingBox" stroke="none" fill="none" x="5953" y="8366" width="1682" height="1271"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 6185,8541 L 7402,9461"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 5953,8366 L 6135,8631 6257,8469 5953,8366 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 7634,9636 L 7452,9371 7330,9533 7634,9636 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id21"> + <rect class="BoundingBox" stroke="none" 
fill="none" x="5953" y="6207" width="2287" height="3430"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 6114,6449 L 8078,9394"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 5953,6207 L 6037,6517 6206,6404 5953,6207 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 8239,9636 L 8155,9326 7986,9439 8239,9636 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id22"> + <rect class="BoundingBox" stroke="none" fill="none" x="5953" y="4683" width="1652" height="763"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 6217,5323 L 7340,4805"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 5953,5445 L 6272,5410 6187,5225 5953,5445 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 7604,4683 L 7285,4718 7370,4903 7604,4683 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id23"> + <rect class="BoundingBox" stroke="none" fill="none" x="5953" y="4683" width="2287" height="3049"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 6128,7498 L 8064,4916"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 5953,7731 L 6217,7548 6054,7427 5953,7731 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 8239,4683 L 7975,4866 8138,4988 8239,4683 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id24"> + <rect class="BoundingBox" stroke="none" fill="none" x="8772" y="1635" width="205" height="2033"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 8874,3376 L 8874,1926"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 8874,3667 L 8976,3363 8773,3363 8874,3667 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 8874,1635 L 8773,1940 8976,1940 8874,1635 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.TextShape"> + <g id="id25"> + <rect class="BoundingBox" stroke="none" fill="none" x="8347" y="1127" width="1925" height="570"/> + <text class="TextShape"><tspan class="TextParagraph" font-family="Liberation Sans, sans-serif" font-size="282px" font-weight="400"><tspan class="TextPosition" x="8597" y="1510"><tspan fill="rgb(0,0,0)" stroke="none">pceplib API</tspan></tspan></tspan></text> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id26"> + <rect class="BoundingBox" stroke="none" fill="none" x="11033" y="4683" width="1779" height="1525"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 11254,4872 L 12590,6018"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 11033,4683 L 11198,4958 11330,4804 11033,4683 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 12811,6207 L 12646,5932 12514,6086 12811,6207 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id27"> + <rect class="BoundingBox" stroke="none" fill="none" x="11063" y="7223" width="1749" height="2414"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 11234,9400 L 12640,7459"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 11063,9636 L 11324,9449 11159,9330 11063,9636 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 12811,7223 L 12550,7410 12715,7529 12811,7223 Z"/> + </g> + </g> + <g class="com.sun.star.drawing.LineShape"> + <g id="id28"> + <rect class="BoundingBox" stroke="none" fill="none" x="9534" y="1635" width="205" height="2033"/> + <path fill="none" stroke="rgb(102,102,102)" stroke-width="35" stroke-linejoin="round" d="M 
9636,3376 L 9636,1926"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 9636,3667 L 9738,3363 9535,3363 9636,3667 Z"/> + <path fill="rgb(102,102,102)" stroke="none" d="M 9636,1635 L 9535,1940 9738,1940 9636,1635 Z"/> + </g> + </g> + </g> + </g> + </g> + </g> + </g> +</svg>
\ No newline at end of file diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index c65f1144eb..cb97ee22df 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -31,12 +31,23 @@ be specified (:ref:`common-invocation-options`). .. option:: -l, --listenon - Specify a specific IP address for bgpd to listen on, rather than its default + Specify specific IP addresses for bgpd to listen on, rather than its default of ``0.0.0.0`` / ``::``. This can be useful to constrain bgpd to an internal - address, or to run multiple bgpd processes on one host. + address, or to run multiple bgpd processes on one host. Multiple addresses + can be specified. + + In the following example, bgpd is started listening for connections on the + addresses 100.0.1.2 and fd00::2:2. The options -d (runs in daemon mode) and + -f (uses a specific configuration file) are also used in this example as we + are likely to run multiple bgpd instances, each one with different + configurations, when using the -l option. Note that this option implies the --no_kernel option, and no learned routes will be installed into the linux kernel. +.. code-block:: shell + + # /usr/lib/frr/bgpd -d -f /some-folder/bgpd.conf -l 100.0.1.2 -l fd00::2:2 + .. option:: -n, --no_kernel Do not install learned routes into the linux kernel. This option is useful @@ -424,10 +435,14 @@ Require policy on EBGP .. clicmd:: [no] bgp ebgp-requires-policy This command requires incoming and outgoing filters to be applied - for eBGP sessions. Without the incoming filter, no routes will be - accepted. Without the outgoing filter, no routes will be announced. + for eBGP sessions as part of RFC-8212 compliance. Without the incoming + filter, no routes will be accepted. Without the outgoing filter, no + routes will be announced. - This is enabled by default. + This is enabled by default for the traditional configuration and + turned off by default for datacenter configuration. + + When you enable/disable this option you MUST clear the session. When the incoming or outgoing filter is missing you will see "(Policy)" sign under ``show bgp summary``: @@ -446,6 +461,22 @@ Require policy on EBGP 192.168.0.2 4 65002 8 10 0 0 0 00:03:09 5 (Policy) fe80:1::2222 4 65002 9 11 0 0 0 00:03:09 (Policy) (Policy) + Additionally, a `show bgp neighbor` command would indicate in the `For address family:` + block that: + + .. code-block:: frr + + exit1# show bgp neighbor + ... + For address family: IPv4 Unicast + Update group 1, subgroup 1 + Packet Queue length 0 + Inbound soft reconfiguration allowed + Community attribute sent to this neighbor(all) + Inbound updates discarded due to missing policy + Outbound updates discarded due to missing policy + 0 accepted prefixes + Reject routes with AS_SET or AS_CONFED_SET types ------------------------------------------------ @@ -454,6 +485,17 @@ Reject routes with AS_SET or AS_CONFED_SET types This command enables rejection of incoming and outgoing routes having AS_SET or AS_CONFED_SET type. +Suppress duplicate updates +-------------------------- + +.. index:: bgp suppress-duplicates +.. clicmd:: [no] bgp suppress-duplicates + + For example, BGP routers can generate multiple identical announcements with + empty community attributes if stripped at egress. This is an undesired behavior. + Suppress duplicate updates if the route has not actually changed. + Default: enabled.
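As an illustration only (the AS number 65001 below is arbitrary and not taken from this document), toggling the knob under ``router bgp`` could be sketched as follows; since the feature defaults to enabled, the example shows turning it off and then re-enabling it explicitly:

.. code-block:: frr

   router bgp 65001
    ! turn the default duplicate suppression off ...
    no bgp suppress-duplicates
    ! ... or re-enable it explicitly
    bgp suppress-duplicates
   !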
+ Disable checking if nexthop is connected on EBGP sessions --------------------------------------------------------- @@ -469,28 +511,57 @@ Disable checking if nexthop is connected on EBGP sessions Route Flap Dampening -------------------- -.. clicmd:: bgp dampening (1-45) (1-20000) (1-20000) (1-255) +.. index:: [no] bgp dampening [(1-45) [(1-20000) (1-20000) (1-255)]] +.. clicmd:: [no] bgp dampening [(1-45) [(1-20000) (1-20000) (1-255)]] + + This command enables (with optionally specified dampening parameters) or + disables route-flap dampening for all routes of a BGP instance. - This command enables BGP route-flap dampening and specifies dampening parameters. +.. index:: [no] neighbor PEER dampening [(1-45) [(1-20000) (1-20000) (1-255)]] +.. clicmd:: [no] neighbor PEER dampening [(1-45) [(1-20000) (1-20000) (1-255)]] + + This command enables (with optionally specified dampening parameters) or + disables route-flap dampening for all routes learned from a BGP peer. + +.. index:: [no] neighbor GROUP dampening [(1-45) [(1-20000) (1-20000) (1-255)]] +.. clicmd:: [no] neighbor GROUP dampening [(1-45) [(1-20000) (1-20000) (1-255)]] + + This command enables (with optionally specified dampening parameters) or + disables route-flap dampening for all routes learned from peers of a peer + group. half-life - Half-life time for the penalty + Half-life time for the penalty in minutes (default value: 15). reuse-threshold - Value to start reusing a route + Value to start reusing a route (default value: 750). suppress-threshold - Value to start suppressing a route + Value to start suppressing a route (default value: 2000). max-suppress - Maximum duration to suppress a stable route + Maximum duration to suppress a stable route in minutes (default value: + 60). The route-flap damping algorithm is compatible with :rfc:`2439`. The use of - this command is not recommended nowadays. + these commands is not recommended nowadays. At the moment, route-flap dampening is not working per VRF and is working only for IPv4 unicast and multicast. + With different parameter sets configurable for BGP instances, peer groups and + peers, the active dampening profile for a route is chosen on the fly, + allowing for various changes in configuration (e.g. peer group memberships) + during runtime. The parameter sets take precedence in the following + order: + + 1. Peer + 2. Peer group + 3. BGP instance + + The negating commands do not allow excluding a peer/peer group from a peer + group/BGP instance configuration. + .. seealso:: https://www.ripe.net/publications/docs/ripe-378 @@ -798,6 +869,38 @@ The following functionality is provided by graceful restart: <---------------------------------------------------------------------> +.. _bgp-GR-preserve-forwarding-state: + +BGP-GR Preserve-Forwarding State +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The BGP OPEN message carrying optional capabilities for Graceful Restart has +an 8-bit “Flags for Address Family” field for a given AFI and SAFI. This field contains +bit flags relating to routes that were advertised with the given AFI and SAFI. + +.. code-block:: frr + + 0 1 2 3 4 5 6 7 + +-+-+-+-+-+-+-+-+ + |F| Reserved | + +-+-+-+-+-+-+-+-+ + +The most significant bit is defined as the Forwarding State (F) bit, which +can be used to indicate whether the forwarding state for routes that were +advertised with the given AFI and SAFI has indeed been preserved during the +previous BGP restart. When set (value 1), the bit indicates that the +forwarding state has been preserved.
+The remaining bits are reserved and MUST be set to zero by the sender and +ignored by the receiver. + +.. index:: bgp graceful-restart preserve-fw-state +.. clicmd:: bgp graceful-restart preserve-fw-state + +FRR gives us the option to enable/disable the "F" flag using this specific +vty command. However, it doesn't have the option to enable/disable +this flag only for a specific AFI/SAFI, i.e. when this command is used, it +applies to all the supported AFI/SAFI combinations for this peer. + .. _bgp-end-of-rib-message: End-of-RIB (EOR) message @@ -851,6 +954,19 @@ However, it MUST defer route selection for an address family until it either. This command will set the time for which stale routes are kept in the RIB. +.. index:: bgp graceful-restart stalepath-time (1-4095) +.. clicmd:: bgp graceful-restart stalepath-time (1-4095) + + This command will set the max time (in seconds) to hold onto a + restarting peer's stale paths. + + It also controls the Enhanced Route-Refresh timer. + + If this command is configured and the router does not receive a Route-Refresh EoRR + message, the router removes the stale routes from the BGP table after the timer + expires. The stale path timer is started when the router receives a Route-Refresh + BoRR message. + .. _bgp-per-peer-graceful-restart: BGP Per Peer Graceful Restart @@ -1372,6 +1488,9 @@ Configuring Peers directly connected and this knob is not enabled, the session will not establish. + If the peer's IP address is not in the RIB and is reachable via the + default route, then you have to enable ``ip nht resolve-via-default``. + .. index:: neighbor PEER description ... .. clicmd:: [no] neighbor PEER description ... @@ -1596,7 +1715,7 @@ Configuring Peers peer in question. This number is between 0 and 600 seconds, with the default advertisement interval being 0. -.. index:: [no] neighbor PEER timers delayopen (1-240) +.. index:: neighbor PEER timers delayopen (1-240) .. clicmd:: [no] neighbor PEER timers delayopen (1-240) This command allows the user to enable the @@ -1871,9 +1990,9 @@ is 4 octet long. The following format is used to define the community value. ``0xFFFF029A`` ``65535:666``. :rfc:`7999` documents sending prefixes to EBGP peers and upstream for the purpose of blackholing traffic. Prefixes tagged with this community should normally not be - re-advertised from neighbors of the originating network. It is - recommended upon receiving prefixes tagged with this community to - add ``NO_EXPORT`` and ``NO_ADVERTISE``. + re-advertised from neighbors of the originating network. Upon receiving + the ``BLACKHOLE`` community from a BGP speaker, the ``NO_ADVERTISE`` community + is added automatically. ``no-export`` ``no-export`` represents well-known communities value ``NO_EXPORT`` @@ -2644,11 +2763,11 @@ Ethernet Segments An Ethernet Segment can be configured by specifying a system-MAC and a local discriminator against the bond interface on the PE (via zebra) - -.. index:: evpn mh es-id [(1-16777215)$es_lid] -.. clicmd:: [no] evpn mh es-id [(1-16777215)$es_lid] +.. index:: evpn mh es-id (1-16777215) +.. clicmd:: [no] evpn mh es-id (1-16777215) -.. index:: evpn mh es-sys-mac [X:X:X:X:X:X$mac] -.. clicmd:: [no$no] evpn mh es-sys-mac [X:X:X:X:X:X$mac] +.. index:: evpn mh es-sys-mac X:X:X:X:X:X +.. clicmd:: [no] evpn mh es-sys-mac X:X:X:X:X:X The sys-mac and local discriminator are used for generating a 10-byte, Type-3 Ethernet Segment ID. @@ -2671,8 +2790,8 @@ forward BUM traffic received via the overlay network.
This implementation uses a preference based DF election specified by draft-ietf-bess-evpn-pref-df. The DF preference is configurable per-ES (via zebra) - -.. index:: evpn mh es-df-pref [(1-16777215)$df_pref] -.. clicmd:: [no] evpn mh es-df-pref [(1-16777215)$df_pref] +.. index:: evpn mh es-df-pref (1-16777215) +.. clicmd:: [no] evpn mh es-df-pref (1-16777215) BUM traffic is rxed via the overlay by all PEs attached to a server but only the DF can forward the de-capsulated traffic to the access port. To @@ -2682,6 +2801,20 @@ the traffic. Similarly traffic received from ES peers via the overlay cannot be forwarded to the server. This is split-horizon-filtering with local bias. +Knobs for interop +""""""""""""""""" +Some vendors do not send EAD-per-EVI routes. To interop with them we +need to relax the dependency on EAD-per-EVI routes and activate a remote +ES-PE based on just the EAD-per-ES route. + +Note that by default we advertise and expect EAD-per-EVI routes. + +.. index:: disable-ead-evi-rx +.. clicmd:: [no] disable-ead-evi-rx + +.. index:: disable-ead-evi-tx +.. clicmd:: [no] disable-ead-evi-tx + Fast failover """"""""""""" As the primary purpose of EVPN-MH is redundancy keeping the failover efficient @@ -2695,14 +2828,14 @@ been introduced for the express purpose of efficient ES failovers. on via the following BGP config - .. index:: use-es-l3nhg -.. clicmd:: [no$no] use-es-l3nhg +.. clicmd:: [no] use-es-l3nhg - Local ES (MAC/Neigh) failover via ES-redirect. On dataplanes that do not have support for ES-redirect the feature can be turned off via the following zebra config - .. index:: evpn mh redirect-off -.. clicmd:: [no$no] evpn mh redirect-off +.. clicmd:: [no] evpn mh redirect-off Uplink/Core tracking """""""""""""""""""" @@ -2723,11 +2856,11 @@ the ES peer (PE2) goes down PE1 continues to advertise hosts learnt from PE2 for a holdtime during which it attempts to establish local reachability of the host. This holdtime is configurable via the following zebra commands - -.. index:: evpn mh neigh-holdtime (0-86400)$duration -.. clicmd:: [no$no] evpn mh neigh-holdtime (0-86400)$duration +.. index:: evpn mh neigh-holdtime (0-86400) +.. clicmd:: [no] evpn mh neigh-holdtime (0-86400) -.. index:: evpn mh mac-holdtime (0-86400)$duration -.. clicmd:: [no$no] evpn mh mac-holdtime (0-86400)$duration +.. index:: evpn mh mac-holdtime (0-86400) +.. clicmd:: [no] evpn mh mac-holdtime (0-86400) Startup delay """"""""""""" @@ -2736,8 +2869,8 @@ and EVPN network to converge before enabling the ESs. For this duration the ES bonds are held protodown. The startup delay is configurable via the following zebra command - -.. index:: evpn mh startup-delay(0-3600)$duration -.. clicmd:: [no] evpn mh startup-delay(0-3600)$duration +.. index:: evpn mh startup-delay (0-3600) +.. clicmd:: [no] evpn mh startup-delay (0-3600) +Support with VRF network namespace backend +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -3199,8 +3332,8 @@ Some other commands provide additional options for filtering the output. This command displays BGP routes using AS path regular expression (:ref:`bgp-regular-expressions`). -.. index:: show [ip] bgp [all] summary [json] -.. clicmd:: show [ip] bgp [all] summary [json] +.. index:: show [ip] bgp [all] summary [wide] [json] +.. clicmd:: show [ip] bgp [all] summary [wide] [json] Show a bgp peer summary for the specified address family. @@ -3209,6 +3342,25 @@ and should no longer be used. 
In order to reach the other BGP routing tables other than the IPv6 routing table given by :clicmd:`show bgp`, the new command structure is extended with :clicmd:`show bgp [afi] [safi]`. +The ``wide`` option gives more output, such as ``LocalAS``, and extends ``Desc`` to +64 characters. + + .. code-block:: frr + + exit1# show ip bgp summary wide + + IPv4 Unicast Summary: + BGP router identifier 192.168.100.1, local AS number 65534 vrf-id 0 + BGP table version 3 + RIB entries 5, using 920 bytes of memory + Peers 1, using 27 KiB of memory + + Neighbor V AS LocalAS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc + 192.168.0.2 4 65030 123 15 22 0 0 0 00:07:00 0 1 us-east1-rs1.frrouting.org + + Total number of neighbors 1 + exit1# + .. index:: show bgp [afi] [safi] [all] [wide|json] .. clicmd:: show bgp [afi] [safi] [all] [wide|json] @@ -3341,6 +3493,32 @@ attribute. If ``json`` option is specified, output is displayed in JSON format. +.. index:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json] +.. clicmd:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json] + + These commands display information about the BGP labelpool used for + the association of MPLS labels with routes for L3VPN and Labeled Unicast. + + If ``chunks`` option is specified, output shows the current list of label + chunks granted to BGP by Zebra, indicating the start and end label in + each chunk. + + If ``inuse`` option is specified, output shows the current inuse list of + label to prefix mappings. + + If ``ledger`` option is specified, output shows the ledger list of all + label requests made per prefix. + + If ``requests`` option is specified, output shows the current list of label + requests which have not yet been fulfilled by the labelpool. + + If ``summary`` option is specified, output is a summary of the counts for + the chunks, inuse, ledger and requests lists along with the count of + outstanding chunk requests to Zebra and the number of zebra reconnects + that have happened. + + If ``json`` option is specified, output is displayed in JSON format. + .. _bgp-display-routes-by-lcommunity: Displaying Routes by Large Community Attribute @@ -3470,10 +3648,10 @@ starting the daemon and the configuration gets saved, the option will persist unless removed from the configuration with the negating command prior to the configuration write operation. -.. index:: [no] bgp send-extra-data zebra +.. index:: bgp send-extra-data zebra .. clicmd:: [no] bgp send-extra-data zebra - This Command turns off the ability of BGP to send extra data to zebra. +This Command turns off the ability of BGP to send extra data to zebra. In this case it's the AS-Path being used for the path. The default behavior in BGP is to send this data and to turn it off enter the no form of the command. If extra data was sent to zebra, and this command is turned on there is no diff --git a/doc/user/fabricd.rst b/doc/user/fabricd.rst index a74d3e098b..17a51ccb3c 100644 --- a/doc/user/fabricd.rst +++ b/doc/user/fabricd.rst @@ -57,6 +57,19 @@ in the configuration: Configure the authentication password for a domain, as clear text or md5 one. +.. index:: attached-bit [receive ignore | send] +.. clicmd:: attached-bit [receive ignore | send] + +.. index:: attached-bit +.. clicmd:: no attached-bit + + Set attached bit for inter-area traffic: + + - receive + If LSP received with attached bit set, create default route to neighbor + - send + If L1|L2 router, set attached bit in LSP sent to L1 router + .. index:: log-adjacency-changes ..
clicmd:: log-adjacency-changes @@ -64,7 +77,7 @@ in the configuration: .. clicmd:: no log-adjacency-changes Log changes in adjacency state. - + .. index:: set-overload-bit .. clicmd:: set-overload-bit diff --git a/doc/user/index.rst b/doc/user/index.rst index 8ac997f8dd..7b9464668b 100644 --- a/doc/user/index.rst +++ b/doc/user/index.rst @@ -29,6 +29,7 @@ Basics ipv6 kernel snmp + scripting .. modules ######### @@ -50,6 +51,7 @@ Protocols nhrpd ospfd ospf6d + pathd pim pbr ripd diff --git a/doc/user/installation.rst b/doc/user/installation.rst index 382d71b71f..a13e6ce43b 100644 --- a/doc/user/installation.rst +++ b/doc/user/installation.rst @@ -362,6 +362,10 @@ options from the list below. Set hardcoded rpaths in the executable [default=yes]. +.. option:: --enable-scripting + + Enable Lua scripting [default=no]. + You may specify any combination of the above options to the configure script. By default, the executables are placed in :file:`/usr/local/sbin` and the configuration files in :file:`/usr/local/etc`. The :file:`/usr/local/` @@ -382,6 +386,10 @@ options to the configuration script. Configure zebra to use `dir` for local state files, such as pid files and unix sockets. +.. option:: --with-scriptdir <dir> + + Look for Lua scripts in ``dir`` [``prefix``/etc/frr/scripts]. + .. option:: --with-yangmodelsdir <dir> Look for YANG modules in `dir` [`prefix`/share/yang]. Note that the FRR diff --git a/doc/user/isisd.rst b/doc/user/isisd.rst index f991e3f073..352701728d 100644 --- a/doc/user/isisd.rst +++ b/doc/user/isisd.rst @@ -72,6 +72,19 @@ writing, *isisd* does not support multiple ISIS processes. Configure the authentication password for an area, respectively a domain, as clear text or md5 one. +.. index:: attached-bit [receive ignore | send] +.. clicmd:: attached-bit [receive ignore | send] + +.. index:: attached-bit +.. clicmd:: no attached-bit + + Set attached bit for inter-area traffic: + + - receive + If LSP received with attached bit set, create default route to neighbor + - send + If L1|L2 router, set attached bit in LSP sent to L1 router + .. index:: log-adjacency-changes .. clicmd:: log-adjacency-changes @@ -204,6 +217,12 @@ ISIS Fast-Reroute Disable load sharing across multiple LFA backups. +.. index:: fast-reroute remote-lfa prefix-list WORD [level-1 | level-2] +.. clicmd:: [no] fast-reroute remote-lfa prefix-list [WORD] [level-1 | level-2] + + Configure a prefix-list to select eligible PQ nodes (valid for all protected + interfaces). + .. _isis-region: ISIS region @@ -400,6 +419,18 @@ ISIS interface Enable per-prefix TI-LFA fast reroute link or node protection. +.. index:: isis fast-reroute remote-lfa tunnel mpls-ldp [level-1 | level-2] +.. clicmd:: [no] isis fast-reroute remote-lfa tunnel mpls-ldp [level-1 | level-2] + + Enable per-prefix Remote LFA fast reroute link protection. Note that other + routers in the network need to be configured to accept LDP targeted hello + messages in order for RLFA to work. + +.. index:: isis fast-reroute remote-lfa maximum-metric (1-16777215) [level-1 | level-2] +.. clicmd:: [no] isis fast-reroute remote-lfa maximum-metric (1-16777215) [level-1 | level-2] + + Limit Remote LFA PQ node selection within the specified metric. + .. _showing-isis-information: Showing ISIS information diff --git a/doc/user/nhrpd.rst b/doc/user/nhrpd.rst index 9caeb0eedb..65645c519d 100644 --- a/doc/user/nhrpd.rst +++ b/doc/user/nhrpd.rst @@ -180,14 +180,15 @@ Integration with IKE nhrpd needs tight integration with IKE daemon for various reasons. 
Currently only strongSwan is supported as the IKE daemon. -nhrpd connects to strongSwan using VICI protocol based on UNIX socket -(hardcoded now as /var/run/charon.vici). +nhrpd connects to strongSwan using the VICI protocol over a UNIX socket, which +can be configured using the command below (defaults to /var/run/charon.vici). strongSwan currently needs a few patches applied. Please check out the -https://git.alpinelinux.org/user/tteras/strongswan/log/?h=tteras-release -and -https://git.alpinelinux.org/user/tteras/strongswan/log/?h=tteras -git repositories for the patches. +original patches at: +https://git-old.alpinelinux.org/user/tteras/strongswan/ + +Actively maintained patches are also available at: +https://gitlab.alpinelinux.org/alpine/aports/-/tree/master/main/strongswan .. _nhrp-events: diff --git a/doc/user/ospf6d.rst b/doc/user/ospf6d.rst index 4f0ff90943..99119bb7e5 100644 --- a/doc/user/ospf6d.rst +++ b/doc/user/ospf6d.rst @@ -83,6 +83,12 @@ OSPF6 router This configuration setting MUST be consistent across all routers within the OSPF domain. +.. index:: maximum-paths (1-64) +.. clicmd:: [no] maximum-paths (1-64) + + Use this command to control the maximum number of parallel routes that + OSPFv3 can support. The default is 64. + .. _ospf6-area: OSPF6 area @@ -170,10 +176,34 @@ Showing OSPF6 information instance ID, simply type "show ipv6 ospf6 <cr>". JSON output can be obtained by appending 'json' to the end of command. -.. index:: show ipv6 ospf6 database -.. clicmd:: show ipv6 ospf6 database +.. index:: show ipv6 ospf6 database [<detail|dump|internal>] [json] +.. clicmd:: show ipv6 ospf6 database [<detail|dump|internal>] [json] + + This command shows LSAs present in the LSDB. There are three view options. + These options help in viewing all the parameters of the LSAs. JSON output + can be obtained by appending 'json' to the end of the command. The JSON option is + not applicable with the 'dump' option. + +.. index:: show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [json] +.. clicmd:: show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [json] + + These options filter out LSAs based on their type. The three view options + work here as well. JSON output can be obtained by appending 'json' to the + end of the command. + +.. index:: show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [json] +.. clicmd:: show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [json] + + The LSAs can additionally be filtered by the linkstate-id and + advertising-router fields. We can use the LSA type filter and views with + this command as well, and vice versa. JSON output can be obtained by + appending 'json' to the end of the command. - This command shows LSA database summary. You can specify the type of LSA. +.. index:: show ipv6 ospf6 database self-originated [json] +.. clicmd:: show ipv6 ospf6 database self-originated [json] + + This command is used to filter the LSAs which are originated by the present + router. All the other filters are applicable here as well. .. index:: show ipv6 ospf6 interface [json] .. clicmd:: show ipv6 ospf6 interface [json] @@ -216,6 +246,28 @@ Showing OSPF6 information Shows the routes which are redistributed by the router. JSON output can be obtained by appending 'json' at the end. +.. index:: show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>] [json] +..
clicmd:: show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>] [json] + + This command displays the OSPFv3 routing table as determined by the most + recent SPF calculations. Options are provided to view the different types + of routes. Other than the standard view, there are two other options, detail + and summary. JSON output can be obtained by appending 'json' to the end of + the command. + +.. index:: show ipv6 ospf6 route X:X::X:X/M match [detail] [json] +.. clicmd:: show ipv6 ospf6 route X:X::X:X/M match [detail] [json] + + The additional match option will match the given address against the destination + of the routes, and return the result accordingly. + +.. index:: show ipv6 ospf6 interface [IFNAME] prefix [detail|<X:X::X:X|X:X::X:X/M> [<match|detail>]] [json] +.. clicmd:: show ipv6 ospf6 interface [IFNAME] prefix [detail|<X:X::X:X|X:X::X:X/M> [<match|detail>]] [json] + + This command shows the prefixes present in the interface routing table. + An interface name can also be given. JSON output can be obtained by appending + 'json' to the end of the command. + OSPF6 Configuration Examples ============================ diff --git a/doc/user/ospfd.rst b/doc/user/ospfd.rst index cbde0fd46f..ee02a9dae5 100644 --- a/doc/user/ospfd.rst +++ b/doc/user/ospfd.rst @@ -322,6 +322,23 @@ To start OSPF process you have to specify the OSPF router. This feature is enabled by default. +.. index:: clear ip ospf [(1-65535)] process +.. clicmd:: clear ip ospf [(1-65535)] process + + This command can be used to clear the ospf process data structures. This + will clear the ospf neighborship as well and it will get re-established. + This will clear the LSDB too. This is helpful when there is a change + in router-id and the user wants the change to take effect; the user can + use this CLI instead of restarting the ospfd daemon. + +.. index:: clear ip ospf [(1-65535)] neighbor +.. clicmd:: clear ip ospf [(1-65535)] neighbor + + This command can be used to clear the ospf neighbor data structures. This + will clear the ospf neighborship and it will get re-established. This + command can be used when the neighbor state gets stuck in some state and + helps to recover it from that state. + .. _ospf-area: Areas @@ -1216,6 +1233,20 @@ Summary Route will be originated on-behalf of all matched external LSAs. Show configuration for display all configured summary routes with matching external LSA information. +TI-LFA +====== + +Experimental support for Topology Independent LFA (Loop-Free Alternate), see +for example 'draft-bashandy-rtgwg-segment-routing-ti-lfa-05'. Note that +TI-LFA requires a proper Segment Routing configuration. + +.. index:: fast-reroute ti-lfa [node-protection] +.. clicmd:: fast-reroute ti-lfa [node-protection] + + Configured at the router level. Activates TI-LFA for all interfaces. + + Note that so far only P2P interfaces are supported. + Debugging OSPF ============== diff --git a/doc/user/overview.rst b/doc/user/overview.rst index a2ce67068f..f67698e404 100644 --- a/doc/user/overview.rst +++ b/doc/user/overview.rst @@ -321,6 +321,8 @@ BGP :t:`The Resource Public Key Infrastructure (RPKI) to Router Protocol. R. Bush, R. Austein. January 2013.` - :rfc:`6811` :t:`BGP Prefix Origin Validation. P. Mohapatra, J. Scudder, D. Ward, R. Bush, R. Austein. January 2013.` +- :rfc:`7313` + :t:`Enhanced Route Refresh Capability for BGP-4. K. Patel, E. Chen, B. Venkatachalapathy. July 2014.` - :rfc:`7606` :t:`Revised Error Handling for BGP UPDATE Messages. E.
Chen, J. Scudder, P. Mohapatra, K. Patel. August 2015.` - :rfc:`7607` diff --git a/doc/user/pathd.rst b/doc/user/pathd.rst new file mode 100644 index 0000000000..0815a6c414 --- /dev/null +++ b/doc/user/pathd.rst @@ -0,0 +1,443 @@ +.. _path: + +**** +PATH +**** + +:abbr:`PATH` is a daemon that handles the installation and deletion +of Segment Routing (SR) Policies. + + +.. _starting-path: + +Starting PATH +============= + +Default configuration file for *pathd* is :file:`pathd.conf`. The typical +location of :file:`pathd.conf` is |INSTALL_PREFIX_ETC|/pathd.conf. + +If the user is using integrated config, then :file:`pathd.conf` need not be +present and the :file:`frr.conf` is read instead. + +.. program:: pathd + +:abbr:`PATH` supports all the common FRR daemon start options which are +documented elsewhere. + + +PCEP Support +============ + +To build the PCC for pathd, the externall library `pceplib 1.2 <https://github.com/volta-networks/pceplib/tree/devel-1.2>`_ is required. + +To build FRR with support for PCEP the following steps must be followed: + + - Checkout and build pceplib: + +``` +$ git clone https://github.com/volta-networks/pceplib +$ cd pceplib +$ make +$ make install +$ export PCEPLIB_ROOT=$PWD +``` + + - Configure FRR with the extra parameters: + +``` +--enable-pcep LDFLAGS="-L${PCEPLIB_ROOT}/install/lib" CPPFLAGS="-I${PCEPLIB_ROOT}/install/include" +``` + +To start pathd with pcep support the extra parameter `-M pathd_pcep` should be +passed to the pathd daemon. + + +Pathd Configuration +=================== + +Example: + +.. code-block:: frr + + debug pathd pcep basic + segment-routing + traffic-eng + segment-list SL1 + index 10 mpls label 16010 + index 20 mpls label 16030 + ! + policy color 1 endpoint 1.1.1.1 + name default + binding-sid 4000 + candidate-path preference 100 name CP1 explicit segment-list SL1 + candidate-path preference 200 name CP2 dynamic + affinity include-any 0x000000FF + bandwidth 100000 + metric bound msd 16 required + metric te 10 + objective-function mcp required + ! + pcep + pce-config GROUP1 + source-address 1.1.1.1 + tcp-md5-auth secret + timer keep-alive 30 + ! + pce PCE1 + config GROUP1 + address ip 10.10.10.10 + ! + pce PCE2 + config GROUP1 + address ip 9.9.9.9 + ! + pcc + peer PCE1 precedence 10 + peer PCE2 precedence 20 + ! + ! + ! + ! + + +.. _path-commands: + +Configuration Commands +---------------------- + +.. index:: segment-routing +.. clicmd:: segment-routing + + Configure segment routing. + +.. index:: traffic-eng +.. clicmd:: traffic-eng + + Configure segment routing traffic engineering. + +.. index:: segment-list NAME +.. clicmd:: [no] segment-list NAME + + Delete or start a segment list definition. + + +.. index:: index INDEX mpls label LABEL [nai node ADDRESS] +.. clicmd:: [no] index INDEX mpls label LABEL [nai node ADDRESS] + + Delete or specify a segment in a segment list definition. + + +.. index:: policy color COLOR endpoint ENDPOINT +.. clicmd:: [no] policy color COLOR endpoint ENDPOINT + + Delete or start a policy definition. + + +.. index:: name NAME +.. clicmd:: name NAME + + Specify the policy name. + + +.. index:: binding-sid LABEL +.. clicmd:: binding-sid LABEL + + Specify the policy SID. + + +.. index:: candidate-path preference PREFERENCE name NAME explicit segment-list SEGMENT-LIST-NAME +.. clicmd:: [no] candidate-path preference PREFERENCE name NAME explicit segment-list SEGMENT-LIST-NAME + + Delete or define an explicit candidate path. + + +.. index:: candidate-path preference PREFERENCE name NAME dynamic +.. 
clicmd:: [no] candidate-path preference PREFERENCE name NAME dynamic + + Delete or start a dynamic candidate path definition. + + +.. index:: affinity {exclude-any|include-any|include-all} BITPATTERN +.. clicmd:: [no] affinity {exclude-any|include-any|include-all} BITPATTERN + + Delete or specify an affinity constraint for a dynamic candidate path. + + +.. index:: bandwidth BANDWIDTH [required] +.. clicmd:: [no] bandwidth BANDWIDTH [required] + + Delete or specify a bandwidth constraint for a dynamic candidate path. + + +.. index:: metric [bound] METRIC VALUE [required] +.. clicmd:: [no] metric [bound] METRIC VALUE [required] + + Delete or specify a metric constraint for a dynamic candidate path. + + The possible metrics are: + - igp: IGP metric + - te: TE metric + - hc: Hop Counts + - abc: Aggregate bandwidth consumption + - mll: Load of the most loaded link + - igp: Cumulative IGP cost + - cte: Cumulative TE cost + - igp: P2MP IGP metric + - pte: P2MP TE metric + - phc: P2MP hop count metric + - msd: Segment-ID (SID) Depth + - pd: Path Delay metric + - pdv: Path Delay Variation metric + - pl: Path Loss metric + - ppd: P2MP Path Delay metric + - pdv: P2MP Path Delay variation metric + - ppl: P2MP Path Loss metric + - nap: Number of adaptations on a path + - nlp: Number of layers on a path + - dc: Domain Count metric + - bnc: Border Node Count metric + + +.. index:: objective-function OBJFUN1 [required] +.. clicmd:: [no] objective-function OBJFUN1 [required] + + Delete or specify a PCEP objective function constraint for a dynamic + candidate path. + + The possible functions are: + - mcp: Minimum Cost Path [RFC5541] + - mlp: Minimum Load Path [RFC5541] + - mbp: Maximum residual Bandwidth Path [RFC5541] + - mbc: Minimize aggregate Bandwidth Consumption [RFC5541] + - mll: Minimize the Load of the most loaded Link [RFC5541] + - mcc: Minimize the Cumulative Cost of a set of paths [RFC5541] + - spt: Shortest Path Tree [RFC8306] + - mct: Minimum Cost Tree [RFC8306] + - mplp: Minimum Packet Loss Path [RFC8233] + - mup: Maximum Under-Utilized Path [RFC8233] + - mrup: Maximum Reserved Under-Utilized Path [RFC8233] + - mtd: Minimize the number of Transit Domains [RFC8685] + - mbn: Minimize the number of Border Nodes [RFC8685] + - mctd: Minimize the number of Common Transit Domains [RFC8685] + - msl: Minimize the number of Shared Links [RFC8800] + - mss: Minimize the number of Shared SRLGs [RFC8800] + - msn: Minimize the number of Shared Nodes [RFC8800] + + +.. index:: debug pathd pcep [basic|path|message|pceplib] +.. clicmd:: [no] debug pathd pcep [basic|path|message|pceplib] + + Enable or disable debugging for the pcep module: + + - basic: Enable basic PCEP logging + - path: Log the path structures + - message: Log the PCEP messages + - pceplib: Enable pceplib logging + + +.. index:: pcep +.. clicmd:: pcep + + Configure PCEP support. + + +.. index:: cep-config NAME +.. clicmd:: [no] pce-config NAME + + Define a shared PCE configuration that can be used in multiple PCE + declarations. + + +.. index:: pce NAME +.. clicmd:: [no] pce NAME + + Define or delete a PCE definition. + + +.. index:: config WORD +.. clicmd:: config WORD + + Select a shared configuration. If not defined, the default + configuration will be used. + + +.. index:: address <ip A.B.C.D | ipv6 X:X::X:X> [port (1024-65535)] +.. clicmd:: address <ip A.B.C.D | ipv6 X:X::X:X> [port (1024-65535)] + + Define the address and port of the PCE. + + If not specified, the port is the standard PCEP port 4189. 
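To make the shared configuration mechanism concrete, here is a trimmed sketch based on the fuller example earlier in this file (group names and addresses are purely illustrative): one ``pce-config`` group is reused by two PCEs, the first relying on the default port 4189 and the second overriding it with the optional ``port`` argument of the ``address`` command.

.. code-block:: frr

   pcep
    pce-config GROUP1
     source-address 1.1.1.1
     timer keep-alive 30
    !
    pce PCE1
     config GROUP1
     address ip 10.10.10.10
    !
    pce PCE2
     config GROUP1
     address ip 9.9.9.9 port 4190
    !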
+ + This should be specified in the PCC peer definition. + + +.. index:: source-address [ip A.B.C.D | ipv6 X:X::X:X] [port PORT] +.. clicmd:: source-address [ip A.B.C.D | ipv6 X:X::X:X] [port PORT] + + Define the address and/or port of the PCC as seen by the PCE. + This can be used in a configuration group or a PCC peer declaration. + + If not specified, the source address will be the router identifier selected + by zebra, and the port will be the standard PCEP port 4189. + + This can be specified in either the PCC peer definition or in a + configuration group. + + +.. index:: tcp-md5-auth WORD +.. clicmd:: tcp-md5-auth WORD + + Enable TCP MD5 security with the given secret. + + This can be specified in either the PCC peer definition or in a + configuration group. + + +.. index:: sr-draft07 +.. clicmd:: sr-draft07 + + Specify if a PCE only support segment routing draft 7, this flag will limit + the PCC behavior to this draft. + + This can be specified in either the PCC peer definition or in a + configuration group. + + +.. index:: pce-initiated +.. clicmd:: pce-initiated + + Specify if PCE-initiated LSP should be allowed for this PCE. + + This can be specified in either the PCC peer definition or in a + configuration group. + + +.. index:: timer [keep-alive (1-63)] [min-peer-keep-alive (1-255)] [max-peer-keep-alive (1-255)] [dead-timer (4-255)] [min-peer-dead-timer (4-255)] [max-peer-dead-timer (4-255)] [pcep-request (1-120)] [session-timeout-interval (1-120)] [delegation-timeout (1-60)] +.. clicmd:: timer [keep-alive (1-63)] [min-peer-keep-alive (1-255)] [max-peer-keep-alive (1-255)] [dead-timer (4-255)] [min-peer-dead-timer (4-255)] [max-peer-dead-timer (4-255)] [pcep-request (1-120)] [session-timeout-interval (1-120)] [delegation-timeout (1-60)] + + Specify the PCEP timers. + + This can be specified in either the PCC peer definition or in a + configuration group. + + +.. index:: pcc +.. clicmd:: [no] pcc + + Disable or start the definition of a PCC. + + +.. index:: msd (1-32) +.. clicmd:: msd (1-32) + + Specify the maximum SID depth in a PCC definition. + + +.. index:: peer WORD [precedence (1-255)] +.. clicmd:: [no] peer WORD [precedence (1-255)] + + Specify a peer and its precedence in a PCC definition. + + +Introspection Commands +---------------------- + +.. index:: show sr-te policy [detail] +.. clicmd:: show sr-te policy [detail] + + Display the segment routing policies. + +.. code-block:: frr + + router# show sr-te policy + + Endpoint Color Name BSID Status + ------------------------------------------ + 1.1.1.1 1 default 4000 Active + + +.. code-block:: frr + + router# show sr-te policy detail + + Endpoint: 1.1.1.1 Color: 1 Name: LOW_DELAY BSID: 4000 Status: Active + Preference: 100 Name: cand1 Type: explicit Segment-List: sl1 Protocol-Origin: Local + * Preference: 200 Name: cand1 Type: dynamic Segment-List: 32453452 Protocol-Origin: PCEP + +The asterisk (*) marks the best, e.g. active, candidate path. Note that for segment-lists which are +retrieved via PCEP a random number based name is generated. + + +.. index:: show debugging pathd +.. clicmd:: show debugging pathd + + Display the current status of the pathd debugging. + + +.. index:: show debugging pathd-pcep +.. clicmd:: show debugging pathd-pcep + + Display the current status of the pcep module debugging. + + +.. index:: show sr-te pcep counters +.. clicmd:: show sr-te pcep counters + + Display the counters from pceplib. + + +.. index:: show sr-te pcep pce-config [NAME] +.. 
clicmd:: show sr-te pcep pce-config [NAME] + + Display a shared configuration. if no name is specified, the default + configuration will be displayed. + + +.. index:: show sr-te pcep pcc +.. clicmd:: show sr-te pcep pcc + + Display PCC information. + + +.. index:: show sr-te pcep session [NAME] +.. clicmd:: show sr-te pcep session [NAME] + + Display the information of a PCEP session, if not name is specified all the + sessions will be displayed. + + +Utility Commands +---------------- + +.. index:: clear sr-te pcep session [NAME] +.. clicmd:: clear sr-te pcep session [NAME] + + Reset the pcep session by disconnecting from the PCE and performing the + normal reconnection process. No configuration is changed. + + +Usage with BGP route-maps +========================= + +It is possible to steer traffic 'into' a segment routing policy for routes +learned through BGP using route-maps: + +.. code-block:: frr + + route-map SET_SR_POLICY permit 10 + set sr-te color 1 + ! + router bgp 1 + bgp router-id 2.2.2.2 + neighbor 1.1.1.1 remote-as 1 + neighbor 1.1.1.1 update-source lo + ! + address-family ipv4 unicast + neighbor 1.1.1.1 next-hop-self + neighbor 1.1.1.1 route-map SET_SR_POLICY in + redistribute static + exit-address-family + ! + ! + +In this case, the SR Policy with color `1` and endpoint `1.1.1.1` is selected. diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst index c869c6bc45..5cec7cbe62 100644 --- a/doc/user/pbr.rst +++ b/doc/user/pbr.rst @@ -258,6 +258,21 @@ causes the policy to be installed into the kernel. | valid | Is the map well-formed? | Boolean | +--------+----------------------------+---------+ +.. _pbr-debugs: + +PBR Debugs +=========== + +.. index:: debug pbr +.. clicmd:: debug pbr events|map|nht|zebra + + Debug pbr in pbrd daemon. You specify what types of debugs to turn on. + +.. index:: debug zebra pbr +.. clicmd:: debug zebra pbr + + Debug pbr in zebra daemon. + .. _pbr-details: PBR Details diff --git a/doc/user/pim.rst b/doc/user/pim.rst index dd6a647b4f..201fe2f9ed 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -727,6 +727,13 @@ Clear commands reset various variables. Rescan PIM OIL (output interface list). +.. index:: clear ip pim [vrf NAME] bsr-data +.. clicmd:: clear ip pim [vrf NAME] bsr-data + + This command will clear the BSM scope data struct. This command also + removes the next hop tracking for the bsr and resets the upstreams + for the dynamically learnt RPs. + PIM EVPN configuration ====================== To use PIM in the underlay for overlay BUM forwarding associate a multicast diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst index 2c0e5876fa..451df1aa4e 100644 --- a/doc/user/rpki.rst +++ b/doc/user/rpki.rst @@ -271,5 +271,5 @@ RPKI Configuration Example route-map rpki permit 40 ! -.. [Securing-BGP] Geoff Huston, Randy Bush: Securing BGP, In: The Internet Protocol Journal, Volume 14, No. 2, 2011. <http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-2/142_bgp.html> -.. [Resource-Certification] Geoff Huston: Resource Certification, In: The Internet Protocol Journal, Volume 12, No.1, 2009. <http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_12-1/121_resource.html> +.. [Securing-BGP] Geoff Huston, Randy Bush: Securing BGP, In: The Internet Protocol Journal, Volume 14, No. 2, 2011. <https://www.cisco.com/c/dam/en_us/about/ac123/ac147/archived_issues/ipj_14-2/ipj_14-2.pdf> +.. [Resource-Certification] Geoff Huston: Resource Certification, In: The Internet Protocol Journal, Volume 12, No.1, 2009. 
<https://www.cisco.com/c/dam/en_us/about/ac123/ac147/archived_issues/ipj_12-1/ipj_12-1.pdf> diff --git a/doc/user/scripting.rst b/doc/user/scripting.rst new file mode 100644 index 0000000000..b0295e5706 --- /dev/null +++ b/doc/user/scripting.rst @@ -0,0 +1,28 @@ +.. _scripting: + +********* +Scripting +********* + +The behavior of FRR may be extended or customized using its built-in scripting +capabilities. + +Some configuration commands accept the name of a Lua script to call to perform +some task or make some decision. These scripts have their environments +populated with some set of inputs, and are expected to populate some set of +output variables, which are read by FRR after the script completes. The names +and expected contents of these scripts are documented alongside the commands +that support them. + +These scripts live in :file:`/etc/frr/scripts/` by default. This is +configurable at compile time via ``--with-scriptdir``. It may be +overriden at runtime with the ``--scriptdir`` daemon option. + +In order to use scripting, FRR must be built with ``--enable-scripting``. + +.. note:: + + Scripts are typically loaded just-in-time. This means you can change the + contents of a script that is in use without restarting FRR. Not all + scripting locations may behave this way; refer to the documentation for the + particular location. diff --git a/doc/user/setup.rst b/doc/user/setup.rst index b2b71cf012..64a33765c2 100644 --- a/doc/user/setup.rst +++ b/doc/user/setup.rst @@ -240,3 +240,53 @@ because FRR's monitoring program cannot currently distinguish between a crashed The closest that can be achieved is to remove all configuration for the daemon, and set its line in ``/etc/frr/daemons`` to ``=no``. Once this is done, the daemon will be stopped the next time FRR is restarted. + + +Network Namespaces +^^^^^^^^^^^^^^^^^^ + +It is possible to run FRR in different network namespaces so it can be +further compartmentalized (e.g. confining to a smaller subset network). +The network namespace configuration can be used in the default FRR +configuration pathspace or it can be used in a different pathspace +(`-N/--pathspace`). + +To use FRR network namespace in the default pathspace you should add +or uncomment the ``watchfrr_options`` line in ``/etc/frr/daemons``: + +.. code-block:: diff + + - #watchfrr_options="--netns" + + watchfrr_options="--netns=<network-namespace-name>" + +If you want to use a different pathspace with the network namespace +(the recommended way) you should add/uncomment the ``watchfrr_options`` +line in ``/etc/frr/<namespace>/daemons``: + +.. code-block:: diff + + - #watchfrr_options="--netns" + + #watchfrr_options="--netns=<network-namespace-name>" + + + + # `--netns` argument is optional and if not provided it will + + # default to the pathspace name. + + watchfrr_options="--netns" + +To start FRR in the new pathspace+network namespace the initialization script +should be called with an extra parameter: + + +.. code:: + + /etc/init.d/frr start <pathspace-name> + + +.. note:: + + Some Linux distributions might not use the default init script + shipped with FRR, in that case you might want to try running the + bundled script in ``/usr/lib/frr/frrinit.sh``. + + On systemd you might create different units or parameterize the + existing one. 
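Putting the namespace steps above together, a minimal hedged walk-through might look like the following. The pathspace name ``blue`` is illustrative, ``ip netns add`` assumes iproute2 is installed, and the init integration depends on the distribution.

.. code-block:: shell

   # Create the network namespace. With --netns given no argument, the
   # pathspace name ("blue") doubles as the namespace name.
   ip netns add blue

   # In /etc/frr/blue/daemons, enable the namespace option:
   #   watchfrr_options="--netns"

   # Start FRR in the new pathspace + network namespace.
   /etc/init.d/frr start blue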
See the man page: + https://www.freedesktop.org/software/systemd/man/systemd.unit.html diff --git a/doc/user/subdir.am b/doc/user/subdir.am index dd7a193e34..3585245e85 100644 --- a/doc/user/subdir.am +++ b/doc/user/subdir.am @@ -27,6 +27,7 @@ user_RSTFILES = \ doc/user/ospf_fundamentals.rst \ doc/user/overview.rst \ doc/user/packet-dumps.rst \ + doc/user/pathd.rst \ doc/user/pim.rst \ doc/user/ripd.rst \ doc/user/pbr.rst \ @@ -34,6 +35,7 @@ user_RSTFILES = \ doc/user/routemap.rst \ doc/user/routeserver.rst \ doc/user/rpki.rst \ + doc/user/scripting.rst \ doc/user/setup.rst \ doc/user/sharp.rst \ doc/user/snmp.rst \ diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index 91cd205bed..a9979558c3 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -407,6 +407,14 @@ If no option is chosen, then the *Linux VRF* implementation as references in https://www.kernel.org/doc/Documentation/networking/vrf.txt will be mapped over the *Zebra* VRF. The routing table associated to that VRF is a Linux table identifier located in the same *Linux network namespace* where *Zebra* started. +Please note when using the *Linux VRF* routing table it is expected that a +default Kernel route will be installed that has a metric as outlined in the +www.kernel.org doc above. The Linux Kernel does table lookup via a combination +of rule application of the rule table and then route lookup of the specified +table. If no route match is found then the next applicable rule is applied +to find the next route table to use to look for a route match. As such if +your VRF table does not have a default blackhole route with a high metric +VRF route lookup will leave the table specified by the VRF, which is undesirable. If the :option:`-n` option is chosen, then the *Linux network namespace* will be mapped over the *Zebra* VRF. That implies that *Zebra* is able to configure @@ -759,6 +767,12 @@ IPv6 example for OSPFv3. not created at startup. On Debian, FRR might start before ifupdown completes. Consider a reboot test. +.. index:: zebra route-map delay-timer (0-600) +.. clicmd:: [no] zebra route-map delay-timer (0-600) + + Set the delay before any route-maps are processed in zebra. The + default time for this is 5 seconds. + .. 
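As a hedged illustration of the VRF recommendation above: the kernel's vrf.txt uses an ``unreachable`` default route with a very high metric to stop lookups from falling through to the next rule/table (an equivalent ``blackhole`` route also works). Table ``10`` is illustrative and must match the table bound to your VRF device; the metric value is taken from the kernel example and should be checked against your kernel documentation.

.. code-block:: shell

   # Terminate lookups in VRF table 10 instead of leaking to the next table.
   ip route add table 10 unreachable default metric 4278198272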
_zebra-fib-push-interface: zebra FIB push interface diff --git a/docker/alpine/Dockerfile b/docker/alpine/Dockerfile index ed6453e2b1..126710f8c2 100644 --- a/docker/alpine/Dockerfile +++ b/docker/alpine/Dockerfile @@ -1,10 +1,9 @@ # This stage builds a dist tarball from the source -FROM alpine:edge as source-builder +FROM alpine:latest as source-builder RUN mkdir -p /src/alpine COPY alpine/APKBUILD.in /src/alpine RUN source /src/alpine/APKBUILD.in \ - && echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories \ && apk add \ --no-cache \ --update-cache \ @@ -22,10 +21,9 @@ RUN cd /src \ && make dist # This stage builds an apk from the dist tarball -FROM alpine:edge as alpine-builder +FROM alpine:latest as alpine-builder # Don't use nocache here so that abuild can use the cache -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories \ - && apk add \ +RUN apk add \ --update-cache \ abuild \ alpine-conf \ @@ -46,11 +44,10 @@ RUN cd /dist \ && abuild -r -P /pkgs/apk # This stage installs frr from the apk -FROM alpine:edge +FROM alpine:latest RUN mkdir -p /pkgs/apk COPY --from=alpine-builder /pkgs/apk/ /pkgs/apk/ -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories \ - && apk add \ +RUN apk add \ --no-cache \ --update-cache \ tini \ diff --git a/docker/centos-7/Dockerfile b/docker/centos-7/Dockerfile index cca8baa147..a92326fcf3 100644 --- a/docker/centos-7/Dockerfile +++ b/docker/centos-7/Dockerfile @@ -5,8 +5,8 @@ RUN yum install -y rpm-build autoconf automake libtool make \ readline-devel texinfo net-snmp-devel groff pkgconfig \ json-c-devel pam-devel bison flex pytest c-ares-devel \ python3-devel python3-sphinx systemd-devel libcap-devel \ - https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \ - https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-devel-0.16.111-0.x86_64.rpm \ + https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-7-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \ + https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-7-x86_64-Packages/libyang-devel-1.0.184-0.x86_64.rpm \ https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \ https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-devel-0.7.0-1.el7.centos.x86_64.rpm @@ -32,7 +32,7 @@ RUN echo '%_smp_mflags %( echo "-j$(/usr/bin/getconf _NPROCESSORS_ONLN)"; )' >> # This stage installs frr from the rpm FROM centos:centos7 RUN mkdir -p /pkgs/rpm \ - && yum install -y https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \ + && yum install -y https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-7-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \ https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm COPY --from=centos-7-builder /rpmbuild/RPMS/ /pkgs/rpm/ diff --git a/docker/centos-8/Dockerfile b/docker/centos-8/Dockerfile index 6c1f873589..7ed7948927 100644 --- a/docker/centos-8/Dockerfile +++ b/docker/centos-8/Dockerfile @@ -1,17 +1,15 @@ # This stage builds an rpm from the source FROM centos:centos8 as centos-8-builder -RUN dnf install --enablerepo=PowerTools -y rpm-build git autoconf pcre-devel \ +RUN dnf install 
--enablerepo=powertools -y rpm-build git autoconf pcre-devel \ automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \ - groff pkgconfig json-c-devel pam-devel bison flex python2-pytest \ - c-ares-devel python2-devel systemd-devel libcap-devel platform-python-devel \ - https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \ - https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-devel-0.16.111-0.x86_64.rpm \ + groff pkgconfig json-c-devel pam-devel bison flex python3-pytest \ + c-ares-devel python3-devel python3-sphinx systemd-devel libcap-devel platform-python-devel \ + https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-8-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \ + https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-8-x86_64-Packages/libyang-devel-1.0.184-0.x86_64.rpm \ https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \ https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-devel-0.7.0-1.el7.centos.x86_64.rpm -RUN pip2 install sphinx - COPY . /src ARG PKGVER @@ -35,7 +33,7 @@ RUN echo '%_smp_mflags %( echo "-j$(/usr/bin/getconf _NPROCESSORS_ONLN)"; )' >> # This stage installs frr from the rpm FROM centos:centos8 RUN mkdir -p /pkgs/rpm \ - && yum install -y https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/CentOS-7-x86_64-Packages/libyang-0.16.111-0.x86_64.rpm \ + && yum install -y https://ci1.netdef.org/artifact/LIBYANG-LY1REL/shared/build-4/CentOS-8-x86_64-Packages/libyang1-1.0.184-0.x86_64.rpm \ https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm COPY --from=centos-8-builder /rpmbuild/RPMS/ /pkgs/rpm/ diff --git a/eigrpd/eigrp_cli.c b/eigrpd/eigrp_cli.c index 3610b3a869..00d8ea8867 100644 --- a/eigrpd/eigrp_cli.c +++ b/eigrpd/eigrp_cli.c @@ -29,6 +29,7 @@ #include "eigrp_structs.h" #include "eigrpd.h" #include "eigrp_zebra.h" +#include "eigrp_cli.h" #ifndef VTYSH_EXTRACT_PL #include "eigrpd/eigrp_cli_clippy.c" diff --git a/eigrpd/eigrp_cli.h b/eigrpd/eigrp_cli.h new file mode 100644 index 0000000000..c5f2fd8009 --- /dev/null +++ b/eigrpd/eigrp_cli.h @@ -0,0 +1,70 @@ +/* + * EIGRP CLI Functions. + * Copyright (C) 2019 + * Authors: + * Donnie Savage + * + * This file is part of FRR. + * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _EIGRP_CLI_H_ +#define _EIGRP_CLI_H_ + +/*Prototypes*/ +extern void eigrp_cli_show_header(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_end_header(struct vty *vty, struct lyd_node *dnode); +extern void eigrp_cli_show_router_id(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_passive_interface(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_active_time(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_variance(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_maximum_paths(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_metrics(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_network(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_neighbor(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_redistribute(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_delay(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_bandwidth(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_hello_interval(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_hold_time(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_summarize_address(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_authentication(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_show_keychain(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +extern void eigrp_cli_init(void); + +#endif /*EIGRP_CLI_H_ */ diff --git a/eigrpd/eigrp_const.h b/eigrpd/eigrp_const.h index d3d9bca82a..149cf00efc 100644 --- a/eigrpd/eigrp_const.h +++ b/eigrpd/eigrp_const.h @@ -122,10 +122,10 @@ enum metric_change { METRIC_DECREASE, METRIC_SAME, METRIC_INCREASE }; #define EIGRP_TOPOLOGY_TYPE_REMOTE_EXTERNAL 2 // Remote external network /*EIGRP TT entry flags*/ -#define EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG (1 << 0) -#define EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG (1 << 1) -#define EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG (1 << 2) -#define EIGRP_NEXTHOP_ENTRY_EXTERNAL_FLAG (1 << 3) +#define EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG (1 << 0) +#define EIGRP_ROUTE_DESCRIPTOR_FSUCCESSOR_FLAG (1 << 1) +#define EIGRP_ROUTE_DESCRIPTOR_INTABLE_FLAG (1 << 2) +#define EIGRP_ROUTE_DESCRIPTOR_EXTERNAL_FLAG (1 << 3) /*EIGRP FSM state count, event count*/ #define EIGRP_FSM_STATE_MAX 5 diff --git a/eigrpd/eigrp_dump.c b/eigrpd/eigrp_dump.c index dfce2acad4..e1ad51a9db 100644 --- a/eigrpd/eigrp_dump.c +++ b/eigrpd/eigrp_dump.c @@ -236,7 +236,8 @@ void show_ip_eigrp_topology_header(struct vty *vty, struct eigrp *eigrp) "Codes: P - Passive, A - Active, U - Update, Q - Query, R - Reply\n r - reply Status, s - sia Status\n\n"); } -void show_ip_eigrp_prefix_entry(struct vty *vty, struct eigrp_prefix_entry *tn) +void show_ip_eigrp_prefix_descriptor(struct vty *vty, + struct 
eigrp_prefix_descriptor *tn) { struct list *successors = eigrp_topology_get_successor(tn); @@ -251,14 +252,15 @@ void show_ip_eigrp_prefix_entry(struct vty *vty, struct eigrp_prefix_entry *tn) list_delete(&successors); } -void show_ip_eigrp_nexthop_entry(struct vty *vty, struct eigrp *eigrp, - struct eigrp_nexthop_entry *te, bool *first) +void show_ip_eigrp_route_descriptor(struct vty *vty, struct eigrp *eigrp, + struct eigrp_route_descriptor *te, + bool *first) { if (te->reported_distance == EIGRP_MAX_METRIC) return; if (*first) { - show_ip_eigrp_prefix_entry(vty, te->prefix); + show_ip_eigrp_prefix_descriptor(vty, te->prefix); *first = false; } diff --git a/eigrpd/eigrp_dump.h b/eigrpd/eigrp_dump.h index 348356bb3c..0d512fc63f 100644 --- a/eigrpd/eigrp_dump.h +++ b/eigrpd/eigrp_dump.h @@ -151,11 +151,11 @@ extern void show_ip_eigrp_interface_sub(struct vty *, struct eigrp *, struct eigrp_interface *); extern void show_ip_eigrp_neighbor_sub(struct vty *, struct eigrp_neighbor *, int); -extern void show_ip_eigrp_prefix_entry(struct vty *, - struct eigrp_prefix_entry *); -extern void show_ip_eigrp_nexthop_entry(struct vty *vty, struct eigrp *eigrp, - struct eigrp_nexthop_entry *ne, - bool *first); +extern void show_ip_eigrp_prefix_descriptor(struct vty *vty, + struct eigrp_prefix_descriptor *tn); +extern void show_ip_eigrp_route_descriptor(struct vty *vty, struct eigrp *eigrp, + struct eigrp_route_descriptor *ne, + bool *first); extern void eigrp_debug_init(void); diff --git a/eigrpd/eigrp_filter.c b/eigrpd/eigrp_filter.c index 009b57e05f..c77a6fc1b1 100644 --- a/eigrpd/eigrp_filter.c +++ b/eigrpd/eigrp_filter.c @@ -124,39 +124,6 @@ void eigrp_distribute_update(struct distribute_ctx *ctx, } else e->prefix[EIGRP_FILTER_OUT] = NULL; -// This is commented out, because the distribute.[ch] code -// changes looked poorly written from first glance -// commit was 133bdf2d -// TODO: DBS -#if 0 - /* route-map IN for whole process */ - if (dist->route[DISTRIBUTE_V4_IN]) - { - routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_IN]); - if (routemap) - e->routemap[EIGRP_FILTER_IN] = routemap; - else - e->routemap[EIGRP_FILTER_IN] = NULL; - } - else - { - e->routemap[EIGRP_FILTER_IN] = NULL; - } - - /* route-map OUT for whole process */ - if (dist->route[DISTRIBUTE_V4_OUT]) - { - routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_OUT]); - if (routemap) - e->routemap[EIGRP_FILTER_OUT] = routemap; - else - e->routemap[EIGRP_FILTER_OUT] = NULL; - } - else - { - e->routemap[EIGRP_FILTER_OUT] = NULL; - } -#endif // TODO: check Graceful restart after 10sec /* cancel GR scheduled */ @@ -232,36 +199,6 @@ void eigrp_distribute_update(struct distribute_ctx *ctx, } else ei->prefix[EIGRP_FILTER_OUT] = NULL; -#if 0 - /* route-map IN for whole process */ - if (dist->route[DISTRIBUTE_V4_IN]) - { - zlog_info("<DEBUG ACL ALL in"); - routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_IN]); - if (routemap) - ei->routemap[EIGRP_FILTER_IN] = routemap; - else - ei->routemap[EIGRP_FILTER_IN] = NULL; - } - else - { - ei->routemap[EIGRP_FILTER_IN] = NULL; - } - - /* route-map OUT for whole process */ - if (dist->route[DISTRIBUTE_V4_OUT]) - { - routemap = route_map_lookup_by_name (dist->route[DISTRIBUTE_V4_OUT]); - if (routemap) - ei->routemap[EIGRP_FILTER_OUT] = routemap; - else - ei->routemap[EIGRP_FILTER_OUT] = NULL; - } - else - { - ei->routemap[EIGRP_FILTER_OUT] = NULL; - } -#endif // TODO: check Graceful restart after 10sec /* Cancel GR scheduled */ diff --git a/eigrpd/eigrp_fsm.c 
b/eigrpd/eigrp_fsm.c index a69a3eec0a..d2d435bf2d 100644 --- a/eigrpd/eigrp_fsm.c +++ b/eigrpd/eigrp_fsm.c @@ -77,6 +77,7 @@ #include "linklist.h" #include "vty.h" +#include "eigrpd/eigrp_types.h" #include "eigrpd/eigrp_structs.h" #include "eigrpd/eigrpd.h" #include "eigrpd/eigrp_interface.h" @@ -88,6 +89,7 @@ #include "eigrpd/eigrp_dump.h" #include "eigrpd/eigrp_topology.h" #include "eigrpd/eigrp_fsm.h" +#include "eigrpd/eigrp_metric.h" /* * Prototypes @@ -262,13 +264,13 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) { // Loading base information from message // struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; - struct eigrp_nexthop_entry *entry = msg->entry; + struct eigrp_prefix_descriptor *prefix = msg->prefix; + struct eigrp_route_descriptor *entry = msg->entry; uint8_t actual_state = prefix->state; enum metric_change change; if (entry == NULL) { - entry = eigrp_nexthop_entry_new(); + entry = eigrp_route_descriptor_new(); entry->adv_router = msg->adv_router; entry->ei = msg->adv_router->ei; entry->prefix = prefix; @@ -286,7 +288,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) switch (actual_state) { case EIGRP_FSM_STATE_PASSIVE: { - struct eigrp_nexthop_entry *head = + struct eigrp_route_descriptor *head = listnode_head(prefix->entries); if (head->reported_distance < prefix->fdistance) { @@ -307,7 +309,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) } case EIGRP_FSM_STATE_ACTIVE_0: { if (msg->packet_type == EIGRP_OPC_REPLY) { - struct eigrp_nexthop_entry *head = + struct eigrp_route_descriptor *head = listnode_head(prefix->entries); listnode_delete(prefix->rij, entry->adv_router); @@ -322,7 +324,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) return EIGRP_FSM_EVENT_LR_FCN; } else if (msg->packet_type == EIGRP_OPC_QUERY && (entry->flags - & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG)) { + & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG)) { return EIGRP_FSM_EVENT_QACT; } @@ -332,14 +334,14 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) } case EIGRP_FSM_STATE_ACTIVE_1: { if (msg->packet_type == EIGRP_OPC_QUERY - && (entry->flags & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG)) { + && (entry->flags & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG)) { return EIGRP_FSM_EVENT_QACT; } else if (msg->packet_type == EIGRP_OPC_REPLY) { listnode_delete(prefix->rij, entry->adv_router); if (change == METRIC_INCREASE && (entry->flags - & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG)) { + & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG)) { return EIGRP_FSM_EVENT_DINC; } else if (prefix->rij->count) { return EIGRP_FSM_KEEP_STATE; @@ -350,7 +352,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) } else if (msg->packet_type == EIGRP_OPC_UPDATE && change == METRIC_INCREASE && (entry->flags - & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG)) { + & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG)) { return EIGRP_FSM_EVENT_DINC; } return EIGRP_FSM_KEEP_STATE; @@ -359,7 +361,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) } case EIGRP_FSM_STATE_ACTIVE_2: { if (msg->packet_type == EIGRP_OPC_REPLY) { - struct eigrp_nexthop_entry *head = + struct eigrp_route_descriptor *head = listnode_head(prefix->entries); listnode_delete(prefix->rij, entry->adv_router); @@ -385,7 +387,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) if (change == METRIC_INCREASE && (entry->flags - & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG)) { + & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG)) { return EIGRP_FSM_EVENT_DINC; } else if (prefix->rij->count) { return 
EIGRP_FSM_KEEP_STATE; @@ -396,7 +398,7 @@ eigrp_get_fsm_event(struct eigrp_fsm_action_message *msg) } else if (msg->packet_type == EIGRP_OPC_UPDATE && change == METRIC_INCREASE && (entry->flags - & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG)) { + & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG)) { return EIGRP_FSM_EVENT_DINC; } return EIGRP_FSM_KEEP_STATE; @@ -434,9 +436,9 @@ int eigrp_fsm_event(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_nq_fcn(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; + struct eigrp_prefix_descriptor *prefix = msg->prefix; struct list *successors = eigrp_topology_get_successor(prefix); - struct eigrp_nexthop_entry *ne; + struct eigrp_route_descriptor *ne; assert(successors); // If this is NULL we have shit the bed, fun huh? @@ -461,9 +463,9 @@ int eigrp_fsm_event_nq_fcn(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_q_fcn(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; + struct eigrp_prefix_descriptor *prefix = msg->prefix; struct list *successors = eigrp_topology_get_successor(prefix); - struct eigrp_nexthop_entry *ne; + struct eigrp_route_descriptor *ne; assert(successors); // If this is NULL somebody poked us in the eye. @@ -487,8 +489,8 @@ int eigrp_fsm_event_q_fcn(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; - struct eigrp_nexthop_entry *ne = listnode_head(prefix->entries); + struct eigrp_prefix_descriptor *prefix = msg->prefix; + struct eigrp_route_descriptor *ne = listnode_head(prefix->entries); if (prefix->state == EIGRP_FSM_STATE_PASSIVE) { if (!eigrp_metrics_is_same(prefix->reported_metric, @@ -515,8 +517,8 @@ int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_lr(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; - struct eigrp_nexthop_entry *ne = listnode_head(prefix->entries); + struct eigrp_prefix_descriptor *prefix = msg->prefix; + struct eigrp_route_descriptor *ne = listnode_head(prefix->entries); prefix->fdistance = prefix->distance = prefix->rdistance = ne->distance; prefix->reported_metric = ne->total_metric; @@ -545,7 +547,7 @@ int eigrp_fsm_event_lr(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_dinc(struct eigrp_fsm_action_message *msg) { struct list *successors = eigrp_topology_get_successor(msg->prefix); - struct eigrp_nexthop_entry *ne; + struct eigrp_route_descriptor *ne; assert(successors); // Trump and his big hands @@ -566,8 +568,8 @@ int eigrp_fsm_event_dinc(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_lr_fcs(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; - struct eigrp_nexthop_entry *ne = listnode_head(prefix->entries); + struct eigrp_prefix_descriptor *prefix = msg->prefix; + struct eigrp_route_descriptor *ne = listnode_head(prefix->entries); prefix->state = EIGRP_FSM_STATE_PASSIVE; prefix->distance = prefix->rdistance = ne->distance; @@ -598,8 +600,8 @@ int eigrp_fsm_event_lr_fcs(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_lr_fcn(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; - struct eigrp_nexthop_entry *best_successor; + 
struct eigrp_prefix_descriptor *prefix = msg->prefix; + struct eigrp_route_descriptor *best_successor; struct list *successors = eigrp_topology_get_successor(prefix); assert(successors); // Routing without a stack @@ -628,7 +630,7 @@ int eigrp_fsm_event_lr_fcn(struct eigrp_fsm_action_message *msg) int eigrp_fsm_event_qact(struct eigrp_fsm_action_message *msg) { struct list *successors = eigrp_topology_get_successor(msg->prefix); - struct eigrp_nexthop_entry *ne; + struct eigrp_route_descriptor *ne; assert(successors); // Cats and no Dogs diff --git a/eigrpd/eigrp_interface.c b/eigrpd/eigrp_interface.c index dd43dd0478..bb7a930e6d 100644 --- a/eigrpd/eigrp_interface.c +++ b/eigrpd/eigrp_interface.c @@ -55,6 +55,8 @@ #include "eigrpd/eigrp_memory.h" #include "eigrpd/eigrp_fsm.h" #include "eigrpd/eigrp_dump.h" +#include "eigrpd/eigrp_types.h" +#include "eigrpd/eigrp_metric.h" struct eigrp_interface *eigrp_if_new(struct eigrp *eigrp, struct interface *ifp, struct prefix *p) @@ -227,10 +229,24 @@ void eigrp_del_if_params(struct eigrp_if_params *eip) free(eip->auth_keychain); } +/* + * Set the network byte order of the 3 bytes we send + * of the mtu of the link. + */ +static void eigrp_mtu_convert(struct eigrp_metrics *metric, uint32_t host_mtu) +{ + uint32_t network_mtu = htonl(host_mtu); + uint8_t *nm = (uint8_t *)&network_mtu; + + metric->mtu[0] = nm[1]; + metric->mtu[1] = nm[2]; + metric->mtu[2] = nm[3]; +} + int eigrp_if_up(struct eigrp_interface *ei) { - struct eigrp_prefix_entry *pe; - struct eigrp_nexthop_entry *ne; + struct eigrp_prefix_descriptor *pe; + struct eigrp_route_descriptor *ne; struct eigrp_metrics metric; struct eigrp_interface *ei2; struct listnode *node, *nnode; @@ -254,23 +270,21 @@ int eigrp_if_up(struct eigrp_interface *ei) metric.delay = eigrp_delay_to_scaled(ei->params.delay); metric.load = ei->params.load; metric.reliability = ei->params.reliability; - metric.mtu[0] = 0xDC; - metric.mtu[1] = 0x05; - metric.mtu[2] = 0x00; + eigrp_mtu_convert(&metric, ei->ifp->mtu); metric.hop_count = 0; metric.flags = 0; metric.tag = 0; /*Add connected entry to topology table*/ - ne = eigrp_nexthop_entry_new(); + ne = eigrp_route_descriptor_new(); ne->ei = ei; ne->reported_metric = metric; ne->total_metric = metric; ne->distance = eigrp_calculate_metrics(eigrp, metric); ne->reported_distance = 0; ne->adv_router = eigrp->neighbor_self; - ne->flags = EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG; + ne->flags = EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG; struct prefix dest_addr; @@ -280,7 +294,7 @@ int eigrp_if_up(struct eigrp_interface *ei) &dest_addr); if (pe == NULL) { - pe = eigrp_prefix_entry_new(); + pe = eigrp_prefix_descriptor_new(); pe->serno = eigrp->serno; pe->destination = (struct prefix *)prefix_ipv4_new(); prefix_copy(pe->destination, &dest_addr); @@ -292,10 +306,10 @@ int eigrp_if_up(struct eigrp_interface *ei) pe->state = EIGRP_FSM_STATE_PASSIVE; pe->fdistance = eigrp_calculate_metrics(eigrp, metric); pe->req_action |= EIGRP_FSM_NEED_UPDATE; - eigrp_prefix_entry_add(eigrp->topology_table, pe); + eigrp_prefix_descriptor_add(eigrp->topology_table, pe); listnode_add(eigrp->topology_changes_internalIPV4, pe); - eigrp_nexthop_entry_add(eigrp, pe, ne); + eigrp_route_descriptor_add(eigrp, pe, ne); for (ALL_LIST_ELEMENTS(eigrp->eiflist, node, nnode, ei2)) { eigrp_update_send(ei2); @@ -307,7 +321,7 @@ int eigrp_if_up(struct eigrp_interface *ei) struct eigrp_fsm_action_message msg; ne->prefix = pe; - eigrp_nexthop_entry_add(eigrp, pe, ne); + eigrp_route_descriptor_add(eigrp, pe, ne); 
msg.packet_type = EIGRP_OPC_UPDATE; msg.eigrp = eigrp; @@ -416,7 +430,7 @@ uint8_t eigrp_default_iftype(struct interface *ifp) void eigrp_if_free(struct eigrp_interface *ei, int source) { struct prefix dest_addr; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; struct eigrp *eigrp = ei->eigrp; if (source == INTERFACE_DOWN_BY_VTY) { @@ -429,7 +443,8 @@ void eigrp_if_free(struct eigrp_interface *ei, int source) pe = eigrp_topology_table_lookup_ipv4(eigrp->topology_table, &dest_addr); if (pe) - eigrp_prefix_entry_delete(eigrp, eigrp->topology_table, pe); + eigrp_prefix_descriptor_delete(eigrp, eigrp->topology_table, + pe); eigrp_if_down(ei); @@ -494,33 +509,3 @@ struct eigrp_interface *eigrp_if_lookup_by_name(struct eigrp *eigrp, return NULL; } - -uint32_t eigrp_bandwidth_to_scaled(uint32_t bandwidth) -{ - uint64_t temp_bandwidth = (256ull * 10000000) / bandwidth; - - temp_bandwidth = temp_bandwidth < EIGRP_MAX_METRIC ? temp_bandwidth - : EIGRP_MAX_METRIC; - - return (uint32_t)temp_bandwidth; -} - -uint32_t eigrp_scaled_to_bandwidth(uint32_t scaled) -{ - uint64_t temp_scaled = scaled * (256ull * 10000000); - - temp_scaled = - temp_scaled < EIGRP_MAX_METRIC ? temp_scaled : EIGRP_MAX_METRIC; - - return (uint32_t)temp_scaled; -} - -uint32_t eigrp_delay_to_scaled(uint32_t delay) -{ - return delay * 256; -} - -uint32_t eigrp_scaled_to_delay(uint32_t scaled) -{ - return scaled / 256; -} diff --git a/eigrpd/eigrp_interface.h b/eigrpd/eigrp_interface.h index 1e66dafde2..68ab5125e3 100644 --- a/eigrpd/eigrp_interface.h +++ b/eigrpd/eigrp_interface.h @@ -58,9 +58,4 @@ extern struct eigrp_interface *eigrp_if_lookup_by_name(struct eigrp *, /* Simulate down/up on the interface. */ extern void eigrp_if_reset(struct interface *); -extern uint32_t eigrp_bandwidth_to_scaled(uint32_t); -extern uint32_t eigrp_scaled_to_bandwidth(uint32_t); -extern uint32_t eigrp_delay_to_scaled(uint32_t); -extern uint32_t eigrp_scaled_to_delay(uint32_t); - #endif /* ZEBRA_EIGRP_INTERFACE_H_ */ diff --git a/eigrpd/eigrp_main.c b/eigrpd/eigrp_main.c index 6c44ce361c..b1a6498cbc 100644 --- a/eigrpd/eigrp_main.c +++ b/eigrpd/eigrp_main.c @@ -66,6 +66,8 @@ #include "eigrpd/eigrp_filter.h" #include "eigrpd/eigrp_errors.h" #include "eigrpd/eigrp_vrf.h" +#include "eigrpd/eigrp_cli.h" +#include "eigrpd/eigrp_yang.h" //#include "eigrpd/eigrp_routemap.h" /* eigprd privileges */ diff --git a/eigrpd/eigrp_memory.c b/eigrpd/eigrp_memory.c index 85b14c28ce..57ca785340 100644 --- a/eigrpd/eigrp_memory.c +++ b/eigrpd/eigrp_memory.c @@ -37,6 +37,6 @@ DEFINE_MTYPE(EIGRPD, EIGRP_IPV4_INT_TLV, "EIGRP IPv4 TLV") DEFINE_MTYPE(EIGRPD, EIGRP_SEQ_TLV, "EIGRP SEQ TLV") DEFINE_MTYPE(EIGRPD, EIGRP_AUTH_TLV, "EIGRP AUTH TLV") DEFINE_MTYPE(EIGRPD, EIGRP_AUTH_SHA256_TLV, "EIGRP SHA TLV") -DEFINE_MTYPE(EIGRPD, EIGRP_PREFIX_ENTRY, "EIGRP Prefix") -DEFINE_MTYPE(EIGRPD, EIGRP_NEXTHOP_ENTRY, "EIGRP Nexthop Entry") +DEFINE_MTYPE(EIGRPD, EIGRP_PREFIX_DESCRIPTOR, "EIGRP Prefix") +DEFINE_MTYPE(EIGRPD, EIGRP_ROUTE_DESCRIPTOR, "EIGRP Nexthop Entry") DEFINE_MTYPE(EIGRPD, EIGRP_FSM_MSG, "EIGRP FSM Message") diff --git a/eigrpd/eigrp_memory.h b/eigrpd/eigrp_memory.h index e4d02c09d4..21ecba2aae 100644 --- a/eigrpd/eigrp_memory.h +++ b/eigrpd/eigrp_memory.h @@ -36,8 +36,8 @@ DECLARE_MTYPE(EIGRP_IPV4_INT_TLV) DECLARE_MTYPE(EIGRP_SEQ_TLV) DECLARE_MTYPE(EIGRP_AUTH_TLV) DECLARE_MTYPE(EIGRP_AUTH_SHA256_TLV) -DECLARE_MTYPE(EIGRP_PREFIX_ENTRY) -DECLARE_MTYPE(EIGRP_NEXTHOP_ENTRY) +DECLARE_MTYPE(EIGRP_PREFIX_DESCRIPTOR) +DECLARE_MTYPE(EIGRP_ROUTE_DESCRIPTOR) 
DECLARE_MTYPE(EIGRP_FSM_MSG) #endif /* _FRR_EIGRP_MEMORY_H */ diff --git a/eigrpd/eigrp_metric.c b/eigrpd/eigrp_metric.c new file mode 100644 index 0000000000..2b05db71d5 --- /dev/null +++ b/eigrpd/eigrp_metric.c @@ -0,0 +1,146 @@ +/* + * EIGRP Metric Math Functions. + * Copyright (C) 2013-2016 + * Authors: + * Donnie Savage + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "eigrpd/eigrp_structs.h" +#include "eigrpd/eigrpd.h" +#include "eigrpd/eigrp_types.h" +#include "eigrpd/eigrp_metric.h" + +eigrp_scaled_t eigrp_bandwidth_to_scaled(eigrp_bandwidth_t bandwidth) +{ + eigrp_bandwidth_t scaled = EIGRP_BANDWIDTH_MAX; + + if (bandwidth != EIGRP_BANDWIDTH_MAX) { + scaled = (EIGRP_CLASSIC_SCALER * EIGRP_BANDWIDTH_SCALER); + scaled = scaled / bandwidth; + + scaled = scaled ? scaled : EIGRP_BANDWIDTH_MIN; + } + + scaled = (scaled < EIGRP_METRIC_MAX) ? scaled : EIGRP_METRIC_MAX; + return (eigrp_scaled_t)scaled; +} + +eigrp_bandwidth_t eigrp_scaled_to_bandwidth(eigrp_scaled_t scaled) +{ + eigrp_bandwidth_t bandwidth = EIGRP_BANDWIDTH_MAX; + + if (scaled != EIGRP_CLASSIC_MAX) { + bandwidth = (EIGRP_CLASSIC_SCALER * EIGRP_BANDWIDTH_SCALER); + bandwidth = scaled * bandwidth; + bandwidth = (bandwidth < EIGRP_METRIC_MAX) + ? bandwidth + : EIGRP_BANDWIDTH_MAX; + } + + return bandwidth; +} + +eigrp_scaled_t eigrp_delay_to_scaled(eigrp_delay_t delay) +{ + delay = delay ? delay : EIGRP_DELAY_MIN; + return delay * EIGRP_CLASSIC_SCALER; +} + +eigrp_delay_t eigrp_scaled_to_delay(eigrp_scaled_t scaled) +{ + scaled = scaled / EIGRP_CLASSIC_SCALER; + scaled = scaled ? scaled : EIGRP_DELAY_MIN; + + return scaled; +} + +eigrp_metric_t eigrp_calculate_metrics(struct eigrp *eigrp, + struct eigrp_metrics metric) +{ + eigrp_metric_t composite = 0; + + if (metric.delay == EIGRP_MAX_METRIC) + return EIGRP_METRIC_MAX; + + /* + * EIGRP Composite = + * {K1*BW+[(K2*BW)/(256-load)]+(K3*delay)}*{K5/(reliability+K4)} + */ + + if (eigrp->k_values[0]) + composite += (eigrp->k_values[0] * metric.bandwidth); + if (eigrp->k_values[1]) + composite += ((eigrp->k_values[1] * metric.bandwidth) + / (256 - metric.load)); + if (eigrp->k_values[2]) + composite += (eigrp->k_values[2] * metric.delay); + if (eigrp->k_values[3] && !eigrp->k_values[4]) + composite *= eigrp->k_values[3]; + if (!eigrp->k_values[3] && eigrp->k_values[4]) + composite *= (eigrp->k_values[4] / metric.reliability); + if (eigrp->k_values[3] && eigrp->k_values[4]) + composite *= ((eigrp->k_values[4] / metric.reliability) + + eigrp->k_values[3]); + + composite = + (composite <= EIGRP_METRIC_MAX) ? 
composite : EIGRP_METRIC_MAX; + + return composite; +} + +eigrp_metric_t +eigrp_calculate_total_metrics(struct eigrp *eigrp, + struct eigrp_route_descriptor *entry) +{ + struct eigrp_interface *ei = entry->ei; + eigrp_delay_t temp_delay; + eigrp_bandwidth_t bw; + + entry->total_metric = entry->reported_metric; + temp_delay = entry->total_metric.delay + + eigrp_delay_to_scaled(ei->params.delay); + + entry->total_metric.delay = temp_delay > EIGRP_METRIC_MAX_CLASSIC + ? EIGRP_METRIC_MAX_CLASSIC + : temp_delay; + + bw = eigrp_bandwidth_to_scaled(ei->params.bandwidth); + entry->total_metric.bandwidth = entry->total_metric.bandwidth > bw + ? bw + : entry->total_metric.bandwidth; + + return eigrp_calculate_metrics(eigrp, entry->total_metric); +} + +bool eigrp_metrics_is_same(struct eigrp_metrics metric1, + struct eigrp_metrics metric2) +{ + if ((metric1.bandwidth == metric2.bandwidth) + && (metric1.delay == metric2.delay) + && (metric1.hop_count == metric2.hop_count) + && (metric1.load == metric2.load) + && (metric1.reliability == metric2.reliability) + && (metric1.mtu[0] == metric2.mtu[0]) + && (metric1.mtu[1] == metric2.mtu[1]) + && (metric1.mtu[2] == metric2.mtu[2])) { + return true; + } + + return false; /* if different */ +} diff --git a/eigrpd/eigrp_metric.h b/eigrpd/eigrp_metric.h new file mode 100644 index 0000000000..8e9cd66747 --- /dev/null +++ b/eigrpd/eigrp_metric.h @@ -0,0 +1,63 @@ +/* + * EIGRP Metric Math Functions. + * Copyright (C) 2013-2016 + * Authors: + * Donnie Savage + * + * This file is part of GNU Zebra. + * + * GNU Zebra is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * GNU Zebra is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _ZEBRA_EIGRP_METRIC_H_ +#define _ZEBRA_EIGRP_METRIC_H_ + +/* Constants */ +#define EIGRP_BANDWIDTH_MIN 0x1ull /* 1 */ +#define EIGRP_BANDWIDTH_SCALER 10000000ull /* Inversion value */ +#define EIGRP_BANDWIDTH_MAX 0xffffffffffffffffull /* 1.84467441x10^19 */ + +#define EIGRP_DELAY_MIN 0x1ull /* 1 */ +#define EIGRP_DELAY_PICO 1000000ull +#define EIGRP_DELAY_MAX 0xffffffffffffffffull /* 1.84467441x10^19 */ + +#define EIGRP_MAX_LOAD 256 +#define EIGRP_MAX_HOPS 100 + +#define EIGRP_INACCESSIBLE 0xFFFFFFFFFFFFFFFFull + +#define EIGRP_METRIC_MAX 0xffffffffffffffffull /* 1.84467441x10^19 */ +#define EIGRP_METRIC_MAX_CLASSIC 0xffffffff +#define EIGRP_METRIC_SCALER 65536 /* CLASSIC to WIDE conversion */ + +#define EIGRP_CLASSIC_MAX 0xffffffff /* 4294967295 */ +#define EIGRP_CLASSIC_SCALER 256 /* IGRP to EIGRP conversion */ + + +/* Prototypes */ +extern eigrp_scaled_t eigrp_bandwidth_to_scaled(eigrp_bandwidth_t bw); +extern eigrp_bandwidth_t eigrp_scaled_to_bandwidth(eigrp_scaled_t scale); +extern eigrp_scaled_t eigrp_delay_to_scaled(eigrp_delay_t delay); +extern eigrp_delay_t eigrp_scaled_to_delay(eigrp_scaled_t scale); + +extern eigrp_metric_t eigrp_calculate_metrics(struct eigrp *eigrp, + struct eigrp_metrics metric); +extern eigrp_metric_t +eigrp_calculate_total_metrics(struct eigrp *eigrp, + struct eigrp_route_descriptor *rd); +extern bool eigrp_metrics_is_same(struct eigrp_metrics m1, + struct eigrp_metrics m2); + +#endif /* _ZEBRA_EIGRP_METRIC_H_ */ diff --git a/eigrpd/eigrp_neighbor.c b/eigrpd/eigrp_neighbor.c index 2d5bb0a7d1..1da2f7a108 100644 --- a/eigrpd/eigrp_neighbor.c +++ b/eigrpd/eigrp_neighbor.c @@ -343,7 +343,7 @@ void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty) eigrp_nbr_delete(nbr); } -int eigrp_nbr_split_horizon_check(struct eigrp_nexthop_entry *ne, +int eigrp_nbr_split_horizon_check(struct eigrp_route_descriptor *ne, struct eigrp_interface *ei) { if (ne->distance == EIGRP_MAX_METRIC) diff --git a/eigrpd/eigrp_neighbor.h b/eigrpd/eigrp_neighbor.h index 1c308fa981..80ab1eded5 100644 --- a/eigrpd/eigrp_neighbor.h +++ b/eigrpd/eigrp_neighbor.h @@ -54,6 +54,6 @@ extern struct eigrp_neighbor * eigrp_nbr_lookup_by_addr_process(struct eigrp *eigrp, struct in_addr addr); extern void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty); -extern int eigrp_nbr_split_horizon_check(struct eigrp_nexthop_entry *ne, +extern int eigrp_nbr_split_horizon_check(struct eigrp_route_descriptor *ne, struct eigrp_interface *ei); #endif /* _ZEBRA_EIGRP_NEIGHBOR_H */ diff --git a/eigrpd/eigrp_network.c b/eigrpd/eigrp_network.c index 51b4998959..69dcc20253 100644 --- a/eigrpd/eigrp_network.c +++ b/eigrpd/eigrp_network.c @@ -346,76 +346,6 @@ int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p) return 1; } -uint32_t eigrp_calculate_metrics(struct eigrp *eigrp, - struct eigrp_metrics metric) -{ - uint64_t temp_metric; - temp_metric = 0; - - if (metric.delay == EIGRP_MAX_METRIC) - return EIGRP_MAX_METRIC; - - // EIGRP Metric = - // {K1*BW+[(K2*BW)/(256-load)]+(K3*delay)}*{K5/(reliability+K4)} - - if (eigrp->k_values[0]) - temp_metric += (eigrp->k_values[0] * metric.bandwidth); - if (eigrp->k_values[1]) - temp_metric += ((eigrp->k_values[1] * metric.bandwidth) - / (256 - metric.load)); - if (eigrp->k_values[2]) - 
temp_metric += (eigrp->k_values[2] * metric.delay); - if (eigrp->k_values[3] && !eigrp->k_values[4]) - temp_metric *= eigrp->k_values[3]; - if (!eigrp->k_values[3] && eigrp->k_values[4]) - temp_metric *= (eigrp->k_values[4] / metric.reliability); - if (eigrp->k_values[3] && eigrp->k_values[4]) - temp_metric *= ((eigrp->k_values[4] / metric.reliability) - + eigrp->k_values[3]); - - if (temp_metric <= EIGRP_MAX_METRIC) - return (uint32_t)temp_metric; - else - return EIGRP_MAX_METRIC; -} - -uint32_t eigrp_calculate_total_metrics(struct eigrp *eigrp, - struct eigrp_nexthop_entry *entry) -{ - struct eigrp_interface *ei = entry->ei; - - entry->total_metric = entry->reported_metric; - uint64_t temp_delay = - (uint64_t)entry->total_metric.delay - + (uint64_t)eigrp_delay_to_scaled(ei->params.delay); - entry->total_metric.delay = temp_delay > EIGRP_MAX_METRIC - ? EIGRP_MAX_METRIC - : (uint32_t)temp_delay; - - uint32_t bw = eigrp_bandwidth_to_scaled(ei->params.bandwidth); - entry->total_metric.bandwidth = entry->total_metric.bandwidth > bw - ? bw - : entry->total_metric.bandwidth; - - return eigrp_calculate_metrics(eigrp, entry->total_metric); -} - -uint8_t eigrp_metrics_is_same(struct eigrp_metrics metric1, - struct eigrp_metrics metric2) -{ - if ((metric1.bandwidth == metric2.bandwidth) - && (metric1.delay == metric2.delay) - && (metric1.hop_count == metric2.hop_count) - && (metric1.load == metric2.load) - && (metric1.reliability == metric2.reliability) - && (metric1.mtu[0] == metric2.mtu[0]) - && (metric1.mtu[1] == metric2.mtu[1]) - && (metric1.mtu[2] == metric2.mtu[2])) - return 1; - - return 0; // if different -} - void eigrp_external_routes_refresh(struct eigrp *eigrp, int type) { } diff --git a/eigrpd/eigrp_network.h b/eigrpd/eigrp_network.h index 7839fc946b..eeb32ba356 100644 --- a/eigrpd/eigrp_network.h +++ b/eigrpd/eigrp_network.h @@ -43,11 +43,6 @@ extern int eigrp_if_drop_allspfrouters(struct eigrp *top, struct prefix *p, unsigned int ifindex); extern void eigrp_adjust_sndbuflen(struct eigrp *, unsigned int); -extern uint32_t eigrp_calculate_metrics(struct eigrp *, struct eigrp_metrics); -extern uint32_t eigrp_calculate_total_metrics(struct eigrp *, - struct eigrp_nexthop_entry *); -extern uint8_t eigrp_metrics_is_same(struct eigrp_metrics, - struct eigrp_metrics); extern void eigrp_external_routes_refresh(struct eigrp *, int); #endif /* EIGRP_NETWORK_H_ */ diff --git a/eigrpd/eigrp_northbound.c b/eigrpd/eigrp_northbound.c index 5b87f72640..482667f633 100644 --- a/eigrpd/eigrp_northbound.c +++ b/eigrpd/eigrp_northbound.c @@ -34,6 +34,7 @@ #include "eigrp_interface.h" #include "eigrp_network.h" #include "eigrp_zebra.h" +#include "eigrp_cli.h" /* Helper functions. 
*/ static void redistribute_get_metrics(const struct lyd_node *dnode, diff --git a/eigrpd/eigrp_packet.c b/eigrpd/eigrp_packet.c index f5f6ab5dff..252cd647a2 100644 --- a/eigrpd/eigrp_packet.c +++ b/eigrpd/eigrp_packet.c @@ -1144,7 +1144,7 @@ struct TLV_IPv4_Internal_type *eigrp_read_ipv4_tlv(struct stream *s) } uint16_t eigrp_add_internalTLV_to_stream(struct stream *s, - struct eigrp_prefix_entry *pe) + struct eigrp_prefix_descriptor *pe) { uint16_t length; diff --git a/eigrpd/eigrp_packet.h b/eigrpd/eigrp_packet.h index f354615fdb..cb69bc26b2 100644 --- a/eigrpd/eigrp_packet.h +++ b/eigrpd/eigrp_packet.h @@ -36,48 +36,51 @@ extern int eigrp_read(struct thread *); extern int eigrp_write(struct thread *); -extern struct eigrp_packet *eigrp_packet_new(size_t, struct eigrp_neighbor *); -extern struct eigrp_packet *eigrp_packet_duplicate(struct eigrp_packet *, - struct eigrp_neighbor *); -extern void eigrp_packet_free(struct eigrp_packet *); -extern void eigrp_packet_delete(struct eigrp_interface *); -extern void eigrp_packet_header_init(int, struct eigrp *, struct stream *, - uint32_t, uint32_t, uint32_t); -extern void eigrp_packet_checksum(struct eigrp_interface *, struct stream *, - uint16_t); +extern struct eigrp_packet *eigrp_packet_new(size_t size, + struct eigrp_neighbor *nbr); +extern struct eigrp_packet *eigrp_packet_duplicate(struct eigrp_packet *old, + struct eigrp_neighbor *nbr); +extern void eigrp_packet_free(struct eigrp_packet *ep); +extern void eigrp_packet_delete(struct eigrp_interface *ei); +extern void eigrp_packet_header_init(int type, struct eigrp *eigrp, + struct stream *s, uint32_t flags, + uint32_t sequence, uint32_t ack); +extern void eigrp_packet_checksum(struct eigrp_interface *ei, struct stream *s, + uint16_t length); extern struct eigrp_fifo *eigrp_fifo_new(void); -extern struct eigrp_packet *eigrp_fifo_next(struct eigrp_fifo *); -extern struct eigrp_packet *eigrp_fifo_pop(struct eigrp_fifo *); -extern void eigrp_fifo_push(struct eigrp_fifo *, struct eigrp_packet *); -extern void eigrp_fifo_free(struct eigrp_fifo *); -extern void eigrp_fifo_reset(struct eigrp_fifo *); - -extern void eigrp_send_packet_reliably(struct eigrp_neighbor *); - -extern struct TLV_IPv4_Internal_type *eigrp_read_ipv4_tlv(struct stream *); -extern uint16_t eigrp_add_internalTLV_to_stream(struct stream *, - struct eigrp_prefix_entry *); -extern uint16_t eigrp_add_authTLV_MD5_to_stream(struct stream *, - struct eigrp_interface *); -extern uint16_t eigrp_add_authTLV_SHA256_to_stream(struct stream *, - struct eigrp_interface *); - -extern int eigrp_unack_packet_retrans(struct thread *); -extern int eigrp_unack_multicast_packet_retrans(struct thread *); +extern struct eigrp_packet *eigrp_fifo_next(struct eigrp_fifo *fifo); +extern struct eigrp_packet *eigrp_fifo_pop(struct eigrp_fifo *fifo); +extern void eigrp_fifo_push(struct eigrp_fifo *fifo, struct eigrp_packet *ep); +extern void eigrp_fifo_free(struct eigrp_fifo *fifo); +extern void eigrp_fifo_reset(struct eigrp_fifo *fifo); + +extern void eigrp_send_packet_reliably(struct eigrp_neighbor *nbr); + +extern struct TLV_IPv4_Internal_type *eigrp_read_ipv4_tlv(struct stream *s); +extern uint16_t +eigrp_add_internalTLV_to_stream(struct stream *s, + struct eigrp_prefix_descriptor *pe); +extern uint16_t eigrp_add_authTLV_MD5_to_stream(struct stream *s, + struct eigrp_interface *ei); +extern uint16_t eigrp_add_authTLV_SHA256_to_stream(struct stream *s, + struct eigrp_interface *ei); + +extern int eigrp_unack_packet_retrans(struct thread *thread); 
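For reference, the block removed from eigrp_network.c above carries the classic composite-metric formula, {K1*BW+[(K2*BW)/(256-load)]+(K3*delay)}*{K5/(reliability+K4)}, whose implementation moves into the new eigrp_metric.c/eigrp_metric.h. Below is a minimal, self-contained sketch of that classic calculation; the function and constant names are illustrative (only the formula and the 0xffffffff classic ceiling come from the patch), and the K5 term is written in its textbook form rather than the removed function's exact integer handling.

#include <stdint.h>

/* Mirrors EIGRP_CLASSIC_MAX from the new eigrp_metric.h (0xffffffff). */
#define CLASSIC_METRIC_MAX 0xffffffffULL

/*
 * Classic composite metric:
 *   {K1*BW + [(K2*BW)/(256 - load)] + K3*delay} * {K5/(reliability + K4)}
 * k[0]..k[4] are K1..K5; bw_scaled/delay_scaled are already-scaled inputs;
 * load must be < 256 and reliability non-zero when K5 is used.
 */
static uint32_t classic_composite_metric(const uint8_t k[5],
					 uint32_t bw_scaled,
					 uint32_t delay_scaled,
					 uint8_t load, uint8_t reliability)
{
	uint64_t metric = 0;

	if (k[0])
		metric += (uint64_t)k[0] * bw_scaled;
	if (k[1])
		metric += ((uint64_t)k[1] * bw_scaled) / (256 - load);
	if (k[2])
		metric += (uint64_t)k[2] * delay_scaled;
	if (k[4])	/* K5 factor only applies when K5 is non-zero */
		metric = (metric * k[4]) / (reliability + k[3]);

	return metric > CLASSIC_METRIC_MAX ? (uint32_t)CLASSIC_METRIC_MAX
					   : (uint32_t)metric;
}

/*
 * With the default K-values {1, 0, 1, 0, 0} this collapses to
 * bw_scaled + delay_scaled, clamped to the 32-bit classic ceiling.
 */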
+extern int eigrp_unack_multicast_packet_retrans(struct thread *thread); /* * untill there is reason to have their own header, these externs are found in * eigrp_hello.c */ extern void eigrp_sw_version_initialize(void); -extern void eigrp_hello_send(struct eigrp_interface *, uint8_t, - struct in_addr *); -extern void eigrp_hello_send_ack(struct eigrp_neighbor *); -extern void eigrp_hello_receive(struct eigrp *, struct ip *, - struct eigrp_header *, struct stream *, - struct eigrp_interface *, int); -extern int eigrp_hello_timer(struct thread *); +extern void eigrp_hello_send(struct eigrp_interface *ei, uint8_t flags, + struct in_addr *nbr_addr); +extern void eigrp_hello_send_ack(struct eigrp_neighbor *nbr); +extern void eigrp_hello_receive(struct eigrp *eigrp, struct ip *iph, + struct eigrp_header *eigrph, struct stream *s, + struct eigrp_interface *ei, int size); +extern int eigrp_hello_timer(struct thread *thread); /* * These externs are found in eigrp_update.c @@ -85,76 +88,83 @@ extern int eigrp_hello_timer(struct thread *); extern bool eigrp_update_prefix_apply(struct eigrp *eigrp, struct eigrp_interface *ei, int in, struct prefix *prefix); -extern void eigrp_update_send(struct eigrp_interface *); -extern void eigrp_update_receive(struct eigrp *, struct ip *, - struct eigrp_header *, struct stream *, - struct eigrp_interface *, int); -extern void eigrp_update_send_all(struct eigrp *, struct eigrp_interface *); -extern void eigrp_update_send_init(struct eigrp_neighbor *); -extern void eigrp_update_send_EOT(struct eigrp_neighbor *); -extern int eigrp_update_send_GR_thread(struct thread *); -extern void eigrp_update_send_GR(struct eigrp_neighbor *, enum GR_type, - struct vty *); -extern void eigrp_update_send_interface_GR(struct eigrp_interface *, - enum GR_type, struct vty *); -extern void eigrp_update_send_process_GR(struct eigrp *, enum GR_type, - struct vty *); +extern void eigrp_update_send(struct eigrp_interface *ei); +extern void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, + struct eigrp_header *eigrph, struct stream *s, + struct eigrp_interface *ei, int size); +extern void eigrp_update_send_all(struct eigrp *eigrp, + struct eigrp_interface *exception); +extern void eigrp_update_send_init(struct eigrp_neighbor *nbr); +extern void eigrp_update_send_EOT(struct eigrp_neighbor *nbr); +extern int eigrp_update_send_GR_thread(struct thread *thread); +extern void eigrp_update_send_GR(struct eigrp_neighbor *nbr, + enum GR_type gr_type, struct vty *vty); +extern void eigrp_update_send_interface_GR(struct eigrp_interface *ei, + enum GR_type gr_type, + struct vty *vty); +extern void eigrp_update_send_process_GR(struct eigrp *eigrp, + enum GR_type gr_type, struct vty *vty); /* * These externs are found in eigrp_query.c */ -extern void eigrp_send_query(struct eigrp_interface *); -extern void eigrp_query_receive(struct eigrp *, struct ip *, - struct eigrp_header *, struct stream *, - struct eigrp_interface *, int); -extern uint32_t eigrp_query_send_all(struct eigrp *); +extern void eigrp_send_query(struct eigrp_interface *ei); +extern void eigrp_query_receive(struct eigrp *eigrp, struct ip *iph, + struct eigrp_header *eigrph, struct stream *s, + struct eigrp_interface *ei, int size); +extern uint32_t eigrp_query_send_all(struct eigrp *eigrp); /* * These externs are found in eigrp_reply.c */ -extern void eigrp_send_reply(struct eigrp_neighbor *, - struct eigrp_prefix_entry *); -extern void eigrp_reply_receive(struct eigrp *, struct ip *, - struct eigrp_header *, struct stream *, - 
struct eigrp_interface *, int); +extern void eigrp_send_reply(struct eigrp_neighbor *nbr, + struct eigrp_prefix_descriptor *pe); +extern void eigrp_reply_receive(struct eigrp *eigrp, struct ip *iph, + struct eigrp_header *eigrph, struct stream *s, + struct eigrp_interface *ei, int size); /* * These externs are found in eigrp_siaquery.c */ -extern void eigrp_send_siaquery(struct eigrp_neighbor *, - struct eigrp_prefix_entry *); -extern void eigrp_siaquery_receive(struct eigrp *, struct ip *, - struct eigrp_header *, struct stream *, - struct eigrp_interface *, int); +extern void eigrp_send_siaquery(struct eigrp_neighbor *nbr, + struct eigrp_prefix_descriptor *pe); +extern void eigrp_siaquery_receive(struct eigrp *eigrp, struct ip *iph, + struct eigrp_header *eigrph, + struct stream *s, struct eigrp_interface *ei, + int size); /* * These externs are found in eigrp_siareply.c */ -extern void eigrp_send_siareply(struct eigrp_neighbor *, - struct eigrp_prefix_entry *); -extern void eigrp_siareply_receive(struct eigrp *, struct ip *, - struct eigrp_header *, struct stream *, - struct eigrp_interface *, int); +extern void eigrp_send_siareply(struct eigrp_neighbor *nbr, + struct eigrp_prefix_descriptor *pe); +extern void eigrp_siareply_receive(struct eigrp *eigrp, struct ip *iph, + struct eigrp_header *eigrph, + struct stream *s, struct eigrp_interface *ei, + int size); extern struct TLV_MD5_Authentication_Type *eigrp_authTLV_MD5_new(void); -extern void eigrp_authTLV_MD5_free(struct TLV_MD5_Authentication_Type *); +extern void eigrp_authTLV_MD5_free(struct TLV_MD5_Authentication_Type *authTLV); extern struct TLV_SHA256_Authentication_Type *eigrp_authTLV_SHA256_new(void); -extern void eigrp_authTLV_SHA256_free(struct TLV_SHA256_Authentication_Type *); - -extern int eigrp_make_md5_digest(struct eigrp_interface *, struct stream *, - uint8_t); -extern int eigrp_check_md5_digest(struct stream *, - struct TLV_MD5_Authentication_Type *, - struct eigrp_neighbor *, uint8_t); -extern int eigrp_make_sha256_digest(struct eigrp_interface *, struct stream *, - uint8_t); -extern int eigrp_check_sha256_digest(struct stream *, - struct TLV_SHA256_Authentication_Type *, - struct eigrp_neighbor *, uint8_t); - - -extern void eigrp_IPv4_InternalTLV_free(struct TLV_IPv4_Internal_type *); +extern void +eigrp_authTLV_SHA256_free(struct TLV_SHA256_Authentication_Type *authTLV); + +extern int eigrp_make_md5_digest(struct eigrp_interface *ei, struct stream *s, + uint8_t flags); +extern int eigrp_check_md5_digest(struct stream *s, + struct TLV_MD5_Authentication_Type *authTLV, + struct eigrp_neighbor *nbr, uint8_t flags); +extern int eigrp_make_sha256_digest(struct eigrp_interface *ei, + struct stream *s, uint8_t flags); +extern int +eigrp_check_sha256_digest(struct stream *s, + struct TLV_SHA256_Authentication_Type *authTLV, + struct eigrp_neighbor *nbr, uint8_t flags); + + +extern void +eigrp_IPv4_InternalTLV_free(struct TLV_IPv4_Internal_type *IPv4_InternalTLV); extern struct TLV_Sequence_Type *eigrp_SequenceTLV_new(void); diff --git a/eigrpd/eigrp_query.c b/eigrpd/eigrp_query.c index 84dcf5e2d5..0ab7b59dbb 100644 --- a/eigrpd/eigrp_query.c +++ b/eigrpd/eigrp_query.c @@ -58,7 +58,7 @@ uint32_t eigrp_query_send_all(struct eigrp *eigrp) { struct eigrp_interface *iface; struct listnode *node, *node2, *nnode2; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; uint32_t counter; if (eigrp == NULL) { @@ -118,7 +118,7 @@ void eigrp_query_receive(struct eigrp *eigrp, struct ip *iph, dest_addr.family = AF_INET; 
dest_addr.u.prefix4 = tlv->destination; dest_addr.prefixlen = tlv->prefix_length; - struct eigrp_prefix_entry *dest = + struct eigrp_prefix_descriptor *dest = eigrp_topology_table_lookup_ipv4( eigrp->topology_table, &dest_addr); @@ -126,9 +126,9 @@ void eigrp_query_receive(struct eigrp *eigrp, struct ip *iph, * know)*/ if (dest != NULL) { struct eigrp_fsm_action_message msg; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(dest->entries, - nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup( + dest->entries, nbr); msg.packet_type = EIGRP_OPC_QUERY; msg.eigrp = eigrp; msg.data_type = EIGRP_INT; @@ -164,7 +164,7 @@ void eigrp_send_query(struct eigrp_interface *ei) uint16_t length = EIGRP_HEADER_LEN; struct listnode *node, *nnode, *node2, *nnode2; struct eigrp_neighbor *nbr; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; bool has_tlv = false; bool new_packet = true; uint16_t eigrp_mtu = EIGRP_PACKET_MTU(ei->ifp->mtu); diff --git a/eigrpd/eigrp_reply.c b/eigrpd/eigrp_reply.c index 26bb27d7ac..d16482173c 100644 --- a/eigrpd/eigrp_reply.c +++ b/eigrpd/eigrp_reply.c @@ -61,20 +61,21 @@ #include "eigrpd/eigrp_memory.h" #include "eigrpd/eigrp_errors.h" -void eigrp_send_reply(struct eigrp_neighbor *nbr, struct eigrp_prefix_entry *pe) +void eigrp_send_reply(struct eigrp_neighbor *nbr, + struct eigrp_prefix_descriptor *pe) { struct eigrp_packet *ep; uint16_t length = EIGRP_HEADER_LEN; struct eigrp_interface *ei = nbr->ei; struct eigrp *eigrp = ei->eigrp; - struct eigrp_prefix_entry *pe2; + struct eigrp_prefix_descriptor *pe2; // TODO: Work in progress /* Filtering */ /* get list from eigrp process */ - pe2 = XCALLOC(MTYPE_EIGRP_PREFIX_ENTRY, - sizeof(struct eigrp_prefix_entry)); - memcpy(pe2, pe, sizeof(struct eigrp_prefix_entry)); + pe2 = XCALLOC(MTYPE_EIGRP_PREFIX_DESCRIPTOR, + sizeof(struct eigrp_prefix_descriptor)); + memcpy(pe2, pe, sizeof(struct eigrp_prefix_descriptor)); if (eigrp_update_prefix_apply(eigrp, ei, EIGRP_FILTER_OUT, pe2->destination)) { @@ -122,7 +123,7 @@ void eigrp_send_reply(struct eigrp_neighbor *nbr, struct eigrp_prefix_entry *pe) eigrp_send_packet_reliably(nbr); } - XFREE(MTYPE_EIGRP_PREFIX_ENTRY, pe2); + XFREE(MTYPE_EIGRP_PREFIX_DESCRIPTOR, pe2); } /*EIGRP REPLY read function*/ @@ -161,7 +162,7 @@ void eigrp_reply_receive(struct eigrp *eigrp, struct ip *iph, dest_addr.family = AF_INET; dest_addr.u.prefix4 = tlv->destination; dest_addr.prefixlen = tlv->prefix_length; - struct eigrp_prefix_entry *dest = + struct eigrp_prefix_descriptor *dest = eigrp_topology_table_lookup_ipv4(eigrp->topology_table, &dest_addr); /* @@ -177,8 +178,8 @@ void eigrp_reply_receive(struct eigrp *eigrp, struct ip *iph, } struct eigrp_fsm_action_message msg; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(dest->entries, nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup(dest->entries, nbr); if (eigrp_update_prefix_apply(eigrp, ei, EIGRP_FILTER_IN, &dest_addr)) { diff --git a/eigrpd/eigrp_routemap.c b/eigrpd/eigrp_routemap.c index e15f777954..5183e645e5 100644 --- a/eigrpd/eigrp_routemap.c +++ b/eigrpd/eigrp_routemap.c @@ -265,8 +265,8 @@ route_match_metric(void *rule, struct prefix *prefix, route_map_object_t type, // uint32_t *metric; // uint32_t check; // struct rip_info *rinfo; - // struct eigrp_nexthop_entry *te; - // struct eigrp_prefix_entry *pe; + // struct eigrp_route_descriptor *te; + // struct eigrp_prefix_descriptor *pe; // struct listnode *node, *node2, *nnode, *nnode2; // 
struct eigrp *e; // diff --git a/eigrpd/eigrp_siaquery.c b/eigrpd/eigrp_siaquery.c index ff38325465..027700fe11 100644 --- a/eigrpd/eigrp_siaquery.c +++ b/eigrpd/eigrp_siaquery.c @@ -87,7 +87,7 @@ void eigrp_siaquery_receive(struct eigrp *eigrp, struct ip *iph, dest_addr.family = AFI_IP; dest_addr.u.prefix4 = tlv->destination; dest_addr.prefixlen = tlv->prefix_length; - struct eigrp_prefix_entry *dest = + struct eigrp_prefix_descriptor *dest = eigrp_topology_table_lookup_ipv4( eigrp->topology_table, &dest_addr); @@ -95,9 +95,9 @@ void eigrp_siaquery_receive(struct eigrp *eigrp, struct ip *iph, * know)*/ if (dest != NULL) { struct eigrp_fsm_action_message msg; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(dest->entries, - nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup( + dest->entries, nbr); msg.packet_type = EIGRP_OPC_SIAQUERY; msg.eigrp = eigrp; msg.data_type = EIGRP_INT; @@ -114,7 +114,7 @@ void eigrp_siaquery_receive(struct eigrp *eigrp, struct ip *iph, } void eigrp_send_siaquery(struct eigrp_neighbor *nbr, - struct eigrp_prefix_entry *pe) + struct eigrp_prefix_descriptor *pe) { struct eigrp_packet *ep; uint16_t length = EIGRP_HEADER_LEN; diff --git a/eigrpd/eigrp_siareply.c b/eigrpd/eigrp_siareply.c index d3dd123f90..590b224d68 100644 --- a/eigrpd/eigrp_siareply.c +++ b/eigrpd/eigrp_siareply.c @@ -86,7 +86,7 @@ void eigrp_siareply_receive(struct eigrp *eigrp, struct ip *iph, dest_addr.family = AFI_IP; dest_addr.u.prefix4 = tlv->destination; dest_addr.prefixlen = tlv->prefix_length; - struct eigrp_prefix_entry *dest = + struct eigrp_prefix_descriptor *dest = eigrp_topology_table_lookup_ipv4( eigrp->topology_table, &dest_addr); @@ -94,9 +94,9 @@ void eigrp_siareply_receive(struct eigrp *eigrp, struct ip *iph, * know)*/ if (dest != NULL) { struct eigrp_fsm_action_message msg; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(dest->entries, - nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup( + dest->entries, nbr); msg.packet_type = EIGRP_OPC_SIAQUERY; msg.eigrp = eigrp; msg.data_type = EIGRP_INT; @@ -113,7 +113,7 @@ void eigrp_siareply_receive(struct eigrp *eigrp, struct ip *iph, } void eigrp_send_siareply(struct eigrp_neighbor *nbr, - struct eigrp_prefix_entry *pe) + struct eigrp_prefix_descriptor *pe) { struct eigrp_packet *ep; uint16_t length = EIGRP_HEADER_LEN; diff --git a/eigrpd/eigrp_structs.h b/eigrpd/eigrp_structs.h index 82bddaaae3..0d8bb29964 100644 --- a/eigrpd/eigrp_structs.h +++ b/eigrpd/eigrp_structs.h @@ -37,30 +37,10 @@ #include "eigrpd/eigrp_const.h" #include "eigrpd/eigrp_macros.h" -/* EIGRP master for system wide configuration and variables. */ -struct eigrp_master { - /* EIGRP instance. */ - struct list *eigrp; - - /* EIGRP thread master. */ - struct thread_master *master; - - /* Zebra interface list. */ - struct list *iflist; - - /* EIGRP start time. */ - time_t start_time; - - /* Various EIGRP global configuration. 
*/ - uint8_t options; - -#define EIGRP_MASTER_SHUTDOWN (1 << 0) /* deferred-shutdown */ -}; - struct eigrp_metrics { uint32_t delay; uint32_t bandwidth; - unsigned char mtu[3]; + uint8_t mtu[3]; uint8_t hop_count; uint8_t reliability; uint8_t load; @@ -68,6 +48,16 @@ struct eigrp_metrics { uint8_t flags; }; +struct eigrp_extdata { + uint32_t orig; + uint32_t as; + uint32_t tag; + uint32_t metric; + uint16_t reserved; + uint8_t protocol; + uint8_t flags; +}; + struct eigrp { vrf_id_t vrf_id; @@ -450,7 +440,7 @@ enum GR_type { EIGRP_GR_MANUAL, EIGRP_GR_FILTER }; //--------------------------------------------------------------------------------------------------------------------------------------------- /* EIGRP Topology table node structure */ -struct eigrp_prefix_entry { +struct eigrp_prefix_descriptor { struct list *entries, *rij; uint32_t fdistance; // FD uint32_t rdistance; // RD @@ -473,8 +463,14 @@ struct eigrp_prefix_entry { }; /* EIGRP Topology table record structure */ -struct eigrp_nexthop_entry { - struct eigrp_prefix_entry *prefix; +struct eigrp_route_descriptor { + uint16_t type; + uint16_t afi; + + struct eigrp_prefix_descriptor *prefix; + struct eigrp_neighbor *adv_router; + struct in_addr nexthop; + uint32_t reported_distance; // distance reported by neighbor uint32_t distance; // sum of reported distance and link cost to // advertised neighbor @@ -482,7 +478,9 @@ struct eigrp_nexthop_entry { struct eigrp_metrics reported_metric; struct eigrp_metrics total_metric; - struct eigrp_neighbor *adv_router; // ip address of advertising neighbor + struct eigrp_metrics metric; + struct eigrp_extdata extdata; + uint8_t flags; // used for marking successor and FS struct eigrp_interface *ei; // pointer for case of connected entry @@ -501,8 +499,8 @@ struct eigrp_fsm_action_message { uint8_t packet_type; // UPDATE, QUERY, SIAQUERY, SIAREPLY struct eigrp *eigrp; // which thread sent mesg struct eigrp_neighbor *adv_router; // advertising neighbor - struct eigrp_nexthop_entry *entry; - struct eigrp_prefix_entry *prefix; + struct eigrp_route_descriptor *entry; + struct eigrp_prefix_descriptor *prefix; msg_data_t data_type; // internal or external tlv type struct eigrp_metrics metrics; enum metric_change change; diff --git a/eigrpd/eigrp_topology.c b/eigrpd/eigrp_topology.c index 2dbee16694..6da7756f84 100644 --- a/eigrpd/eigrp_topology.c +++ b/eigrpd/eigrp_topology.c @@ -39,6 +39,7 @@ #include "vty.h" #include "lib_errors.h" +#include "eigrpd/eigrp_types.h" #include "eigrpd/eigrp_structs.h" #include "eigrpd/eigrpd.h" #include "eigrpd/eigrp_interface.h" @@ -51,9 +52,10 @@ #include "eigrpd/eigrp_topology.h" #include "eigrpd/eigrp_fsm.h" #include "eigrpd/eigrp_memory.h" +#include "eigrpd/eigrp_metric.h" -static int eigrp_nexthop_entry_cmp(struct eigrp_nexthop_entry *, - struct eigrp_nexthop_entry *); +static int eigrp_route_descriptor_cmp(struct eigrp_route_descriptor *rd1, + struct eigrp_route_descriptor *rd2); /* * Returns linkedlist used as topology table @@ -70,14 +72,14 @@ struct route_table *eigrp_topology_new(void) * Returns new created toplogy node * cmp - assigned function for comparing topology entry */ -struct eigrp_prefix_entry *eigrp_prefix_entry_new(void) +struct eigrp_prefix_descriptor *eigrp_prefix_descriptor_new(void) { - struct eigrp_prefix_entry *new; - new = XCALLOC(MTYPE_EIGRP_PREFIX_ENTRY, - sizeof(struct eigrp_prefix_entry)); + struct eigrp_prefix_descriptor *new; + new = XCALLOC(MTYPE_EIGRP_PREFIX_DESCRIPTOR, + sizeof(struct eigrp_prefix_descriptor)); new->entries = 
list_new(); new->rij = list_new(); - new->entries->cmp = (int (*)(void *, void *))eigrp_nexthop_entry_cmp; + new->entries->cmp = (int (*)(void *, void *))eigrp_route_descriptor_cmp; new->distance = new->fdistance = new->rdistance = EIGRP_MAX_METRIC; new->destination = NULL; @@ -87,8 +89,8 @@ struct eigrp_prefix_entry *eigrp_prefix_entry_new(void) /* * Topology entry comparison */ -static int eigrp_nexthop_entry_cmp(struct eigrp_nexthop_entry *entry1, - struct eigrp_nexthop_entry *entry2) +static int eigrp_route_descriptor_cmp(struct eigrp_route_descriptor *entry1, + struct eigrp_route_descriptor *entry2) { if (entry1->distance < entry2->distance) return -1; @@ -102,12 +104,12 @@ static int eigrp_nexthop_entry_cmp(struct eigrp_nexthop_entry *entry1, * Returns new topology entry */ -struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void) +struct eigrp_route_descriptor *eigrp_route_descriptor_new(void) { - struct eigrp_nexthop_entry *new; + struct eigrp_route_descriptor *new; - new = XCALLOC(MTYPE_EIGRP_NEXTHOP_ENTRY, - sizeof(struct eigrp_nexthop_entry)); + new = XCALLOC(MTYPE_EIGRP_ROUTE_DESCRIPTOR, + sizeof(struct eigrp_route_descriptor)); new->reported_distance = EIGRP_MAX_METRIC; new->distance = EIGRP_MAX_METRIC; @@ -126,8 +128,8 @@ void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table) /* * Adding topology node to topology table */ -void eigrp_prefix_entry_add(struct route_table *topology, - struct eigrp_prefix_entry *pe) +void eigrp_prefix_descriptor_add(struct route_table *topology, + struct eigrp_prefix_descriptor *pe) { struct route_node *rn; @@ -146,9 +148,9 @@ void eigrp_prefix_entry_add(struct route_table *topology, /* * Adding topology entry to topology node */ -void eigrp_nexthop_entry_add(struct eigrp *eigrp, - struct eigrp_prefix_entry *node, - struct eigrp_nexthop_entry *entry) +void eigrp_route_descriptor_add(struct eigrp *eigrp, + struct eigrp_prefix_descriptor *node, + struct eigrp_route_descriptor *entry) { struct list *l = list_new(); @@ -168,10 +170,11 @@ void eigrp_nexthop_entry_add(struct eigrp *eigrp, /* * Deleting topology node from topology table */ -void eigrp_prefix_entry_delete(struct eigrp *eigrp, struct route_table *table, - struct eigrp_prefix_entry *pe) +void eigrp_prefix_descriptor_delete(struct eigrp *eigrp, + struct route_table *table, + struct eigrp_prefix_descriptor *pe) { - struct eigrp_nexthop_entry *ne; + struct eigrp_route_descriptor *ne; struct listnode *node, *nnode; struct route_node *rn; @@ -189,7 +192,7 @@ void eigrp_prefix_entry_delete(struct eigrp *eigrp, struct route_table *table, listnode_delete(eigrp->topology_changes_internalIPV4, pe); for (ALL_LIST_ELEMENTS(pe->entries, node, nnode, ne)) - eigrp_nexthop_entry_delete(eigrp, pe, ne); + eigrp_route_descriptor_delete(eigrp, pe, ne); list_delete(&pe->entries); list_delete(&pe->rij); eigrp_zebra_route_delete(eigrp, pe->destination); @@ -198,20 +201,20 @@ void eigrp_prefix_entry_delete(struct eigrp *eigrp, struct route_table *table, rn->info = NULL; route_unlock_node(rn); // Lookup above route_unlock_node(rn); // Initial creation - XFREE(MTYPE_EIGRP_PREFIX_ENTRY, pe); + XFREE(MTYPE_EIGRP_PREFIX_DESCRIPTOR, pe); } /* * Deleting topology entry from topology node */ -void eigrp_nexthop_entry_delete(struct eigrp *eigrp, - struct eigrp_prefix_entry *node, - struct eigrp_nexthop_entry *entry) +void eigrp_route_descriptor_delete(struct eigrp *eigrp, + struct eigrp_prefix_descriptor *node, + struct eigrp_route_descriptor *entry) { if (listnode_lookup(node->entries, entry) != NULL) 
{ listnode_delete(node->entries, entry); eigrp_zebra_route_delete(eigrp, node->destination); - XFREE(MTYPE_EIGRP_NEXTHOP_ENTRY, entry); + XFREE(MTYPE_EIGRP_ROUTE_DESCRIPTOR, entry); } } @@ -222,7 +225,7 @@ void eigrp_topology_delete_all(struct eigrp *eigrp, struct route_table *topology) { struct route_node *rn; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; for (rn = route_top(topology); rn; rn = route_next(rn)) { pe = rn->info; @@ -230,15 +233,15 @@ void eigrp_topology_delete_all(struct eigrp *eigrp, if (!pe) continue; - eigrp_prefix_entry_delete(eigrp, topology, pe); + eigrp_prefix_descriptor_delete(eigrp, topology, pe); } } -struct eigrp_prefix_entry * +struct eigrp_prefix_descriptor * eigrp_topology_table_lookup_ipv4(struct route_table *table, struct prefix *address) { - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; struct route_node *rn; rn = route_node_lookup(table, address); @@ -259,14 +262,15 @@ eigrp_topology_table_lookup_ipv4(struct route_table *table, * That way we can clean up all the list_new and list_delete's * that we are doing. DBS */ -struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *table_node) +struct list * +eigrp_topology_get_successor(struct eigrp_prefix_descriptor *table_node) { struct list *successors = list_new(); - struct eigrp_nexthop_entry *data; + struct eigrp_route_descriptor *data; struct listnode *node1, *node2; for (ALL_LIST_ELEMENTS(table_node->entries, node1, node2, data)) { - if (data->flags & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG) { + if (data->flags & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG) { listnode_add(successors, data); } } @@ -283,7 +287,7 @@ struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *table_node) } struct list * -eigrp_topology_get_successor_max(struct eigrp_prefix_entry *table_node, +eigrp_topology_get_successor_max(struct eigrp_prefix_descriptor *table_node, unsigned int maxpaths) { struct list *successors = eigrp_topology_get_successor(table_node); @@ -300,10 +304,10 @@ eigrp_topology_get_successor_max(struct eigrp_prefix_entry *table_node, return successors; } -struct eigrp_nexthop_entry * -eigrp_prefix_entry_lookup(struct list *entries, struct eigrp_neighbor *nbr) +struct eigrp_route_descriptor * +eigrp_route_descriptor_lookup(struct list *entries, struct eigrp_neighbor *nbr) { - struct eigrp_nexthop_entry *data; + struct eigrp_route_descriptor *data; struct listnode *node, *nnode; for (ALL_LIST_ELEMENTS(entries, node, nnode, data)) { if (data->adv_router == nbr) { @@ -319,8 +323,8 @@ struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *eigrp, struct eigrp_neighbor *nbr) { struct listnode *node2, *node22; - struct eigrp_nexthop_entry *entry; - struct eigrp_prefix_entry *pe; + struct eigrp_route_descriptor *entry; + struct eigrp_prefix_descriptor *pe; struct route_node *rn; /* create new empty list for prefixes storage */ @@ -348,8 +352,8 @@ enum metric_change eigrp_topology_update_distance(struct eigrp_fsm_action_message *msg) { struct eigrp *eigrp = msg->eigrp; - struct eigrp_prefix_entry *prefix = msg->prefix; - struct eigrp_nexthop_entry *entry = msg->entry; + struct eigrp_prefix_descriptor *prefix = msg->prefix; + struct eigrp_route_descriptor *entry = msg->entry; enum metric_change change = METRIC_SAME; uint32_t new_reported_distance; @@ -413,7 +417,7 @@ distance_done: void eigrp_topology_update_all_node_flags(struct eigrp *eigrp) { - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; struct route_node *rn; if (!eigrp) @@ 
-430,10 +434,10 @@ void eigrp_topology_update_all_node_flags(struct eigrp *eigrp) } void eigrp_topology_update_node_flags(struct eigrp *eigrp, - struct eigrp_prefix_entry *dest) + struct eigrp_prefix_descriptor *dest) { struct listnode *node; - struct eigrp_nexthop_entry *entry; + struct eigrp_route_descriptor *entry; for (ALL_LIST_ELEMENTS_RO(dest->entries, node, entry)) { if (entry->reported_distance < dest->fdistance) { @@ -444,29 +448,29 @@ void eigrp_topology_update_node_flags(struct eigrp *eigrp, && entry->distance != EIGRP_MAX_METRIC) { // is successor entry->flags |= - EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG; + EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG; entry->flags &= - ~EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG; + ~EIGRP_ROUTE_DESCRIPTOR_FSUCCESSOR_FLAG; } else { // is feasible successor only entry->flags |= - EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG; + EIGRP_ROUTE_DESCRIPTOR_FSUCCESSOR_FLAG; entry->flags &= - ~EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG; + ~EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG; } } else { - entry->flags &= ~EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG; - entry->flags &= ~EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG; + entry->flags &= ~EIGRP_ROUTE_DESCRIPTOR_FSUCCESSOR_FLAG; + entry->flags &= ~EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG; } } } void eigrp_update_routing_table(struct eigrp *eigrp, - struct eigrp_prefix_entry *prefix) + struct eigrp_prefix_descriptor *prefix) { struct list *successors; struct listnode *node; - struct eigrp_nexthop_entry *entry; + struct eigrp_route_descriptor *entry; successors = eigrp_topology_get_successor_max(prefix, eigrp->max_paths); @@ -474,13 +478,13 @@ void eigrp_update_routing_table(struct eigrp *eigrp, eigrp_zebra_route_add(eigrp, prefix->destination, successors, prefix->fdistance); for (ALL_LIST_ELEMENTS_RO(successors, node, entry)) - entry->flags |= EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG; + entry->flags |= EIGRP_ROUTE_DESCRIPTOR_INTABLE_FLAG; list_delete(&successors); } else { eigrp_zebra_route_delete(eigrp, prefix->destination); for (ALL_LIST_ELEMENTS_RO(prefix->entries, node, entry)) - entry->flags &= ~EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG; + entry->flags &= ~EIGRP_ROUTE_DESCRIPTOR_INTABLE_FLAG; } } @@ -488,8 +492,8 @@ void eigrp_topology_neighbor_down(struct eigrp *eigrp, struct eigrp_neighbor *nbr) { struct listnode *node2, *node22; - struct eigrp_prefix_entry *pe; - struct eigrp_nexthop_entry *entry; + struct eigrp_prefix_descriptor *pe; + struct eigrp_route_descriptor *entry; struct route_node *rn; for (rn = route_top(eigrp->topology_table); rn; rn = route_next(rn)) { @@ -504,6 +508,7 @@ void eigrp_topology_neighbor_down(struct eigrp *eigrp, if (entry->adv_router != nbr) continue; + memset(&msg, 0, sizeof(msg)); msg.metrics.delay = EIGRP_MAX_METRIC; msg.packet_type = EIGRP_OPC_UPDATE; msg.eigrp = eigrp; @@ -521,18 +526,18 @@ void eigrp_topology_neighbor_down(struct eigrp *eigrp, void eigrp_update_topology_table_prefix(struct eigrp *eigrp, struct route_table *table, - struct eigrp_prefix_entry *prefix) + struct eigrp_prefix_descriptor *prefix) { struct listnode *node1, *node2; - struct eigrp_nexthop_entry *entry; + struct eigrp_route_descriptor *entry; for (ALL_LIST_ELEMENTS(prefix->entries, node1, node2, entry)) { if (entry->distance == EIGRP_MAX_METRIC) { - eigrp_nexthop_entry_delete(eigrp, prefix, entry); + eigrp_route_descriptor_delete(eigrp, prefix, entry); } } if (prefix->distance == EIGRP_MAX_METRIC && prefix->nt != EIGRP_TOPOLOGY_TYPE_CONNECTED) { - eigrp_prefix_entry_delete(eigrp, table, prefix); + eigrp_prefix_descriptor_delete(eigrp, table, prefix); } } diff --git 
a/eigrpd/eigrp_topology.h b/eigrpd/eigrp_topology.h index 718cece403..26fa1a11b0 100644 --- a/eigrpd/eigrp_topology.h +++ b/eigrpd/eigrp_topology.h @@ -35,43 +35,47 @@ /* EIGRP Topology table related functions. */ extern struct route_table *eigrp_topology_new(void); extern void eigrp_topology_init(struct route_table *table); -extern struct eigrp_prefix_entry *eigrp_prefix_entry_new(void); -extern struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void); +extern struct eigrp_prefix_descriptor *eigrp_prefix_descriptor_new(void); +extern struct eigrp_route_descriptor *eigrp_route_descriptor_new(void); extern void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table); -extern void eigrp_prefix_entry_add(struct route_table *table, - struct eigrp_prefix_entry *pe); -extern void eigrp_nexthop_entry_add(struct eigrp *eigrp, - struct eigrp_prefix_entry *pe, - struct eigrp_nexthop_entry *ne); -extern void eigrp_prefix_entry_delete(struct eigrp *eigrp, - struct route_table *table, - struct eigrp_prefix_entry *pe); -extern void eigrp_nexthop_entry_delete(struct eigrp *eigrp, - struct eigrp_prefix_entry *pe, - struct eigrp_nexthop_entry *ne); +extern void eigrp_prefix_descriptor_add(struct route_table *table, + struct eigrp_prefix_descriptor *pe); +extern void eigrp_route_descriptor_add(struct eigrp *eigrp, + struct eigrp_prefix_descriptor *pe, + struct eigrp_route_descriptor *ne); +extern void eigrp_prefix_descriptor_delete(struct eigrp *eigrp, + struct route_table *table, + struct eigrp_prefix_descriptor *pe); +extern void eigrp_route_descriptor_delete(struct eigrp *eigrp, + struct eigrp_prefix_descriptor *pe, + struct eigrp_route_descriptor *ne); extern void eigrp_topology_delete_all(struct eigrp *eigrp, struct route_table *table); -extern struct eigrp_prefix_entry * +extern struct eigrp_prefix_descriptor * eigrp_topology_table_lookup_ipv4(struct route_table *table, struct prefix *p); -extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *pe); extern struct list * -eigrp_topology_get_successor_max(struct eigrp_prefix_entry *pe, +eigrp_topology_get_successor(struct eigrp_prefix_descriptor *pe); +extern struct list * +eigrp_topology_get_successor_max(struct eigrp_prefix_descriptor *pe, unsigned int maxpaths); -extern struct eigrp_nexthop_entry * -eigrp_prefix_entry_lookup(struct list *entries, struct eigrp_neighbor *neigh); +extern struct eigrp_route_descriptor * +eigrp_route_descriptor_lookup(struct list *entries, + struct eigrp_neighbor *neigh); extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *eigrp, struct eigrp_neighbor *n); extern void eigrp_topology_update_all_node_flags(struct eigrp *eigrp); -extern void eigrp_topology_update_node_flags(struct eigrp *eigrp, - struct eigrp_prefix_entry *pe); +extern void +eigrp_topology_update_node_flags(struct eigrp *eigrp, + struct eigrp_prefix_descriptor *pe); extern enum metric_change eigrp_topology_update_distance(struct eigrp_fsm_action_message *msg); extern void eigrp_update_routing_table(struct eigrp *eigrp, - struct eigrp_prefix_entry *pe); + struct eigrp_prefix_descriptor *pe); extern void eigrp_topology_neighbor_down(struct eigrp *eigrp, struct eigrp_neighbor *neigh); -extern void eigrp_update_topology_table_prefix(struct eigrp *eigrp, - struct route_table *table, - struct eigrp_prefix_entry *pe); +extern void +eigrp_update_topology_table_prefix(struct eigrp *eigrp, + struct route_table *table, + struct eigrp_prefix_descriptor *pe); #endif diff --git a/eigrpd/eigrp_types.h b/eigrpd/eigrp_types.h new 
file mode 100644 index 0000000000..bd6510733b --- /dev/null +++ b/eigrpd/eigrp_types.h @@ -0,0 +1,36 @@ +/* + * EIGRP Definition of Data Types + * Copyright (C) 2018 + * Authors: + * Donnie Savage + * + * This file is part of FRR. + * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _ZEBRA_EIGRP_TYPES_H_ +#define _ZEBRA_EIGRP_TYPES_H_ + +typedef uint64_t eigrp_bandwidth_t; +typedef uint64_t eigrp_delay_t; +typedef uint64_t eigrp_metric_t; +typedef uint32_t eigrp_scaled_t; + +typedef uint32_t eigrp_system_metric_t; +typedef uint32_t eigrp_system_delay_t; +typedef uint32_t eigrp_system_bandwidth_t; + +#endif /* _ZEBRA_EIGRP_TYPES_H_ */ diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c index cd30eb5ab5..91f3b3218b 100644 --- a/eigrpd/eigrp_update.c +++ b/eigrpd/eigrp_update.c @@ -49,6 +49,7 @@ #include "routemap.h" #include "vty.h" +#include "eigrpd/eigrp_types.h" #include "eigrpd/eigrp_structs.h" #include "eigrpd/eigrpd.h" #include "eigrpd/eigrp_interface.h" @@ -62,6 +63,7 @@ #include "eigrpd/eigrp_fsm.h" #include "eigrpd/eigrp_network.h" #include "eigrpd/eigrp_memory.h" +#include "eigrpd/eigrp_metric.h" bool eigrp_update_prefix_apply(struct eigrp *eigrp, struct eigrp_interface *ei, int in, struct prefix *prefix) @@ -101,11 +103,12 @@ bool eigrp_update_prefix_apply(struct eigrp *eigrp, struct eigrp_interface *ei, * Function is used for removing received prefix * from list of neighbor prefixes */ -static void remove_received_prefix_gr(struct list *nbr_prefixes, - struct eigrp_prefix_entry *recv_prefix) +static void +remove_received_prefix_gr(struct list *nbr_prefixes, + struct eigrp_prefix_descriptor *recv_prefix) { struct listnode *node1, *node11; - struct eigrp_prefix_entry *prefix = NULL; + struct eigrp_prefix_descriptor *prefix = NULL; /* iterate over all prefixes in list */ for (ALL_LIST_ELEMENTS(nbr_prefixes, node1, node11, prefix)) { @@ -136,7 +139,7 @@ static void eigrp_update_receive_GR_ask(struct eigrp *eigrp, struct list *nbr_prefixes) { struct listnode *node1; - struct eigrp_prefix_entry *prefix; + struct eigrp_prefix_descriptor *prefix; struct eigrp_fsm_action_message fsm_msg; /* iterate over all prefixes which weren't advertised by neighbor */ @@ -148,8 +151,8 @@ static void eigrp_update_receive_GR_ask(struct eigrp *eigrp, /* set delay to MAX */ fsm_msg.metrics.delay = EIGRP_MAX_METRIC; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(prefix->entries, nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup(prefix->entries, nbr); fsm_msg.packet_type = EIGRP_OPC_UPDATE; fsm_msg.eigrp = eigrp; @@ -172,8 +175,8 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, { struct eigrp_neighbor *nbr; struct TLV_IPv4_Internal_type *tlv; - struct eigrp_prefix_entry *pe; - struct eigrp_nexthop_entry *ne; + struct eigrp_prefix_descriptor *pe; + struct 
eigrp_route_descriptor *ne; uint32_t flags; uint16_t type; uint16_t length; @@ -304,7 +307,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, dest_addr.family = AF_INET; dest_addr.u.prefix4 = tlv->destination; dest_addr.prefixlen = tlv->prefix_length; - struct eigrp_prefix_entry *dest = + struct eigrp_prefix_descriptor *dest = eigrp_topology_table_lookup_ipv4( eigrp->topology_table, &dest_addr); @@ -317,9 +320,9 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, dest); struct eigrp_fsm_action_message msg; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(dest->entries, - nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup( + dest->entries, nbr); msg.packet_type = EIGRP_OPC_UPDATE; msg.eigrp = eigrp; @@ -331,7 +334,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, eigrp_fsm_event(&msg); } else { /*Here comes topology information save*/ - pe = eigrp_prefix_entry_new(); + pe = eigrp_prefix_descriptor_new(); pe->serno = eigrp->serno; pe->destination = (struct prefix *)prefix_ipv4_new(); @@ -340,7 +343,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, pe->state = EIGRP_FSM_STATE_PASSIVE; pe->nt = EIGRP_TOPOLOGY_TYPE_REMOTE; - ne = eigrp_nexthop_entry_new(); + ne = eigrp_route_descriptor_new(); ne->ei = ei; ne->adv_router = nbr; ne->reported_metric = tlv->metric; @@ -361,11 +364,12 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph, pe->fdistance = pe->distance = pe->rdistance = ne->distance; ne->prefix = pe; - ne->flags = EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG; + ne->flags = + EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG; - eigrp_prefix_entry_add(eigrp->topology_table, - pe); - eigrp_nexthop_entry_add(eigrp, pe, ne); + eigrp_prefix_descriptor_add( + eigrp->topology_table, pe); + eigrp_route_descriptor_add(eigrp, pe, ne); pe->distance = pe->fdistance = pe->rdistance = ne->distance; pe->reported_metric = ne->total_metric; @@ -527,8 +531,8 @@ void eigrp_update_send_EOT(struct eigrp_neighbor *nbr) { struct eigrp_packet *ep; uint16_t length = EIGRP_HEADER_LEN; - struct eigrp_nexthop_entry *te; - struct eigrp_prefix_entry *pe; + struct eigrp_route_descriptor *te; + struct eigrp_prefix_descriptor *pe; struct listnode *node2, *nnode2; struct eigrp_interface *ei = nbr->ei; struct eigrp *eigrp = ei->eigrp; @@ -600,7 +604,7 @@ void eigrp_update_send(struct eigrp_interface *ei) { struct eigrp_packet *ep; struct listnode *node, *nnode; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; uint8_t has_tlv; struct eigrp *eigrp = ei->eigrp; struct prefix *dest_addr; @@ -626,7 +630,7 @@ void eigrp_update_send(struct eigrp_interface *ei) has_tlv = 0; for (ALL_LIST_ELEMENTS(ei->eigrp->topology_changes_internalIPV4, node, nnode, pe)) { - struct eigrp_nexthop_entry *ne; + struct eigrp_route_descriptor *ne; if (!(pe->req_action & EIGRP_FSM_NEED_UPDATE)) continue; @@ -707,7 +711,7 @@ void eigrp_update_send_all(struct eigrp *eigrp, { struct eigrp_interface *iface; struct listnode *node, *node2, *nnode2; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, iface)) { if (iface != exception) { @@ -745,7 +749,7 @@ static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr) { struct eigrp_packet *ep; uint16_t length = EIGRP_HEADER_LEN; - struct eigrp_prefix_entry *pe; + struct eigrp_prefix_descriptor *pe; struct prefix *dest_addr; struct eigrp_interface *ei = nbr->ei; struct eigrp *eigrp = ei->eigrp; @@ -837,8 +841,8 @@ 
static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr) /* prepare message for FSM */ struct eigrp_fsm_action_message fsm_msg; - struct eigrp_nexthop_entry *entry = - eigrp_prefix_entry_lookup(pe->entries, nbr); + struct eigrp_route_descriptor *entry = + eigrp_route_descriptor_lookup(pe->entries, nbr); fsm_msg.packet_type = EIGRP_OPC_UPDATE; fsm_msg.eigrp = eigrp; @@ -956,7 +960,7 @@ int eigrp_update_send_GR_thread(struct thread *thread) void eigrp_update_send_GR(struct eigrp_neighbor *nbr, enum GR_type gr_type, struct vty *vty) { - struct eigrp_prefix_entry *pe2; + struct eigrp_prefix_descriptor *pe2; struct list *prefixes; struct route_node *rn; struct eigrp_interface *ei = nbr->ei; diff --git a/eigrpd/eigrp_vty.c b/eigrpd/eigrp_vty.c index 66dfbaa538..0809ac2cf0 100644 --- a/eigrpd/eigrp_vty.c +++ b/eigrpd/eigrp_vty.c @@ -59,25 +59,21 @@ #include "eigrpd/eigrp_vty_clippy.c" #endif -static void eigrp_vty_display_prefix_entry(struct vty *vty, - struct eigrp *eigrp, - struct eigrp_prefix_entry *pe, +static void eigrp_vty_display_prefix_entry(struct vty *vty, struct eigrp *eigrp, + struct eigrp_prefix_descriptor *pe, bool all) { bool first = true; - struct eigrp_nexthop_entry *te; + struct eigrp_route_descriptor *te; struct listnode *node; for (ALL_LIST_ELEMENTS_RO(pe->entries, node, te)) { if (all - || (((te->flags - & EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG) - == EIGRP_NEXTHOP_ENTRY_SUCCESSOR_FLAG) - || ((te->flags - & EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG) - == EIGRP_NEXTHOP_ENTRY_FSUCCESSOR_FLAG))) { - show_ip_eigrp_nexthop_entry(vty, eigrp, te, - &first); + || (((te->flags & EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG) + == EIGRP_ROUTE_DESCRIPTOR_SUCCESSOR_FLAG) + || ((te->flags & EIGRP_ROUTE_DESCRIPTOR_FSUCCESSOR_FLAG) + == EIGRP_ROUTE_DESCRIPTOR_FSUCCESSOR_FLAG))) { + show_ip_eigrp_route_descriptor(vty, eigrp, te, &first); first = false; } } @@ -104,7 +100,7 @@ static struct eigrp *eigrp_vty_get_eigrp(struct vty *vty, const char *vrf_name) static void eigrp_topology_helper(struct vty *vty, struct eigrp *eigrp, const char *all) { - struct eigrp_prefix_entry *tn; + struct eigrp_prefix_descriptor *tn; struct route_node *rn; show_ip_eigrp_topology_header(vty, eigrp); @@ -168,7 +164,7 @@ DEFPY (show_ip_eigrp_topology, "For a specific prefix\n") { struct eigrp *eigrp; - struct eigrp_prefix_entry *tn; + struct eigrp_prefix_descriptor *tn; struct route_node *rn; struct prefix cmp; diff --git a/eigrpd/eigrp_yang.h b/eigrpd/eigrp_yang.h new file mode 100644 index 0000000000..a95e5310fb --- /dev/null +++ b/eigrpd/eigrp_yang.h @@ -0,0 +1,32 @@ +/* + * EIGRP YANG Functions. + * Copyright (C) 2019 + * Authors: + * Donnie Savage + * + * This file is part of FRR. + * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _EIGRP_YANG_H_ +#define _EIGRP_YANG_H_ + +/*Prototypes*/ + +/* eigrp_northbound.c */ +extern const struct frr_yang_module_info frr_eigrpd_info; + +#endif /*EIGRP_YANG_H_ */ diff --git a/eigrpd/eigrp_zebra.c b/eigrpd/eigrp_zebra.c index 0795fbd6df..e79123e6b4 100644 --- a/eigrpd/eigrp_zebra.c +++ b/eigrpd/eigrp_zebra.c @@ -41,6 +41,7 @@ #include "log.h" #include "nexthop.h" +#include "eigrpd/eigrp_types.h" #include "eigrpd/eigrp_structs.h" #include "eigrpd/eigrpd.h" #include "eigrpd/eigrp_interface.h" @@ -52,6 +53,7 @@ #include "eigrpd/eigrp_network.h" #include "eigrpd/eigrp_topology.h" #include "eigrpd/eigrp_fsm.h" +#include "eigrpd/eigrp_metric.h" static int eigrp_interface_address_add(ZAPI_CALLBACK_ARGS); static int eigrp_interface_address_delete(ZAPI_CALLBACK_ARGS); @@ -194,7 +196,7 @@ void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p, { struct zapi_route api; struct zapi_nexthop *api_nh; - struct eigrp_nexthop_entry *te; + struct eigrp_route_descriptor *te; struct listnode *node; int count = 0; diff --git a/eigrpd/eigrpd.h b/eigrpd/eigrpd.h index 6b4d45d1fc..01173768ba 100644 --- a/eigrpd/eigrpd.h +++ b/eigrpd/eigrpd.h @@ -37,6 +37,30 @@ #define EIGRP_MAJOR_VERSION 1 #define EIGRP_MINOR_VERSION 2 +#define EIGRP_TLV_32B_VERSION 1 /* Original 32bit scaled metrics */ +#define EIGRP_TLV_64B_VERSION 2 /* Current 64bit 'wide' metrics */ +#define EIGRP_TLV_MTR_VERSION 3 /* MTR TLVs with 32bit metric *Not Supported */ +#define EIGRP_TLV_SAF_VERSION 4 /* SAF TLVs with 64bit metric *Not Supported */ + +struct eigrp_master { + /* EIGRP instance. */ + struct list *eigrp; + + /* EIGRP thread master. */ + struct thread_master *master; + + /* Zebra interface list. */ + struct list *iflist; + + /* EIGRP start time. */ + time_t start_time; + + /* Various EIGRP global configuration. */ + uint8_t options; + +#define EIGRP_MASTER_SHUTDOWN (1 << 0) /* deferred-shutdown */ +}; + /* Extern variables. 
*/ extern struct zclient *zclient; extern struct thread_master *master; @@ -46,57 +70,10 @@ extern struct zebra_privs_t eigrpd_privs; /* Prototypes */ extern void eigrp_master_init(void); extern void eigrp_terminate(void); -extern void eigrp_finish_final(struct eigrp *); -extern void eigrp_finish(struct eigrp *); +extern void eigrp_finish_final(struct eigrp *eigrp); +extern void eigrp_finish(struct eigrp *eigrp); extern struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id); extern struct eigrp *eigrp_lookup(vrf_id_t vrf_id); -extern void eigrp_router_id_update(struct eigrp *); - -/* eigrp_cli.c */ -extern void eigrp_cli_show_header(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_end_header(struct vty *vty, struct lyd_node *dnode); -extern void eigrp_cli_show_router_id(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_passive_interface(struct vty *vty, - struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_active_time(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_variance(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_maximum_paths(struct vty *vty, - struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_metrics(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_network(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_neighbor(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_redistribute(struct vty *vty, - struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_delay(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_bandwidth(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_hello_interval(struct vty *vty, - struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_hold_time(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_summarize_address(struct vty *vty, - struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_authentication(struct vty *vty, - struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_show_keychain(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); -extern void eigrp_cli_init(void); - -/* eigrp_northbound.c */ -extern const struct frr_yang_module_info frr_eigrpd_info; +extern void eigrp_router_id_update(struct eigrp *eigrp); #endif /* _ZEBRA_EIGRPD_H */ diff --git a/eigrpd/subdir.am b/eigrpd/subdir.am index 98f39440c3..13c9f7f8ae 100644 --- a/eigrpd/subdir.am +++ b/eigrpd/subdir.am @@ -25,6 +25,7 @@ eigrpd_libeigrp_a_SOURCES = \ eigrpd/eigrp_hello.c \ eigrpd/eigrp_interface.c \ eigrpd/eigrp_memory.c \ + eigrpd/eigrp_metric.c \ eigrpd/eigrp_neighbor.c \ eigrpd/eigrp_network.c \ eigrpd/eigrp_northbound.c \ @@ -55,6 +56,7 @@ clippy_scan += \ # end noinst_HEADERS += \ + eigrpd/eigrp_cli.h \ eigrpd/eigrp_const.h \ eigrpd/eigrp_errors.h \ eigrpd/eigrp_filter.h \ @@ -62,13 +64,16 @@ noinst_HEADERS += \ eigrpd/eigrp_interface.h \ eigrpd/eigrp_macros.h \ eigrpd/eigrp_memory.h \ + eigrpd/eigrp_metric.h \ eigrpd/eigrp_neighbor.h \ eigrpd/eigrp_network.h \ eigrpd/eigrp_packet.h \ eigrpd/eigrp_snmp.h \ eigrpd/eigrp_structs.h \ + eigrpd/eigrp_types.h \ eigrpd/eigrp_vrf.h \ eigrpd/eigrp_vty.h \ + eigrpd/eigrp_yang.h \ eigrpd/eigrp_zebra.h \ # 
end diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 50011d55ec..fb79481cb2 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -293,15 +293,4 @@ struct br_mcast_stats { __u64 mcast_bytes[BR_MCAST_DIR_SIZE]; __u64 mcast_packets[BR_MCAST_DIR_SIZE]; }; - -/* FDB notification bits for NDA_NOTIFY: - * - BR_FDB_NFY_STATIC - notify on activity/expire even for a static entry - * - BR_FDB_NFY_INACTIVE - mark as inactive to avoid double notification, - * used with BR_FDB_NFY_STATIC (kernel controlled) - */ -enum { - BR_FDB_NFY_STATIC, - BR_FDB_NFY_INACTIVE, - BR_FDB_NFY_MAX -}; #endif /* _UAPI_LINUX_IF_BRIDGE_H */ diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h index c06fe708f7..581af73c7f 100644 --- a/include/linux/neighbour.h +++ b/include/linux/neighbour.h @@ -30,7 +30,7 @@ enum { NDA_SRC_VNI, NDA_PROTOCOL, /* Originator of entry */ NDA_NH_ID, - NDA_NOTIFY, + NDA_FDB_EXT_ATTRS, NDA_EXT_FLAGS, __NDA_MAX }; @@ -178,4 +178,27 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) +/* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY: + * - FDB_NOTIFY_BIT - notify on activity/expire for any entry + * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications + */ +enum { + FDB_NOTIFY_BIT = (1 << 0), + FDB_NOTIFY_INACTIVE_BIT = (1 << 1) +}; + +/* embedded into NDA_FDB_EXT_ATTRS: + * [NDA_FDB_EXT_ATTRS] = { + * [NFEA_ACTIVITY_NOTIFY] + * ... + * } + */ +enum { + NFEA_UNSPEC, + NFEA_ACTIVITY_NOTIFY, + NFEA_DONT_REFRESH, + __NFEA_MAX +}; +#define NFEA_MAX (__NFEA_MAX - 1) + #endif diff --git a/isisd/isis_adjacency.h b/isisd/isis_adjacency.h index 3c3a211a52..2780d826f5 100644 --- a/isisd/isis_adjacency.h +++ b/isisd/isis_adjacency.h @@ -139,5 +139,6 @@ void isis_adj_print_vty(struct isis_adjacency *adj, struct vty *vty, void isis_adj_build_neigh_list(struct list *adjdb, struct list *list); void isis_adj_build_up_list(struct list *adjdb, struct list *list); int isis_adj_usage2levels(enum isis_adj_usage usage); +int isis_bfd_startup_timer(struct thread *thread); #endif /* ISIS_ADJACENCY_H */ diff --git a/isisd/isis_bfd.c b/isisd/isis_bfd.c index f81dd6cf51..4fac73511b 100644 --- a/isisd/isis_bfd.c +++ b/isisd/isis_bfd.c @@ -146,11 +146,11 @@ static void bfd_adj_event(struct isis_adjacency *adj, struct prefix *dst, static int isis_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS) { struct interface *ifp; - struct prefix dst_ip; + struct prefix dst_ip, src_ip; int status; - ifp = bfd_get_peer_info(zclient->ibuf, &dst_ip, NULL, &status, - NULL, vrf_id); + ifp = bfd_get_peer_info(zclient->ibuf, &dst_ip, &src_ip, &status, NULL, + vrf_id); if (!ifp || (dst_ip.family != AF_INET && dst_ip.family != AF_INET6)) return 0; @@ -329,6 +329,13 @@ static void bfd_handle_adj_up(struct isis_adjacency *adj, int command) if (!circuit->bfd_info) goto out; + /* If IS-IS IPv6 is configured wait for IPv6 address to be programmed + * before starting up BFD + */ + if ((circuit->ipv6_router && listcount(circuit->ipv6_link) == 0) + || adj->ipv6_address_count == 0) + return; + /* * If IS-IS is enabled for both IPv4 and IPv6 on the circuit, prefer * creating a BFD session over IPv6. 
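For reference, the NDA_FDB_EXT_ATTRS / NFEA_ACTIVITY_NOTIFY attributes introduced in include/linux/neighbour.h above nest exactly as the header comment shows. The sketch below builds that nesting with libnl-style calls; the function name and error handling are illustrative, only the attribute names and FDB_NOTIFY_BIT come from the header, and depending on the libnl version NLA_F_NESTED may need to be OR'd into the nest type explicitly.

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/neighbour.h>	/* needs a uapi header carrying NDA_FDB_EXT_ATTRS */

/*
 * Attach [NDA_FDB_EXT_ATTRS] = { [NFEA_ACTIVITY_NOTIFY] = FDB_NOTIFY_BIT }
 * to an FDB message whose ndmsg header has already been appended.
 */
static int fdb_request_activity_notify(struct nl_msg *msg)
{
	struct nlattr *ext;

	/* Some libnl versions do not set NLA_F_NESTED here by themselves. */
	ext = nla_nest_start(msg, NDA_FDB_EXT_ATTRS);
	if (!ext)
		return -1;	/* message buffer too small */

	/* u8 bitmask: request activity/expire notifications for this entry */
	if (nla_put_u8(msg, NFEA_ACTIVITY_NOTIFY, FDB_NOTIFY_BIT) < 0) {
		nla_nest_cancel(msg, ext);
		return -1;
	}

	nla_nest_end(msg, ext);
	return 0;
}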
@@ -443,6 +450,44 @@ static int bfd_circuit_write_settings(struct isis_circuit *circuit, } #endif +static int bfd_handle_adj_ip_enabled(struct isis_adjacency *adj, int family) +{ + + if (family != AF_INET6) + return 0; + + if (adj->bfd_session) + return 0; + + if (adj->adj_state != ISIS_ADJ_UP) + return 0; + + bfd_handle_adj_up(adj, ZEBRA_BFD_DEST_REGISTER); + + return 0; +} + +static int bfd_handle_circuit_add_addr(struct isis_circuit *circuit) +{ + struct isis_adjacency *adj; + struct listnode *node; + + if (circuit->area == 0) + return 0; + + for (ALL_LIST_ELEMENTS_RO(circuit->area->adjacency_list, node, adj)) { + if (adj->bfd_session) + continue; + + if (adj->adj_state != ISIS_ADJ_UP) + continue; + + bfd_handle_adj_up(adj, ZEBRA_BFD_DEST_REGISTER); + } + + return 0; +} + void isis_bfd_init(void) { bfd_gbl_init(); @@ -457,4 +502,6 @@ void isis_bfd_init(void) hook_register(isis_circuit_config_write, bfd_circuit_write_settings); #endif + hook_register(isis_adj_ip_enabled_hook, bfd_handle_adj_ip_enabled); + hook_register(isis_circuit_add_addr_hook, bfd_handle_circuit_add_addr); } diff --git a/isisd/isis_bpf.c b/isisd/isis_bpf.c index 454da99e09..88c3bfa63c 100644 --- a/isisd/isis_bpf.c +++ b/isisd/isis_bpf.c @@ -67,12 +67,6 @@ uint8_t *readbuff = NULL; static const uint8_t ALL_L1_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x14}; static const uint8_t ALL_L2_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x15}; -#if 0 -/* missing support for P2P-over-LAN / ES-IS on BSD */ -static const uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05}; -static const uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04}; -#endif - static char sock_buff[16384]; static int open_bpf_dev(struct isis_circuit *circuit) diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index 2580a7c43a..4aac3f8880 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -252,6 +252,9 @@ struct isis_circuit *circuit_scan_by_ifp(struct interface *ifp) return circuit_lookup_by_ifp(ifp, isis->init_circ_list); } +DEFINE_HOOK(isis_circuit_add_addr_hook, (struct isis_circuit *circuit), + (circuit)) + void isis_circuit_add_addr(struct isis_circuit *circuit, struct connected *connected) { @@ -322,6 +325,9 @@ void isis_circuit_add_addr(struct isis_circuit *circuit, connected->address, circuit->interface->name); #endif /* EXTREME_DEBUG */ } + + hook_call(isis_circuit_add_addr_hook, circuit); + return; } diff --git a/isisd/isis_circuit.h b/isisd/isis_circuit.h index e736d8fb1f..3387232da2 100644 --- a/isisd/isis_circuit.h +++ b/isisd/isis_circuit.h @@ -142,6 +142,8 @@ struct isis_circuit { struct bfd_info *bfd_info; struct ldp_sync_info *ldp_sync_info; bool lfa_protection[ISIS_LEVELS]; + bool rlfa_protection[ISIS_LEVELS]; + uint32_t rlfa_max_metric[ISIS_LEVELS]; struct hash *lfa_excluded_ifaces[ISIS_LEVELS]; bool tilfa_protection[ISIS_LEVELS]; bool tilfa_node_protection[ISIS_LEVELS]; @@ -223,4 +225,7 @@ DECLARE_HOOK(isis_circuit_config_write, (circuit, vty)) #endif +DECLARE_HOOK(isis_circuit_add_addr_hook, (struct isis_circuit *circuit), + (circuit)) + #endif /* _ZEBRA_ISIS_CIRCUIT_H */ diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c index 1f0bebaf45..b48da9312f 100644 --- a/isisd/isis_cli.c +++ b/isisd/isis_cli.c @@ -593,6 +593,10 @@ void cli_show_isis_overload(struct vty *vty, struct lyd_node *dnode, vty_out(vty, " set-overload-bit\n"); } +#if CONFDATE > 20220119 +CPP_NOTICE( + "Use of `set-attached-bit` is deprecated please use attached-bit [send | receive]") +#endif /* * XPath: /frr-isisd:isis/instance/attached */ @@ 
-600,18 +604,57 @@ DEFPY_YANG(set_attached_bit, set_attached_bit_cmd, "[no] set-attached-bit", "Reset attached bit\n" "Set attached bit to identify as L1/L2 router for inter-area traffic\n") { - nb_cli_enqueue_change(vty, "./attached", NB_OP_MODIFY, + vty_out(vty, + "set-attached-bit deprecated please use attached-bit [send | receive]\n"); + + return CMD_SUCCESS; +} + +/* + * XPath: /frr-isisd:isis/instance/attach-send + */ +DEFPY_YANG(attached_bit_send, attached_bit_send_cmd, "[no] attached-bit send", + "Reset attached bit\n" + "Set attached bit for inter-area traffic\n" + "Set attached bit in LSP sent to L1 router\n") +{ + nb_cli_enqueue_change(vty, "./attach-send", NB_OP_MODIFY, no ? "false" : "true"); return nb_cli_apply_changes(vty, NULL); } -void cli_show_isis_attached(struct vty *vty, struct lyd_node *dnode, - bool show_defaults) +void cli_show_isis_attached_send(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + if (!yang_dnode_get_bool(dnode, NULL)) + vty_out(vty, " no"); + vty_out(vty, " attached-bit send\n"); +} + +/* + * XPath: /frr-isisd:isis/instance/attach-receive-ignore + */ +DEFPY_YANG( + attached_bit_receive_ignore, attached_bit_receive_ignore_cmd, + "[no] attached-bit receive ignore", + "Reset attached bit\n" + "Set attach bit for inter-area traffic\n" + "If LSP received with attached bit set, create default route to neighbor\n" + "Do not process attached bit\n") +{ + nb_cli_enqueue_change(vty, "./attach-receive-ignore", NB_OP_MODIFY, + no ? "false" : "true"); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_isis_attached_receive(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) { if (!yang_dnode_get_bool(dnode, NULL)) vty_out(vty, " no"); - vty_out(vty, " set-attached-bit\n"); + vty_out(vty, " attached-bit receive ignore\n"); } /* @@ -1915,22 +1958,22 @@ DEFPY_YANG (isis_frr_lfa_load_sharing, if (no) { nb_cli_enqueue_change( vty, "./fast-reroute/level-1/lfa/load-sharing", - NB_OP_DESTROY, "true"); + NB_OP_MODIFY, "true"); } else { nb_cli_enqueue_change( vty, "./fast-reroute/level-1/lfa/load-sharing", - NB_OP_CREATE, "false"); + NB_OP_MODIFY, "false"); } } if (!level || strmatch(level, "level-2")) { if (no) { nb_cli_enqueue_change( vty, "./fast-reroute/level-2/lfa/load-sharing", - NB_OP_DESTROY, "true"); + NB_OP_MODIFY, "true"); } else { nb_cli_enqueue_change( vty, "./fast-reroute/level-2/lfa/load-sharing", - NB_OP_CREATE, "false"); + NB_OP_MODIFY, "false"); } } @@ -1948,6 +1991,62 @@ void cli_show_isis_frr_lfa_load_sharing(struct vty *vty, struct lyd_node *dnode, } /* + * XPath: /frr-isisd:isis/instance/fast-reroute/level-{1,2}/remote-lfa/prefix-list + */ +DEFPY_YANG (isis_frr_remote_lfa_plist, + isis_frr_remote_lfa_plist_cmd, + "fast-reroute remote-lfa prefix-list WORD$plist [<level-1|level-2>$level]", + "Configure Fast ReRoute\n" + "Enable remote LFA related configuration\n" + "Filter PQ node router ID based on prefix list\n" + "Prefix-list name\n" + "Enable router ID filtering for level-1 only\n" + "Enable router ID filtering for level-2 only\n") +{ + if (!level || strmatch(level, "level-1")) + nb_cli_enqueue_change( + vty, "./fast-reroute/level-1/remote-lfa/prefix-list", + NB_OP_MODIFY, plist); + if (!level || strmatch(level, "level-2")) + nb_cli_enqueue_change( + vty, "./fast-reroute/level-2/remote-lfa/prefix-list", + NB_OP_MODIFY, plist); + + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY_YANG (no_isis_frr_remote_lfa_plist, + no_isis_frr_remote_lfa_plist_cmd, + "no fast-reroute remote-lfa prefix-list [WORD] 
[<level-1|level-2>$level]", + NO_STR + "Configure Fast ReRoute\n" + "Enable remote LFA related configuration\n" + "Filter PQ node router ID based on prefix list\n" + "Prefix-list name\n" + "Enable router ID filtering for level-1 only\n" + "Enable router ID filtering for level-2 only\n") +{ + if (!level || strmatch(level, "level-1")) + nb_cli_enqueue_change( + vty, "./fast-reroute/level-1/remote-lfa/prefix-list", + NB_OP_DESTROY, NULL); + if (!level || strmatch(level, "level-2")) + nb_cli_enqueue_change( + vty, "./fast-reroute/level-2/remote-lfa/prefix-list", + NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_isis_frr_remote_lfa_plist(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " fast-reroute remote-lfa prefix-list %s %s\n", + yang_dnode_get_string(dnode, NULL), + dnode->parent->parent->schema->name); +} + +/* * XPath: /frr-interface:lib/interface/frr-isisd:isis/passive */ DEFPY_YANG(isis_passive, isis_passive_cmd, "[no] isis passive", @@ -2630,6 +2729,25 @@ void cli_show_ip_isis_frr(struct vty *vty, struct lyd_node *dnode, } } + /* Remote LFA */ + l1_enabled = yang_dnode_get_bool(dnode, "./level-1/remote-lfa/enable"); + l2_enabled = yang_dnode_get_bool(dnode, "./level-2/remote-lfa/enable"); + + if (l1_enabled || l2_enabled) { + if (l1_enabled == l2_enabled) { + vty_out(vty, + " isis fast-reroute remote-lfa tunnel mpls-ldp\n"); + vty_out(vty, "\n"); + } else { + if (l1_enabled) + vty_out(vty, + " isis fast-reroute remote-lfa tunnel mpls-ldp level-1\n"); + if (l2_enabled) + vty_out(vty, + " isis fast-reroute remote-lfa tunnel mpls-ldp level-2\n"); + } + } + /* TI-LFA */ l1_enabled = yang_dnode_get_bool(dnode, "./level-1/ti-lfa/enable"); l2_enabled = yang_dnode_get_bool(dnode, "./level-2/ti-lfa/enable"); @@ -2761,6 +2879,104 @@ void cli_show_frr_lfa_exclude_interface(struct vty *vty, struct lyd_node *dnode, } /* + * XPath: + * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/remote-lfa/enable + */ +DEFPY(isis_remote_lfa, isis_remote_lfa_cmd, + "[no] isis fast-reroute remote-lfa tunnel mpls-ldp [level-1|level-2]$level", + NO_STR + "IS-IS routing protocol\n" + "Interface IP Fast-reroute configuration\n" + "Enable remote LFA computation\n" + "Enable remote LFA computation using tunnels\n" + "Use MPLS LDP tunnel to reach the remote LFA node\n" + "Enable LFA computation for Level 1 only\n" + "Enable LFA computation for Level 2 only\n") +{ + if (!level || strmatch(level, "level-1")) { + if (no) { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-1/remote-lfa/enable", + NB_OP_MODIFY, "false"); + } else { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-1/remote-lfa/enable", + NB_OP_MODIFY, "true"); + } + } + if (!level || strmatch(level, "level-2")) { + if (no) { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-2/remote-lfa/enable", + NB_OP_MODIFY, "false"); + } else { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-2/remote-lfa/enable", + NB_OP_MODIFY, "true"); + } + } + + return nb_cli_apply_changes(vty, NULL); +} + +/* + * XPath: + * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/remote-lfa/maximum-metric + */ +DEFPY(isis_remote_lfa_max_metric, isis_remote_lfa_max_metric_cmd, + "[no] isis fast-reroute remote-lfa maximum-metric (1-16777215)$metric [level-1|level-2]$level", + NO_STR + "IS-IS routing protocol\n" + "Interface IP Fast-reroute configuration\n" + "Enable remote LFA computation\n" + 
"Limit remote LFA node selection within the metric\n" + "Value of the metric\n" + "Enable LFA computation for Level 1 only\n" + "Enable LFA computation for Level 2 only\n") +{ + if (!level || strmatch(level, "level-1")) { + if (no) { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-1/remote-lfa/maximum-metric", + NB_OP_DESTROY, NULL); + } else { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-1/remote-lfa/maximum-metric", + NB_OP_MODIFY, metric_str); + } + } + if (!level || strmatch(level, "level-2")) { + if (no) { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-2/remote-lfa/maximum-metric", + NB_OP_DESTROY, NULL); + } else { + nb_cli_enqueue_change( + vty, + "./frr-isisd:isis/fast-reroute/level-2/remote-lfa/maximum-metric", + NB_OP_MODIFY, metric_str); + } + } + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_frr_remote_lfa_max_metric(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " isis fast-reroute remote-lfa maximum-metric %s %s\n", + yang_dnode_get_string(dnode, NULL), + dnode->parent->parent->schema->name); +} + +/* * XPath: /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-{1,2}/ti-lfa/enable */ DEFPY(isis_ti_lfa, isis_ti_lfa_cmd, @@ -3033,6 +3249,8 @@ void isis_cli_init(void) install_element(ISIS_NODE, &set_overload_bit_cmd); install_element(ISIS_NODE, &set_attached_bit_cmd); + install_element(ISIS_NODE, &attached_bit_send_cmd); + install_element(ISIS_NODE, &attached_bit_receive_ignore_cmd); install_element(ISIS_NODE, &metric_style_cmd); install_element(ISIS_NODE, &no_metric_style_cmd); @@ -3085,6 +3303,8 @@ void isis_cli_init(void) install_element(ISIS_NODE, &isis_frr_lfa_priority_limit_cmd); install_element(ISIS_NODE, &isis_frr_lfa_tiebreaker_cmd); install_element(ISIS_NODE, &isis_frr_lfa_load_sharing_cmd); + install_element(ISIS_NODE, &isis_frr_remote_lfa_plist_cmd); + install_element(ISIS_NODE, &no_isis_frr_remote_lfa_plist_cmd); install_element(INTERFACE_NODE, &isis_passive_cmd); @@ -3122,6 +3342,8 @@ void isis_cli_init(void) install_element(INTERFACE_NODE, &isis_lfa_cmd); install_element(INTERFACE_NODE, &isis_lfa_exclude_interface_cmd); + install_element(INTERFACE_NODE, &isis_remote_lfa_cmd); + install_element(INTERFACE_NODE, &isis_remote_lfa_max_metric_cmd); install_element(INTERFACE_NODE, &isis_ti_lfa_cmd); install_element(ISIS_NODE, &log_adj_changes_cmd); diff --git a/isisd/isis_constants.h b/isisd/isis_constants.h index 25eae06cb0..3d6a20ee66 100644 --- a/isisd/isis_constants.h +++ b/isisd/isis_constants.h @@ -140,7 +140,7 @@ * LSP bit masks */ #define LSPBIT_P 0x80 -#define LSPBIT_ATT 0x78 +#define LSPBIT_ATT 0x08 /* only use the Default ATT bit */ #define LSPBIT_OL 0x04 #define LSPBIT_IST 0x03 @@ -158,7 +158,6 @@ #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) -#define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define LLC_LEN 3 diff --git a/isisd/isis_dlpi.c b/isisd/isis_dlpi.c index 06fb41430c..bb8c542597 100644 --- a/isisd/isis_dlpi.c +++ b/isisd/isis_dlpi.c @@ -57,11 +57,6 @@ static t_uscalar_t dlpi_ctl[1024]; /* DLPI control messages */ static const uint8_t ALL_L1_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x14}; static const uint8_t ALL_L2_ISS[6] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x15}; static const uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05}; -#if 0 -/* missing support for ES-IS on Solaris */ -static const uint8_t ALL_ESS[6] 
= {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04}; -#endif - static uint8_t sock_buff[16384]; static unsigned short pf_filter[] = { diff --git a/isisd/isis_dr.c b/isisd/isis_dr.c index d03f857a0c..f6175fe9a4 100644 --- a/isisd/isis_dr.c +++ b/isisd/isis_dr.c @@ -74,7 +74,8 @@ int isis_run_dr(struct thread *thread) if (circuit->circ_type != CIRCUIT_T_BROADCAST) { zlog_warn("%s: scheduled for non broadcast circuit from %s:%d", - __func__, thread->schedfrom, thread->schedfrom_line); + __func__, thread->xref->xref.file, + thread->xref->xref.line); return ISIS_WARNING; } diff --git a/isisd/isis_ldp_sync.c b/isisd/isis_ldp_sync.c index 00bef5c782..585f769806 100644 --- a/isisd/isis_ldp_sync.c +++ b/isisd/isis_ldp_sync.c @@ -145,6 +145,7 @@ void isis_ldp_sync_state_req_msg(struct isis_circuit *circuit) ils_debug("ldp_sync: send state request to LDP for %s", ifp->name); + memset(&request, 0, sizeof(request)); strlcpy(request.name, ifp->name, sizeof(ifp->name)); request.proto = LDP_IGP_SYNC_IF_STATE_REQUEST; request.ifindex = ifp->ifindex; diff --git a/isisd/isis_lfa.c b/isisd/isis_lfa.c index fc6b435b62..5b3a3827a2 100644 --- a/isisd/isis_lfa.c +++ b/isisd/isis_lfa.c @@ -25,6 +25,8 @@ #include "vrf.h" #include "table.h" #include "srcdest_table.h" +#include "plist.h" +#include "zclient.h" #include "isis_common.h" #include "isisd.h" @@ -37,11 +39,13 @@ #include "isis_mt.h" #include "isis_tlvs.h" #include "isis_spf_private.h" -#include "isisd/isis_errors.h" +#include "isis_zebra.h" +#include "isis_errors.h" DEFINE_MTYPE_STATIC(ISISD, ISIS_SPF_NODE, "ISIS SPF Node"); DEFINE_MTYPE_STATIC(ISISD, ISIS_LFA_TIEBREAKER, "ISIS LFA Tiebreaker"); DEFINE_MTYPE_STATIC(ISISD, ISIS_LFA_EXCL_IFACE, "ISIS LFA Excluded Interface"); +DEFINE_MTYPE_STATIC(ISISD, ISIS_RLFA, "ISIS Remote LFA"); static inline int isis_spf_node_compare(const struct isis_spf_node *a, const struct isis_spf_node *b) @@ -316,7 +320,7 @@ bool isis_lfa_excise_adj_check(const struct isis_spftree *spftree, { const struct lfa_protected_resource *resource; - if (spftree->type != SPF_TYPE_TI_LFA) + if (spftree->type != SPF_TYPE_RLFA && spftree->type != SPF_TYPE_TI_LFA) return false; /* @@ -832,14 +836,14 @@ spf_vertex_check_is_affected(const struct isis_vertex *vertex, return false; } -/* Check if a given TI-LFA post-convergence SPF vertex needs protection. */ -static bool tilfa_check_needs_protection(const struct isis_spftree *spftree_pc, - const struct isis_vertex *vertex) +/* Check if a given RLFA/TI-LFA post-convergence SPF vertex needs protection. */ +static bool lfa_check_needs_protection(const struct isis_spftree *spftree_pc, + const struct isis_vertex *vertex) { struct isis_vertex *vertex_old; - /* Only local adjacencies need Adj-SID protection. */ - if (VTYPE_IS(vertex->type) + /* Only local adjacencies need TI-LFA Adj-SID protection. */ + if (spftree_pc->type == SPF_TYPE_TI_LFA && VTYPE_IS(vertex->type) && !isis_adj_find(spftree_pc->area, spftree_pc->level, vertex->N.id)) return false; @@ -849,6 +853,10 @@ static bool tilfa_check_needs_protection(const struct isis_spftree *spftree_pc, if (!vertex_old) return false; + /* Skip vertex if it's already protected by local LFA. 
*/ + if (CHECK_FLAG(vertex_old->flags, F_ISIS_VERTEX_LFA_PROTECTED)) + return false; + return spf_vertex_check_is_affected( vertex_old, spftree_pc->sysid, &spftree_pc->lfa.protected_resource); @@ -874,14 +882,12 @@ int isis_tilfa_check(struct isis_spftree *spftree_pc, if (!spftree_pc->area->srdb.enabled) return -1; - if (IS_DEBUG_LFA) - vid2string(vertex, buf, sizeof(buf)); - - if (!tilfa_check_needs_protection(spftree_pc, vertex)) { + if (!lfa_check_needs_protection(spftree_pc, vertex)) { if (IS_DEBUG_LFA) zlog_debug( "ISIS-LFA: %s %s unaffected by %s", - vtype2string(vertex->type), buf, + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf)), lfa_protected_resource2str( &spftree_pc->lfa.protected_resource)); @@ -902,7 +908,8 @@ int isis_tilfa_check(struct isis_spftree *spftree_pc, if (IS_DEBUG_LFA) zlog_debug( "ISIS-LFA: %s %s already covered by node protection", - vtype2string(vertex->type), buf); + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf))); return -1; } @@ -915,7 +922,8 @@ int isis_tilfa_check(struct isis_spftree *spftree_pc, if (IS_DEBUG_LFA) zlog_debug( "ISIS-LFA: %s %s already covered by node protection", - vtype2string(vertex->type), buf); + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf))); return -1; } @@ -924,7 +932,8 @@ int isis_tilfa_check(struct isis_spftree *spftree_pc, if (IS_DEBUG_LFA) zlog_debug( "ISIS-LFA: computing repair path(s) of %s %s w.r.t %s", - vtype2string(vertex->type), buf, + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf)), lfa_protected_resource2str( &spftree_pc->lfa.protected_resource)); @@ -939,7 +948,8 @@ int isis_tilfa_check(struct isis_spftree *spftree_pc, if (ret != 0) zlog_warn( "ISIS-LFA: failed to compute repair path(s) of %s %s w.r.t %s", - vtype2string(vertex->type), buf, + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf)), lfa_protected_resource2str( &spftree_pc->lfa.protected_resource)); @@ -978,13 +988,6 @@ static bool vertex_is_affected(struct isis_spftree *spftree_root, struct isis_vertex *vertex_child; struct isis_vertex_adj *vadj; bool reverse = false; - char buf1[VID2STR_BUFFER]; - char buf2[VID2STR_BUFFER]; - - if (IS_DEBUG_LFA) - zlog_debug("ISIS-LFA: vertex %s parent %s", - vid2string(vertex, buf1, sizeof(buf1)), - vid2string(pvertex, buf2, sizeof(buf2))); if (p_space && resource->type == LFA_NODE_PROTECTION) { if (isis_spf_node_find(&resource->nodes, vertex->N.id)) @@ -1059,10 +1062,6 @@ static void lfa_calc_reach_nodes(struct isis_spftree *spftree, if (isis_spf_node_find(nodes, vertex->N.id)) continue; - if (IS_DEBUG_LFA) - zlog_debug("ISIS-LFA: checking %s", - vid2string(vertex, buf, sizeof(buf))); - if (!vertex_is_affected(spftree_root, adj_nodes, p_space, vertex, resource)) { if (IS_DEBUG_LFA) @@ -1166,7 +1165,7 @@ struct isis_spftree *isis_tilfa_compute(struct isis_area *area, struct isis_spf_node *adj_node; if (IS_DEBUG_LFA) - zlog_debug("ISIS-LFA: computing the P/Q spaces w.r.t. %s", + zlog_debug("ISIS-LFA: computing TI-LFAs for %s", lfa_protected_resource2str(resource)); /* Populate list of nodes affected by link failure. */ @@ -1238,6 +1237,497 @@ int isis_spf_run_neighbors(struct isis_spftree *spftree) return 0; } +/* Find Router ID of PQ node. 
*/ +static struct in_addr *rlfa_pq_node_rtr_id(struct isis_spftree *spftree, + const struct isis_vertex *vertex_pq) +{ + struct isis_lsp *lsp; + + lsp = isis_root_system_lsp(spftree->lspdb, vertex_pq->N.id); + if (!lsp) + return NULL; + + if (lsp->tlvs->router_cap->router_id.s_addr == INADDR_ANY) + return NULL; + + return &lsp->tlvs->router_cap->router_id; +} + +/* Find PQ node by intersecting the P/Q spaces. This is a recursive function. */ +static const struct in_addr * +rlfa_find_pq_node(struct isis_spftree *spftree_pc, + struct isis_vertex *vertex_dest, + const struct isis_vertex *vertex, + const struct isis_vertex *vertex_child) +{ + struct isis_area *area = spftree_pc->area; + int level = spftree_pc->level; + struct isis_vertex *pvertex; + struct listnode *node; + bool is_pnode, is_qnode; + + if (!vertex_child) + goto parents; + if (vertex->type != VTYPE_NONPSEUDO_IS + && vertex->type != VTYPE_NONPSEUDO_TE_IS) + goto parents; + if (!VTYPE_IS(vertex_child->type)) + vertex_child = NULL; + + /* Check if node is part of the extended P-space and/or Q-space. */ + is_pnode = lfa_ext_p_space_check(spftree_pc, vertex_dest, vertex); + is_qnode = lfa_q_space_check(spftree_pc, vertex); + + if (is_pnode && is_qnode) { + const struct in_addr *rtr_id_pq; + uint32_t max_metric; + struct prefix_list *plist = NULL; + + rtr_id_pq = rlfa_pq_node_rtr_id(spftree_pc, vertex); + if (!rtr_id_pq) { + if (IS_DEBUG_LFA) { + char buf[VID2STR_BUFFER]; + + vid2string(vertex, buf, sizeof(buf)); + zlog_debug( + "ISIS-LFA: tentative PQ node (%s %s) doesn't have a router-ID", + vtype2string(vertex->type), buf); + } + goto parents; + } + + max_metric = spftree_pc->lfa.remote.max_metric; + if (max_metric && vertex->d_N > max_metric) { + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: skipping PQ node %pI4 (maximum metric)", + rtr_id_pq); + goto parents; + } + + plist = area->rlfa_plist[level - 1]; + if (plist) { + struct prefix p; + + p.family = AF_INET; + p.prefixlen = IPV4_MAX_BITLEN; + p.u.prefix4 = *rtr_id_pq; + if (prefix_list_apply(plist, &p) == PREFIX_DENY) { + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: PQ node %pI4 filtered by prefix-list", + rtr_id_pq); + goto parents; + } + } + + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: found PQ node: %pI4", rtr_id_pq); + + return rtr_id_pq; + } + +parents: + for (ALL_LIST_ELEMENTS_RO(vertex->parents, node, pvertex)) { + const struct in_addr *rtr_id_pq; + + rtr_id_pq = rlfa_find_pq_node(spftree_pc, vertex_dest, pvertex, + vertex); + if (rtr_id_pq) + return rtr_id_pq; + } + + return NULL; +} + +int rlfa_cmp(const struct rlfa *a, const struct rlfa *b) +{ + return prefix_cmp(&a->prefix, &b->prefix); +} + +static struct rlfa *rlfa_add(struct isis_spftree *spftree, + struct isis_vertex *vertex, + struct in_addr pq_address) +{ + struct rlfa *rlfa; + + assert(VTYPE_IP(vertex->type)); + rlfa = XCALLOC(MTYPE_ISIS_RLFA, sizeof(*rlfa)); + rlfa->prefix = vertex->N.ip.p.dest; + rlfa->vertex = vertex; + rlfa->pq_address = pq_address; + rlfa_tree_add(&spftree->lfa.remote.rlfas, rlfa); + + return rlfa; +} + +static void rlfa_delete(struct isis_spftree *spftree, struct rlfa *rlfa) +{ + rlfa_tree_del(&spftree->lfa.remote.rlfas, rlfa); + XFREE(MTYPE_ISIS_RLFA, rlfa); +} + +static struct rlfa *rlfa_lookup(struct isis_spftree *spftree, + union prefixconstptr pu) +{ + struct rlfa s = {}; + + s.prefix = *pu.p; + return rlfa_tree_find(&spftree->lfa.remote.rlfas, &s); +} + +static int isis_area_verify_routes_cb(struct thread *thread) +{ + struct isis_area *area = THREAD_ARG(thread); + + if 
(IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: updating RLFAs in the RIB"); + + isis_area_verify_routes(area); + + return 0; +} + +static mpls_label_t rlfa_nexthop_label(struct isis_spftree *spftree, + struct isis_vertex_adj *vadj, + struct zapi_rlfa_response *response) +{ + struct isis_spf_adj *sadj = vadj->sadj; + struct isis_adjacency *adj = sadj->adj; + + /* + * Special case to make unit tests work (use implicit-null labels + * instead of artificial ones). + */ + if (CHECK_FLAG(spftree->flags, F_SPFTREE_NO_ADJACENCIES)) + return MPLS_LABEL_IMPLICIT_NULL; + + for (unsigned int i = 0; i < response->nexthop_num; i++) { + switch (response->nexthops[i].family) { + case AF_INET: + for (unsigned int j = 0; j < adj->ipv4_address_count; + j++) { + struct in_addr addr = adj->ipv4_addresses[j]; + + if (!IPV4_ADDR_SAME( + &addr, + &response->nexthops[i].gate.ipv4)) + continue; + + return response->nexthops[i].label; + } + break; + case AF_INET6: + for (unsigned int j = 0; j < adj->ipv6_address_count; + j++) { + struct in6_addr addr = adj->ipv6_addresses[j]; + + if (!IPV6_ADDR_SAME( + &addr, + &response->nexthops[i].gate.ipv6)) + continue; + + return response->nexthops[i].label; + } + break; + + default: + break; + } + } + + return MPLS_INVALID_LABEL; +} + +int isis_rlfa_activate(struct isis_spftree *spftree, struct rlfa *rlfa, + struct zapi_rlfa_response *response) +{ + struct isis_area *area = spftree->area; + struct isis_vertex *vertex = rlfa->vertex; + struct isis_vertex_adj *vadj; + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(vertex->Adj_N, node, vadj)) { + mpls_label_t ldp_label; + struct mpls_label_stack *label_stack; + size_t num_labels = 0; + size_t i = 0; + + ldp_label = rlfa_nexthop_label(spftree, vadj, response); + if (ldp_label == MPLS_INVALID_LABEL) { + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: failed to activate RLFA: missing LDP label to reach PQ node through %s", + sysid_print(vadj->sadj->id)); + return -1; + } + + if (ldp_label != MPLS_LABEL_IMPLICIT_NULL) + num_labels++; + if (response->pq_label != MPLS_LABEL_IMPLICIT_NULL) + num_labels++; + if (vadj->sr.present + && vadj->sr.label != MPLS_LABEL_IMPLICIT_NULL) + num_labels++; + + /* Allocate label stack. */ + label_stack = + XCALLOC(MTYPE_ISIS_NEXTHOP_LABELS, + sizeof(struct mpls_label_stack) + + num_labels * sizeof(mpls_label_t)); + label_stack->num_labels = num_labels; + + /* Push label allocated by the nexthop (outer label). */ + if (ldp_label != MPLS_LABEL_IMPLICIT_NULL) + label_stack->label[i++] = ldp_label; + /* Push label allocated by the PQ node (inner label). */ + if (response->pq_label != MPLS_LABEL_IMPLICIT_NULL) + label_stack->label[i++] = response->pq_label; + /* Preserve the original Prefix-SID label when it's present. 
*/ + if (vadj->sr.present + && vadj->sr.label != MPLS_LABEL_IMPLICIT_NULL) + label_stack->label[i++] = vadj->sr.label; + + vadj->label_stack = label_stack; + } + + isis_route_create(&vertex->N.ip.p.dest, &vertex->N.ip.p.src, + vertex->d_N, vertex->depth, &vertex->N.ip.sr, + vertex->Adj_N, true, area, + spftree->route_table_backup); + spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] += 1; + + thread_cancel(&area->t_rlfa_rib_update); + thread_add_timer(master, isis_area_verify_routes_cb, area, 2, + &area->t_rlfa_rib_update); + + return 0; +} + +void isis_rlfa_deactivate(struct isis_spftree *spftree, struct rlfa *rlfa) +{ + struct isis_area *area = spftree->area; + struct isis_vertex *vertex = rlfa->vertex; + struct route_node *rn; + + rn = route_node_lookup(spftree->route_table_backup, &rlfa->prefix); + if (!rn) + return; + isis_route_delete(area, rn, spftree->route_table_backup); + spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] -= 1; + + thread_cancel(&area->t_rlfa_rib_update); + thread_add_timer(master, isis_area_verify_routes_cb, area, 2, + &area->t_rlfa_rib_update); +} + +void isis_rlfa_list_init(struct isis_spftree *spftree) +{ + rlfa_tree_init(&spftree->lfa.remote.rlfas); +} + +void isis_rlfa_list_clear(struct isis_spftree *spftree) +{ + while (rlfa_tree_count(&spftree->lfa.remote.rlfas) > 0) { + struct rlfa *rlfa; + + rlfa = rlfa_tree_first(&spftree->lfa.remote.rlfas); + isis_rlfa_deactivate(spftree, rlfa); + rlfa_delete(spftree, rlfa); + } +} + +void isis_rlfa_process_ldp_response(struct zapi_rlfa_response *response) +{ + struct isis *isis; + struct isis_area *area; + struct isis_spftree *spftree; + struct rlfa *rlfa; + enum spf_tree_id tree_id; + uint32_t spf_run_id; + int level; + + if (response->igp.protocol != ZEBRA_ROUTE_ISIS) + return; + + isis = isis_lookup_by_vrfid(response->igp.vrf_id); + if (!isis) + return; + + area = isis_area_lookup(response->igp.isis.area_tag, + response->igp.vrf_id); + if (!area) + return; + + tree_id = response->igp.isis.spf.tree_id; + if (tree_id != SPFTREE_IPV4 && tree_id != SPFTREE_IPV6) { + zlog_warn("ISIS-LFA: invalid SPF tree ID received from LDP"); + return; + } + + level = response->igp.isis.spf.level; + if (level != ISIS_LEVEL1 && level != ISIS_LEVEL2) { + zlog_warn("ISIS-LFA: invalid IS-IS level received from LDP"); + return; + } + + spf_run_id = response->igp.isis.spf.run_id; + spftree = area->spftree[tree_id][level - 1]; + if (spftree->runcount != spf_run_id) + /* Outdated RLFA, ignore... 
*/ + return; + + rlfa = rlfa_lookup(spftree, &response->destination); + if (!rlfa) { + zlog_warn( + "ISIS-LFA: couldn't find Remote-LFA %pFX received from LDP", + &response->destination); + return; + } + + if (response->pq_label != MPLS_INVALID_LABEL) { + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: activating/updating RLFA for %pFX", + &rlfa->prefix); + + if (isis_rlfa_activate(spftree, rlfa, response) != 0) + isis_rlfa_deactivate(spftree, rlfa); + } else { + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: deactivating RLFA for %pFX", + &rlfa->prefix); + + isis_rlfa_deactivate(spftree, rlfa); + } +} + +void isis_ldp_rlfa_handle_client_close(struct zapi_client_close_info *info) +{ + struct isis *isis = isis_lookup_by_vrfid(VRF_DEFAULT); + struct isis_area *area; + struct listnode *node; + + if (!isis) + return; + + /* Check if the LDP main client session closed */ + if (info->proto != ZEBRA_ROUTE_LDP || info->session_id == 0) + return; + + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: LDP is down, deactivating all RLFAs"); + + for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) { + for (int tree = SPFTREE_IPV4; tree < SPFTREE_COUNT; tree++) { + for (int level = ISIS_LEVEL1; level <= ISIS_LEVELS; + level++) { + struct isis_spftree *spftree; + + spftree = area->spftree[tree][level - 1]; + isis_rlfa_list_clear(spftree); + } + } + } +} + +/** + * Check if the given SPF vertex needs protection and, if so, attempt to + * compute a Remote LFA for it. + * + * @param spftree_pc The post-convergence SPF tree + * @param vertex IS-IS SPF vertex to check + */ +void isis_rlfa_check(struct isis_spftree *spftree_pc, + struct isis_vertex *vertex) +{ + struct isis_spftree *spftree_old = spftree_pc->lfa.old.spftree; + struct rlfa *rlfa; + const struct in_addr *rtr_id_pq; + char buf[VID2STR_BUFFER]; + + if (!lfa_check_needs_protection(spftree_pc, vertex)) { + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: %s %s unaffected by %s", + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf)), + lfa_protected_resource2str( + &spftree_pc->lfa.protected_resource)); + + return; + } + + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: computing repair path(s) of %s %s w.r.t %s", + vtype2string(vertex->type), + vid2string(vertex, buf, sizeof(buf)), + lfa_protected_resource2str( + &spftree_pc->lfa.protected_resource)); + + /* Find PQ node. */ + rtr_id_pq = rlfa_find_pq_node(spftree_pc, vertex, vertex, NULL); + if (!rtr_id_pq) { + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: no acceptable PQ node found"); + return; + } + + /* Store valid RLFA and store LDP label for the PQ node. */ + rlfa = rlfa_add(spftree_old, vertex, *rtr_id_pq); + + /* Register RLFA with LDP. */ + if (isis_zebra_rlfa_register(spftree_old, rlfa) != 0) + rlfa_delete(spftree_old, rlfa); +} + +/** + * Compute the Remote LFA backup paths for a given protected interface. + * + * @param area IS-IS area + * @param spftree IS-IS SPF tree + * @param spftree_reverse IS-IS Reverse SPF tree + * @param max_metric Remote LFA maximum metric + * @param resource Protected resource + * + * @return Pointer to the post-convergence SPF tree + */ +struct isis_spftree *isis_rlfa_compute(struct isis_area *area, + struct isis_spftree *spftree, + struct isis_spftree *spftree_reverse, + uint32_t max_metric, + struct lfa_protected_resource *resource) +{ + struct isis_spftree *spftree_pc; + + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: computing remote LFAs for %s", + lfa_protected_resource2str(resource)); + + /* Create post-convergence SPF tree. 
*/ + spftree_pc = isis_spftree_new(area, spftree->lspdb, spftree->sysid, + spftree->level, spftree->tree_id, + SPF_TYPE_RLFA, spftree->flags); + spftree_pc->lfa.old.spftree = spftree; + spftree_pc->lfa.old.spftree_reverse = spftree_reverse; + spftree_pc->lfa.remote.max_metric = max_metric; + spftree_pc->lfa.protected_resource = *resource; + + /* Compute the extended P-space and Q-space. */ + lfa_calc_pq_spaces(spftree_pc, resource); + + if (IS_DEBUG_LFA) + zlog_debug( + "ISIS-LFA: computing the post convergence SPT w.r.t. %s", + lfa_protected_resource2str(resource)); + + /* Re-run SPF in the local node to find the post-convergence paths. */ + isis_run_spf(spftree_pc); + + return spftree_pc; +} + /* Calculate the distance from the root node to the given IP destination. */ static int lfa_calc_dist_destination(struct isis_spftree *spftree, const struct isis_vertex *vertex_N, @@ -1451,8 +1941,7 @@ static bool clfa_node_protecting_check(struct isis_spftree *spftree, } static struct list * -isis_lfa_tiebreakers(struct isis_area *area, struct isis_circuit *circuit, - struct isis_spftree *spftree, +isis_lfa_tiebreakers(struct isis_area *area, struct isis_spftree *spftree, struct lfa_protected_resource *resource, struct isis_vertex *vertex, struct isis_spf_adj *sadj_primary, struct list *lfa_list) @@ -1572,6 +2061,10 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit, resource->type = LFA_LINK_PROTECTION; + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: computing local LFAs for %s", + lfa_protected_resource2str(resource)); + for (ALL_QUEUE_ELEMENTS_RO(&spftree->paths, vnode, vertex)) { struct list *lfa_list; struct list *filtered_lfa_list; @@ -1591,7 +2084,8 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit, resource)) { if (IS_DEBUG_LFA) zlog_debug( - "ISIS-LFA: route unaffected by %s", + "ISIS-LFA: %s %s unaffected by %s", + vtype2string(vertex->type), buf, lfa_protected_resource2str(resource)); continue; } @@ -1697,15 +2191,18 @@ void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit, if (list_isempty(lfa_list)) { if (IS_DEBUG_LFA) - zlog_debug("ISIS-LFA: no valid LFAs found"); + zlog_debug( + "ISIS-LFA: no valid local LFAs found"); list_delete(&lfa_list); continue; } + SET_FLAG(vertex->flags, F_ISIS_VERTEX_LFA_PROTECTED); + /* Check tie-breakers. */ filtered_lfa_list = - isis_lfa_tiebreakers(area, circuit, spftree, resource, - vertex, sadj_primary, lfa_list); + isis_lfa_tiebreakers(area, spftree, resource, vertex, + sadj_primary, lfa_list); /* Create backup route using the best LFAs. */ allow_ecmp = area->lfa_load_sharing[level - 1]; @@ -1746,7 +2243,7 @@ static void isis_spf_run_tilfa(struct isis_area *area, } /** - * Run the LFA/TI-LFA algorithms for all protected interfaces. + * Run the LFA/RLFA/TI-LFA algorithms for all protected interfaces. * * @param area IS-IS area * @param spftree IS-IS SPF tree @@ -1756,13 +2253,11 @@ void isis_spf_run_lfa(struct isis_area *area, struct isis_spftree *spftree) struct isis_spftree *spftree_reverse = NULL; struct isis_circuit *circuit; struct listnode *node; - bool tilfa_configured; int level = spftree->level; - tilfa_configured = (area->tilfa_protected_links[level - 1] > 0); - /* Run reverse SPF locally. */ - if (tilfa_configured) + if (area->rlfa_protected_links[level - 1] > 0 + || area->tilfa_protected_links[level - 1] > 0) spftree_reverse = isis_spf_reverse_run(spftree); /* Run forward SPF on all adjacent routers. 
*/ @@ -1808,15 +2303,32 @@ void isis_spf_run_lfa(struct isis_area *area, struct isis_spftree *spftree) continue; } - if (circuit->lfa_protection[level - 1]) + if (circuit->lfa_protection[level - 1]) { + /* Run local LFA. */ isis_lfa_compute(area, circuit, spftree, &resource); - else if (circuit->tilfa_protection[level - 1]) { + + if (circuit->rlfa_protection[level - 1]) { + struct isis_spftree *spftree_pc; + uint32_t max_metric; + + /* Run remote LFA. */ + assert(spftree_reverse); + max_metric = + circuit->rlfa_max_metric[level - 1]; + spftree_pc = isis_rlfa_compute( + area, spftree, spftree_reverse, + max_metric, &resource); + listnode_add(spftree->lfa.remote.pc_spftrees, + spftree_pc); + } + } else if (circuit->tilfa_protection[level - 1]) { + /* Run TI-LFA. */ assert(spftree_reverse); isis_spf_run_tilfa(area, circuit, spftree, spftree_reverse, &resource); } } - if (tilfa_configured) + if (spftree_reverse) isis_spftree_del(spftree_reverse); } diff --git a/isisd/isis_lfa.h b/isisd/isis_lfa.h index f09fc663a4..65891cae44 100644 --- a/isisd/isis_lfa.h +++ b/isisd/isis_lfa.h @@ -21,8 +21,10 @@ #define _FRR_ISIS_LFA_H #include "lib/typesafe.h" +#include "lib/zclient.h" PREDECL_RBTREE_UNIQ(lfa_tiebreaker_tree) +PREDECL_RBTREE_UNIQ(rlfa_tree) enum lfa_tiebreaker_type { LFA_TIEBREAKER_DOWNSTREAM = 0, @@ -41,6 +43,15 @@ int lfa_tiebreaker_cmp(const struct lfa_tiebreaker *a, DECLARE_RBTREE_UNIQ(lfa_tiebreaker_tree, struct lfa_tiebreaker, entry, lfa_tiebreaker_cmp) +struct rlfa { + struct rlfa_tree_item entry; + struct prefix prefix; + struct isis_vertex *vertex; + struct in_addr pq_address; +}; +int rlfa_cmp(const struct rlfa *a, const struct rlfa *b); +DECLARE_RBTREE_UNIQ(rlfa_tree, struct rlfa, entry, rlfa_cmp) + enum isis_tilfa_sid_type { TILFA_SID_PREFIX = 1, TILFA_SID_ADJ, @@ -145,6 +156,19 @@ bool isis_lfa_excise_node_check(const struct isis_spftree *spftree, const uint8_t *id); struct isis_spftree *isis_spf_reverse_run(const struct isis_spftree *spftree); int isis_spf_run_neighbors(struct isis_spftree *spftree); +int isis_rlfa_activate(struct isis_spftree *spftree, struct rlfa *rlfa, + struct zapi_rlfa_response *response); +void isis_rlfa_deactivate(struct isis_spftree *spftree, struct rlfa *rlfa); +void isis_rlfa_list_init(struct isis_spftree *spftree); +void isis_rlfa_list_clear(struct isis_spftree *spftree); +void isis_rlfa_process_ldp_response(struct zapi_rlfa_response *response); +void isis_ldp_rlfa_handle_client_close(struct zapi_client_close_info *info); +void isis_rlfa_check(struct isis_spftree *spftree, struct isis_vertex *vertex); +struct isis_spftree *isis_rlfa_compute(struct isis_area *area, + struct isis_spftree *spftree, + struct isis_spftree *spftree_reverse, + uint32_t max_metric, + struct lfa_protected_resource *resource); void isis_lfa_compute(struct isis_area *area, struct isis_circuit *circuit, struct isis_spftree *spftree, struct lfa_protected_resource *resource); diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c index 47225ea2c3..6d2303817b 100644 --- a/isisd/isis_lsp.c +++ b/isisd/isis_lsp.c @@ -399,7 +399,98 @@ static void lsp_seqno_update(struct isis_lsp *lsp0) return; } -static uint8_t lsp_bits_generate(int level, int overload_bit, int attached_bit) +static bool isis_level2_adj_up(struct isis_area *curr_area) +{ + struct listnode *node, *cnode; + struct isis_circuit *circuit; + struct list *adjdb; + struct isis_adjacency *adj; + struct isis *isis = curr_area->isis; + struct isis_area *area; + + /* lookup for a Level2 adjacency up in another area */ + for 
(ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) { + if (area->area_tag + && strcmp(area->area_tag, curr_area->area_tag) == 0) + continue; + + for (ALL_LIST_ELEMENTS_RO(area->circuit_list, cnode, circuit)) { + if (circuit->circ_type == CIRCUIT_T_BROADCAST) { + adjdb = circuit->u.bc.adjdb[1]; + if (!adjdb || !adjdb->count) + continue; + + for (ALL_LIST_ELEMENTS_RO(adjdb, node, adj)) { + if (adj->level != ISIS_ADJ_LEVEL1 + && adj->adj_state == ISIS_ADJ_UP) + return true; + } + } else if (circuit->circ_type == CIRCUIT_T_P2P + && circuit->u.p2p.neighbor) { + adj = circuit->u.p2p.neighbor; + if (adj->level != ISIS_ADJ_LEVEL1 + && adj->adj_state == ISIS_ADJ_UP) + return true; + } + } + } + return false; +} + +static void isis_reset_attach_bit(struct isis_adjacency *curr_adj) +{ + struct listnode *node; + struct isis_area *curr_area = curr_adj->circuit->area; + struct isis *isis = curr_area->isis; + struct isis_area *area; + struct lspdb_head *head; + struct isis_lsp *lsp; + uint8_t lspid[ISIS_SYS_ID_LEN + 2]; + + /* If a new adjacency is up and the area is level-2 or level-1-2, verify if + * we have LSPs in other areas that should now set the attach bit. + * + * If an adjacency is down, verify if we no longer have any other level-2 + * or level-1-2 area, so that we should now remove the attach bit. + */ + if (curr_area->is_type == IS_LEVEL_1) + return; + + for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) { + if (area->area_tag + && strcmp(area->area_tag, curr_area->area_tag) == 0) + continue; + + if (!area->attached_bit_send) + continue; + + head = &area->lspdb[IS_LEVEL_1 - 1]; + memset(lspid, 0, ISIS_SYS_ID_LEN + 2); + memcpy(lspid, area->isis->sysid, ISIS_SYS_ID_LEN); + + lsp = lsp_search(head, lspid); + if (!lsp) + continue; + + if (curr_adj->adj_state == ISIS_ADJ_UP + && !(lsp->hdr.lsp_bits & LSPBIT_ATT)) { + sched_debug( + "ISIS (%s): adj going up regenerate lsp-bits", + area->area_tag); + lsp_regenerate_schedule(area, IS_LEVEL_1, 0); + } else if (curr_adj->adj_state == ISIS_ADJ_DOWN + && lsp->hdr.lsp_bits & LSPBIT_ATT + && !isis_level2_adj_up(area)) { + sched_debug( + "ISIS (%s): adj going down regenerate lsp-bits", + area->area_tag); + lsp_regenerate_schedule(area, IS_LEVEL_1, 0); + } + } +} + +static uint8_t lsp_bits_generate(int level, int overload_bit, int attached_bit, + struct isis_area *area) { uint8_t lsp_bits = 0; if (level == IS_LEVEL_1) @@ -408,8 +499,13 @@ static uint8_t lsp_bits_generate(int level, int overload_bit, int attached_bit) lsp_bits = IS_LEVEL_1_AND_2; if (overload_bit) lsp_bits |= overload_bit; - if (attached_bit) - lsp_bits |= attached_bit; + + /* only set the attach bit if we are a level-1-2 router and this is + * a level-1 LSP and we have a level-2 adjacency up from another area + */ + if (area->is_type == IS_LEVEL_1_AND_2 && level == IS_LEVEL_1 + && attached_bit && isis_level2_adj_up(area)) + lsp_bits |= LSPBIT_ATT; return lsp_bits; } @@ -632,13 +728,13 @@ static const char *lsp_bits2string(uint8_t lsp_bits, char *buf, size_t buf_size) return " error"; /* we only focus on the default metric */ - pos += sprintf(pos, "%d/", - ISIS_MASK_LSP_ATT_DEFAULT_BIT(lsp_bits) ? 1 : 0); + pos += snprintf(pos, buf_size, "%d/", + ISIS_MASK_LSP_ATT_BITS(lsp_bits) ? 1 : 0); - pos += sprintf(pos, "%d/", - ISIS_MASK_LSP_PARTITION_BIT(lsp_bits) ? 1 : 0); + pos += snprintf(pos, buf_size, "%d/", + ISIS_MASK_LSP_PARTITION_BIT(lsp_bits) ? 1 : 0); - sprintf(pos, "%d", ISIS_MASK_LSP_OL_BIT(lsp_bits) ? 1 : 0); + snprintf(pos, buf_size, "%d", ISIS_MASK_LSP_OL_BIT(lsp_bits) ? 
1 : 0); return buf; } @@ -838,7 +934,7 @@ static struct isis_lsp *lsp_next_frag(uint8_t frag_num, struct isis_lsp *lsp0, lsp = lsp_new(area, frag_id, lsp0->hdr.rem_lifetime, 0, lsp_bits_generate(level, area->overload_bit, - area->attached_bit), + area->attached_bit_send, area), 0, lsp0, level); lsp->own_lsp = 1; lsp_insert(&area->lspdb[level - 1], lsp); @@ -864,7 +960,7 @@ static void lsp_build(struct isis_lsp *lsp, struct isis_area *area) area->area_tag, level); lsp->hdr.lsp_bits = lsp_bits_generate(level, area->overload_bit, - area->attached_bit); + area->attached_bit_send, area); lsp_add_auth(lsp); @@ -1223,10 +1319,10 @@ int lsp_generate(struct isis_area *area, int level) oldlsp->hdr.lsp_id); } rem_lifetime = lsp_rem_lifetime(area, level); - newlsp = - lsp_new(area, lspid, rem_lifetime, seq_num, - area->is_type | area->overload_bit | area->attached_bit, - 0, NULL, level); + newlsp = lsp_new(area, lspid, rem_lifetime, seq_num, + lsp_bits_generate(area->is_type, area->overload_bit, + area->attached_bit_send, area), + 0, NULL, level); newlsp->area = area; newlsp->own_lsp = 1; @@ -1310,8 +1406,9 @@ static int lsp_regenerate(struct isis_area *area, int level) continue; } - frag->hdr.lsp_bits = lsp_bits_generate( - level, area->overload_bit, area->attached_bit); + frag->hdr.lsp_bits = + lsp_bits_generate(level, area->overload_bit, + area->attached_bit_send, area); /* Set the lifetime values of all the fragments to the same * value, * so that no fragment expires before the lsp is refreshed. @@ -1518,8 +1615,8 @@ static void lsp_build_pseudo(struct isis_lsp *lsp, struct isis_circuit *circuit, lsp->level = level; /* RFC3787 section 4 SHOULD not set overload bit in pseudo LSPs */ - lsp->hdr.lsp_bits = - lsp_bits_generate(level, 0, circuit->area->attached_bit); + lsp->hdr.lsp_bits = lsp_bits_generate( + level, 0, circuit->area->attached_bit_send, area); /* * add self to IS neighbours @@ -1617,8 +1714,10 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level) rem_lifetime = lsp_rem_lifetime(circuit->area, level); /* RFC3787 section 4 SHOULD not set overload bit in pseudo LSPs */ lsp = lsp_new(circuit->area, lsp_id, rem_lifetime, 1, - circuit->area->is_type | circuit->area->attached_bit, 0, - NULL, level); + lsp_bits_generate(circuit->area->is_type, 0, + circuit->area->attached_bit_send, + circuit->area), + 0, NULL, level); lsp->area = circuit->area; lsp_build_pseudo(lsp, circuit, level); @@ -2036,6 +2135,12 @@ void _lsp_flood(struct isis_lsp *lsp, struct isis_circuit *circuit, static int lsp_handle_adj_state_change(struct isis_adjacency *adj) { lsp_regenerate_schedule(adj->circuit->area, IS_LEVEL_1 | IS_LEVEL_2, 0); + + /* when an adjacency state changes determine if we need to + * change attach_bits in other area's LSPs + */ + isis_reset_attach_bit(adj); + return 0; } diff --git a/isisd/isis_main.c b/isisd/isis_main.c index 4576a4a95c..1b04f4f7a0 100644 --- a/isisd/isis_main.c +++ b/isisd/isis_main.c @@ -246,6 +246,8 @@ int main(int argc, char **argv, char **envp) access_list_delete_hook(isis_filter_update); isis_vrf_init(); prefix_list_init(); + prefix_list_add_hook(isis_prefix_list_update); + prefix_list_delete_hook(isis_prefix_list_update); isis_init(); isis_circuit_init(); #ifdef FABRICD diff --git a/isisd/isis_memory.c b/isisd/isis_memory.c index b63a82f404..f716e060cd 100644 --- a/isisd/isis_memory.c +++ b/isisd/isis_memory.c @@ -46,3 +46,4 @@ DEFINE_MTYPE(ISISD, ISIS_EXT_ROUTE, "ISIS redistributed route") DEFINE_MTYPE(ISISD, ISIS_EXT_INFO, "ISIS redistributed route info") 
DEFINE_MTYPE(ISISD, ISIS_MPLS_TE, "ISIS MPLS_TE parameters") DEFINE_MTYPE(ISISD, ISIS_ACL_NAME, "ISIS access-list name") +DEFINE_MTYPE(ISISD, ISIS_PLIST_NAME, "ISIS prefix-list name") diff --git a/isisd/isis_memory.h b/isisd/isis_memory.h index 3ef1c5bf0b..5bcd2a3983 100644 --- a/isisd/isis_memory.h +++ b/isisd/isis_memory.h @@ -45,5 +45,6 @@ DECLARE_MTYPE(ISIS_EXT_ROUTE) DECLARE_MTYPE(ISIS_EXT_INFO) DECLARE_MTYPE(ISIS_MPLS_TE) DECLARE_MTYPE(ISIS_ACL_NAME) +DECLARE_MTYPE(ISIS_PLIST_NAME) #endif /* _QUAGGA_ISIS_MEMORY_H */ diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c index c3d2f238dd..6d46e6b67e 100644 --- a/isisd/isis_nb.c +++ b/isisd/isis_nb.c @@ -60,9 +60,22 @@ const struct frr_yang_module_info frr_isisd_info = { }, }, { + .xpath = "/frr-isisd:isis/instance/attach-send", + .cbs = { + .cli_show = cli_show_isis_attached_send, + .modify = isis_instance_attached_send_modify, + }, + }, + { + .xpath = "/frr-isisd:isis/instance/attach-receive-ignore", + .cbs = { + .cli_show = cli_show_isis_attached_receive, + .modify = isis_instance_attached_receive_modify, + }, + }, + { .xpath = "/frr-isisd:isis/instance/attached", .cbs = { - .cli_show = cli_show_isis_attached, .modify = isis_instance_attached_modify, }, }, @@ -485,6 +498,14 @@ const struct frr_yang_module_info frr_isisd_info = { } }, { + .xpath = "/frr-isisd:isis/instance/fast-reroute/level-1/remote-lfa/prefix-list", + .cbs = { + .cli_show = cli_show_isis_frr_remote_lfa_plist, + .modify = isis_instance_fast_reroute_level_1_remote_lfa_prefix_list_modify, + .destroy = isis_instance_fast_reroute_level_1_remote_lfa_prefix_list_destroy, + } + }, + { .xpath = "/frr-isisd:isis/instance/fast-reroute/level-2/lfa/load-sharing", .cbs = { .cli_show = cli_show_isis_frr_lfa_load_sharing, @@ -514,6 +535,14 @@ const struct frr_yang_module_info frr_isisd_info = { } }, { + .xpath = "/frr-isisd:isis/instance/fast-reroute/level-2/remote-lfa/prefix-list", + .cbs = { + .cli_show = cli_show_isis_frr_remote_lfa_plist, + .modify = isis_instance_fast_reroute_level_2_remote_lfa_prefix_list_modify, + .destroy = isis_instance_fast_reroute_level_2_remote_lfa_prefix_list_destroy, + } + }, + { .xpath = "/frr-isisd:isis/instance/log-adjacency-changes", .cbs = { .cli_show = cli_show_isis_log_adjacency, @@ -927,6 +956,20 @@ const struct frr_yang_module_info frr_isisd_info = { } }, { + .xpath = "/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-1/remote-lfa/enable", + .cbs = { + .modify = lib_interface_isis_fast_reroute_level_1_remote_lfa_enable_modify, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-1/remote-lfa/maximum-metric", + .cbs = { + .cli_show = cli_show_frr_remote_lfa_max_metric, + .modify = lib_interface_isis_fast_reroute_level_1_remote_lfa_maximum_metric_modify, + .destroy = lib_interface_isis_fast_reroute_level_1_remote_lfa_maximum_metric_destroy, + } + }, + { .xpath = "/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-1/ti-lfa/enable", .cbs = { .modify = lib_interface_isis_fast_reroute_level_1_ti_lfa_enable_modify, @@ -953,6 +996,20 @@ const struct frr_yang_module_info frr_isisd_info = { } }, { + .xpath = "/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-2/remote-lfa/enable", + .cbs = { + .modify = lib_interface_isis_fast_reroute_level_2_remote_lfa_enable_modify, + } + }, + { + .xpath = "/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-2/remote-lfa/maximum-metric", + .cbs = { + .cli_show = cli_show_frr_remote_lfa_max_metric, + .modify = 
lib_interface_isis_fast_reroute_level_2_remote_lfa_maximum_metric_modify, + .destroy = lib_interface_isis_fast_reroute_level_2_remote_lfa_maximum_metric_destroy, + } + }, + { .xpath = "/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-2/ti-lfa/enable", .cbs = { .modify = lib_interface_isis_fast_reroute_level_2_ti_lfa_enable_modify, diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h index f529f20861..8ecd8134e6 100644 --- a/isisd/isis_nb.h +++ b/isisd/isis_nb.h @@ -34,6 +34,8 @@ int isis_instance_is_type_modify(struct nb_cb_modify_args *args); int isis_instance_area_address_create(struct nb_cb_create_args *args); int isis_instance_area_address_destroy(struct nb_cb_destroy_args *args); int isis_instance_dynamic_hostname_modify(struct nb_cb_modify_args *args); +int isis_instance_attached_send_modify(struct nb_cb_modify_args *args); +int isis_instance_attached_receive_modify(struct nb_cb_modify_args *args); int isis_instance_attached_modify(struct nb_cb_modify_args *args); int isis_instance_overload_modify(struct nb_cb_modify_args *args); int isis_instance_metric_style_modify(struct nb_cb_modify_args *args); @@ -183,6 +185,10 @@ int isis_instance_fast_reroute_level_1_lfa_tiebreaker_destroy( struct nb_cb_destroy_args *args); int isis_instance_fast_reroute_level_1_lfa_tiebreaker_type_modify( struct nb_cb_modify_args *args); +int isis_instance_fast_reroute_level_1_remote_lfa_prefix_list_modify( + struct nb_cb_modify_args *args); +int isis_instance_fast_reroute_level_1_remote_lfa_prefix_list_destroy( + struct nb_cb_destroy_args *args); int isis_instance_fast_reroute_level_2_lfa_load_sharing_modify( struct nb_cb_modify_args *args); int isis_instance_fast_reroute_level_2_lfa_priority_limit_modify( @@ -195,6 +201,10 @@ int isis_instance_fast_reroute_level_2_lfa_tiebreaker_destroy( struct nb_cb_destroy_args *args); int isis_instance_fast_reroute_level_2_lfa_tiebreaker_type_modify( struct nb_cb_modify_args *args); +int isis_instance_fast_reroute_level_2_remote_lfa_prefix_list_modify( + struct nb_cb_modify_args *args); +int isis_instance_fast_reroute_level_2_remote_lfa_prefix_list_destroy( + struct nb_cb_destroy_args *args); int isis_instance_log_adjacency_changes_modify(struct nb_cb_modify_args *args); int isis_instance_mpls_te_create(struct nb_cb_create_args *args); int isis_instance_mpls_te_destroy(struct nb_cb_destroy_args *args); @@ -300,6 +310,12 @@ int lib_interface_isis_fast_reroute_level_1_lfa_exclude_interface_create( struct nb_cb_create_args *args); int lib_interface_isis_fast_reroute_level_1_lfa_exclude_interface_destroy( struct nb_cb_destroy_args *args); +int lib_interface_isis_fast_reroute_level_1_remote_lfa_enable_modify( + struct nb_cb_modify_args *args); +int lib_interface_isis_fast_reroute_level_1_remote_lfa_maximum_metric_modify( + struct nb_cb_modify_args *args); +int lib_interface_isis_fast_reroute_level_1_remote_lfa_maximum_metric_destroy( + struct nb_cb_destroy_args *args); int lib_interface_isis_fast_reroute_level_1_ti_lfa_enable_modify( struct nb_cb_modify_args *args); int lib_interface_isis_fast_reroute_level_1_ti_lfa_node_protection_modify( @@ -310,6 +326,12 @@ int lib_interface_isis_fast_reroute_level_2_lfa_exclude_interface_create( struct nb_cb_create_args *args); int lib_interface_isis_fast_reroute_level_2_lfa_exclude_interface_destroy( struct nb_cb_destroy_args *args); +int lib_interface_isis_fast_reroute_level_2_remote_lfa_enable_modify( + struct nb_cb_modify_args *args); +int lib_interface_isis_fast_reroute_level_2_remote_lfa_maximum_metric_modify( + struct 
nb_cb_modify_args *args); +int lib_interface_isis_fast_reroute_level_2_remote_lfa_maximum_metric_destroy( + struct nb_cb_destroy_args *args); int lib_interface_isis_fast_reroute_level_2_ti_lfa_enable_modify( struct nb_cb_modify_args *args); int lib_interface_isis_fast_reroute_level_2_ti_lfa_node_protection_modify( @@ -404,8 +426,10 @@ void cli_show_isis_is_type(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_dynamic_hostname(struct vty *vty, struct lyd_node *dnode, bool show_defaults); -void cli_show_isis_attached(struct vty *vty, struct lyd_node *dnode, - bool show_defaults); +void cli_show_isis_attached_send(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +void cli_show_isis_attached_receive(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); void cli_show_isis_overload(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_metric_style(struct vty *vty, struct lyd_node *dnode, @@ -467,6 +491,8 @@ void cli_show_isis_frr_lfa_tiebreaker(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_isis_frr_lfa_load_sharing(struct vty *vty, struct lyd_node *dnode, bool show_defaults); +void cli_show_isis_frr_remote_lfa_plist(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); void cli_show_ip_isis_passive(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_ip_isis_password(struct vty *vty, struct lyd_node *dnode, @@ -503,6 +529,8 @@ void cli_show_ip_isis_frr(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_frr_lfa_exclude_interface(struct vty *vty, struct lyd_node *dnode, bool show_defaults); +void cli_show_frr_remote_lfa_max_metric(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); void cli_show_ip_isis_circ_type(struct vty *vty, struct lyd_node *dnode, bool show_defaults); void cli_show_ip_isis_network_type(struct vty *vty, struct lyd_node *dnode, diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index 84ca801d2b..45bbc9737b 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -28,6 +28,7 @@ #include "log.h" #include "bfd.h" #include "filter.h" +#include "plist.h" #include "spf_backoff.h" #include "lib_errors.h" #include "vrf.h" @@ -51,6 +52,7 @@ #include "isisd/isis_mt.h" #include "isisd/isis_redist.h" #include "isisd/isis_ldp_sync.h" +#include "isisd/isis_dr.h" extern struct zclient *zclient; @@ -213,6 +215,9 @@ int isis_instance_area_address_destroy(struct nb_cb_destroy_args *args) uint8_t buff[255]; struct isis_area *area; const char *net_title; + struct listnode *cnode; + struct isis_circuit *circuit; + int lvl; if (args->event != NB_EV_APPLY) return NB_OK; @@ -236,6 +241,11 @@ int isis_instance_area_address_destroy(struct nb_cb_destroy_args *args) * Last area address - reset the SystemID for this router */ if (listcount(area->area_addrs) == 0) { + for (ALL_LIST_ELEMENTS_RO(area->circuit_list, cnode, circuit)) + for (lvl = IS_LEVEL_1; lvl <= IS_LEVEL_2; ++lvl) { + if (circuit->u.bc.is_dr[lvl - 1]) + isis_dr_resign(circuit, lvl); + } memset(area->isis->sysid, 0, ISIS_SYS_ID_LEN); area->isis->sysid_set = 0; if (IS_DEBUG_EVENTS) @@ -262,9 +272,9 @@ int isis_instance_dynamic_hostname_modify(struct nb_cb_modify_args *args) } /* - * XPath: /frr-isisd:isis/instance/attached + * XPath: /frr-isisd:isis/instance/attach-send */ -int isis_instance_attached_modify(struct nb_cb_modify_args *args) +int isis_instance_attached_send_modify(struct nb_cb_modify_args *args) { struct isis_area *area; bool 
attached; @@ -274,8 +284,34 @@ int isis_instance_attached_modify(struct nb_cb_modify_args *args) area = nb_running_get_entry(args->dnode, NULL, true); attached = yang_dnode_get_bool(args->dnode, NULL); - isis_area_attached_bit_set(area, attached); + isis_area_attached_bit_send_set(area, attached); + + return NB_OK; +} + +/* + * XPath: /frr-isisd:isis/instance/attach-receive-ignore + */ +int isis_instance_attached_receive_modify(struct nb_cb_modify_args *args) +{ + struct isis_area *area; + bool attached; + if (args->event != NB_EV_APPLY) + return NB_OK; + + area = nb_running_get_entry(args->dnode, NULL, true); + attached = yang_dnode_get_bool(args->dnode, NULL); + isis_area_attached_bit_receive_set(area, attached); + + return NB_OK; +} + +/* + * XPath: /frr-isisd:isis/instance/attached + */ +int isis_instance_attached_modify(struct nb_cb_modify_args *args) +{ return NB_OK; } @@ -1543,6 +1579,45 @@ int isis_instance_fast_reroute_level_1_lfa_tiebreaker_type_modify( } /* + * XPath: /frr-isisd:isis/instance/fast-reroute/level-1/remote-lfa/prefix-list + */ +int isis_instance_fast_reroute_level_1_remote_lfa_prefix_list_modify( + struct nb_cb_modify_args *args) +{ + struct isis_area *area; + const char *plist_name; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + area = nb_running_get_entry(args->dnode, NULL, true); + plist_name = yang_dnode_get_string(args->dnode, NULL); + + area->rlfa_plist_name[0] = XSTRDUP(MTYPE_ISIS_PLIST_NAME, plist_name); + area->rlfa_plist[0] = prefix_list_lookup(AFI_IP, plist_name); + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +int isis_instance_fast_reroute_level_1_remote_lfa_prefix_list_destroy( + struct nb_cb_destroy_args *args) +{ + struct isis_area *area; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + area = nb_running_get_entry(args->dnode, NULL, true); + + XFREE(MTYPE_ISIS_PLIST_NAME, area->rlfa_plist_name[0]); + area->rlfa_plist[0] = NULL; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +/* * XPath: /frr-isisd:isis/instance/fast-reroute/level-2/lfa/load-sharing */ int isis_instance_fast_reroute_level_2_lfa_load_sharing_modify( @@ -1653,6 +1728,45 @@ int isis_instance_fast_reroute_level_2_lfa_tiebreaker_type_modify( } /* + * XPath: /frr-isisd:isis/instance/fast-reroute/level-2/remote-lfa/prefix-list + */ +int isis_instance_fast_reroute_level_2_remote_lfa_prefix_list_modify( + struct nb_cb_modify_args *args) +{ + struct isis_area *area; + const char *plist_name; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + area = nb_running_get_entry(args->dnode, NULL, true); + plist_name = yang_dnode_get_string(args->dnode, NULL); + + area->rlfa_plist_name[1] = XSTRDUP(MTYPE_ISIS_PLIST_NAME, plist_name); + area->rlfa_plist[1] = prefix_list_lookup(AFI_IP, plist_name); + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +int isis_instance_fast_reroute_level_2_remote_lfa_prefix_list_destroy( + struct nb_cb_destroy_args *args) +{ + struct isis_area *area; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + area = nb_running_get_entry(args->dnode, NULL, true); + + XFREE(MTYPE_ISIS_PLIST_NAME, area->rlfa_plist_name[1]); + area->rlfa_plist[1] = NULL; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +/* * XPath: /frr-isisd:isis/instance/log-adjacency-changes */ int isis_instance_log_adjacency_changes_modify(struct nb_cb_modify_args *args) @@ -3439,6 +3553,74 @@ int lib_interface_isis_fast_reroute_level_1_lfa_exclude_interface_destroy( /* * XPath: + * 
/frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-1/remote-lfa/enable + */ +int lib_interface_isis_fast_reroute_level_1_remote_lfa_enable_modify( + struct nb_cb_modify_args *args) +{ + struct isis_area *area; + struct isis_circuit *circuit; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + circuit = nb_running_get_entry(args->dnode, NULL, true); + circuit->rlfa_protection[0] = yang_dnode_get_bool(args->dnode, NULL); + if (circuit->rlfa_protection[0]) + circuit->area->rlfa_protected_links[0]++; + else { + assert(circuit->area->rlfa_protected_links[0] > 0); + circuit->area->rlfa_protected_links[0]--; + } + + area = circuit->area; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +/* + * XPath: + * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-1/remote-lfa/maximum-metric + */ +int lib_interface_isis_fast_reroute_level_1_remote_lfa_maximum_metric_modify( + struct nb_cb_modify_args *args) +{ + struct isis_area *area; + struct isis_circuit *circuit; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + circuit = nb_running_get_entry(args->dnode, NULL, true); + circuit->rlfa_max_metric[0] = yang_dnode_get_uint32(args->dnode, NULL); + + area = circuit->area; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +int lib_interface_isis_fast_reroute_level_1_remote_lfa_maximum_metric_destroy( + struct nb_cb_destroy_args *args) +{ + struct isis_area *area; + struct isis_circuit *circuit; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + circuit = nb_running_get_entry(args->dnode, NULL, true); + circuit->rlfa_max_metric[0] = 0; + + area = circuit->area; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +/* + * XPath: * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-1/ti-lfa/enable */ int lib_interface_isis_fast_reroute_level_1_ti_lfa_enable_modify( @@ -3562,6 +3744,74 @@ int lib_interface_isis_fast_reroute_level_2_lfa_exclude_interface_destroy( /* * XPath: + * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-2/remote-lfa/enable + */ +int lib_interface_isis_fast_reroute_level_2_remote_lfa_enable_modify( + struct nb_cb_modify_args *args) +{ + struct isis_area *area; + struct isis_circuit *circuit; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + circuit = nb_running_get_entry(args->dnode, NULL, true); + circuit->rlfa_protection[1] = yang_dnode_get_bool(args->dnode, NULL); + if (circuit->rlfa_protection[1]) + circuit->area->rlfa_protected_links[1]++; + else { + assert(circuit->area->rlfa_protected_links[1] > 0); + circuit->area->rlfa_protected_links[1]--; + } + + area = circuit->area; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +/* + * XPath: + * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-2/remote-lfa/maximum-metric + */ +int lib_interface_isis_fast_reroute_level_2_remote_lfa_maximum_metric_modify( + struct nb_cb_modify_args *args) +{ + struct isis_area *area; + struct isis_circuit *circuit; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + circuit = nb_running_get_entry(args->dnode, NULL, true); + circuit->rlfa_max_metric[1] = yang_dnode_get_uint32(args->dnode, NULL); + + area = circuit->area; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +int lib_interface_isis_fast_reroute_level_2_remote_lfa_maximum_metric_destroy( + struct nb_cb_destroy_args *args) +{ + struct isis_area *area; + struct isis_circuit *circuit; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + circuit = 
nb_running_get_entry(args->dnode, NULL, true); + circuit->rlfa_max_metric[1] = 0; + + area = circuit->area; + lsp_regenerate_schedule(area, area->is_type, 0); + + return NB_OK; +} + +/* + * XPath: * /frr-interface:lib/interface/frr-isisd:isis/fast-reroute/level-2/ti-lfa/enable */ int lib_interface_isis_fast_reroute_level_2_ti_lfa_enable_modify( diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c index 72de5d6543..a02b48157f 100644 --- a/isisd/isis_pdu.c +++ b/isisd/isis_pdu.c @@ -167,7 +167,7 @@ static int process_p2p_hello(struct iih_info *iih) if (adj) { if (memcmp(iih->sys_id, adj->sysid, ISIS_SYS_ID_LEN)) { zlog_debug( - "hello source and adjacency do not match, set adj down\n"); + "hello source and adjacency do not match, set adj down"); isis_adj_state_change(&adj, ISIS_ADJ_DOWN, "adj do not exist"); return ISIS_OK; @@ -729,8 +729,8 @@ static int process_hello(uint8_t pdu_type, struct isis_circuit *circuit, if (!memcmp(iih.sys_id, circuit->isis->sysid, ISIS_SYS_ID_LEN)) { zlog_warn( - "ISIS-Adj (%s): Received IIH with own sysid - discard", - circuit->area->area_tag); + "ISIS-Adj (%s): Received IIH with own sysid on %s - discard", + circuit->area->area_tag, circuit->interface->name); circuit->rej_adjacencies++; #ifndef FABRICD isis_notif_reject_adjacency( diff --git a/isisd/isis_route.c b/isisd/isis_route.c index d32f219e98..e1baf351f4 100644 --- a/isisd/isis_route.c +++ b/isisd/isis_route.c @@ -279,6 +279,22 @@ static bool isis_sr_psid_info_same(struct isis_sr_psid_info *new, return true; } +static bool isis_label_stack_same(struct mpls_label_stack *new, + struct mpls_label_stack *old) +{ + if (!new && !old) + return true; + if (!new || !old) + return false; + if (new->num_labels != old->num_labels) + return false; + if (memcmp(&new->label, &old->label, + sizeof(mpls_label_t) * new->num_labels)) + return false; + + return true; +} + static int isis_route_info_same(struct isis_route_info *new, struct isis_route_info *old, char *buf, size_t buf_size) @@ -327,6 +343,12 @@ static int isis_route_info_same(struct isis_route_info *new, snprintf(buf, buf_size, "nhop SR label"); return 0; } + if (!isis_label_stack_same(new_nh->label_stack, + old_nh->label_stack)) { + if (buf) + snprintf(buf, buf_size, "nhop label stack"); + return 0; + } } /* only the resync flag needs to be checked */ @@ -400,8 +422,8 @@ isis_route_create(struct prefix *prefix, struct prefix_ipv6 *src_p, return route_info; } -static void isis_route_delete(struct isis_area *area, struct route_node *rode, - struct route_table *table) +void isis_route_delete(struct isis_area *area, struct route_node *rode, + struct route_table *table) { struct isis_route_info *rinfo; char buff[SRCDEST2STR_BUFFER]; @@ -466,9 +488,6 @@ static void isis_route_update(struct isis_area *area, struct prefix *prefix, SET_FLAG(route_info->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED); UNSET_FLAG(route_info->flag, ISIS_ROUTE_FLAG_ZEBRA_RESYNC); } else { - if (!CHECK_FLAG(route_info->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED)) - return; - /* Uninstall Prefix-SID label. 
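The isis_route_info_same() hunk above now also flags a nexthop as changed when its label stack differs, using the new isis_label_stack_same() helper: compare the label count first, then only the labels actually in use. A minimal, self-contained sketch of that comparison rule (the fixed-size stand-in struct below is illustrative, not FRR's real mpls_label_stack):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t mpls_label_t;

/* Illustrative stand-in with room for up to four labels. */
struct label_stack {
	uint8_t num_labels;
	mpls_label_t label[4];
};

/* Equal iff both are absent, or both carry the same in-use labels;
 * trailing unused slots must not influence the result. */
static bool label_stack_same(const struct label_stack *a,
			     const struct label_stack *b)
{
	if (!a && !b)
		return true;
	if (!a || !b)
		return false;
	if (a->num_labels != b->num_labels)
		return false;
	return memcmp(a->label, b->label,
		      sizeof(mpls_label_t) * a->num_labels) == 0;
}

int main(void)
{
	struct label_stack x = {2, {100, 200, 999, 0}};
	struct label_stack y = {2, {100, 200, 0, 0}};

	/* Same two labels in use, different stale slots: still equal. */
	return label_stack_same(&x, &y) ? 0 : 1;
}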
*/ if (route_info->sr.present) isis_zebra_prefix_sid_uninstall( @@ -516,6 +535,10 @@ static void _isis_route_verify_table(struct isis_area *area, rinfo->backup = rnode_bck->info; UNSET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED); + } else if (rinfo->backup) { + rinfo->backup = NULL; + UNSET_FLAG(rinfo->flag, + ISIS_ROUTE_FLAG_ZEBRA_SYNCED); } } @@ -629,6 +652,10 @@ void isis_route_verify_merge(struct isis_area *area, rinfo->backup = rnode_bck->info; UNSET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED); + } else if (rinfo->backup) { + rinfo->backup = NULL; + UNSET_FLAG(rinfo->flag, + ISIS_ROUTE_FLAG_ZEBRA_SYNCED); } mrnode = srcdest_rnode_get(merge, prefix, src_p); diff --git a/isisd/isis_route.h b/isisd/isis_route.h index 0d4f884959..d6763ec76c 100644 --- a/isisd/isis_route.h +++ b/isisd/isis_route.h @@ -63,6 +63,8 @@ isis_route_create(struct prefix *prefix, struct prefix_ipv6 *src_p, uint32_t cost, uint32_t depth, struct isis_sr_psid_info *sr, struct list *adjacencies, bool allow_ecmp, struct isis_area *area, struct route_table *table); +void isis_route_delete(struct isis_area *area, struct route_node *rode, + struct route_table *table); /* Walk the given table and install new routes to zebra and remove old ones. * route status is tracked using ISIS_ROUTE_FLAG_ACTIVE */ diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c index 57b1d66c22..22dfee994f 100644 --- a/isisd/isis_spf.c +++ b/isisd/isis_spf.c @@ -56,6 +56,7 @@ #include "isis_csm.h" #include "isis_mt.h" #include "isis_tlvs.h" +#include "isis_zebra.h" #include "fabricd.h" #include "isis_spf_private.h" @@ -354,7 +355,10 @@ struct isis_spftree *isis_spftree_new(struct isis_area *area, tree->tree_id = tree_id; tree->family = (tree->tree_id == SPFTREE_IPV4) ? AF_INET : AF_INET6; tree->flags = flags; - if (tree->type == SPF_TYPE_TI_LFA) { + isis_rlfa_list_init(tree); + tree->lfa.remote.pc_spftrees = list_new(); + tree->lfa.remote.pc_spftrees->del = (void (*)(void *))isis_spftree_del; + if (tree->type == SPF_TYPE_RLFA || tree->type == SPF_TYPE_TI_LFA) { isis_spf_node_list_init(&tree->lfa.p_space); isis_spf_node_list_init(&tree->lfa.q_space); } @@ -366,7 +370,11 @@ void isis_spftree_del(struct isis_spftree *spftree) { hash_clean(spftree->prefix_sids, NULL); hash_free(spftree->prefix_sids); - if (spftree->type == SPF_TYPE_TI_LFA) { + isis_zebra_rlfa_unregister_all(spftree); + isis_rlfa_list_clear(spftree); + list_delete(&spftree->lfa.remote.pc_spftrees); + if (spftree->type == SPF_TYPE_RLFA + || spftree->type == SPF_TYPE_TI_LFA) { isis_spf_node_list_clear(&spftree->lfa.q_space); isis_spf_node_list_clear(&spftree->lfa.p_space); } @@ -820,7 +828,8 @@ lspfragloop: #endif /* EXTREME_DEBUG */ if (no_overload) { - if (pseudo_lsp || spftree->mtid == ISIS_MT_IPV4_UNICAST) { + if ((pseudo_lsp || spftree->mtid == ISIS_MT_IPV4_UNICAST) + && spftree->area->oldmetric) { struct isis_oldstyle_reach *r; for (r = (struct isis_oldstyle_reach *) lsp->tlvs->oldstyle_reach.head; @@ -848,42 +857,47 @@ lspfragloop: } } - struct isis_item_list *te_neighs = NULL; - if (pseudo_lsp || spftree->mtid == ISIS_MT_IPV4_UNICAST) - te_neighs = &lsp->tlvs->extended_reach; - else - te_neighs = isis_lookup_mt_items(&lsp->tlvs->mt_reach, - spftree->mtid); - - struct isis_extended_reach *er; - for (er = te_neighs - ? 
(struct isis_extended_reach *) - te_neighs->head - : NULL; - er; er = er->next) { - /* C.2.6 a) */ - /* Two way connectivity */ - if (!LSP_PSEUDO_ID(er->id) - && !memcmp(er->id, root_sysid, ISIS_SYS_ID_LEN)) - continue; - if (!pseudo_lsp - && !memcmp(er->id, null_sysid, ISIS_SYS_ID_LEN)) - continue; - dist = cost - + (CHECK_FLAG(spftree->flags, - F_SPFTREE_HOPCOUNT_METRIC) - ? 1 - : er->metric); - process_N(spftree, - LSP_PSEUDO_ID(er->id) ? VTYPE_PSEUDO_TE_IS - : VTYPE_NONPSEUDO_TE_IS, - (void *)er->id, dist, depth + 1, NULL, - parent); + if (spftree->area->newmetric) { + struct isis_item_list *te_neighs = NULL; + if (pseudo_lsp || spftree->mtid == ISIS_MT_IPV4_UNICAST) + te_neighs = &lsp->tlvs->extended_reach; + else + te_neighs = isis_lookup_mt_items( + &lsp->tlvs->mt_reach, spftree->mtid); + + struct isis_extended_reach *er; + for (er = te_neighs ? (struct isis_extended_reach *) + te_neighs->head + : NULL; + er; er = er->next) { + /* C.2.6 a) */ + /* Two way connectivity */ + if (!LSP_PSEUDO_ID(er->id) + && !memcmp(er->id, root_sysid, + ISIS_SYS_ID_LEN)) + continue; + if (!pseudo_lsp + && !memcmp(er->id, null_sysid, + ISIS_SYS_ID_LEN)) + continue; + dist = cost + + (CHECK_FLAG(spftree->flags, + F_SPFTREE_HOPCOUNT_METRIC) + ? 1 + : er->metric); + process_N(spftree, + LSP_PSEUDO_ID(er->id) + ? VTYPE_PSEUDO_TE_IS + : VTYPE_NONPSEUDO_TE_IS, + (void *)er->id, dist, depth + 1, NULL, + parent); + } } } if (!fabricd && !pseudo_lsp && spftree->family == AF_INET - && spftree->mtid == ISIS_MT_IPV4_UNICAST) { + && spftree->mtid == ISIS_MT_IPV4_UNICAST + && spftree->area->oldmetric) { struct isis_item_list *reachs[] = { &lsp->tlvs->oldstyle_ip_reach, &lsp->tlvs->oldstyle_ip_reach_ext}; @@ -908,6 +922,10 @@ lspfragloop: } } + /* we can skip all the rest if we're using metric style narrow */ + if (!spftree->area->newmetric) + goto end; + if (!pseudo_lsp && spftree->family == AF_INET) { struct isis_item_list *ipv4_reachs; if (spftree->mtid == ISIS_MT_IPV4_UNICAST) @@ -1027,6 +1045,35 @@ lspfragloop: } } +end: + + /* if attach bit set in LSP, attached-bit receive ignore is + * not configured, we are a level-1 area and we have no other + * level-2 | level1-2 areas then add a default route toward + * this neighbor + */ + if ((lsp->hdr.lsp_bits & LSPBIT_ATT) == LSPBIT_ATT + && !spftree->area->attached_bit_rcv_ignore + && spftree->area->is_type == IS_LEVEL_1 + && !isis_area_count(spftree->area->isis, IS_LEVEL_2)) { + struct prefix_pair ip_info = { {0} }; + if (IS_DEBUG_RTE_EVENTS) + zlog_debug("ISIS-Spf (%s): add default %s route", + rawlspid_print(lsp->hdr.lsp_id), + spftree->family == AF_INET ? 
"ipv4" + : "ipv6"); + + if (spftree->family == AF_INET) { + ip_info.dest.family = AF_INET; + vtype = VTYPE_IPREACH_INTERNAL; + } else { + ip_info.dest.family = AF_INET6; + vtype = VTYPE_IP6REACH_INTERNAL; + } + process_N(spftree, vtype, &ip_info, cost, depth + 1, NULL, + parent); + } + if (fragnode == NULL) fragnode = listhead(lsp->lspu.frags); else @@ -1429,6 +1476,9 @@ static void init_spt(struct isis_spftree *spftree, int mtid) list_delete_all_node(spftree->sadj_list); isis_vertex_queue_clear(&spftree->tents); isis_vertex_queue_clear(&spftree->paths); + isis_zebra_rlfa_unregister_all(spftree); + isis_rlfa_list_clear(spftree); + list_delete_all_node(spftree->lfa.remote.pc_spftrees); memset(&spftree->lfa.protection_counters, 0, sizeof(spftree->lfa.protection_counters)); @@ -1502,12 +1552,13 @@ static void spf_path_process(struct isis_spftree *spftree, priority = spf_prefix_priority(spftree, vertex); vertex->N.ip.priority = priority; if (vertex->depth == 1 || listcount(vertex->Adj_N) > 0) { + struct isis_spftree *pre_spftree; struct route_table *route_table; bool allow_ecmp; - if (spftree->type == SPF_TYPE_TI_LFA) { - struct isis_spftree *pre_spftree; - + switch (spftree->type) { + case SPF_TYPE_RLFA: + case SPF_TYPE_TI_LFA: if (priority > area->lfa_priority_limit[level - 1]) { if (IS_DEBUG_LFA) @@ -1520,7 +1571,16 @@ static void spf_path_process(struct isis_spftree *spftree, sizeof(buff))); return; } + break; + default: + break; + } + switch (spftree->type) { + case SPF_TYPE_RLFA: + isis_rlfa_check(spftree, vertex); + return; + case SPF_TYPE_TI_LFA: if (isis_tilfa_check(spftree, vertex) != 0) return; @@ -1529,7 +1589,8 @@ static void spf_path_process(struct isis_spftree *spftree, allow_ecmp = area->lfa_load_sharing[level - 1]; pre_spftree->lfa.protection_counters .tilfa[vertex->N.ip.priority] += 1; - } else { + break; + default: route_table = spftree->route_table; allow_ecmp = true; @@ -1544,6 +1605,7 @@ static void spf_path_process(struct isis_spftree *spftree, spftree->lfa.protection_counters .ecmp[priority] += 1; } + break; } isis_route_create( @@ -1834,6 +1896,7 @@ int _isis_spf_schedule(struct isis_area *area, int level, area->area_tag, level, diff, func, file, line); } + thread_cancel(&area->t_rlfa_rib_update); if (area->spf_delay_ietf[level - 1]) { /* Need to call schedule function also if spf delay is running * to diff --git a/isisd/isis_spf.h b/isisd/isis_spf.h index 5b6fcdaf68..5b3aa59379 100644 --- a/isisd/isis_spf.h +++ b/isisd/isis_spf.h @@ -31,6 +31,7 @@ struct isis_spftree; enum spf_type { SPF_TYPE_FORWARD = 1, SPF_TYPE_REVERSE, + SPF_TYPE_RLFA, SPF_TYPE_TI_LFA, }; diff --git a/isisd/isis_spf_private.h b/isisd/isis_spf_private.h index b7f326ca86..79dfa3e164 100644 --- a/isisd/isis_spf_private.h +++ b/isisd/isis_spf_private.h @@ -76,7 +76,9 @@ struct isis_vertex { struct list *parents; /* list of parents for ECMP */ struct hash *firsthops; /* first two hops to neighbor */ uint64_t insert_counter; + uint8_t flags; }; +#define F_ISIS_VERTEX_LFA_PROTECTED 0x01 /* Vertex Queue and associated functions */ @@ -349,6 +351,21 @@ struct isis_spftree { struct isis_spf_nodes p_space; struct isis_spf_nodes q_space; + /* Remote LFA related information. */ + struct { + /* List of RLFAs eligible to be installed. */ + struct rlfa_tree_head rlfas; + + /* + * RLFA post-convergence SPTs (needed to activate RLFAs + * once label information is received from LDP). + */ + struct list *pc_spftrees; + + /* RLFA maximum metric (or zero if absent). 
*/ + uint32_t max_metric; + } remote; + /* Protection counters. */ struct { uint32_t lfa[SPF_PREFIX_PRIO_MAX]; diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c index f08737c2c1..703532234a 100644 --- a/isisd/isis_zebra.c +++ b/isisd/isis_zebra.c @@ -47,6 +47,8 @@ #include "isisd/isis_circuit.h" #include "isisd/isis_csm.h" #include "isisd/isis_lsp.h" +#include "isisd/isis_spf.h" +#include "isisd/isis_spf_private.h" #include "isisd/isis_route.h" #include "isisd/isis_zebra.h" #include "isisd/isis_adjacency.h" @@ -540,6 +542,72 @@ void isis_zebra_redistribute_unset(afi_t afi, int type) type, 0, VRF_DEFAULT); } +/** + * Register RLFA with LDP. + */ +int isis_zebra_rlfa_register(struct isis_spftree *spftree, struct rlfa *rlfa) +{ + struct isis_area *area = spftree->area; + struct zapi_rlfa_request zr = {}; + int ret; + + if (!zclient) + return 0; + + zr.igp.vrf_id = area->isis->vrf_id; + zr.igp.protocol = ZEBRA_ROUTE_ISIS; + strlcpy(zr.igp.isis.area_tag, area->area_tag, + sizeof(zr.igp.isis.area_tag)); + zr.igp.isis.spf.tree_id = spftree->tree_id; + zr.igp.isis.spf.level = spftree->level; + zr.igp.isis.spf.run_id = spftree->runcount; + zr.destination = rlfa->prefix; + zr.pq_address = rlfa->pq_address; + + zlog_debug("ISIS-LFA: registering RLFA %pFX@%pI4 with LDP", + &rlfa->prefix, &rlfa->pq_address); + + ret = zclient_send_opaque_unicast(zclient, LDP_RLFA_REGISTER, + ZEBRA_ROUTE_LDP, 0, 0, + (const uint8_t *)&zr, sizeof(zr)); + if (ret == ZCLIENT_SEND_FAILURE) { + zlog_warn("ISIS-LFA: failed to register RLFA with LDP"); + return -1; + } + + return 0; +} + +/** + * Unregister all RLFAs from the given SPF tree with LDP. + */ +void isis_zebra_rlfa_unregister_all(struct isis_spftree *spftree) +{ + struct isis_area *area = spftree->area; + struct zapi_rlfa_igp igp = {}; + int ret; + + if (!zclient || spftree->type != SPF_TYPE_FORWARD + || CHECK_FLAG(spftree->flags, F_SPFTREE_NO_ADJACENCIES)) + return; + + if (IS_DEBUG_LFA) + zlog_debug("ISIS-LFA: unregistering all RLFAs with LDP"); + + igp.vrf_id = area->isis->vrf_id; + igp.protocol = ZEBRA_ROUTE_ISIS; + strlcpy(igp.isis.area_tag, area->area_tag, sizeof(igp.isis.area_tag)); + igp.isis.spf.tree_id = spftree->tree_id; + igp.isis.spf.level = spftree->level; + igp.isis.spf.run_id = spftree->runcount; + + ret = zclient_send_opaque_unicast(zclient, LDP_RLFA_UNREGISTER_ALL, + ZEBRA_ROUTE_LDP, 0, 0, + (const uint8_t *)&igp, sizeof(igp)); + if (ret == ZCLIENT_SEND_FAILURE) + zlog_warn("ISIS-LFA: failed to unregister RLFA with LDP"); +} + /* Label Manager Functions */ /** @@ -659,6 +727,7 @@ void isis_zebra_vrf_register(struct isis *isis) static void isis_zebra_connected(struct zclient *zclient) { zclient_send_reg_requests(zclient, VRF_DEFAULT); + zclient_register_opaque(zclient, LDP_RLFA_LABELS); } /* @@ -670,6 +739,7 @@ static int isis_opaque_msg_handler(ZAPI_CALLBACK_ARGS) struct zapi_opaque_msg info; struct ldp_igp_sync_if_state state; struct ldp_igp_sync_announce announce; + struct zapi_rlfa_response rlfa; int ret = 0; s = zclient->ibuf; @@ -685,6 +755,10 @@ static int isis_opaque_msg_handler(ZAPI_CALLBACK_ARGS) STREAM_GET(&announce, s, sizeof(announce)); ret = isis_ldp_sync_announce_update(announce); break; + case LDP_RLFA_LABELS: + STREAM_GET(&rlfa, s, sizeof(rlfa)); + isis_rlfa_process_ldp_response(&rlfa); + break; default: break; } @@ -704,6 +778,7 @@ static int isis_zebra_client_close_notify(ZAPI_CALLBACK_ARGS) return -1; isis_ldp_sync_handle_client_close(&info); + isis_ldp_rlfa_handle_client_close(&info); return ret; } @@ -742,6 +817,7 @@ void 
isis_zebra_init(struct thread_master *master, int instance) void isis_zebra_stop(void) { + zclient_unregister_opaque(zclient, LDP_RLFA_LABELS); zclient_stop(zclient_sync); zclient_free(zclient_sync); zclient_stop(zclient); diff --git a/isisd/isis_zebra.h b/isisd/isis_zebra.h index c5c52a6bc6..b44ec4f085 100644 --- a/isisd/isis_zebra.h +++ b/isisd/isis_zebra.h @@ -59,6 +59,8 @@ void isis_zebra_send_adjacency_sid(int cmd, const struct sr_adjacency *sra); int isis_distribute_list_update(int routetype); void isis_zebra_redistribute_set(afi_t afi, int type); void isis_zebra_redistribute_unset(afi_t afi, int type); +int isis_zebra_rlfa_register(struct isis_spftree *spftree, struct rlfa *rlfa); +void isis_zebra_rlfa_unregister_all(struct isis_spftree *spftree); bool isis_zebra_label_manager_ready(void); int isis_zebra_label_manager_connect(void); int isis_zebra_request_label_range(uint32_t base, uint32_t chunk_size); diff --git a/isisd/isisd.c b/isisd/isisd.c index 827e022baf..a802bac13b 100644 --- a/isisd/isisd.c +++ b/isisd/isisd.c @@ -32,6 +32,7 @@ #include "if.h" #include "hash.h" #include "filter.h" +#include "plist.h" #include "stream.h" #include "prefix.h" #include "table.h" @@ -315,6 +316,11 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name) "/frr-isisd:isis/instance/fast-reroute/level-1/lfa/load-sharing"); area->lfa_load_sharing[1] = yang_get_default_bool( "/frr-isisd:isis/instance/fast-reroute/level-2/lfa/load-sharing"); + area->attached_bit_send = + yang_get_default_bool("/frr-isisd:isis/instance/attach-send"); + area->attached_bit_rcv_ignore = yang_get_default_bool( + "/frr-isisd:isis/instance/attach-receive-ignore"); + #else area->max_lsp_lifetime[0] = DEFAULT_LSP_LIFETIME; /* 1200 */ area->max_lsp_lifetime[1] = DEFAULT_LSP_LIFETIME; /* 1200 */ @@ -331,6 +337,8 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name) area->lsp_mtu = DEFAULT_LSP_MTU; area->lfa_load_sharing[0] = true; area->lfa_load_sharing[1] = true; + area->attached_bit_send = true; + area->attached_bit_rcv_ignore = false; #endif /* ifndef FABRICD */ area->lfa_priority_limit[0] = SPF_PREFIX_PRIO_LOW; area->lfa_priority_limit[1] = SPF_PREFIX_PRIO_LOW; @@ -416,6 +424,22 @@ int isis_area_get(struct vty *vty, const char *area_tag) return CMD_SUCCESS; } +/* return the number of Level1 and level-1-2 routers or + * the number of Level2 and level-1-2 routers configured + */ +int isis_area_count(const struct isis *isis, int levels) +{ + struct isis_area *area; + struct listnode *node; + int count = 0; + + for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area)) + if (area->is_type & levels) + count++; + + return count; +} + void isis_area_destroy(struct isis_area *area) { struct listnode *node, *nnode; @@ -485,6 +509,7 @@ void isis_area_destroy(struct isis_area *area) thread_cancel(&area->t_tick); thread_cancel(&area->t_lsp_refresh[0]); thread_cancel(&area->t_lsp_refresh[1]); + thread_cancel(&area->t_rlfa_rib_update); thread_cancel_event(master, area); @@ -649,6 +674,34 @@ void isis_filter_update(struct access_list *access) } } +void isis_prefix_list_update(struct prefix_list *plist) +{ + struct isis *isis; + struct isis_area *area; + struct listnode *node, *anode; + + for (ALL_LIST_ELEMENTS_RO(im->isis, node, isis)) { + for (ALL_LIST_ELEMENTS_RO(isis->area_list, anode, area)) { + for (int level = ISIS_LEVEL1; level <= ISIS_LEVELS; + level++) { + const char *plist_name = + prefix_list_name(plist); + + if (!area->rlfa_plist_name[level - 1]) + continue; + + if 
(!strmatch(area->rlfa_plist_name[level - 1], + plist_name)) + continue; + + area->rlfa_plist[level - 1] = + prefix_list_lookup(AFI_IP, plist_name); + lsp_regenerate_schedule(area, area->is_type, 0); + } + } + } +} + #ifdef FABRICD static void area_set_mt_enabled(struct isis_area *area, uint16_t mtid, bool enabled) @@ -2517,12 +2570,21 @@ void isis_area_overload_bit_set(struct isis_area *area, bool overload_bit) #endif /* ifndef FABRICD */ } -void isis_area_attached_bit_set(struct isis_area *area, bool attached_bit) +void isis_area_attached_bit_send_set(struct isis_area *area, bool attached_bit) +{ + + if (attached_bit != area->attached_bit_send) { + area->attached_bit_send = attached_bit; + lsp_regenerate_schedule(area, IS_LEVEL_1 | IS_LEVEL_2, 1); + } +} + +void isis_area_attached_bit_receive_set(struct isis_area *area, + bool attached_bit) { - char new_attached_bit = attached_bit ? LSPBIT_ATT : 0; - if (new_attached_bit != area->attached_bit) { - area->attached_bit = new_attached_bit; + if (attached_bit != area->attached_bit_rcv_ignore) { + area->attached_bit_rcv_ignore = attached_bit; lsp_regenerate_schedule(area, IS_LEVEL_1 | IS_LEVEL_2, 1); } } diff --git a/isisd/isisd.h b/isisd/isisd.h index 4618d14af3..22d9c6236d 100644 --- a/isisd/isisd.h +++ b/isisd/isisd.h @@ -131,6 +131,7 @@ struct isis_area { struct thread *t_tick; /* LSP walker */ struct thread *t_lsp_refresh[ISIS_LEVELS]; struct timeval last_lsp_refresh_event[ISIS_LEVELS]; + struct thread *t_rlfa_rib_update; /* t_lsp_refresh is used in two ways: * a) regular refresh of LSPs * b) (possibly throttled) updates to LSPs @@ -168,7 +169,8 @@ struct isis_area { /* are we overloaded? */ char overload_bit; /* L1/L2 router identifier for inter-area traffic */ - char attached_bit; + char attached_bit_send; + char attached_bit_rcv_ignore; uint16_t lsp_refresh[ISIS_LEVELS]; /* minimum time allowed before lsp retransmission */ uint16_t lsp_gen_interval[ISIS_LEVELS]; @@ -197,6 +199,9 @@ struct isis_area { size_t lfa_load_sharing[ISIS_LEVELS]; enum spf_prefix_priority lfa_priority_limit[ISIS_LEVELS]; struct lfa_tiebreaker_tree_head lfa_tiebreakers[ISIS_LEVELS]; + char *rlfa_plist_name[ISIS_LEVELS]; + struct prefix_list *rlfa_plist[ISIS_LEVELS]; + size_t rlfa_protected_links[ISIS_LEVELS]; size_t tilfa_protected_links[ISIS_LEVELS]; /* Counters */ uint32_t circuit_state_changes; @@ -238,8 +243,10 @@ struct isis_area *isis_area_lookup(const char *, vrf_id_t vrf_id); struct isis_area *isis_area_lookup_by_vrf(const char *area_tag, const char *vrf_name); int isis_area_get(struct vty *vty, const char *area_tag); +int isis_area_count(const struct isis *isis, int levels); void isis_area_destroy(struct isis_area *area); void isis_filter_update(struct access_list *access); +void isis_prefix_list_update(struct prefix_list *plist); void print_debug(struct vty *, int, int); struct isis_lsp *lsp_for_arg(struct lspdb_head *head, const char *argv, struct isis *isis); @@ -248,7 +255,9 @@ void isis_area_invalidate_routes(struct isis_area *area, int levels); void isis_area_verify_routes(struct isis_area *area); void isis_area_overload_bit_set(struct isis_area *area, bool overload_bit); -void isis_area_attached_bit_set(struct isis_area *area, bool attached_bit); +void isis_area_attached_bit_send_set(struct isis_area *area, bool attached_bit); +void isis_area_attached_bit_receive_set(struct isis_area *area, + bool attached_bit); void isis_area_dynhostname_set(struct isis_area *area, bool dynhostname); void isis_area_metricstyle_set(struct isis_area *area, bool 
old_metric, bool new_metric); diff --git a/ldpd/adjacency.c b/ldpd/adjacency.c index 795a41491c..d390e70ad0 100644 --- a/ldpd/adjacency.c +++ b/ldpd/adjacency.c @@ -183,7 +183,8 @@ adj_itimer(struct thread *thread) if (adj->source.type == HELLO_TARGETED) { if (!(adj->source.target->flags & F_TNBR_CONFIGURED) && - adj->source.target->pw_count == 0) { + adj->source.target->pw_count == 0 && + adj->source.target->rlfa_count == 0) { /* remove dynamic targeted neighbor */ tnbr_del(leconf, adj->source.target); return (0); @@ -259,7 +260,7 @@ struct tnbr * tnbr_check(struct ldpd_conf *xconf, struct tnbr *tnbr) { if (!(tnbr->flags & (F_TNBR_CONFIGURED|F_TNBR_DYNAMIC)) && - tnbr->pw_count == 0) { + tnbr->pw_count == 0 && tnbr->rlfa_count == 0) { tnbr_del(xconf, tnbr); return (NULL); } diff --git a/ldpd/hello.c b/ldpd/hello.c index 327cb32434..5aa14ed067 100644 --- a/ldpd/hello.c +++ b/ldpd/hello.c @@ -67,7 +67,8 @@ send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr) af = tnbr->af; holdtime = tnbr_get_hello_holdtime(tnbr); flags = F_HELLO_TARGETED; - if ((tnbr->flags & F_TNBR_CONFIGURED) || tnbr->pw_count) + if ((tnbr->flags & F_TNBR_CONFIGURED) || tnbr->pw_count + || tnbr->rlfa_count) flags |= F_HELLO_REQ_TARG; fd = (ldp_af_global_get(&global, af))->ldp_edisc_socket; diff --git a/ldpd/lde.c b/ldpd/lde.c index 5ed0ed4520..69338b8bad 100644 --- a/ldpd/lde.c +++ b/ldpd/lde.c @@ -27,6 +27,7 @@ #include "log.h" #include "lde.h" #include "ldp_debug.h" +#include "rlfa.h" #include <lib/log.h> #include "memory.h" @@ -444,6 +445,10 @@ lde_dispatch_parent(struct thread *thread) int shut = 0; struct fec fec; struct ldp_access *laccess; + struct ldp_rlfa_node *rnode, *rntmp; + struct ldp_rlfa_client *rclient; + struct zapi_rlfa_request *rlfa_req; + struct zapi_rlfa_igp *rlfa_igp; iev->ev_read = NULL; @@ -650,6 +655,42 @@ lde_dispatch_parent(struct thread *thread) lde_check_filter_af(AF_INET6, &ldeconf->ipv6, laccess->name); break; + case IMSG_RLFA_REG: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct zapi_rlfa_request)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + rlfa_req = imsg.data; + rnode = rlfa_node_find(&rlfa_req->destination, + rlfa_req->pq_address); + if (!rnode) + rnode = rlfa_node_new(&rlfa_req->destination, + rlfa_req->pq_address); + rclient = rlfa_client_find(rnode, &rlfa_req->igp); + if (rclient) + /* RLFA already registered - do nothing */ + break; + rclient = rlfa_client_new(rnode, &rlfa_req->igp); + lde_rlfa_check(rclient); + break; + case IMSG_RLFA_UNREG_ALL: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct zapi_rlfa_igp)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + rlfa_igp = imsg.data; + + RB_FOREACH_SAFE (rnode, ldp_rlfa_node_head, + &rlfa_node_tree, rntmp) { + rclient = rlfa_client_find(rnode, rlfa_igp); + if (!rclient) + continue; + + rlfa_client_del(rclient); + } + break; default: log_debug("%s: unexpected imsg %d", __func__, imsg.hdr.type); @@ -876,6 +917,48 @@ lde_send_delete_klabel(struct fec_node *fn, struct fec_nh *fnh) } void +lde_fec2prefix(const struct fec *fec, struct prefix *prefix) +{ + memset(prefix, 0, sizeof(*prefix)); + switch (fec->type) { + case FEC_TYPE_IPV4: + prefix->family = AF_INET; + prefix->u.prefix4 = fec->u.ipv4.prefix; + prefix->prefixlen = fec->u.ipv4.prefixlen; + break; + case FEC_TYPE_IPV6: + prefix->family = AF_INET6; + prefix->u.prefix6 = fec->u.ipv6.prefix; + prefix->prefixlen = fec->u.ipv6.prefixlen; + break; + default: + prefix->family = AF_UNSPEC; + break; + } +} + +void 
+lde_prefix2fec(const struct prefix *prefix, struct fec *fec) +{ + memset(fec, 0, sizeof(*fec)); + switch (prefix->family) { + case AF_INET: + fec->type = FEC_TYPE_IPV4; + fec->u.ipv4.prefix = prefix->u.prefix4; + fec->u.ipv4.prefixlen = prefix->prefixlen; + break; + case AF_INET6: + fec->type = FEC_TYPE_IPV6; + fec->u.ipv6.prefix = prefix->u.prefix6; + fec->u.ipv6.prefixlen = prefix->prefixlen; + break; + default: + fatalx("lde_prefix2fec: unknown af"); + break; + } +} + +void lde_fec2map(struct fec *fec, struct map *map) { memset(map, 0, sizeof(*map)); @@ -1388,6 +1471,9 @@ lde_nbr_del(struct lde_nbr *ln) RB_FOREACH(f, fec_tree, &ft) { fn = (struct fec_node *)f; + /* Update RLFA clients. */ + lde_rlfa_update_clients(f, ln, MPLS_INVALID_LABEL); + LIST_FOREACH(fnh, &fn->nexthops, entry) { switch (f->type) { case FEC_TYPE_IPV4: diff --git a/ldpd/lde.h b/ldpd/lde.h index 660aeafb34..e09be01ece 100644 --- a/ldpd/lde.h +++ b/ldpd/lde.h @@ -156,6 +156,8 @@ uint32_t lde_update_label(struct fec_node *); void lde_free_label(uint32_t label); void lde_send_change_klabel(struct fec_node *, struct fec_nh *); void lde_send_delete_klabel(struct fec_node *, struct fec_nh *); +void lde_fec2prefix(const struct fec *fec, struct prefix *prefix); +void lde_prefix2fec(const struct prefix *prefix, struct fec *fec); void lde_fec2map(struct fec *, struct map *); void lde_map2fec(struct map *, struct in_addr, struct fec *); void lde_send_labelmapping(struct lde_nbr *, struct fec_node *, diff --git a/ldpd/lde_lib.c b/ldpd/lde_lib.c index 9db931677d..0f91f49920 100644 --- a/ldpd/lde_lib.c +++ b/ldpd/lde_lib.c @@ -23,6 +23,7 @@ #include "ldpe.h" #include "lde.h" #include "log.h" +#include "rlfa.h" #include "mpls.h" @@ -601,6 +602,10 @@ lde_check_mapping(struct map *map, struct lde_nbr *ln, int rcvd_label_mapping) break; } } + + /* Update RLFA clients. */ + lde_rlfa_update_clients(&fec, ln, map->label); + /* LMp.13 & LMp.16: Record the mapping from this peer */ if (me == NULL) me = lde_map_add(ln, fn, 0); @@ -858,6 +863,9 @@ lde_check_withdraw(struct map *map, struct lde_nbr *ln) fnh->remote_label = NO_LABEL; } + /* Update RLFA clients. */ + lde_rlfa_update_clients(&fec, ln, MPLS_INVALID_LABEL); + /* LWd.2: send label release */ lde_send_labelrelease(ln, fn, NULL, map->label); @@ -940,6 +948,9 @@ lde_check_withdraw_wcard(struct map *map, struct lde_nbr *ln) fnh->remote_label = NO_LABEL; } + /* Update RLFA clients. 
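lde_fec2prefix()/lde_prefix2fec() exist so the RLFA code can translate between the zebra-style prefixes carried in zapi_rlfa_request and ldpd's internal FEC keys. A condensed sketch of the intended lookup, along the lines of lde_rlfa_check() later in this patch (ldpd-internal types; the wrapper name is illustrative):

/* Label the given neighbor advertised for a destination prefix, or
 * MPLS_INVALID_LABEL if no mapping has been received yet. */
static uint32_t neighbor_label_for(struct lde_nbr *ln,
				   const struct prefix *destination)
{
	struct fec fec;
	struct lde_map *me;

	lde_prefix2fec(destination, &fec);
	me = (struct lde_map *)fec_find(&ln->recv_map, &fec);
	if (me == NULL)
		return MPLS_INVALID_LABEL;

	return me->map.label;
}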
*/ + lde_rlfa_update_clients(f, ln, MPLS_INVALID_LABEL); + /* LWd.3: check previously received label mapping */ if (me && (map->label == NO_LABEL || map->label == me->map.label)) diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c index a53854fa56..ea86c2dc03 100644 --- a/ldpd/ldp_zebra.c +++ b/ldpd/ldp_zebra.c @@ -114,12 +114,16 @@ static void ldp_zebra_opaque_register(void) { zclient_register_opaque(zclient, LDP_IGP_SYNC_IF_STATE_REQUEST); + zclient_register_opaque(zclient, LDP_RLFA_REGISTER); + zclient_register_opaque(zclient, LDP_RLFA_UNREGISTER_ALL); } static void ldp_zebra_opaque_unregister(void) { zclient_unregister_opaque(zclient, LDP_IGP_SYNC_IF_STATE_REQUEST); + zclient_unregister_opaque(zclient, LDP_RLFA_REGISTER); + zclient_unregister_opaque(zclient, LDP_RLFA_UNREGISTER_ALL); } int @@ -147,12 +151,29 @@ ldp_sync_zebra_send_announce(void) return 0; } +int ldp_zebra_send_rlfa_labels(struct zapi_rlfa_response *rlfa_labels) +{ + int ret; + + ret = zclient_send_opaque(zclient, LDP_RLFA_LABELS, + (const uint8_t *)rlfa_labels, + sizeof(*rlfa_labels)); + if (ret == ZCLIENT_SEND_FAILURE) { + log_warn("failed to send RLFA labels to IGP"); + return -1; + } + + return 0; +} + static int ldp_zebra_opaque_msg_handler(ZAPI_CALLBACK_ARGS) { struct stream *s; struct zapi_opaque_msg info; struct ldp_igp_sync_if_state_req state_req; + struct zapi_rlfa_igp igp; + struct zapi_rlfa_request rlfa; s = zclient->ibuf; @@ -165,6 +186,14 @@ ldp_zebra_opaque_msg_handler(ZAPI_CALLBACK_ARGS) main_imsg_compose_ldpe(IMSG_LDP_SYNC_IF_STATE_REQUEST, 0, &state_req, sizeof(state_req)); break; + case LDP_RLFA_REGISTER: + STREAM_GET(&rlfa, s, sizeof(rlfa)); + main_imsg_compose_both(IMSG_RLFA_REG, &rlfa, sizeof(rlfa)); + break; + case LDP_RLFA_UNREGISTER_ALL: + STREAM_GET(&igp, s, sizeof(igp)); + main_imsg_compose_both(IMSG_RLFA_UNREG_ALL, &igp, sizeof(igp)); + break; default: break; } diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c index d6da45c862..83e93ebbbc 100644 --- a/ldpd/ldpd.c +++ b/ldpd/ldpd.c @@ -625,6 +625,7 @@ main_dispatch_lde(struct thread *thread) struct imsg imsg; ssize_t n; int shut = 0; + struct zapi_rlfa_response *rlfa_labels; iev->ev_read = NULL; @@ -691,6 +692,15 @@ main_dispatch_lde(struct thread *thread) fatalx("IMSG_ACL_CHECK imsg with wrong len"); ldp_acl_reply(iev, (struct acl_check *)imsg.data); break; + case IMSG_RLFA_LABELS: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct zapi_rlfa_response)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + rlfa_labels = imsg.data; + ldp_zebra_send_rlfa_labels(rlfa_labels); + break; default: log_debug("%s: error handling imsg %d", __func__, imsg.hdr.type); diff --git a/ldpd/ldpd.h b/ldpd/ldpd.h index f8a94b4e2a..beb625d8a2 100644 --- a/ldpd/ldpd.h +++ b/ldpd/ldpd.h @@ -157,7 +157,10 @@ enum imsg_type { IMSG_FILTER_UPDATE, IMSG_NBR_SHUTDOWN, IMSG_LDP_SYNC_IF_STATE_REQUEST, - IMSG_LDP_SYNC_IF_STATE_UPDATE + IMSG_LDP_SYNC_IF_STATE_UPDATE, + IMSG_RLFA_REG, + IMSG_RLFA_UNREG_ALL, + IMSG_RLFA_LABELS, }; struct ldpd_init { @@ -373,6 +376,7 @@ struct tnbr { union ldpd_addr addr; int state; uint16_t pw_count; + uint32_t rlfa_count; uint8_t flags; QOBJ_FIELDS }; @@ -875,6 +879,8 @@ extern char ctl_sock_path[MAXPATHLEN]; void ldp_zebra_init(struct thread_master *); void ldp_zebra_destroy(void); int ldp_sync_zebra_send_state_update(struct ldp_igp_sync_if_state *); +int ldp_zebra_send_rlfa_labels(struct zapi_rlfa_response * + rlfa_labels); /* compatibility */ #ifndef __OpenBSD__ diff --git a/ldpd/ldpe.c b/ldpd/ldpe.c index f3f8b85102..6a5a0750bd 100644 --- 
a/ldpd/ldpe.c +++ b/ldpd/ldpe.c @@ -27,6 +27,7 @@ #include "control.h" #include "log.h" #include "ldp_debug.h" +#include "rlfa.h" #include <lib/log.h> #include "memory.h" @@ -298,7 +299,11 @@ ldpe_dispatch_main(struct thread *thread) int n, shut = 0; struct ldp_access *laccess; struct ldp_igp_sync_if_state_req *ldp_sync_if_state_req; - + struct ldp_rlfa_node *rnode, *rntmp; + struct ldp_rlfa_client *rclient; + struct zapi_rlfa_request *rlfa_req; + struct zapi_rlfa_igp *rlfa_igp; + iev->ev_read = NULL; if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN) @@ -569,6 +574,44 @@ ldpe_dispatch_main(struct thread *thread) ldp_sync_if_state_req = imsg.data; ldp_sync_fsm_state_req(ldp_sync_if_state_req); break; + case IMSG_RLFA_REG: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct zapi_rlfa_request)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + rlfa_req = imsg.data; + + rnode = rlfa_node_find(&rlfa_req->destination, + rlfa_req->pq_address); + if (!rnode) + rnode = rlfa_node_new(&rlfa_req->destination, + rlfa_req->pq_address); + rclient = rlfa_client_find(rnode, &rlfa_req->igp); + if (rclient) + /* RLFA already registered - do nothing */ + break; + rclient = rlfa_client_new(rnode, &rlfa_req->igp); + ldpe_rlfa_init(rclient); + break; + case IMSG_RLFA_UNREG_ALL: + if (imsg.hdr.len != IMSG_HEADER_SIZE + + sizeof(struct zapi_rlfa_igp)) { + log_warnx("%s: wrong imsg len", __func__); + break; + } + rlfa_igp = imsg.data; + + RB_FOREACH_SAFE (rnode, ldp_rlfa_node_head, + &rlfa_node_tree, rntmp) { + rclient = rlfa_client_find(rnode, rlfa_igp); + if (!rclient) + continue; + + ldpe_rlfa_exit(rclient); + rlfa_client_del(rclient); + } + break; default: log_debug("ldpe_dispatch_main: error handling imsg %d", imsg.hdr.type); diff --git a/ldpd/rlfa.c b/ldpd/rlfa.c new file mode 100644 index 0000000000..697ec08af8 --- /dev/null +++ b/ldpd/rlfa.c @@ -0,0 +1,288 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * Renato Westphal + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "ldpd.h" +#include "lde.h" +#include "ldpe.h" +#include "log.h" +#include "ldp_debug.h" +#include "rlfa.h" + +#include <lib/log.h> + +struct ldp_rlfa_node_head rlfa_node_tree; + +static int ldp_rlfa_client_compare(const struct ldp_rlfa_client *a, + const struct ldp_rlfa_client *b) +{ + if (a->igp.vrf_id < b->igp.vrf_id) + return -1; + if (a->igp.vrf_id > b->igp.vrf_id) + return 1; + + if (a->igp.protocol < b->igp.protocol) + return -1; + if (a->igp.protocol > b->igp.protocol) + return 1; + + if (a->igp.isis.spf.tree_id < b->igp.isis.spf.tree_id) + return -1; + if (a->igp.isis.spf.tree_id > b->igp.isis.spf.tree_id) + return 1; + + if (a->igp.isis.spf.level < b->igp.isis.spf.level) + return -1; + if (a->igp.isis.spf.level > b->igp.isis.spf.level) + return 1; + + return 0; +} +RB_GENERATE(ldp_rlfa_client_head, ldp_rlfa_client, entry, + ldp_rlfa_client_compare) + +static int ldp_rlfa_node_compare(const struct ldp_rlfa_node *a, + const struct ldp_rlfa_node *b) +{ + if (ntohl(a->pq_address.s_addr) < ntohl(b->pq_address.s_addr)) + return -1; + if (ntohl(a->pq_address.s_addr) > ntohl(b->pq_address.s_addr)) + return 1; + + return prefix_cmp(&a->destination, &b->destination); +} +RB_GENERATE(ldp_rlfa_node_head, ldp_rlfa_node, entry, ldp_rlfa_node_compare) + +struct ldp_rlfa_client *rlfa_client_new(struct ldp_rlfa_node *rnode, + struct zapi_rlfa_igp *igp) +{ + struct ldp_rlfa_client *rclient; + + if ((rclient = calloc(1, sizeof(*rclient))) == NULL) + fatal(__func__); + + rclient->igp = *igp; + rclient->node = rnode; + RB_INSERT(ldp_rlfa_client_head, &rnode->clients, rclient); + + return rclient; +} + +void rlfa_client_del(struct ldp_rlfa_client *rclient) +{ + struct ldp_rlfa_node *rnode = rclient->node; + + RB_REMOVE(ldp_rlfa_client_head, &rnode->clients, rclient); + free(rclient); + + /* Delete RLFA node if it's empty. */ + if (RB_EMPTY(ldp_rlfa_client_head, &rnode->clients)) + rlfa_node_del(rnode); +} + +struct ldp_rlfa_client *rlfa_client_find(struct ldp_rlfa_node *rnode, + struct zapi_rlfa_igp *igp) +{ + struct ldp_rlfa_client rclient; + + rclient.igp = *igp; + return RB_FIND(ldp_rlfa_client_head, &rnode->clients, &rclient); +} + +struct ldp_rlfa_node *rlfa_node_new(const struct prefix *destination, + struct in_addr pq_address) +{ + struct ldp_rlfa_node *rnode; + + if ((rnode = calloc(1, sizeof(*rnode))) == NULL) + fatal(__func__); + + rnode->destination = *destination; + rnode->pq_address = pq_address; + rnode->pq_label = MPLS_INVALID_LABEL; + RB_INIT(ldp_rlfa_client_head, &rnode->clients); + RB_INSERT(ldp_rlfa_node_head, &rlfa_node_tree, rnode); + + return rnode; +} + +void rlfa_node_del(struct ldp_rlfa_node *rnode) +{ + /* Delete RLFA clients. 
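rlfa.c keeps registrations in two nested red-black trees: ldp_rlfa_node entries keyed by (destination prefix, PQ address), each owning a tree of ldp_rlfa_client entries keyed by the registering IGP instance. Both the lde and ldpe IMSG_RLFA_REG handlers therefore share the same find-or-create idiom, condensed here into one sketch (not additional patch code, just the shared shape):

/* Register one RLFA request coming from an IGP instance. */
static struct ldp_rlfa_client *rlfa_register(struct zapi_rlfa_request *req)
{
	struct ldp_rlfa_node *rnode;
	struct ldp_rlfa_client *rclient;

	rnode = rlfa_node_find(&req->destination, req->pq_address);
	if (!rnode)
		rnode = rlfa_node_new(&req->destination, req->pq_address);

	rclient = rlfa_client_find(rnode, &req->igp);
	if (rclient)
		return rclient; /* already registered - nothing to do */

	return rlfa_client_new(rnode, &req->igp);
}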
*/ + while (!RB_EMPTY(ldp_rlfa_client_head, &rnode->clients)) { + struct ldp_rlfa_client *rclient; + + rclient = RB_ROOT(ldp_rlfa_client_head, &rnode->clients); + rlfa_client_del(rclient); + } + + RB_REMOVE(ldp_rlfa_node_head, &rlfa_node_tree, rnode); + free(rnode); +} + +struct ldp_rlfa_node *rlfa_node_find(const struct prefix *destination, + struct in_addr pq_address) +{ + struct ldp_rlfa_node rnode = {}; + + rnode.destination = *destination; + rnode.pq_address = pq_address; + return RB_FIND(ldp_rlfa_node_head, &rlfa_node_tree, &rnode); +} + +void lde_rlfa_client_send(struct ldp_rlfa_client *rclient) +{ + struct ldp_rlfa_node *rnode = rclient->node; + struct zapi_rlfa_response rlfa_labels = {}; + struct fec fec; + struct fec_node *fn; + struct fec_nh *fnh; + int i = 0; + + /* Fill in inner label (allocated by PQ node). */ + rlfa_labels.igp = rclient->igp; + rlfa_labels.destination = rnode->destination; + rlfa_labels.pq_label = rnode->pq_label; + + /* Fill in outer label(s) (allocated by the nexthop routers). */ + fec.type = FEC_TYPE_IPV4; + fec.u.ipv4.prefix = rnode->pq_address; + fec.u.ipv4.prefixlen = IPV4_MAX_BITLEN; + fn = (struct fec_node *)fec_find(&ft, &fec); + if (!fn) + return; + LIST_FOREACH(fnh, &fn->nexthops, entry) { + if (fnh->remote_label == NO_LABEL) + continue; + + rlfa_labels.nexthops[i].family = fnh->af; + switch (fnh->af) { + case AF_INET: + rlfa_labels.nexthops[i].gate.ipv4 = fnh->nexthop.v4; + break; + case AF_INET6: + rlfa_labels.nexthops[i].gate.ipv6 = fnh->nexthop.v6; + break; + default: + continue; + } + rlfa_labels.nexthops[i].label = fnh->remote_label; + i++; + } + rlfa_labels.nexthop_num = i; + + lde_imsg_compose_parent(IMSG_RLFA_LABELS, 0, &rlfa_labels, + sizeof(rlfa_labels)); +} + +void lde_rlfa_label_update(const struct fec *fec) +{ + struct ldp_rlfa_node *rnode; + + if (fec->type != FEC_TYPE_IPV4 + || fec->u.ipv4.prefixlen != IPV4_MAX_BITLEN) + return; + + /* + * TODO: use an rb-tree lookup to restrict the iteration to the RLFAs + * that were effectivelly affected by the label update. + */ + RB_FOREACH (rnode, ldp_rlfa_node_head, &rlfa_node_tree) { + struct ldp_rlfa_client *rclient; + + if (!IPV4_ADDR_SAME(&rnode->pq_address, &fec->u.ipv4.prefix)) + continue; + + RB_FOREACH (rclient, ldp_rlfa_client_head, &rnode->clients) + lde_rlfa_client_send(rclient); + } +} + +void lde_rlfa_check(struct ldp_rlfa_client *rclient) +{ + struct lde_nbr *ln; + struct lde_map *me; + struct fec fec; + union ldpd_addr pq_address = {}; + + pq_address.v4 = rclient->node->pq_address; + ln = lde_nbr_find_by_addr(AF_INET, &pq_address); + if (!ln) + return; + + lde_prefix2fec(&rclient->node->destination, &fec); + me = (struct lde_map *)fec_find(&ln->recv_map, &fec); + if (!me) + return; + + rclient->node->pq_label = me->map.label; + lde_rlfa_client_send(rclient); +} + +/* + * Check if there's any registered RLFA client for this prefix/neighbor (PQ + * node) and notify about the updated label. 
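lde_rlfa_client_send() answers a registration with two kinds of labels: pq_label, the label the PQ node advertised for the protected destination (tracked per ldp_rlfa_node and learned over the targeted session that ldpe_rlfa_init sets up toward the PQ address), and one outer label per directly connected nexthop, taken from the /32 FEC of the PQ address itself. The request arrived via the LDP_RLFA_REGISTER opaque message and the labels travel back as LDP_RLFA_LABELS. On the IGP side the two labels are meant to be pushed together on the repair path, roughly as below (illustrative fragment, with the outermost label assumed at index 0):

/* Build the two-label repair stack for one backup nexthop of an RLFA:
 * the outer label tunnels the packet to the PQ node, the inner label is
 * what the PQ node expects for the destination. */
static void rlfa_repair_stack(const struct zapi_rlfa_response *resp,
			      unsigned int i, mpls_label_t stack[2])
{
	stack[0] = resp->nexthops[i].label;	/* outer (top of stack) */
	stack[1] = resp->pq_label;		/* inner */
}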
+ */ +void lde_rlfa_update_clients(struct fec *fec, struct lde_nbr *ln, + uint32_t label) +{ + struct prefix rlfa_dest; + struct ldp_rlfa_node *rnode; + + lde_fec2prefix(fec, &rlfa_dest); + rnode = rlfa_node_find(&rlfa_dest, ln->id); + if (rnode) { + struct ldp_rlfa_client *rclient; + + rnode->pq_label = label; + RB_FOREACH (rclient, ldp_rlfa_client_head, &rnode->clients) + lde_rlfa_client_send(rclient); + } else + lde_rlfa_label_update(fec); +} + +void ldpe_rlfa_init(struct ldp_rlfa_client *rclient) +{ + struct tnbr *tnbr; + union ldpd_addr pq_address = {}; + + pq_address.v4 = rclient->node->pq_address; + tnbr = tnbr_find(leconf, AF_INET, &pq_address); + if (tnbr == NULL) { + tnbr = tnbr_new(AF_INET, &pq_address); + tnbr_update(tnbr); + RB_INSERT(tnbr_head, &leconf->tnbr_tree, tnbr); + } + + tnbr->rlfa_count++; +} + +void ldpe_rlfa_exit(struct ldp_rlfa_client *rclient) +{ + struct tnbr *tnbr; + union ldpd_addr pq_address = {}; + + pq_address.v4 = rclient->node->pq_address; + tnbr = tnbr_find(leconf, AF_INET, &pq_address); + if (tnbr) { + tnbr->rlfa_count--; + tnbr_check(leconf, tnbr); + } +} diff --git a/ldpd/rlfa.h b/ldpd/rlfa.h new file mode 100644 index 0000000000..fe67917e8a --- /dev/null +++ b/ldpd/rlfa.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * Renato Westphal + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _LDPD_RLFA_H_ +#define _LDPD_RLFA_H_ + +#include "openbsd-tree.h" +#include "zclient.h" + +struct ldp_rlfa_client { + RB_ENTRY(ldp_rlfa_client) entry; + + /* IGP instance data. */ + struct zapi_rlfa_igp igp; + + /* Backpointer to RLFA node. */ + struct ldp_rlfa_node *node; +}; +RB_HEAD(ldp_rlfa_client_head, ldp_rlfa_client); +RB_PROTOTYPE(ldp_rlfa_client_head, ldp_rlfa_client, entry, + ldp_rlfa_client_compare); + +struct ldp_rlfa_node { + RB_ENTRY(ldp_rlfa_node) entry; + + /* Destination prefix. */ + struct prefix destination; + + /* PQ node address. */ + struct in_addr pq_address; + + /* RLFA clients. */ + struct ldp_rlfa_client_head clients; + + /* Label allocated by the PQ node to the RLFA destination. 
*/ + mpls_label_t pq_label; +}; +RB_HEAD(ldp_rlfa_node_head, ldp_rlfa_node); +RB_PROTOTYPE(ldp_rlfa_node_head, ldp_rlfa_node, entry, ldp_rlfa_node_compare); + +extern struct ldp_rlfa_node_head rlfa_node_tree; + +/* prototypes */ +struct ldp_rlfa_client *rlfa_client_new(struct ldp_rlfa_node *rnode, + struct zapi_rlfa_igp *igp); +void rlfa_client_del(struct ldp_rlfa_client *rclient); +struct ldp_rlfa_client *rlfa_client_find(struct ldp_rlfa_node *rnode, + struct zapi_rlfa_igp *igp); +struct ldp_rlfa_node *rlfa_node_new(const struct prefix *destination, + struct in_addr pq_address); +void rlfa_node_del(struct ldp_rlfa_node *rnode); +struct ldp_rlfa_node *rlfa_node_find(const struct prefix *destination, + struct in_addr pq_address); +void lde_rlfa_check(struct ldp_rlfa_client *rclient); +void lde_rlfa_client_send(struct ldp_rlfa_client *rclient); +void lde_rlfa_label_update(const struct fec *fec); +void lde_rlfa_update_clients(struct fec *fec, struct lde_nbr *ln, + uint32_t label); +void ldpe_rlfa_init(struct ldp_rlfa_client *rclient); +void ldpe_rlfa_exit(struct ldp_rlfa_client *rclient); + +#endif /* _LDPD_RLFA_H_ */ diff --git a/ldpd/subdir.am b/ldpd/subdir.am index 2058d2596a..d89d18341d 100644 --- a/ldpd/subdir.am +++ b/ldpd/subdir.am @@ -36,6 +36,7 @@ ldpd_libldp_a_SOURCES = \ ldpd/notification.c \ ldpd/packet.c \ ldpd/pfkey.c \ + ldpd/rlfa.c \ ldpd/socket.c \ ldpd/util.c \ # end @@ -53,6 +54,7 @@ noinst_HEADERS += \ ldpd/ldpd.h \ ldpd/ldpe.h \ ldpd/log.h \ + ldpd/rlfa.h \ # end ldpd_ldpd_SOURCES = ldpd/ldpd.c diff --git a/lib/agentx.c b/lib/agentx.c index 603d8d6172..5351f8bda2 100644 --- a/lib/agentx.c +++ b/lib/agentx.c @@ -32,6 +32,9 @@ #include "linklist.h" #include "version.h" #include "lib_errors.h" +#include "xref.h" + +XREF_SETUP() static int agentx_enabled = 0; @@ -262,11 +265,28 @@ void smux_register_mib(const char *descr, struct variable *var, size_t width, register_mib(descr, var, width, num, name, namelen); } -int smux_trap(struct variable *vp, size_t vp_len, const oid *ename, - size_t enamelen, const oid *name, size_t namelen, - const oid *iname, size_t inamelen, - const struct trap_object *trapobj, size_t trapobjlen, - uint8_t sptrap) +void smux_trap(struct variable *vp, size_t vp_len, const oid *ename, + size_t enamelen, const oid *name, size_t namelen, + const oid *iname, size_t inamelen, + const struct trap_object *trapobj, size_t trapobjlen, + uint8_t sptrap) +{ + struct index_oid trap_index[1]; + + /* copy the single index into the multi-index format */ + oid_copy(trap_index[0].indexname, iname, inamelen); + trap_index[0].indexlen = inamelen; + + smux_trap_multi_index(vp, vp_len, ename, enamelen, name, namelen, + trap_index, array_size(trap_index), trapobj, + trapobjlen, sptrap); +} + +int smux_trap_multi_index(struct variable *vp, size_t vp_len, const oid *ename, + size_t enamelen, const oid *name, size_t namelen, + struct index_oid *iname, size_t index_len, + const struct trap_object *trapobj, size_t trapobjlen, + uint8_t sptrap) { oid objid_snmptrap[] = {1, 3, 6, 1, 6, 3, 1, 1, 4, 1, 0}; size_t objid_snmptrap_len = sizeof(objid_snmptrap) / sizeof(oid); @@ -296,6 +316,13 @@ int smux_trap(struct variable *vp, size_t vp_len, const oid *ename, size_t val_len; WriteMethod *wm = NULL; struct variable cvp; + unsigned int iindex; + /* + * this allows the behaviour of smux_trap with a singe index + * for all objects to be maintained whilst allowing traps which + * have different indices per object to be supported + */ + iindex = (index_len == 1) ? 0 : i; /* Make OID. 
*/ if (trapobj[i].namelen > 0) { @@ -303,8 +330,10 @@ int smux_trap(struct variable *vp, size_t vp_len, const oid *ename, onamelen = trapobj[i].namelen; oid_copy(oid, name, namelen); oid_copy(oid + namelen, trapobj[i].name, onamelen); - oid_copy(oid + namelen + onamelen, iname, inamelen); - oid_len = namelen + onamelen + inamelen; + oid_copy(oid + namelen + onamelen, + iname[iindex].indexname, + iname[iindex].indexlen); + oid_len = namelen + onamelen + iname[iindex].indexlen; } else { /* Scalar object */ onamelen = trapobj[i].namelen * (-1); @@ -330,6 +359,7 @@ int smux_trap(struct variable *vp, size_t vp_len, const oid *ename, cvp.magic = vp[j].magic; cvp.acl = vp[j].acl; cvp.findVar = vp[j].findVar; + /* Grab the result. */ val = cvp.findVar(&cvp, oid, &oid_len, 1, &val_len, &wm); @@ -224,6 +224,17 @@ struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp, int plen; int local_remote_cbit; + /* + * If the ifindex lookup fails the + * rest of the data in the stream is + * not read. All examples of this function + * call immediately use the dp->family which + * is not good. Ensure we are not using + * random data + */ + memset(dp, 0, sizeof(*dp)); + memset(sp, 0, sizeof(*sp)); + /* Get interface index. */ ifindex = stream_getl(s); @@ -249,13 +260,12 @@ struct interface *bfd_get_peer_info(struct stream *s, struct prefix *dp, /* Get BFD status. */ *status = stream_getl(s); - if (sp) { - sp->family = stream_getc(s); + sp->family = stream_getc(s); + + plen = prefix_blen(sp); + stream_get(&sp->u.prefix, s, plen); + sp->prefixlen = stream_getc(s); - plen = prefix_blen(sp); - stream_get(&sp->u.prefix, s, plen); - sp->prefixlen = stream_getc(s); - } local_remote_cbit = stream_getc(s); if (remote_cbit) *remote_cbit = local_remote_cbit; diff --git a/lib/buffer.c b/lib/buffer.c index 459d98e75d..42796faae8 100644 --- a/lib/buffer.c +++ b/lib/buffer.c @@ -468,16 +468,6 @@ buffer_status_t buffer_write(struct buffer *b, int fd, const void *p, { ssize_t nbytes; -#if 0 - /* - * Should we attempt to drain any previously buffered data? - * This could help reduce latency in pushing out the data if - * we are stuck in a long-running thread that is preventing - * the main select loop from calling the flush thread... - */ - if (b->head && (buffer_flush_available(b, fd) == BUFFER_ERROR)) - return BUFFER_ERROR; -#endif if (b->head) /* Buffer is not empty, so do not attempt to write the new data. */ diff --git a/lib/clippy.c b/lib/clippy.c index 2e09c24c66..15cd9d7a4b 100644 --- a/lib/clippy.c +++ b/lib/clippy.c @@ -107,7 +107,8 @@ int main(int argc, char **argv) #include "log.h" #include "zassert.h" -void vzlog(int prio, const char *format, va_list args) +void vzlogx(const struct xref_logmsg *xref, int prio, + const char *format, va_list args) { vfprintf(stderr, format, args); fputs("\n", stderr); diff --git a/lib/command.c b/lib/command.c index 87110157f6..6a4d504b2f 100644 --- a/lib/command.c +++ b/lib/command.c @@ -49,6 +49,8 @@ #include "northbound_cli.h" #include "network.h" +#include "frrscript.h" + DEFINE_MTYPE_STATIC(LIB, HOST, "Host config") DEFINE_MTYPE(LIB, COMPLETION, "Completion item") @@ -275,7 +277,7 @@ const char *cmd_prompt(enum node_type node) } /* Install a command into a node. 
*/ -void install_element(enum node_type ntype, const struct cmd_element *cmd) +void _install_element(enum node_type ntype, const struct cmd_element *cmd) { struct cmd_node *cnode; @@ -321,7 +323,7 @@ void install_element(enum node_type ntype, const struct cmd_element *cmd) vector_set(cnode->cmd_vector, (void *)cmd); if (ntype == VIEW_NODE) - install_element(ENABLE_NODE, cmd); + _install_element(ENABLE_NODE, cmd); } void uninstall_element(enum node_type ntype, const struct cmd_element *cmd) @@ -863,6 +865,30 @@ enum node_type node_parent(enum node_type node) case BFD_PROFILE_NODE: ret = BFD_NODE; break; + case SR_TRAFFIC_ENG_NODE: + ret = SEGMENT_ROUTING_NODE; + break; + case SR_SEGMENT_LIST_NODE: + ret = SR_TRAFFIC_ENG_NODE; + break; + case SR_POLICY_NODE: + ret = SR_TRAFFIC_ENG_NODE; + break; + case SR_CANDIDATE_DYN_NODE: + ret = SR_POLICY_NODE; + break; + case PCEP_NODE: + ret = SR_TRAFFIC_ENG_NODE; + break; + case PCEP_PCE_CONFIG_NODE: + ret = PCEP_NODE; + break; + case PCEP_PCE_NODE: + ret = PCEP_NODE; + break; + case PCEP_PCC_NODE: + ret = PCEP_NODE; + break; default: ret = CONFIG_NODE; break; @@ -2279,6 +2305,31 @@ done: return CMD_SUCCESS; } +#if defined(DEV_BUILD) && defined(HAVE_SCRIPTING) +DEFUN(script, + script_cmd, + "script SCRIPT", + "Test command - execute a script\n" + "Script name (same as filename in /etc/frr/scripts/\n") +{ + struct prefix p; + + (void)str2prefix("1.2.3.4/24", &p); + + struct frrscript *fs = frrscript_load(argv[1]->arg, NULL); + + if (fs == NULL) { + vty_out(vty, "Script '/etc/frr/scripts/%s.lua' not found\n", + argv[1]->arg); + } else { + int ret = frrscript_call(fs, NULL); + vty_out(vty, "Script result: %d\n", ret); + } + + return CMD_SUCCESS; +} +#endif + /* Set config filename. Called from vty.c */ void host_config_set(const char *filename) { @@ -2293,18 +2344,18 @@ const char *host_config_get(void) void install_default(enum node_type node) { - install_element(node, &config_exit_cmd); - install_element(node, &config_quit_cmd); - install_element(node, &config_end_cmd); - install_element(node, &config_help_cmd); - install_element(node, &config_list_cmd); - install_element(node, &show_cli_graph_cmd); - install_element(node, &find_cmd); + _install_element(node, &config_exit_cmd); + _install_element(node, &config_quit_cmd); + _install_element(node, &config_end_cmd); + _install_element(node, &config_help_cmd); + _install_element(node, &config_list_cmd); + _install_element(node, &show_cli_graph_cmd); + _install_element(node, &find_cmd); - install_element(node, &config_write_cmd); - install_element(node, &show_running_config_cmd); + _install_element(node, &config_write_cmd); + _install_element(node, &show_running_config_cmd); - install_element(node, &autocomplete_cmd); + _install_element(node, &autocomplete_cmd); nb_cli_install_default(node); } @@ -2373,6 +2424,10 @@ void cmd_init(int terminal) install_element(VIEW_NODE, &echo_cmd); install_element(VIEW_NODE, &autocomplete_cmd); install_element(VIEW_NODE, &find_cmd); +#if defined(DEV_BUILD) && defined(HAVE_SCRIPTING) + install_element(VIEW_NODE, &script_cmd); +#endif + install_element(ENABLE_NODE, &config_end_cmd); install_element(ENABLE_NODE, &config_disable_cmd); diff --git a/lib/command.h b/lib/command.h index 1b0504101c..71abb20b05 100644 --- a/lib/command.h +++ b/lib/command.h @@ -145,6 +145,15 @@ enum node_type { PROTOCOL_NODE, /* protocol filtering node */ MPLS_NODE, /* MPLS config node */ PW_NODE, /* Pseudowire config node */ + SEGMENT_ROUTING_NODE, /* Segment routing root node */ + 
SR_TRAFFIC_ENG_NODE, /* SR Traffic Engineering node */ + SR_SEGMENT_LIST_NODE, /* SR segment list config node */ + SR_POLICY_NODE, /* SR policy config node */ + SR_CANDIDATE_DYN_NODE, /* SR dynamic candidate path config node */ + PCEP_NODE, /* PCEP node */ + PCEP_PCE_CONFIG_NODE, /* PCE shared configuration node */ + PCEP_PCE_NODE, /* PCE configuration node */ + PCEP_PCC_NODE, /* PCC configuration node */ VTY_NODE, /* Vty node. */ FPM_NODE, /* Dataplane FPM node. */ LINK_PARAMS_NODE, /* Link-parameters node */ @@ -230,7 +239,11 @@ struct cmd_node { .attr = attrs, \ .daemon = dnum, \ .name = #cmdname, \ - }; + .xref = XREF_INIT(XREFT_DEFUN, NULL, #funcname), \ + }; \ + XREF_LINK(cmdname.xref); \ + /* end */ + #define DEFUN_CMD_FUNC_DECL(funcname) \ static int funcname(const struct cmd_element *, struct vty *, int, \ @@ -405,7 +418,8 @@ struct cmd_node { "<neighbor|interface|area|lsa|zebra|config|dbex|spf|route|lsdb|redistribute|hook|asbr|prefix|abr>" #define AREA_TAG_STR "[area tag]\n" #define COMMUNITY_AANN_STR "Community number where AA and NN are (0-65535)\n" -#define COMMUNITY_VAL_STR "Community number in AA:NN format (where AA and NN are (0-65535)) or local-AS|no-advertise|no-export|internet or additive\n" +#define COMMUNITY_VAL_STR \ + "Community number in AA:NN format (where AA and NN are (0-65535)) or local-AS|no-advertise|no-export|internet|graceful-shutdown|accept-own-nexthop|accept-own|route-filter-translated-v4|route-filter-v4|route-filter-translated-v6|route-filter-v6|llgr-stale|no-llgr|blackhole|no-peer or additive\n" #define MPLS_TE_STR "MPLS-TE specific commands\n" #define LINK_PARAMS_STR "Configure interface link parameters\n" #define OSPF_RI_STR "OSPF Router Information specific commands\n" @@ -474,7 +488,29 @@ struct cmd_node { /* Prototypes. */ extern void install_node(struct cmd_node *node); extern void install_default(enum node_type); -extern void install_element(enum node_type, const struct cmd_element *); + +struct xref_install_element { + struct xref xref; + + const struct cmd_element *cmd_element; + enum node_type node_type; +}; + +#ifndef VTYSH_EXTRACT_PL +#define install_element(node_type_, cmd_element_) do { \ + static const struct xref_install_element _xref \ + __attribute__((used)) = { \ + .xref = XREF_INIT(XREFT_INSTALL_ELEMENT, NULL, \ + __func__), \ + .cmd_element = cmd_element_, \ + .node_type = node_type_, \ + }; \ + XREF_LINK(_xref.xref); \ + _install_element(node_type_, cmd_element_); \ + } while (0) +#endif + +extern void _install_element(enum node_type, const struct cmd_element *); /* known issue with uninstall_element: changes to cmd_token->attr (i.e. * deprecated/hidden) are not reversed. 
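For context on the install_element() rework above, here is a hedged sketch of what an unchanged daemon-side caller looks like after this patch: the wrapper macro records an xref_install_element at the call site and then forwards to _install_element(). DEFUN, install_element, SHOW_STR, VIEW_NODE and CMD_SUCCESS are the existing lib/command.h facilities; the command name and handler below are purely illustrative.

/* Hypothetical daemon command; only the macros are real. */
DEFUN (show_frobnicate,
       show_frobnicate_cmd,
       "show frobnicate",
       SHOW_STR
       "Frobnicate status\n")
{
	vty_out(vty, "frobnicating\n");
	return CMD_SUCCESS;
}

static void frobnicate_cli_init(void)
{
	/* Expands to a static struct xref_install_element linked via
	 * XREF_LINK() for this call site, then calls _install_element(). */
	install_element(VIEW_NODE, &show_frobnicate_cmd);
}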
*/ diff --git a/lib/command_graph.h b/lib/command_graph.h index 179e104a57..09824460e6 100644 --- a/lib/command_graph.h +++ b/lib/command_graph.h @@ -31,6 +31,7 @@ #include "memory.h" #include "vector.h" #include "graph.h" +#include "xref.h" #ifdef __cplusplus extern "C" { @@ -105,6 +106,7 @@ struct cmd_element { struct cmd_token *[]); const char *name; /* symbol name for debugging */ + struct xref xref; }; /* text for <cr> command */ diff --git a/lib/command_parse.y b/lib/command_parse.y index ba5225b702..8135d02b4b 100644 --- a/lib/command_parse.y +++ b/lib/command_parse.y @@ -496,7 +496,7 @@ terminate_graph (CMD_YYLTYPE *locp, struct parser_ctx *ctx, zlog_err ("----------"); while (ctx->docstr && ctx->docstr[1] != '\0') zlog_err ("%s", strsep(&ctx->docstr, "\n")); - zlog_err ("----------\n"); + zlog_err ("----------"); } graph_add_edge (finalnode, end_token_node); diff --git a/lib/compiler.h b/lib/compiler.h index 217a60d888..70ef8e9bc8 100644 --- a/lib/compiler.h +++ b/lib/compiler.h @@ -279,6 +279,29 @@ extern "C" { #define array_size(ar) (sizeof(ar) / sizeof(ar[0])) +/* Some insane macros to count number of varargs to a functionlike macro */ +#define PP_ARG_N( \ + _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, \ + _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, \ + _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, \ + _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, \ + _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, \ + _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, \ + _61, _62, _63, N, ...) N + +#define PP_RSEQ_N() \ + 62, 61, 60, \ + 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, \ + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \ + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, \ + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \ + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, \ + 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 + +#define PP_NARG_(...) PP_ARG_N(__VA_ARGS__) +#define PP_NARG(...) PP_NARG_(_, ##__VA_ARGS__, PP_RSEQ_N()) + + /* sigh. this is so ugly, it overflows and wraps to being nice again. * * printfrr() supports "%Ld" for <int64_t>, whatever that is typedef'd to. diff --git a/lib/ferr.h b/lib/ferr.h index a89b595e87..4e95431cea 100644 --- a/lib/ferr.h +++ b/lib/ferr.h @@ -132,6 +132,8 @@ struct ferr { #define VTYSH_FRR_END 0x0FFFFFFF #define WATCHFRR_FERR_START 0x10000001 #define WATCHFRR_FERR_END 0x10FFFFFF +#define PATH_FERR_START 0x11000001 +#define PATH_FERR_END 0x11FFFFFF #define ZEBRA_FERR_START 0xF1000001 #define ZEBRA_FERR_END 0xF1FFFFFF #define END_FERR 0xFFFFFFFF diff --git a/lib/filter_cli.c b/lib/filter_cli.c index 54b6cda9a5..5d66a9fc73 100644 --- a/lib/filter_cli.c +++ b/lib/filter_cli.c @@ -259,7 +259,7 @@ DEFPY_YANG( /* Access-list must exist before entries. */ if (yang_dnode_exists(running_config->dnode, xpath) == false) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; /* Use access-list data structure to fetch sequence. */ dnode = yang_dnode_get(running_config->dnode, xpath); @@ -268,7 +268,7 @@ DEFPY_YANG( mask_str ? mask_str : CISCO_HOST_WILDCARD_MASK, NULL, NULL); if (sseq == -1) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; snprintfrr(xpath_entry, sizeof(xpath_entry), "%s/entry[sequence='%" PRId64 "']", xpath, sseq); @@ -436,7 +436,7 @@ DEFPY_YANG( /* Access-list must exist before entries. */ if (yang_dnode_exists(running_config->dnode, xpath) == false) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; /* Use access-list data structure to fetch sequence. 
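A small, hedged illustration of the PP_NARG() helper added to lib/compiler.h above: it counts up to 62 macro arguments, and the zero-argument case relies on the GNU ", ##__VA_ARGS__" extension already used in its definition. The CONCAT/frob macros are not part of the patch; they only show the usual argument-count dispatch pattern.

_Static_assert(PP_NARG() == 0, "zero arguments");
_Static_assert(PP_NARG(a) == 1, "one argument");
_Static_assert(PP_NARG(a, b, c) == 3, "three arguments");

/* Typical use: dispatch a function-like macro on its argument count. */
#define CONCAT_(a, b) a##b
#define CONCAT(a, b)  CONCAT_(a, b)
#define frob_1(a)     ((a))
#define frob_2(a, b)  ((a) + (b))
#define frob(...)     CONCAT(frob_, PP_NARG(__VA_ARGS__))(__VA_ARGS__)

_Static_assert(frob(2, 3) == 5, "dispatched to frob_2");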
*/ dnode = yang_dnode_get(running_config->dnode, xpath); @@ -469,7 +469,7 @@ DEFPY_YANG( "0.0.0.0", CISCO_ANY_WILDCARD_MASK); } if (sseq == -1) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; snprintfrr(xpath_entry, sizeof(xpath_entry), "%s/entry[sequence='%" PRId64 "']", xpath, sseq); @@ -588,7 +588,7 @@ DEFPY_YANG( /* Access-list must exist before entries. */ if (yang_dnode_exists(running_config->dnode, xpath) == false) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; /* Use access-list data structure to fetch sequence. */ dnode = yang_dnode_get(running_config->dnode, xpath); @@ -601,7 +601,7 @@ DEFPY_YANG( sseq = acl_zebra_get_seq(acl, action, (struct prefix *)prefix, exact); if (sseq == -1) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; snprintfrr(xpath_entry, sizeof(xpath_entry), "%s/entry[sequence='%" PRId64 "']", xpath, sseq); @@ -786,7 +786,7 @@ DEFPY_YANG( /* Access-list must exist before entries. */ if (yang_dnode_exists(running_config->dnode, xpath) == false) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; /* Use access-list data structure to fetch sequence. */ dnode = yang_dnode_get(running_config->dnode, xpath); @@ -799,7 +799,7 @@ DEFPY_YANG( sseq = acl_zebra_get_seq(acl, action, (struct prefix *)prefix, exact); if (sseq == -1) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; snprintfrr(xpath_entry, sizeof(xpath_entry), "%s/entry[sequence='%" PRId64 "']", xpath, sseq); @@ -979,7 +979,7 @@ DEFPY_YANG( /* Access-list must exist before entries. */ if (yang_dnode_exists(running_config->dnode, xpath) == false) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; /* Use access-list data structure to fetch sequence. */ dnode = yang_dnode_get(running_config->dnode, xpath); @@ -992,7 +992,7 @@ DEFPY_YANG( sseq = acl_zebra_get_seq(acl, action, (struct prefix *)prefix, false); if (sseq == -1) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; snprintfrr(xpath_entry, sizeof(xpath_entry), "%s/entry[sequence='%" PRId64 "']", xpath, sseq); @@ -1277,7 +1277,7 @@ static int plist_remove(struct vty *vty, const char *iptype, const char *name, /* Access-list must exist before entries. */ if (yang_dnode_exists(running_config->dnode, xpath) == false) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; /* Use access-list data structure to fetch sequence. 
*/ assert(action != NULL); @@ -1290,7 +1290,7 @@ static int plist_remove(struct vty *vty, const char *iptype, const char *name, pl = nb_running_get_entry(dnode, NULL, true); pentry = prefix_list_entry_lookup(pl, p, plt, -1, le, ge); if (pentry == NULL) - return CMD_WARNING; + return CMD_WARNING_CONFIG_FAILED; snprintfrr(xpath_entry, sizeof(xpath_entry), "%s/entry[sequence='%" PRId64 "']", xpath, pentry->seq); diff --git a/lib/frr_zmq.c b/lib/frr_zmq.c index cc11d76700..33adcd7b80 100644 --- a/lib/frr_zmq.c +++ b/lib/frr_zmq.c @@ -135,9 +135,8 @@ static int frrzmq_read_msg(struct thread *t) if (read) frrzmq_check_events(cbp, &cb->write, ZMQ_POLLOUT); - funcname_thread_add_read_write( - THREAD_READ, t->master, frrzmq_read_msg, cbp, cb->fd, - &cb->read.thread, t->funcname, t->schedfrom, t->schedfrom_line); + _thread_add_read_write(t->xref, t->master, frrzmq_read_msg, cbp, + cb->fd, &cb->read.thread); return 0; out_err: @@ -148,14 +147,14 @@ out_err: return 1; } -int funcname_frrzmq_thread_add_read(struct thread_master *master, - void (*msgfunc)(void *arg, void *zmqsock), - void (*partfunc)(void *arg, void *zmqsock, - zmq_msg_t *msg, - unsigned partnum), - void (*errfunc)(void *arg, void *zmqsock), - void *arg, void *zmqsock, - struct frrzmq_cb **cbp, debugargdef) +int _frrzmq_thread_add_read(const struct xref_threadsched *xref, + struct thread_master *master, + void (*msgfunc)(void *arg, void *zmqsock), + void (*partfunc)(void *arg, void *zmqsock, + zmq_msg_t *msg, unsigned partnum), + void (*errfunc)(void *arg, void *zmqsock), + void *arg, void *zmqsock, + struct frrzmq_cb **cbp) { int fd, events; size_t len; @@ -192,13 +191,11 @@ int funcname_frrzmq_thread_add_read(struct thread_master *master, if (events & ZMQ_POLLIN) { thread_cancel(&cb->read.thread); - funcname_thread_add_event(master, frrzmq_read_msg, cbp, fd, - &cb->read.thread, funcname, schedfrom, - fromln); + _thread_add_event(xref, master, frrzmq_read_msg, cbp, fd, + &cb->read.thread); } else - funcname_thread_add_read_write( - THREAD_READ, master, frrzmq_read_msg, cbp, fd, - &cb->read.thread, funcname, schedfrom, fromln); + _thread_add_read_write(xref, master, frrzmq_read_msg, cbp, fd, + &cb->read.thread); return 0; } @@ -244,10 +241,8 @@ static int frrzmq_write_msg(struct thread *t) if (written) frrzmq_check_events(cbp, &cb->read, ZMQ_POLLIN); - funcname_thread_add_read_write(THREAD_WRITE, t->master, - frrzmq_write_msg, cbp, cb->fd, - &cb->write.thread, t->funcname, - t->schedfrom, t->schedfrom_line); + _thread_add_read_write(t->xref, t->master, frrzmq_write_msg, cbp, + cb->fd, &cb->write.thread); return 0; out_err: @@ -257,11 +252,12 @@ out_err: cb->write.cb_error(cb->write.arg, cb->zmqsock); return 1; } -int funcname_frrzmq_thread_add_write(struct thread_master *master, - void (*msgfunc)(void *arg, void *zmqsock), - void (*errfunc)(void *arg, void *zmqsock), - void *arg, void *zmqsock, - struct frrzmq_cb **cbp, debugargdef) + +int _frrzmq_thread_add_write(const struct xref_threadsched *xref, + struct thread_master *master, + void (*msgfunc)(void *arg, void *zmqsock), + void (*errfunc)(void *arg, void *zmqsock), + void *arg, void *zmqsock, struct frrzmq_cb **cbp) { int fd, events; size_t len; @@ -298,13 +294,11 @@ int funcname_frrzmq_thread_add_write(struct thread_master *master, if (events & ZMQ_POLLOUT) { thread_cancel(&cb->write.thread); - funcname_thread_add_event(master, frrzmq_write_msg, cbp, fd, - &cb->write.thread, funcname, - schedfrom, fromln); + _thread_add_event(xref, master, frrzmq_write_msg, cbp, fd, + 
&cb->write.thread); } else - funcname_thread_add_read_write( - THREAD_WRITE, master, frrzmq_write_msg, cbp, fd, - &cb->write.thread, funcname, schedfrom, fromln); + _thread_add_read_write(xref, master, frrzmq_write_msg, cbp, fd, + &cb->write.thread); return 0; } diff --git a/lib/frr_zmq.h b/lib/frr_zmq.h index 4303df9ccd..d30cf8a841 100644 --- a/lib/frr_zmq.h +++ b/lib/frr_zmq.h @@ -67,18 +67,32 @@ extern void *frrzmq_context; extern void frrzmq_init(void); extern void frrzmq_finish(void); -#define debugargdef const char *funcname, const char *schedfrom, int fromln +#define _xref_zmq_a(type, f, d, call) \ + ({ \ + static const struct xref_threadsched _xref \ + __attribute__((used)) = { \ + .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \ + .funcname = #f, \ + .dest = #d, \ + .thread_type = THREAD_ ## type, \ + }; \ + XREF_LINK(_xref.xref); \ + call; \ + }) \ + /* end */ /* core event registration, one of these 2 macros should be used */ #define frrzmq_thread_add_read_msg(m, f, e, a, z, d) \ - funcname_frrzmq_thread_add_read(m, f, NULL, e, a, z, d, #f, __FILE__, \ - __LINE__) + _xref_zmq_a(READ, f, d, \ + _frrzmq_thread_add_read(&_xref, m, f, NULL, e, a, z, d)) + #define frrzmq_thread_add_read_part(m, f, e, a, z, d) \ - funcname_frrzmq_thread_add_read(m, NULL, f, e, a, z, d, #f, __FILE__, \ - __LINE__) + _xref_zmq_a(READ, f, d, \ + _frrzmq_thread_add_read(&_xref, m, NULL, f, e, a, z, d)) + #define frrzmq_thread_add_write_msg(m, f, e, a, z, d) \ - funcname_frrzmq_thread_add_write(m, f, e, a, z, d, #f, __FILE__, \ - __LINE__) + _xref_zmq_a(WRITE, f, d, \ + _frrzmq_thread_add_write(&_xref, m, f, e, a, z, d)) struct cb_core; struct frrzmq_cb; @@ -104,16 +118,18 @@ struct frrzmq_cb; * may schedule the event to run as soon as libfrr is back in its main * loop. */ -extern int funcname_frrzmq_thread_add_read( - struct thread_master *master, void (*msgfunc)(void *arg, void *zmqsock), +extern int _frrzmq_thread_add_read( + const struct xref_threadsched *xref, struct thread_master *master, + void (*msgfunc)(void *arg, void *zmqsock), void (*partfunc)(void *arg, void *zmqsock, zmq_msg_t *msg, unsigned partnum), void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock, - struct frrzmq_cb **cb, debugargdef); -extern int funcname_frrzmq_thread_add_write( - struct thread_master *master, void (*msgfunc)(void *arg, void *zmqsock), + struct frrzmq_cb **cb); +extern int _frrzmq_thread_add_write( + const struct xref_threadsched *xref, struct thread_master *master, + void (*msgfunc)(void *arg, void *zmqsock), void (*errfunc)(void *arg, void *zmqsock), void *arg, void *zmqsock, - struct frrzmq_cb **cb, debugargdef); + struct frrzmq_cb **cb); extern void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core); diff --git a/lib/frrlua.c b/lib/frrlua.c index 9f9cf8c1f6..d8aaa3aa3c 100644 --- a/lib/frrlua.c +++ b/lib/frrlua.c @@ -2,128 +2,365 @@ * This file defines the lua interface into * FRRouting. * - * Copyright (C) 2016 Cumulus Networks, Inc. - * Donald Sharp + * Copyright (C) 2016-2019 Cumulus Networks, Inc. + * Donald Sharp, Quentin Young * - * This file is part of FRRouting (FRR). + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
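A minimal sketch of how a consumer would use the reworked frrzmq_thread_add_read_msg() macro, which now records an xref for the call site before handing off to _frrzmq_thread_add_read(). The my_* names are hypothetical, and the assumption that the message callback drains the socket itself is inferred from its (arg, zmqsock) signature rather than stated in this hunk.

static struct frrzmq_cb *my_cb;

static void my_recv(void *arg, void *zmqsock)
{
	zmq_msg_t msg;

	zmq_msg_init(&msg);
	if (zmq_msg_recv(&msg, zmqsock, ZMQ_DONTWAIT) > 0)
		zlog_debug("received %zu bytes", zmq_msg_size(&msg));
	zmq_msg_close(&msg);
}

static void my_error(void *arg, void *zmqsock)
{
	zlog_warn("zmq socket error");
}

void my_zmq_start(struct thread_master *master, void *zmqsock)
{
	frrzmq_thread_add_read_msg(master, my_recv, my_error, NULL, zmqsock,
				   &my_cb);
}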
* - * FRR is free software; you can redistribute it and/or modify it under the - * terms of the GNU General Public License as published by the Free Software - * Foundation; either version 2, or (at your option) any later version. - * - * FRR is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. * * You should have received a copy of the GNU General Public License along - * with FRR; see the file COPYING. If not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <zebra.h> -#if defined(HAVE_LUA) +#ifdef HAVE_SCRIPTING + #include "prefix.h" #include "frrlua.h" #include "log.h" +#include "buffer.h" + +/* Lua stuff */ -static int lua_zlog_debug(lua_State *L) +/* + * FRR convenience functions. + * + * This section has convenience functions used to make interacting with the Lua + * stack easier. + */ + +int frrlua_table_get_integer(lua_State *L, const char *key) { - int debug_lua = 1; - const char *string = lua_tostring(L, 1); + int result; - if (debug_lua) - zlog_debug("%s", string); + lua_pushstring(L, key); + lua_gettable(L, -2); - return 0; + result = lua_tointeger(L, -1); + lua_pop(L, 1); + + return result; } -const char *get_string(lua_State *L, const char *key) +/* + * Encoders. + * + * This section has functions that convert internal FRR datatypes into Lua + * datatypes. 
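A short usage sketch for frrlua_table_get_integer(): the table must be sitting on top of the Lua stack when the helper is called, and the helper leaves the table in place. The global name "route_attrs" and the "metric" key are illustrative only, and this is a plain C helper, not a lua_CFunction.

static int fetch_metric(lua_State *L)
{
	int metric;

	lua_getglobal(L, "route_attrs");       /* pushes the table */
	metric = frrlua_table_get_integer(L, "metric");
	lua_pop(L, 1);                         /* drop the table */

	return metric;
}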
+ */ + +void lua_pushprefix(lua_State *L, const struct prefix *prefix) { - const char *str; + char buffer[PREFIX_STRLEN]; - lua_pushstring(L, key); - lua_gettable(L, -2); + lua_newtable(L); + lua_pushstring(L, prefix2str(prefix, buffer, PREFIX_STRLEN)); + lua_setfield(L, -2, "network"); + lua_pushinteger(L, prefix->prefixlen); + lua_setfield(L, -2, "length"); + lua_pushinteger(L, prefix->family); + lua_setfield(L, -2, "family"); +} - str = (const char *)lua_tostring(L, -1); +void *lua_toprefix(lua_State *L, int idx) +{ + struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix)); + + lua_getfield(L, idx, "network"); + (void)str2prefix(lua_tostring(L, -1), p); lua_pop(L, 1); - return str; + return p; } -int get_integer(lua_State *L, const char *key) +void lua_pushinterface(lua_State *L, const struct interface *ifp) { - int result; + lua_newtable(L); + lua_pushstring(L, ifp->name); + lua_setfield(L, -2, "name"); + lua_pushinteger(L, ifp->ifindex); + lua_setfield(L, -2, "ifindex"); + lua_pushinteger(L, ifp->status); + lua_setfield(L, -2, "status"); + lua_pushinteger(L, ifp->flags); + lua_setfield(L, -2, "flags"); + lua_pushinteger(L, ifp->metric); + lua_setfield(L, -2, "metric"); + lua_pushinteger(L, ifp->speed); + lua_setfield(L, -2, "speed"); + lua_pushinteger(L, ifp->mtu); + lua_setfield(L, -2, "mtu"); + lua_pushinteger(L, ifp->mtu6); + lua_setfield(L, -2, "mtu6"); + lua_pushinteger(L, ifp->bandwidth); + lua_setfield(L, -2, "bandwidth"); + lua_pushinteger(L, ifp->link_ifindex); + lua_setfield(L, -2, "link_ifindex"); + lua_pushinteger(L, ifp->ll_type); + lua_setfield(L, -2, "linklayer_type"); +} - lua_pushstring(L, key); - lua_gettable(L, -2); +void *lua_tointerface(lua_State *L, int idx) +{ + struct interface *ifp = XCALLOC(MTYPE_TMP, sizeof(struct interface)); - result = lua_tointeger(L, -1); + lua_getfield(L, idx, "name"); + strlcpy(ifp->name, lua_tostring(L, -1), sizeof(ifp->name)); + lua_pop(L, 1); + lua_getfield(L, idx, "ifindex"); + ifp->ifindex = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "status"); + ifp->status = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "flags"); + ifp->flags = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "metric"); + ifp->metric = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "speed"); + ifp->speed = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "mtu"); + ifp->mtu = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "mtu6"); + ifp->mtu6 = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "bandwidth"); + ifp->bandwidth = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "link_ifindex"); + ifp->link_ifindex = lua_tointeger(L, -1); + lua_pop(L, 1); + lua_getfield(L, idx, "linklayer_type"); + ifp->ll_type = lua_tointeger(L, -1); lua_pop(L, 1); - return result; + return ifp; } -static void *lua_alloc(void *ud, void *ptr, size_t osize, - size_t nsize) +void lua_pushinaddr(lua_State *L, const struct in_addr *addr) { - (void)ud; (void)osize; /* not used */ - if (nsize == 0) { - free(ptr); - return NULL; - } else - return realloc(ptr, nsize); + char buf[INET_ADDRSTRLEN]; + inet_ntop(AF_INET, addr, buf, sizeof(buf)); + + lua_newtable(L); + lua_pushinteger(L, addr->s_addr); + lua_setfield(L, -2, "value"); + lua_pushstring(L, buf); + lua_setfield(L, -2, "string"); } -lua_State *lua_initialize(const char *file) +void *lua_toinaddr(lua_State *L, int idx) { - int status; - lua_State *L = lua_newstate(lua_alloc, NULL); + struct in_addr *inaddr = 
XCALLOC(MTYPE_TMP, sizeof(struct in_addr)); - zlog_debug("Newstate: %p", L); - luaL_openlibs(L); - zlog_debug("Opened lib"); - status = luaL_loadfile(L, file); - if (status) { - zlog_debug("Failure to open %s %d", file, status); - lua_close(L); - return NULL; - } + lua_getfield(L, idx, "value"); + inaddr->s_addr = lua_tointeger(L, -1); + lua_pop(L, 1); - lua_pcall(L, 0, LUA_MULTRET, 0); - zlog_debug("Setting global function"); - lua_pushcfunction(L, lua_zlog_debug); - lua_setglobal(L, "zlog_debug"); + return inaddr; +} + + +void lua_pushin6addr(lua_State *L, const struct in6_addr *addr) +{ + char buf[INET6_ADDRSTRLEN]; + inet_ntop(AF_INET6, addr, buf, sizeof(buf)); + + lua_newtable(L); + lua_pushlstring(L, (const char *)addr->s6_addr, 16); + lua_setfield(L, -2, "value"); + lua_pushstring(L, buf); + lua_setfield(L, -2, "string"); +} + +void *lua_toin6addr(lua_State *L, int idx) +{ + struct in6_addr *in6addr = XCALLOC(MTYPE_TMP, sizeof(struct in6_addr)); + + lua_getfield(L, idx, "string"); + inet_pton(AF_INET6, lua_tostring(L, -1), in6addr); + lua_pop(L, 1); - return L; + return in6addr; } -void lua_setup_prefix_table(lua_State *L, const struct prefix *prefix) +void lua_pushsockunion(lua_State *L, const union sockunion *su) { - char buffer[100]; + char buf[SU_ADDRSTRLEN]; + sockunion2str(su, buf, sizeof(buf)); lua_newtable(L); - lua_pushstring(L, prefix2str(prefix, buffer, 100)); - lua_setfield(L, -2, "route"); - lua_pushinteger(L, prefix->family); - lua_setfield(L, -2, "family"); - lua_setglobal(L, "prefix"); + lua_pushlstring(L, (const char *)sockunion_get_addr(su), + sockunion_get_addrlen(su)); + lua_setfield(L, -2, "value"); + lua_pushstring(L, buf); + lua_setfield(L, -2, "string"); } -enum lua_rm_status lua_run_rm_rule(lua_State *L, const char *rule) +void *lua_tosockunion(lua_State *L, int idx) { - int status; + union sockunion *su = XCALLOC(MTYPE_TMP, sizeof(union sockunion)); + + lua_getfield(L, idx, "string"); + str2sockunion(lua_tostring(L, -1), su); + + return su; +} - lua_getglobal(L, rule); - status = lua_pcall(L, 0, 1, 0); - if (status) { - zlog_debug("Executing Failure with function: %s: %d", - rule, status); - return LUA_RM_FAILURE; +void lua_pushtimet(lua_State *L, const time_t *time) +{ + lua_pushinteger(L, *time); +} + +void *lua_totimet(lua_State *L, int idx) +{ + time_t *t = XCALLOC(MTYPE_TMP, sizeof(time_t)); + + *t = lua_tointeger(L, idx); + + return t; +} + +void lua_pushintegerp(lua_State *L, const long long *num) +{ + lua_pushinteger(L, *num); +} + +void *lua_tointegerp(lua_State *L, int idx) +{ + int isnum; + long long *num = XCALLOC(MTYPE_TMP, sizeof(long long)); + + *num = lua_tonumberx(L, idx, &isnum); + assert(isnum); + + return num; +} + +void *lua_tostringp(lua_State *L, int idx) +{ + char *string = XSTRDUP(MTYPE_TMP, lua_tostring(L, idx)); + + return string; +} + +/* + * Logging. + * + * Lua-compatible wrappers for FRR logging functions. 
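A hedged round-trip example for the encoder/decoder pairs above, using the prefix codec: the encoder pushes a Lua table, the decoder re-parses it into a fresh MTYPE_TMP allocation that the caller must free. The function and the 192.0.2.0/24 prefix are illustrative.

static void prefix_roundtrip(lua_State *L)
{
	struct prefix p, *copy;

	(void)str2prefix("192.0.2.0/24", &p);

	lua_pushprefix(L, &p);        /* table with network/length/family */
	copy = lua_toprefix(L, -1);   /* decode back into a new prefix */
	lua_pop(L, 1);

	zlog_debug("round-tripped %pFX", copy);
	XFREE(MTYPE_TMP, copy);
}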
+ */ +static const char *frrlua_log_thunk(lua_State *L) +{ + int nargs; + + nargs = lua_gettop(L); + assert(nargs == 1); + + return lua_tostring(L, 1); +} + +static int frrlua_log_debug(lua_State *L) +{ + zlog_debug("%s", frrlua_log_thunk(L)); + return 0; +} + +static int frrlua_log_info(lua_State *L) +{ + zlog_info("%s", frrlua_log_thunk(L)); + return 0; +} + +static int frrlua_log_notice(lua_State *L) +{ + zlog_notice("%s", frrlua_log_thunk(L)); + return 0; +} + +static int frrlua_log_warn(lua_State *L) +{ + zlog_warn("%s", frrlua_log_thunk(L)); + return 0; +} + +static int frrlua_log_error(lua_State *L) +{ + zlog_err("%s", frrlua_log_thunk(L)); + return 0; +} + +static const luaL_Reg log_funcs[] = { + {"debug", frrlua_log_debug}, + {"info", frrlua_log_info}, + {"notice", frrlua_log_notice}, + {"warn", frrlua_log_warn}, + {"error", frrlua_log_error}, + {}, +}; + +void frrlua_export_logging(lua_State *L) +{ + lua_newtable(L); + luaL_setfuncs(L, log_funcs, 0); + lua_setglobal(L, "log"); +} + +/* + * Debugging. + */ + +char *frrlua_stackdump(lua_State *L) +{ + int top = lua_gettop(L); + + char tmpbuf[64]; + struct buffer *buf = buffer_new(4098); + + for (int i = 1; i <= top; i++) { + int t = lua_type(L, i); + + switch (t) { + case LUA_TSTRING: /* strings */ + snprintf(tmpbuf, sizeof(tmpbuf), "\"%s\"\n", + lua_tostring(L, i)); + buffer_putstr(buf, tmpbuf); + break; + case LUA_TBOOLEAN: /* booleans */ + snprintf(tmpbuf, sizeof(tmpbuf), "%s\n", + lua_toboolean(L, i) ? "true" : "false"); + buffer_putstr(buf, tmpbuf); + break; + case LUA_TNUMBER: /* numbers */ + snprintf(tmpbuf, sizeof(tmpbuf), "%g\n", + lua_tonumber(L, i)); + buffer_putstr(buf, tmpbuf); + break; + default: /* other values */ + snprintf(tmpbuf, sizeof(tmpbuf), "%s\n", + lua_typename(L, t)); + buffer_putstr(buf, tmpbuf); + break; + } } - status = lua_tonumber(L, -1); - return status; + char *result = XSTRDUP(MTYPE_TMP, buffer_getstr(buf)); + + buffer_free(buf); + + return result; } -#endif + +#endif /* HAVE_SCRIPTING */ diff --git a/lib/frrlua.h b/lib/frrlua.h index 40c7a67b89..6fb30938b0 100644 --- a/lib/frrlua.h +++ b/lib/frrlua.h @@ -1,88 +1,184 @@ /* - * This file defines the lua interface into - * FRRouting. + * Copyright (C) 2016-2019 Cumulus Networks, Inc. + * Donald Sharp, Quentin Young * - * Copyright (C) 2016 Cumulus Networks, Inc. - * Donald Sharp + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. * - * This file is part of FRRouting (FRR). - * - * FRR is free software; you can redistribute it and/or modify it under the - * terms of the GNU General Public License as published by the Free Software - * Foundation; either version 2, or (at your option) any later version. - * - * FRR is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. * * You should have received a copy of the GNU General Public License along - * with FRR; see the file COPYING. 
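A small sketch combining frrlua_export_logging() and frrlua_stackdump(): once exported, scripts may call log.debug()/log.warn() and so on, and the stack-dump string is owned by the caller and released with XFREE(MTYPE_TMP, ...). The wrapper function name is illustrative.

static void debug_script_state(lua_State *L)
{
	char *dump;

	frrlua_export_logging(L);   /* scripts can now call log.debug(...) */

	dump = frrlua_stackdump(L);
	zlog_debug("lua stack:\n%s", dump);
	XFREE(MTYPE_TMP, dump);
}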
If not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifndef __LUA_H__ -#define __LUA_H__ +#ifndef __FRRLUA_H__ +#define __FRRLUA_H__ + +#include <zebra.h> -#if defined(HAVE_LUA) +#ifdef HAVE_SCRIPTING -#include "lua.h" -#include "lualib.h" -#include "lauxlib.h" +#include <lua.h> +#include <lualib.h> +#include <lauxlib.h> + +#include "prefix.h" +#include "frrscript.h" #ifdef __cplusplus extern "C" { #endif /* - * These functions are helper functions that - * try to glom some of the lua_XXX functionality - * into what we actually need, instead of having - * to make multiple calls to set up what - * we want + * gcc-10 is complaining about the wrapper function + * not being compatible with lua_pushstring returning + * a char *. Let's wrapper it here to make our life + * easier + */ +static inline void lua_pushstring_wrapper(lua_State *L, const char *str) +{ + (void)lua_pushstring(L, str); +} + +/* + * Converts a prefix to a Lua value and pushes it on the stack. + */ +void lua_pushprefix(lua_State *L, const struct prefix *prefix); + +/* + * Converts the Lua value at idx to a prefix. + * + * Returns: + * struct prefix allocated with MTYPE_TMP + */ +void *lua_toprefix(lua_State *L, int idx); + +/* + * Converts an interface to a Lua value and pushes it on the stack. + */ +void lua_pushinterface(lua_State *L, const struct interface *ifp); + +/* + * Converts the Lua value at idx to an interface. + * + * Returns: + * struct interface allocated with MTYPE_TMP. This interface is not hooked + * to anything, nor is it inserted in the global interface tree. */ -enum lua_rm_status { - /* - * Script function run failure. This will translate into a - * deny - */ - LUA_RM_FAILURE = 0, - /* - * No Match was found for the route map function - */ - LUA_RM_NOMATCH, - /* - * Match was found but no changes were made to the - * incoming data. - */ - LUA_RM_MATCH, - /* - * Match was found and data was modified, so - * figure out what changed - */ - LUA_RM_MATCH_AND_CHANGE, -}; +void *lua_tointerface(lua_State *L, int idx); /* - * Open up the lua.scr file and parse - * initial global values, if any. + * Converts an in_addr to a Lua value and pushes it on the stack. */ -lua_State *lua_initialize(const char *file); +void lua_pushinaddr(lua_State *L, const struct in_addr *addr); -void lua_setup_prefix_table(lua_State *L, const struct prefix *prefix); +/* + * Converts the Lua value at idx to an in_addr. + * + * Returns: + * struct in_addr allocated with MTYPE_TMP. + */ +void *lua_toinaddr(lua_State *L, int idx); -enum lua_rm_status lua_run_rm_rule(lua_State *L, const char *rule); +/* + * Converts an in6_addr to a Lua value and pushes it on the stack. + */ +void lua_pushin6addr(lua_State *L, const struct in6_addr *addr); /* - * Get particular string/integer information - * from a table. It is *assumed* that - * the table has already been selected + * Converts the Lua value at idx to an in6_addr. + * + * Returns: + * struct in6_addr allocated with MTYPE_TMP. */ -const char *get_string(lua_State *L, const char *key); -int get_integer(lua_State *L, const char *key); +void *lua_toin6addr(lua_State *L, int idx); + +/* + * Converts a time_t to a Lua value and pushes it on the stack. + */ +void lua_pushtimet(lua_State *L, const time_t *time); + +/* + * Converts the Lua value at idx to a time_t. 
+ * + * Returns: + * time_t allocated with MTYPE_TMP. + */ +void *lua_totimet(lua_State *L, int idx); + +/* + * Converts a sockunion to a Lua value and pushes it on the stack. + */ +void lua_pushsockunion(lua_State *L, const union sockunion *su); + +/* + * Converts the Lua value at idx to a sockunion. + * + * Returns: + * sockunion allocated with MTYPE_TMP. + */ +void *lua_tosockunion(lua_State *L, int idx); + +/* + * Converts an int to a Lua value and pushes it on the stack. + */ +void lua_pushintegerp(lua_State *L, const long long *num); + +/* + * Converts the Lua value at idx to an int. + * + * Returns: + * int allocated with MTYPE_TMP. + */ +void *lua_tointegerp(lua_State *L, int idx); + +/* + * Pop string. + * + * Sets *string to a copy of the string at the top of the stack. The copy is + * allocated with MTYPE_TMP and the caller is responsible for freeing it. + */ +void *lua_tostringp(lua_State *L, int idx); + +/* + * Retrieve an integer from table on the top of the stack. + * + * key + * Key of string value in table + */ +int frrlua_table_get_integer(lua_State *L, const char *key); + +/* + * Exports a new table containing bindings to FRR zlog functions into the + * global namespace. + * + * From Lua, these functions may be accessed as: + * + * - log.debug() + * - log.info() + * - log.warn() + * - log.error() + * + * They take a single string argument. + */ +void frrlua_export_logging(lua_State *L); + +/* + * Dump Lua stack to a string. + * + * Return value must be freed with XFREE(MTYPE_TMP, ...); + */ +char *frrlua_stackdump(lua_State *L); #ifdef __cplusplus } #endif -#endif -#endif +#endif /* HAVE_SCRIPTING */ + +#endif /* __FRRLUA_H__ */ diff --git a/lib/frrscript.c b/lib/frrscript.c new file mode 100644 index 0000000000..10d400886d --- /dev/null +++ b/lib/frrscript.c @@ -0,0 +1,272 @@ +/* Scripting foo + * Copyright (C) 2020 NVIDIA Corporation + * Quentin Young + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <zebra.h> + +#ifdef HAVE_SCRIPTING + +#include <stdarg.h> +#include <lua.h> + +#include "frrscript.h" +#include "frrlua.h" +#include "memory.h" +#include "hash.h" +#include "log.h" + + +DEFINE_MTYPE_STATIC(LIB, SCRIPT, "Scripting"); + +/* Codecs */ + +struct frrscript_codec frrscript_codecs_lib[] = { + {.typename = "integer", + .encoder = (encoder_func)lua_pushintegerp, + .decoder = lua_tointegerp}, + {.typename = "string", + .encoder = (encoder_func)lua_pushstring_wrapper, + .decoder = lua_tostringp}, + {.typename = "prefix", + .encoder = (encoder_func)lua_pushprefix, + .decoder = lua_toprefix}, + {.typename = "interface", + .encoder = (encoder_func)lua_pushinterface, + .decoder = lua_tointerface}, + {.typename = "in_addr", + .encoder = (encoder_func)lua_pushinaddr, + .decoder = lua_toinaddr}, + {.typename = "in6_addr", + .encoder = (encoder_func)lua_pushin6addr, + .decoder = lua_toin6addr}, + {.typename = "sockunion", + .encoder = (encoder_func)lua_pushsockunion, + .decoder = lua_tosockunion}, + {.typename = "time_t", + .encoder = (encoder_func)lua_pushtimet, + .decoder = lua_totimet}, + {}}; + +/* Type codecs */ + +struct hash *codec_hash; +char scriptdir[MAXPATHLEN]; + +static unsigned int codec_hash_key(const void *data) +{ + const struct frrscript_codec *c = data; + + return string_hash_make(c->typename); +} + +static bool codec_hash_cmp(const void *d1, const void *d2) +{ + const struct frrscript_codec *e1 = d1; + const struct frrscript_codec *e2 = d2; + + return strmatch(e1->typename, e2->typename); +} + +static void *codec_alloc(void *arg) +{ + struct frrscript_codec *tmp = arg; + + struct frrscript_codec *e = + XCALLOC(MTYPE_SCRIPT, sizeof(struct frrscript_codec)); + e->typename = XSTRDUP(MTYPE_SCRIPT, tmp->typename); + e->encoder = tmp->encoder; + e->decoder = tmp->decoder; + + return e; +} + +#if 0 +static void codec_free(struct codec *c) +{ + XFREE(MTYPE_TMP, c->typename); + XFREE(MTYPE_TMP, c); +} +#endif + +/* Generic script APIs */ + +int frrscript_call(struct frrscript *fs, struct frrscript_env *env) +{ + struct frrscript_codec c = {}; + const void *arg; + const char *bindname; + + /* Encode script arguments */ + for (int i = 0; env && env[i].val != NULL; i++) { + bindname = env[i].name; + c.typename = env[i].typename; + arg = env[i].val; + + struct frrscript_codec *codec = hash_lookup(codec_hash, &c); + assert(codec && "No encoder for type"); + codec->encoder(fs->L, arg); + + lua_setglobal(fs->L, bindname); + } + + int ret = lua_pcall(fs->L, 0, 0, 0); + + switch (ret) { + case LUA_OK: + break; + case LUA_ERRRUN: + zlog_err("Script '%s' runtime error: %s", fs->name, + lua_tostring(fs->L, -1)); + break; + case LUA_ERRMEM: + zlog_err("Script '%s' memory error: %s", fs->name, + lua_tostring(fs->L, -1)); + break; + case LUA_ERRERR: + zlog_err("Script '%s' error handler error: %s", fs->name, + lua_tostring(fs->L, -1)); + break; + case LUA_ERRGCMM: + zlog_err("Script '%s' garbage collector error: %s", fs->name, + lua_tostring(fs->L, -1)); + break; + default: + zlog_err("Script '%s' unknown error: %s", fs->name, + lua_tostring(fs->L, -1)); + break; + } + + if (ret != LUA_OK) { + lua_pop(fs->L, 1); + goto done; + } + +done: + /* LUA_OK is 0, so we can just return lua_pcall's result directly */ + return ret; +} + +void 
*frrscript_get_result(struct frrscript *fs, + const struct frrscript_env *result) +{ + void *r; + struct frrscript_codec c = {.typename = result->typename}; + + struct frrscript_codec *codec = hash_lookup(codec_hash, &c); + assert(codec && "No encoder for type"); + + if (!codec->decoder) { + zlog_err("No script decoder for type '%s'", result->typename); + return NULL; + } + + lua_getglobal(fs->L, result->name); + r = codec->decoder(fs->L, -1); + lua_pop(fs->L, 1); + + return r; +} + +void frrscript_register_type_codec(struct frrscript_codec *codec) +{ + struct frrscript_codec c = *codec; + + if (hash_lookup(codec_hash, &c)) { + zlog_backtrace(LOG_ERR); + assert(!"Type codec double-registered."); + } + + assert(hash_get(codec_hash, &c, codec_alloc)); +} + +void frrscript_register_type_codecs(struct frrscript_codec *codecs) +{ + for (int i = 0; codecs[i].typename != NULL; i++) + frrscript_register_type_codec(&codecs[i]); +} + +struct frrscript *frrscript_load(const char *name, + int (*load_cb)(struct frrscript *)) +{ + struct frrscript *fs = XCALLOC(MTYPE_SCRIPT, sizeof(struct frrscript)); + + fs->name = XSTRDUP(MTYPE_SCRIPT, name); + fs->L = luaL_newstate(); + frrlua_export_logging(fs->L); + + char fname[MAXPATHLEN * 2]; + snprintf(fname, sizeof(fname), "%s/%s.lua", scriptdir, fs->name); + + int ret = luaL_loadfile(fs->L, fname); + + switch (ret) { + case LUA_OK: + break; + case LUA_ERRSYNTAX: + zlog_err("Failed loading script '%s': syntax error: %s", fname, + lua_tostring(fs->L, -1)); + break; + case LUA_ERRMEM: + zlog_err("Failed loading script '%s': out-of-memory error: %s", + fname, lua_tostring(fs->L, -1)); + break; + case LUA_ERRGCMM: + zlog_err( + "Failed loading script '%s': garbage collector error: %s", + fname, lua_tostring(fs->L, -1)); + break; + case LUA_ERRFILE: + zlog_err("Failed loading script '%s': file read error: %s", + fname, lua_tostring(fs->L, -1)); + break; + default: + zlog_err("Failed loading script '%s': unknown error: %s", fname, + lua_tostring(fs->L, -1)); + break; + } + + if (ret != LUA_OK) + goto fail; + + if (load_cb && (*load_cb)(fs) != 0) + goto fail; + + return fs; +fail: + frrscript_unload(fs); + return NULL; +} + +void frrscript_unload(struct frrscript *fs) +{ + lua_close(fs->L); + XFREE(MTYPE_SCRIPT, fs->name); + XFREE(MTYPE_SCRIPT, fs); +} + +void frrscript_init(const char *sd) +{ + codec_hash = hash_create(codec_hash_key, codec_hash_cmp, + "Lua type encoders"); + + strlcpy(scriptdir, sd, sizeof(scriptdir)); + + /* Register core library types */ + frrscript_register_type_codecs(frrscript_codecs_lib); +} + +#endif /* HAVE_SCRIPTING */ diff --git a/lib/frrscript.h b/lib/frrscript.h new file mode 100644 index 0000000000..f4057f531b --- /dev/null +++ b/lib/frrscript.h @@ -0,0 +1,138 @@ +/* Scripting foo + * Copyright (C) 2020 NVIDIA Corporation + * Quentin Young + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
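A hedged sketch of how a daemon could register its own codec through frrscript_register_type_codec(). The "peer" typename, struct my_peer and the lua_*peer helpers are invented for illustration and are not part of this patch; only the frrscript codec types and the Lua C API calls are real.

struct my_peer {
	char hostname[64];
};

static void lua_pushpeer(lua_State *L, const struct my_peer *peer)
{
	lua_newtable(L);
	lua_pushstring(L, peer->hostname);
	lua_setfield(L, -2, "hostname");
}

static void *lua_topeer(lua_State *L, int idx)
{
	struct my_peer *peer = XCALLOC(MTYPE_TMP, sizeof(*peer));

	lua_getfield(L, idx, "hostname");
	strlcpy(peer->hostname, lua_tostring(L, -1), sizeof(peer->hostname));
	lua_pop(L, 1);

	return peer;
}

static struct frrscript_codec peer_codec = {
	.typename = "peer",
	.encoder = (encoder_func)lua_pushpeer,
	.decoder = lua_topeer,
};

/* Called once from the daemon's init path, after frrscript_init(). */
void my_daemon_script_init(void)
{
	frrscript_register_type_codec(&peer_codec);
}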
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __FRRSCRIPT_H__ +#define __FRRSCRIPT_H__ + +#include <zebra.h> + +#ifdef HAVE_SCRIPTING + +#include <lua.h> +#include "frrlua.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*encoder_func)(lua_State *, const void *); +typedef void *(*decoder_func)(lua_State *, int); + +struct frrscript_codec { + const char *typename; + encoder_func encoder; + decoder_func decoder; +}; + +struct frrscript { + /* Script name */ + char *name; + + /* Lua state */ + struct lua_State *L; +}; + +struct frrscript_env { + /* Value type */ + const char *typename; + + /* Binding name */ + const char *name; + + /* Value */ + const void *val; +}; + +/* + * Create new FRR script. + */ +struct frrscript *frrscript_load(const char *name, + int (*load_cb)(struct frrscript *)); + +/* + * Destroy FRR script. + */ +void frrscript_unload(struct frrscript *fs); + +/* + * Register a Lua codec for a type. + * + * tname + * Name of type; e.g., "peer", "ospf_interface", etc. Chosen at will. + * + * codec(s) + * Function pointer to codec struct. Encoder function should push a Lua + * table representing the passed argument - which will have the C type + * associated with the chosen 'tname' to the provided stack. The decoder + * function should pop a value from the top of the stack and return a heap + * chunk containing that value. Allocations should be made with MTYPE_TMP. + * + * If using the plural function variant, pass a NULL-terminated array. + * + */ +void frrscript_register_type_codec(struct frrscript_codec *codec); +void frrscript_register_type_codecs(struct frrscript_codec *codecs); + +/* + * Initialize scripting subsystem. Call this before anything else. + * + * scriptdir + * Directory in which to look for scripts + */ +void frrscript_init(const char *scriptdir); + + +/* + * Call script. + * + * fs + * The script to call; this is obtained from frrscript_load(). + * + * env + * The script's environment. Specify this as an array of frrscript_env. + * + * Returns: + * 0 if the script ran successfully, nonzero otherwise. + */ +int frrscript_call(struct frrscript *fs, struct frrscript_env *env); + + +/* + * Get result from finished script. + * + * fs + * The script. This script must have been run already. + * + * result + * The result to extract from the script. + * This reuses the frrscript_env type, but only the typename and name fields + * need to be set. The value is returned directly. + * + * Returns: + * The script result of the specified name and type, or NULL. 
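A minimal end-to-end sketch of the frrscript API declared above, complementing the bare frrscript_load()/frrscript_call(fs, NULL) use in the DEFUN(script, ...) test command earlier in this series: load a script, bind one argument, run it, pull a result back, unload. The script name "filter", the "prefix"/"action" bindings and the surrounding function are assumptions; the script is assumed to set a global integer named "action".

static int run_filter(const struct prefix *p)
{
	struct frrscript *fs = frrscript_load("filter", NULL);
	int action = 0;

	if (!fs)
		return 0;

	struct frrscript_env env[] = {
		{"prefix", "prefix", p},
		{},
	};

	if (frrscript_call(fs, env) == 0) {
		struct frrscript_env res = {"integer", "action", NULL};
		long long *result = frrscript_get_result(fs, &res);

		if (result) {
			action = *result;
			XFREE(MTYPE_TMP, result);
		}
	}

	frrscript_unload(fs);
	return action;
}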
+ */ +void *frrscript_get_result(struct frrscript *fs, + const struct frrscript_env *result); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* HAVE_SCRIPTING */ + +#endif /* __FRRSCRIPT_H__ */ diff --git a/lib/hash.c b/lib/hash.c index ed429b77d0..ec616ee724 100644 --- a/lib/hash.c +++ b/lib/hash.c @@ -32,7 +32,7 @@ #include "libfrr_trace.h" DEFINE_MTYPE_STATIC(LIB, HASH, "Hash") -DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket") +DEFINE_MTYPE_STATIC(LIB, HASH_BUCKET, "Hash Bucket") DEFINE_MTYPE_STATIC(LIB, HASH_INDEX, "Hash Index") static pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER; @@ -168,7 +168,7 @@ void *hash_get(struct hash *hash, void *data, void *(*alloc_func)(void *)) index = key & (hash->size - 1); } - bucket = XCALLOC(MTYPE_HASH_BACKET, sizeof(struct hash_bucket)); + bucket = XCALLOC(MTYPE_HASH_BUCKET, sizeof(struct hash_bucket)); bucket->data = newdata; bucket->key = key; bucket->next = hash->index[index]; @@ -239,7 +239,7 @@ void *hash_release(struct hash *hash, void *data) hash_update_ssq(hash, oldlen, newlen); ret = bucket->data; - XFREE(MTYPE_HASH_BACKET, bucket); + XFREE(MTYPE_HASH_BUCKET, bucket); hash->count--; break; } @@ -302,7 +302,7 @@ void hash_clean(struct hash *hash, void (*free_func)(void *)) if (free_func) (*free_func)(hb->data); - XFREE(MTYPE_HASH_BACKET, hb); + XFREE(MTYPE_HASH_BUCKET, hb); hash->count--; } hash->index[i] = NULL; diff --git a/lib/hash.h b/lib/hash.h index 23e93b6d7d..47d951a34b 100644 --- a/lib/hash.h +++ b/lib/hash.h @@ -76,7 +76,7 @@ struct hash { /* Data compare function. */ bool (*hash_cmp)(const void *, const void *); - /* Backet alloc. */ + /* Bucket alloc. */ unsigned long count; struct hashstats stats; @@ -351,6 +351,40 @@ struct interface *if_lookup_by_index(ifindex_t ifindex, vrf_id_t vrf_id) return NULL; } +/* Interface existance check by index. */ +struct interface *if_vrf_lookup_by_index_next(ifindex_t ifindex, + vrf_id_t vrf_id) +{ + struct vrf *vrf = vrf_lookup_by_id(vrf_id); + struct interface *tmp_ifp; + bool found = false; + + if (!vrf) + return NULL; + + if (ifindex == 0) { + tmp_ifp = RB_MIN(if_index_head, &vrf->ifaces_by_index); + /* skip the vrf interface */ + if (tmp_ifp && if_is_vrf(tmp_ifp)) + ifindex = tmp_ifp->ifindex; + else + return tmp_ifp; + } + + RB_FOREACH (tmp_ifp, if_index_head, &vrf->ifaces_by_index) { + if (found) { + /* skip the vrf interface */ + if (tmp_ifp && if_is_vrf(tmp_ifp)) + continue; + else + return tmp_ifp; + } + if (tmp_ifp->ifindex == ifindex) + found = true; + } + return NULL; +} + const char *ifindex2ifname(ifindex_t ifindex, vrf_id_t vrf_id) { struct interface *ifp; @@ -802,70 +836,6 @@ void if_dump_all(void) if_dump(ifp); } -#if 0 -/* For debug purpose. 
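A short usage sketch for the new if_vrf_lookup_by_index_next() helper from lib/if.c above: passing ifindex 0 starts the walk, each subsequent call continues from the previous interface, and the VRF device itself is skipped. The wrapper function is illustrative.

static void walk_vrf_interfaces(vrf_id_t vrf_id)
{
	struct interface *ifp;

	for (ifp = if_vrf_lookup_by_index_next(0, vrf_id); ifp;
	     ifp = if_vrf_lookup_by_index_next(ifp->ifindex, vrf_id))
		zlog_debug("ifindex %d: %s", ifp->ifindex, ifp->name);
}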
*/ -DEFUN (show_address, - show_address_cmd, - "show address [vrf NAME]", - SHOW_STR - "address\n" - VRF_CMD_HELP_STR) -{ - int idx_vrf = 3; - struct listnode *node; - struct interface *ifp; - struct connected *ifc; - struct prefix *p; - vrf_id_t vrf_id = VRF_DEFAULT; - - if (argc > 2) - VRF_GET_ID (vrf_id, argv[idx_vrf]->arg); - - FOR_ALL_INTERFACES (vrf, ifp) { - for (ALL_LIST_ELEMENTS_RO (ifp->connected, node, ifc)) { - p = ifc->address; - - if (p->family == AF_INET) - vty_out (vty, "%pFX\n", p); - } - } - return CMD_SUCCESS; -} - -DEFUN (show_address_vrf_all, - show_address_vrf_all_cmd, - "show address vrf all", - SHOW_STR - "address\n" - VRF_ALL_CMD_HELP_STR) -{ - struct vrf *vrf; - struct listnode *node; - struct interface *ifp; - struct connected *ifc; - struct prefix *p; - - RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) - { - if (RB_EMPTY (if_name_head, &vrf->ifaces_by_name)) - continue; - - vty_out (vty, "\nVRF %s(%u)\n\n", - VRF_LOGNAME(vrf), vrf->vrf_id); - - FOR_ALL_INTERFACES (vrf, ifp) { - for (ALL_LIST_ELEMENTS_RO (ifp->connected, node, ifc)) { - p = ifc->address; - - if (p->family == AF_INET) - vty_out (vty, "%pFX\n", p); - } - } - } - return CMD_SUCCESS; -} -#endif - /* Allocate connected structure. */ struct connected *connected_new(void) { @@ -1083,84 +1053,6 @@ struct connected *connected_get_linklocal(struct interface *ifp) return c; } -#if 0 /* this route_table of struct connected's is unused \ - * however, it would be good to use a route_table rather than \ - * a list.. \ - */ -/* Interface looking up by interface's address. */ -/* Interface's IPv4 address reverse lookup table. */ -struct route_table *ifaddr_ipv4_table; -/* struct route_table *ifaddr_ipv6_table; */ - -static void -ifaddr_ipv4_add (struct in_addr *ifaddr, struct interface *ifp) -{ - struct route_node *rn; - struct prefix_ipv4 p; - - p.family = AF_INET; - p.prefixlen = IPV4_MAX_PREFIXLEN; - p.prefix = *ifaddr; - - rn = route_node_get (ifaddr_ipv4_table, (struct prefix *) &p); - if (rn) - { - route_unlock_node (rn); - zlog_info("ifaddr_ipv4_add(): address %pI4 is already added", - ifaddr); - return; - } - rn->info = ifp; -} - -static void -ifaddr_ipv4_delete (struct in_addr *ifaddr, struct interface *ifp) -{ - struct route_node *rn; - struct prefix_ipv4 p; - - p.family = AF_INET; - p.prefixlen = IPV4_MAX_PREFIXLEN; - p.prefix = *ifaddr; - - rn = route_node_lookup (ifaddr_ipv4_table, (struct prefix *) &p); - if (! rn) - { - zlog_info("%s: can't find address %pI4", __func__, ifaddr); - return; - } - rn->info = NULL; - route_unlock_node (rn); - route_unlock_node (rn); -} - -/* Lookup interface by interface's IP address or interface index. */ -static struct interface * -ifaddr_ipv4_lookup (struct in_addr *addr, ifindex_t ifindex) -{ - struct prefix_ipv4 p; - struct route_node *rn; - struct interface *ifp; - - if (addr) - { - p.family = AF_INET; - p.prefixlen = IPV4_MAX_PREFIXLEN; - p.prefix = *addr; - - rn = route_node_lookup (ifaddr_ipv4_table, (struct prefix *) &p); - if (! 
rn) - return NULL; - - ifp = rn->info; - route_unlock_node (rn); - return ifp; - } - else - return if_lookup_by_index(ifindex, VRF_DEFAULT); -} -#endif /* ifaddr_ipv4_table */ - void if_terminate(struct vrf *vrf) { struct interface *ifp; @@ -513,6 +513,8 @@ extern struct interface *if_create_name(const char *name, vrf_id_t vrf_id); /* Create new interface, adds to index list only */ extern struct interface *if_create_ifindex(ifindex_t ifindex, vrf_id_t vrf_id); extern struct interface *if_lookup_by_index(ifindex_t, vrf_id_t vrf_id); +extern struct interface *if_vrf_lookup_by_index_next(ifindex_t ifindex, + vrf_id_t vrf_id); extern struct interface *if_lookup_by_index_all_vrf(ifindex_t); extern struct interface *if_lookup_exact_address(const void *matchaddr, int family, vrf_id_t vrf_id); diff --git a/lib/lib_vty.c b/lib/lib_vty.c index cd8b5c9809..128261a39c 100644 --- a/lib/lib_vty.c +++ b/lib/lib_vty.c @@ -43,10 +43,14 @@ #include "vty.h" #include "command.h" -#ifdef HAVE_MALLINFO +#if defined(HAVE_MALLINFO2) || defined(HAVE_MALLINFO) static int show_memory_mallinfo(struct vty *vty) { +#if defined(HAVE_MALLINFO2) + struct mallinfo2 minfo = mallinfo2(); +#elif defined(HAVE_MALLINFO) struct mallinfo minfo = mallinfo(); +#endif char buf[MTYPE_MEMSTR_LEN]; vty_out(vty, "System allocator statistics:\n"); diff --git a/lib/libfrr.c b/lib/libfrr.c index 8e7777a1a9..51b97369c9 100644 --- a/lib/libfrr.c +++ b/lib/libfrr.c @@ -43,6 +43,7 @@ #include "frrcu.h" #include "frr_pthread.h" #include "defaults.h" +#include "frrscript.h" DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm)) DEFINE_HOOK(frr_very_late_init, (struct thread_master * tm), (tm)) @@ -55,6 +56,7 @@ char frr_vtydir[256]; const char frr_dbdir[] = DAEMON_DB_DIR; #endif const char frr_moduledir[] = MODULE_PATH; +const char frr_scriptdir[] = SCRIPT_PATH; char frr_protoname[256] = "NONE"; char frr_protonameinst[256] = "NONE"; @@ -69,6 +71,7 @@ static char vtypath_default[512]; bool debug_memstats_at_exit = false; static bool nodetach_term, nodetach_daemon; +static uint64_t startup_fds; static char comb_optstr[256]; static struct option comb_lo[64]; @@ -100,6 +103,7 @@ static void opt_extend(const struct optspec *os) #define OPTION_DB_FILE 1006 #define OPTION_LOGGING 1007 #define OPTION_LIMIT_FDS 1008 +#define OPTION_SCRIPTDIR 1009 static const struct option lo_always[] = { {"help", no_argument, NULL, 'h'}, @@ -110,6 +114,7 @@ static const struct option lo_always[] = { {"pathspace", required_argument, NULL, 'N'}, {"vty_socket", required_argument, NULL, OPTION_VTYSOCK}, {"moduledir", required_argument, NULL, OPTION_MODULEDIR}, + {"scriptdir", required_argument, NULL, OPTION_SCRIPTDIR}, {"log", required_argument, NULL, OPTION_LOG}, {"log-level", required_argument, NULL, OPTION_LOGLEVEL}, {"tcli", no_argument, NULL, OPTION_TCLI}, @@ -126,6 +131,7 @@ static const struct optspec os_always = { " -N, --pathspace Insert prefix into config & socket paths\n" " --vty_socket Override vty socket path\n" " --moduledir Override modules directory\n" + " --scriptdir Override scripts directory\n" " --log Set Logging to stdout, syslog, or file:<name>\n" " --log-level Set Logging Level to use, debug, info, warn, etc\n" " --tcli Use transaction-based CLI\n" @@ -336,6 +342,28 @@ void frr_preinit(struct frr_daemon_info *daemon, int argc, char **argv) strlcpy(frr_protonameinst, di->logname, sizeof(frr_protonameinst)); di->cli_mode = FRR_CLI_CLASSIC; + + /* we may be starting with extra FDs open for whatever purpose, + * e.g. logging, some module, etc. 
Recording them here allows later + * checking whether an fd is valid for such extension purposes, + * without this we could end up e.g. logging to a BGP session fd. + */ + startup_fds = 0; + for (int i = 0; i < 64; i++) { + struct stat st; + + if (fstat(i, &st)) + continue; + if (S_ISDIR(st.st_mode) || S_ISBLK(st.st_mode)) + continue; + + startup_fds |= UINT64_C(0x1) << (uint64_t)i; + } +} + +bool frr_is_startup_fd(int fd) +{ + return !!(startup_fds & (UINT64_C(0x1) << (uint64_t)fd)); } void frr_opt_add(const char *optstr, const struct option *longopts, @@ -533,6 +561,14 @@ static int frr_opt(int opt) } di->module_path = optarg; break; + case OPTION_SCRIPTDIR: + if (di->script_path) { + fprintf(stderr, "--scriptdir option specified more than once!\n"); + errors++; + break; + } + di->script_path = optarg; + break; case OPTION_TCLI: di->cli_mode = FRR_CLI_TRANSACTIONAL; break; @@ -717,6 +753,9 @@ struct thread_master *frr_init(void) lib_cmd_init(); frr_pthread_init(); +#ifdef HAVE_SCRIPTING + frrscript_init(di->script_path ? di->script_path : frr_scriptdir); +#endif log_ref_init(); log_ref_vty_init(); diff --git a/lib/libfrr.h b/lib/libfrr.h index 2e4dcbe093..825f502bdf 100644 --- a/lib/libfrr.h +++ b/lib/libfrr.h @@ -81,6 +81,7 @@ struct frr_daemon_info { #endif const char *vty_path; const char *module_path; + const char *script_path; const char *pathspace; bool zpathspace; @@ -137,7 +138,8 @@ extern __attribute__((__noreturn__)) void frr_help_exit(int status); extern struct thread_master *frr_init(void); extern const char *frr_get_progname(void); extern enum frr_cli_mode frr_get_cli_mode(void); -uint32_t frr_get_fd_limit(void); +extern uint32_t frr_get_fd_limit(void); +extern bool frr_is_startup_fd(int fd); DECLARE_HOOK(frr_late_init, (struct thread_master * tm), (tm)) DECLARE_HOOK(frr_very_late_init, (struct thread_master * tm), (tm)) @@ -162,6 +164,7 @@ extern char frr_zclientpath[256]; extern const char frr_sysconfdir[]; extern char frr_vtydir[256]; extern const char frr_moduledir[]; +extern const char frr_scriptdir[]; extern char frr_protoname[]; extern char frr_protonameinst[]; diff --git a/lib/link_state.c b/lib/link_state.c new file mode 100644 index 0000000000..6bd7ef703c --- /dev/null +++ b/lib/link_state.c @@ -0,0 +1,1282 @@ +/* + * Link State Database - link_state.c + * + * Author: Olivier Dugeon <olivier.dugeon@orange.com> + * + * Copyright (C) 2020 Orange http://www.orange.com + * + * This file is part of Free Range Routing (FRR). + * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
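Referring back to the startup-fd tracking added in libfrr.c above, a hedged sketch of the kind of check frr_is_startup_fd() enables; the wrapper function and its use for log targets are illustrative only.

static bool fd_ok_for_log_target(int fd)
{
	if (!frr_is_startup_fd(fd)) {
		zlog_warn("fd %d was not open at startup, refusing to log to it",
			  fd);
		return false;
	}
	return true;
}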
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "if.h" +#include "linklist.h" +#include "log.h" +#include "command.h" +#include "termtable.h" +#include "memory.h" +#include "prefix.h" +#include "table.h" +#include "vty.h" +#include "zclient.h" +#include "stream.h" +#include "link_state.h" + +/* Link State Memory allocation */ +DEFINE_MTYPE_STATIC(LIB, LS_DB, "Link State Database") + +/** + * Link State Node management functions + */ +struct ls_node *ls_node_new(struct ls_node_id adv, struct in_addr rid, + struct in6_addr rid6) +{ + struct ls_node *new; + + if (adv.origin == NONE) + return NULL; + + new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_node)); + new->adv = adv; + if (!IPV4_NET0(rid.s_addr)) { + new->router_id = rid; + SET_FLAG(new->flags, LS_NODE_ROUTER_ID); + } else { + if (adv.origin == OSPFv2 || adv.origin == STATIC + || adv.origin == DIRECT) { + new->router_id = adv.id.ip.addr; + SET_FLAG(new->flags, LS_NODE_ROUTER_ID); + } + } + if (!IN6_IS_ADDR_UNSPECIFIED(&rid6)) { + new->router6_id = rid6; + SET_FLAG(new->flags, LS_NODE_ROUTER_ID6); + } + return new; +} + +void ls_node_del(struct ls_node *node) +{ + XFREE(MTYPE_LS_DB, node); + node = NULL; +} + +int ls_node_same(struct ls_node *n1, struct ls_node *n2) +{ + if ((n1 && !n2) || (!n1 && n2)) + return 0; + + if (n1 == n2) + return 1; + + if (n1->flags != n2->flags) + return 0; + + if (n1->adv.origin != n2->adv.origin) + return 0; + + if (!memcmp(&n1->adv.id, &n2->adv.id, sizeof(struct ls_node_id))) + return 0; + + /* Do we need to test individually each field, instead performing a + * global memcmp? There is a risk that an old value that is bit masked + * i.e. corresponding flag = 0, will result into a false negative + */ + if (!memcmp(n1, n2, sizeof(struct ls_node))) + return 0; + else + return 1; +} + +/** + * Link State Attributes management functions + */ +struct ls_attributes *ls_attributes_new(struct ls_node_id adv, + struct in_addr local, + struct in6_addr local6, + uint32_t local_id) +{ + struct ls_attributes *new; + + if (adv.origin == NONE) + return NULL; + + new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_attributes)); + new->adv = adv; + if (!IPV4_NET0(local.s_addr)) { + new->standard.local = local; + SET_FLAG(new->flags, LS_ATTR_LOCAL_ADDR); + } + if (!IN6_IS_ADDR_UNSPECIFIED(&local6)) { + new->standard.local6 = local6; + SET_FLAG(new->flags, LS_ATTR_LOCAL_ADDR6); + } + if (local_id != 0) { + new->standard.local_id = local_id; + SET_FLAG(new->flags, LS_ATTR_LOCAL_ID); + } + + /* Check that almost one identifier is set */ + if (!CHECK_FLAG(new->flags, LS_ATTR_LOCAL_ADDR | LS_ATTR_LOCAL_ADDR6 + | LS_ATTR_LOCAL_ID)) { + XFREE(MTYPE_LS_DB, new); + return NULL; + } + + return new; +} + +void ls_attributes_del(struct ls_attributes *attr) +{ + if (!attr) + return; + + if (attr->srlgs) + XFREE(MTYPE_LS_DB, attr->srlgs); + + XFREE(MTYPE_LS_DB, attr); + attr = NULL; +} + +int ls_attributes_same(struct ls_attributes *l1, struct ls_attributes *l2) +{ + if ((l1 && !l2) || (!l1 && l2)) + return 0; + + if (l1 == l2) + return 1; + + if (l1->flags != l2->flags) + return 0; + + if (l1->adv.origin != l2->adv.origin) + return 0; + + if (!memcmp(&l1->adv.id, &l2->adv.id, sizeof(struct ls_node_id))) + return 0; + + /* Do we need to test individually each field, instead performing a + * global memcmp? 
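A hedged sketch of creating a link-state node and inserting it as a vertex with the lib/link_state.c functions above. How the struct ls_ted itself is allocated is not shown in this hunk, so the ted argument is assumed to come from elsewhere; the OSPFv2 origin and the helper name are just examples.

static struct ls_vertex *add_ospf_node(struct ls_ted *ted,
				       struct in_addr router_id)
{
	struct ls_node_id adv = {.origin = OSPFv2};
	struct ls_node *node;

	adv.id.ip.addr = router_id;

	/* For OSPFv2 the advertising router doubles as the router ID */
	node = ls_node_new(adv, router_id, in6addr_any);
	if (!node)
		return NULL;

	/* ls_vertex_add() keys the vertex by the IPv4 router ID */
	return ls_vertex_add(ted, node);
}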
There is a risk that an old value that is bit masked + * i.e. corresponding flag = 0, will result into a false negative + */ + if (!memcmp(l1, l2, sizeof(struct ls_attributes))) + return 0; + else + return 1; +} + +/** + * Link State Vertices management functions + */ +struct ls_vertex *ls_vertex_new(struct ls_node *node) +{ + struct ls_vertex *new; + + if (node == NULL) + return NULL; + + new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_vertex)); + new->node = node; + new->incoming_edges = list_new(); + new->outgoing_edges = list_new(); + new->prefixes = list_new(); + + return new; +} + +void ls_vertex_del(struct ls_vertex *vertex) +{ + if (vertex == NULL) + return; + + list_delete_all_node(vertex->incoming_edges); + list_delete_all_node(vertex->outgoing_edges); + list_delete_all_node(vertex->prefixes); + XFREE(MTYPE_LS_DB, vertex); + vertex = NULL; +} + +struct ls_vertex *ls_vertex_add(struct ls_ted *ted, struct ls_node *node) +{ + struct ls_vertex *new; + + if ((ted == NULL) || (node == NULL)) + return NULL; + + new = ls_vertex_new(node); + if (!new) + return NULL; + + /* set Key as the IPv4/Ipv6 Router ID or ISO System ID */ + switch (node->adv.origin) { + case OSPFv2: + case STATIC: + case DIRECT: + memcpy(&new->key, &node->adv.id.ip.addr, IPV4_MAX_BYTELEN); + break; + case ISIS_L1: + case ISIS_L2: + memcpy(&new->key, &node->adv.id.iso.sys_id, ISO_SYS_ID_LEN); + break; + default: + new->key = 0; + break; + } + + /* Remove Vertex if key is not set */ + if (new->key == 0) { + ls_vertex_del(new); + return NULL; + } + + /* Add Vertex to TED */ + vertices_add(&ted->vertices, new); + + return new; +} + +struct ls_vertex *ls_vertex_update(struct ls_ted *ted, struct ls_node *node) +{ + struct ls_vertex *old; + + if (node == NULL) + return NULL; + + old = ls_find_vertex_by_id(ted, node->adv); + if (old) { + if (!ls_node_same(old->node, node)) { + ls_node_del(old->node); + old->node = node; + } + return old; + } + + return ls_vertex_add(ted, node); +} + +void ls_vertex_remove(struct ls_ted *ted, struct ls_vertex *vertex) +{ + vertices_del(&ted->vertices, vertex); + ls_vertex_del(vertex); +} + +struct ls_vertex *ls_find_vertex_by_key(struct ls_ted *ted, const uint64_t key) +{ + struct ls_vertex node = {}; + + if (key == 0) + return NULL; + + node.key = key; + return vertices_find(&ted->vertices, &node); +} + +struct ls_vertex *ls_find_vertex_by_id(struct ls_ted *ted, + struct ls_node_id nid) +{ + struct ls_vertex node = {}; + + switch (nid.origin) { + case OSPFv2: + case STATIC: + case DIRECT: + memcpy(&node.key, &nid.id.ip.addr, IPV4_MAX_BYTELEN); + break; + case ISIS_L1: + case ISIS_L2: + memcpy(&node.key, &nid.id.iso.sys_id, ISO_SYS_ID_LEN); + break; + default: + return NULL; + } + + return vertices_find(&ted->vertices, &node); +} + +int ls_vertex_same(struct ls_vertex *v1, struct ls_vertex *v2) +{ + if ((v1 && !v2) || (!v1 && v2)) + return 0; + + if (!v1 && !v2) + return 1; + + if (v1->key != v2->key) + return 0; + + if (v1->node == v2->node) + return 1; + + return ls_node_same(v1->node, v2->node); +} + +/** + * Link State Edges management functions + */ + +/** + * This function allows to connect the Edge to the vertices present in the TED. + * A temporary vertex that corresponds to the source of this Edge i.e. the + * advertised router, is created if not found in the Data Base. If a Edge that + * corresponds to the reverse path is found, the Edge is attached to the + * destination vertex as destination and reverse Edge is attached to the source + * vertex as source. 
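The vertex helpers in this hunk derive the 64-bit vertex key by copying the advertising router's IPv4 address (OSPFv2/STATIC/DIRECT origins) or its 6-byte ISO system ID (IS-IS) into the key. A hedged sketch of a lookup built on that convention, assuming lib/link_state.h and prefix.h; it is functionally what ls_find_vertex_by_id() already provides and only illustrates how the key is formed:

    #include <string.h>

    #include "prefix.h"        /* IPV4_MAX_BYTELEN */
    #include "link_state.h"

    /* Sketch: find the vertex of an OSPFv2 router by rebuilding the key the
     * same way ls_vertex_add() does for OSPFv2/STATIC/DIRECT origins. */
    static struct ls_vertex *find_ospf_vertex(struct ls_ted *ted,
                                              struct in_addr router_id)
    {
            uint64_t key = 0;

            memcpy(&key, &router_id, IPV4_MAX_BYTELEN);

            return ls_find_vertex_by_key(ted, key);
    }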
+ * + * @param ted Link State Data Base + * @param edge Link State Edge to be attached + */ +static void ls_edge_connect_to(struct ls_ted *ted, struct ls_edge *edge) +{ + struct ls_vertex *vertex = NULL; + struct ls_node *node; + struct ls_edge *dst; + const struct in_addr inaddr_any = {.s_addr = INADDR_ANY}; + + /* First, search if there is a Vertex that correspond to the Node ID */ + vertex = ls_find_vertex_by_id(ted, edge->attributes->adv); + if (vertex == NULL) { + /* Create a new temporary Node & Vertex if not found */ + node = ls_node_new(edge->attributes->adv, inaddr_any, + in6addr_any); + vertex = ls_vertex_add(ted, node); + } + /* and attach the edge as source to the vertex */ + listnode_add(vertex->outgoing_edges, edge); + edge->source = vertex; + + /* Then search if there is a reverse Edge */ + dst = ls_find_edge_by_destination(ted, edge->attributes); + /* attach the destination edge to the vertex */ + if (dst) { + listnode_add(vertex->incoming_edges, dst); + dst->destination = vertex; + /* and destination vertex to this edge */ + vertex = dst->source; + listnode_add(vertex->incoming_edges, edge); + edge->destination = vertex; + } +} + +struct ls_edge *ls_edge_add(struct ls_ted *ted, + struct ls_attributes *attributes) +{ + struct ls_edge *new; + + if (attributes == NULL) + return NULL; + + new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_edge)); + new->attributes = attributes; + /* Key is the IPv4 local address */ + if (!IPV4_NET0(attributes->standard.local.s_addr)) + new->key = ((uint64_t)attributes->standard.local.s_addr) + & 0xffffffff; + /* or the IPv6 local address if IPv4 is not defined */ + else if (!IN6_IS_ADDR_UNSPECIFIED(&attributes->standard.local6)) + new->key = (uint64_t)(attributes->standard.local6.s6_addr32[0] + & 0xffffffff) + | ((uint64_t)attributes->standard.local6.s6_addr32[1] + << 32); + /* of local identifier if no IP addresses are defined */ + else if (attributes->standard.local_id != 0) + new->key = (uint64_t)( + (attributes->standard.local_id & 0xffffffff) + | ((uint64_t)attributes->standard.remote_id << 32)); + + /* Remove Edge if key is not known */ + if (new->key == 0) { + XFREE(MTYPE_LS_DB, new); + return NULL; + } + + edges_add(&ted->edges, new); + + /* Finally, connect edge to vertices */ + ls_edge_connect_to(ted, new); + + return new; +} + +struct ls_edge *ls_find_edge_by_key(struct ls_ted *ted, const uint64_t key) +{ + struct ls_edge edge = {}; + + if (key == 0) + return NULL; + + edge.key = key; + return edges_find(&ted->edges, &edge); +} + +struct ls_edge *ls_find_edge_by_source(struct ls_ted *ted, + struct ls_attributes *attributes) +{ + struct ls_edge edge = {}; + + if (attributes == NULL) + return NULL; + + /* Key is the IPv4 local address */ + if (!IPV4_NET0(attributes->standard.local.s_addr)) + edge.key = ((uint64_t)attributes->standard.local.s_addr) + & 0xffffffff; + /* or the IPv6 local address if IPv4 is not defined */ + else if (!IN6_IS_ADDR_UNSPECIFIED(&attributes->standard.local6)) + edge.key = (uint64_t)(attributes->standard.local6.s6_addr32[0] + & 0xffffffff) + | ((uint64_t)attributes->standard.local6.s6_addr32[1] + << 32); + /* of local identifier if no IP addresses are defined */ + else if (attributes->standard.local_id != 0) + edge.key = (uint64_t)( + (attributes->standard.local_id & 0xffffffff) + | ((uint64_t)attributes->standard.remote_id << 32)); + + if (edge.key == 0) + return NULL; + + return edges_find(&ted->edges, &edge); +} + +struct ls_edge *ls_find_edge_by_destination(struct ls_ted *ted, + struct ls_attributes *attributes) 
+{ + struct ls_edge edge = {}; + + if (attributes == NULL) + return NULL; + + /* Key is the IPv4 local address */ + if (!IPV4_NET0(attributes->standard.remote.s_addr)) + edge.key = ((uint64_t)attributes->standard.remote.s_addr) + & 0xffffffff; + /* or the IPv6 local address if IPv4 is not defined */ + else if (!IN6_IS_ADDR_UNSPECIFIED(&attributes->standard.remote6)) + edge.key = + (uint64_t)(attributes->standard.remote6.s6_addr32[0] + & 0xffffffff) + | ((uint64_t)attributes->standard.remote6.s6_addr32[1] + << 32); + /* of local identifier if no IP addresses are defined */ + else if (attributes->standard.remote_id != 0) + edge.key = (uint64_t)( + (attributes->standard.remote_id & 0xffffffff) + | ((uint64_t)attributes->standard.local_id << 32)); + + if (edge.key == 0) + return NULL; + + return edges_find(&ted->edges, &edge); +} + +struct ls_edge *ls_edge_update(struct ls_ted *ted, + struct ls_attributes *attributes) +{ + struct ls_edge *old; + + if (attributes == NULL) + return NULL; + + /* First, search for an existing Edge */ + old = ls_find_edge_by_source(ted, attributes); + if (old) { + /* Check if attributes are similar */ + if (!ls_attributes_same(old->attributes, attributes)) { + ls_attributes_del(old->attributes); + old->attributes = attributes; + } + return old; + } + + /* If not found, add new Edge from the attributes */ + return ls_edge_add(ted, attributes); +} + +void ls_edge_del(struct ls_ted *ted, struct ls_edge *edge) +{ + /* Fist disconnect Edge */ + ls_disconnect_edge(edge); + /* Then remove it from the Data Base */ + edges_del(&ted->edges, edge); + XFREE(MTYPE_LS_DB, edge); +} + +/** + * Link State Subnet Management functions. + */ +struct ls_subnet *ls_subnet_add(struct ls_ted *ted, + struct ls_prefix *ls_pref) +{ + struct ls_subnet *new; + struct ls_vertex *vertex; + struct ls_node *node; + const struct in_addr inaddr_any = {.s_addr = INADDR_ANY}; + + if (ls_pref == NULL) + return NULL; + + new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_subnet)); + new->ls_pref = ls_pref; + new->key = ls_pref->pref; + + /* Find Vertex */ + vertex = ls_find_vertex_by_id(ted, ls_pref->adv); + if (vertex == NULL) { + /* Create a new temporary Node & Vertex if not found */ + node = ls_node_new(ls_pref->adv, inaddr_any, in6addr_any); + vertex = ls_vertex_add(ted, node); + } + /* And attach the subnet to the corresponding Vertex */ + new->vertex = vertex; + listnode_add(vertex->prefixes, new); + + subnets_add(&ted->subnets, new); + + return new; +} + +void ls_subnet_del(struct ls_ted *ted, struct ls_subnet *subnet) +{ + subnets_del(&ted->subnets, subnet); + XFREE(MTYPE_LS_DB, subnet); +} + +struct ls_subnet *ls_find_subnet(struct ls_ted *ted, const struct prefix prefix) +{ + struct ls_subnet subnet = {}; + + subnet.key = prefix; + return subnets_find(&ted->subnets, &subnet); +} + +/** + * Link State TED management functions + */ +struct ls_ted *ls_ted_new(const uint32_t key, const char *name, + uint32_t as_number) +{ + struct ls_ted *new; + + new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_ted)); + if (new == NULL) + return new; + + /* Set basic information for this ted */ + new->key = key; + new->as_number = as_number; + strlcpy(new->name, name, MAX_NAME_LENGTH); + + /* Initialize the various RB tree */ + vertices_init(&new->vertices); + edges_init(&new->edges); + subnets_init(&new->subnets); + + return new; +} + +void ls_ted_del(struct ls_ted *ted) +{ + if (ted == NULL) + return; + + /* Release RB Tree */ + vertices_fini(&ted->vertices); + edges_fini(&ted->edges); + subnets_fini(&ted->subnets); + + 
XFREE(MTYPE_LS_DB, ted); + ted = NULL; +} + +void ls_connect(struct ls_vertex *vertex, struct ls_edge *edge, bool source) +{ + if (vertex == NULL || edge == NULL) + return; + + if (source) { + listnode_add(vertex->outgoing_edges, edge); + edge->source = vertex; + } else { + listnode_add(vertex->incoming_edges, edge); + edge->destination = vertex; + } +} + +void ls_disconnect(struct ls_vertex *vertex, struct ls_edge *edge, bool source) +{ + + if (vertex == NULL || edge == NULL) + return; + + if (source) { + listnode_delete(vertex->outgoing_edges, edge); + edge->source = NULL; + } else { + listnode_delete(vertex->incoming_edges, edge); + edge->destination = NULL; + } +} + +void ls_connect_vertices(struct ls_vertex *src, struct ls_vertex *dst, + struct ls_edge *edge) +{ + if (edge == NULL) + return; + + edge->source = src; + edge->destination = dst; + + if (src != NULL) + listnode_add(src->outgoing_edges, edge); + + if (dst != NULL) + listnode_add(dst->incoming_edges, edge); + +} + +void ls_disconnect_edge(struct ls_edge *edge) +{ + if (edge == NULL) + return; + + ls_disconnect(edge->source, edge, true); + ls_disconnect(edge->destination, edge, false); +} + +/** + * Link State Message management functions + */ + +static struct ls_node *ls_parse_node(struct stream *s) +{ + struct ls_node *node; + size_t len; + + node = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_node)); + if (node == NULL) + return NULL; + + STREAM_GET(&node->adv, s, sizeof(struct ls_node_id)); + STREAM_GETW(s, node->flags); + if (CHECK_FLAG(node->flags, LS_NODE_NAME)) { + STREAM_GETC(s, len); + STREAM_GET(node->name, s, len); + } + if (CHECK_FLAG(node->flags, LS_NODE_ROUTER_ID)) + node->router_id.s_addr = stream_get_ipv4(s); + if (CHECK_FLAG(node->flags, LS_NODE_ROUTER_ID6)) + STREAM_GET(&node->router6_id, s, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(node->flags, LS_NODE_FLAG)) + STREAM_GETC(s, node->node_flag); + if (CHECK_FLAG(node->flags, LS_NODE_TYPE)) + STREAM_GETC(s, node->type); + if (CHECK_FLAG(node->flags, LS_NODE_AS_NUMBER)) + STREAM_GETL(s, node->as_number); + if (CHECK_FLAG(node->flags, LS_NODE_SR)) { + STREAM_GETL(s, node->srgb.lower_bound); + STREAM_GETL(s, node->srgb.range_size); + STREAM_GETC(s, node->srgb.flag); + STREAM_GET(node->algo, s, 2); + } + if (CHECK_FLAG(node->flags, LS_NODE_SRLB)) { + STREAM_GETL(s, node->srlb.lower_bound); + STREAM_GETL(s, node->srlb.range_size); + } + if (CHECK_FLAG(node->flags, LS_NODE_MSD)) + STREAM_GETC(s, node->msd); + + return node; + +stream_failure: + zlog_err("LS(%s): Could not parse Link State Node. 
Abort!", __func__); + XFREE(MTYPE_LS_DB, node); + return NULL; +} + +static struct ls_attributes *ls_parse_attributes(struct stream *s) +{ + struct ls_attributes *attr; + size_t len; + + attr = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_attributes)); + if (attr == NULL) + return NULL; + attr->srlgs = NULL; + + STREAM_GET(&attr->adv, s, sizeof(struct ls_node_id)); + STREAM_GETL(s, attr->flags); + if (CHECK_FLAG(attr->flags, LS_ATTR_NAME)) { + STREAM_GETC(s, len); + STREAM_GET(attr->name, s, len); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_METRIC)) + STREAM_GETL(s, attr->standard.metric); + if (CHECK_FLAG(attr->flags, LS_ATTR_TE_METRIC)) + STREAM_GETL(s, attr->standard.te_metric); + if (CHECK_FLAG(attr->flags, LS_ATTR_ADM_GRP)) + STREAM_GETL(s, attr->standard.admin_group); + if (CHECK_FLAG(attr->flags, LS_ATTR_LOCAL_ADDR)) + attr->standard.local.s_addr = stream_get_ipv4(s); + if (CHECK_FLAG(attr->flags, LS_ATTR_NEIGH_ADDR)) + attr->standard.remote.s_addr = stream_get_ipv4(s); + if (CHECK_FLAG(attr->flags, LS_ATTR_LOCAL_ADDR6)) + STREAM_GET(&attr->standard.local6, s, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(attr->flags, LS_ATTR_NEIGH_ADDR6)) + STREAM_GET(&attr->standard.remote6, s, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(attr->flags, LS_ATTR_LOCAL_ID)) + STREAM_GETL(s, attr->standard.local_id); + if (CHECK_FLAG(attr->flags, LS_ATTR_NEIGH_ID)) + STREAM_GETL(s, attr->standard.remote_id); + if (CHECK_FLAG(attr->flags, LS_ATTR_MAX_BW)) + STREAM_GETF(s, attr->standard.max_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_MAX_RSV_BW)) + STREAM_GETF(s, attr->standard.max_rsv_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_UNRSV_BW)) + for (len = 0; len < MAX_CLASS_TYPE; len++) + STREAM_GETF(s, attr->standard.unrsv_bw[len]); + if (CHECK_FLAG(attr->flags, LS_ATTR_REMOTE_AS)) + STREAM_GETL(s, attr->standard.remote_as); + if (CHECK_FLAG(attr->flags, LS_ATTR_REMOTE_ADDR)) + attr->standard.remote_addr.s_addr = stream_get_ipv4(s); + if (CHECK_FLAG(attr->flags, LS_ATTR_REMOTE_ADDR6)) + STREAM_GET(&attr->standard.remote_addr6, s, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(attr->flags, LS_ATTR_DELAY)) + STREAM_GETL(s, attr->extended.delay); + if (CHECK_FLAG(attr->flags, LS_ATTR_MIN_MAX_DELAY)) { + STREAM_GETL(s, attr->extended.min_delay); + STREAM_GETL(s, attr->extended.max_delay); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_JITTER)) + STREAM_GETL(s, attr->extended.jitter); + if (CHECK_FLAG(attr->flags, LS_ATTR_PACKET_LOSS)) + STREAM_GETL(s, attr->extended.pkt_loss); + if (CHECK_FLAG(attr->flags, LS_ATTR_AVA_BW)) + STREAM_GETF(s, attr->extended.ava_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_RSV_BW)) + STREAM_GETF(s, attr->extended.rsv_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_USE_BW)) + STREAM_GETF(s, attr->extended.used_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_ADJ_SID)) { + STREAM_GETL(s, attr->adj_sid[0].sid); + STREAM_GETC(s, attr->adj_sid[0].flags); + STREAM_GETC(s, attr->adj_sid[0].weight); + if (attr->adv.origin == ISIS_L1 || attr->adv.origin == ISIS_L2) + STREAM_GET(attr->adj_sid[0].neighbor.sysid, s, + ISO_SYS_ID_LEN); + else if (attr->adv.origin == OSPFv2) + attr->adj_sid[0].neighbor.addr.s_addr = + stream_get_ipv4(s); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_BCK_ADJ_SID)) { + STREAM_GETL(s, attr->adj_sid[1].sid); + STREAM_GETC(s, attr->adj_sid[1].flags); + STREAM_GETC(s, attr->adj_sid[1].weight); + if (attr->adv.origin == ISIS_L1 || attr->adv.origin == ISIS_L2) + STREAM_GET(attr->adj_sid[1].neighbor.sysid, s, + ISO_SYS_ID_LEN); + else if (attr->adv.origin == OSPFv2) + attr->adj_sid[1].neighbor.addr.s_addr = + stream_get_ipv4(s); + 
} + if (CHECK_FLAG(attr->flags, LS_ATTR_SRLG)) { + STREAM_GETC(s, len); + attr->srlgs = XCALLOC(MTYPE_LS_DB, len*sizeof(uint32_t)); + attr->srlg_len = len; + for (len = 0; len < attr->srlg_len; len++) + STREAM_GETL(s, attr->srlgs[len]); + } + + return attr; + +stream_failure: + zlog_err("LS(%s): Could not parse Link State Attributes. Abort!", + __func__); + /* Clean memeory allocation */ + if (attr->srlgs != NULL) + XFREE(MTYPE_LS_DB, attr->srlgs); + XFREE(MTYPE_LS_DB, attr); + return NULL; + +} + +static struct ls_prefix *ls_parse_prefix(struct stream *s) +{ + struct ls_prefix *ls_pref; + size_t len; + + ls_pref = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_prefix)); + if (ls_pref == NULL) + return NULL; + + STREAM_GET(&ls_pref->adv, s, sizeof(struct ls_node_id)); + STREAM_GETW(s, ls_pref->flags); + STREAM_GETC(s, ls_pref->pref.family); + STREAM_GETW(s, ls_pref->pref.prefixlen); + len = prefix_blen(&ls_pref->pref); + STREAM_GET(&ls_pref->pref.u.prefix, s, len); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_IGP_FLAG)) + STREAM_GETC(s, ls_pref->igp_flag); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_ROUTE_TAG)) + STREAM_GETL(s, ls_pref->route_tag); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_EXTENDED_TAG)) + STREAM_GETQ(s, ls_pref->extended_tag); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_METRIC)) + STREAM_GETL(s, ls_pref->metric); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_SR)) { + STREAM_GETL(s, ls_pref->sr.sid); + STREAM_GETC(s, ls_pref->sr.sid_flag); + STREAM_GETC(s, ls_pref->sr.algo); + } + + return ls_pref; + +stream_failure: + zlog_err("LS(%s): Could not parse Link State Prefix. Abort!", __func__); + XFREE(MTYPE_LS_DB, ls_pref); + return NULL; +} + +struct ls_message *ls_parse_msg(struct stream *s) +{ + struct ls_message *msg; + + msg = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_message)); + if (msg == NULL) + return NULL; + + /* Read LS Message header */ + STREAM_GETC(s, msg->event); + STREAM_GETC(s, msg->type); + STREAM_GET(&msg->remote_id, s, sizeof(struct ls_node_id)); + + /* Read Message Payload */ + switch (msg->type) { + case LS_MSG_TYPE_NODE: + msg->data.node = ls_parse_node(s); + break; + case LS_MSG_TYPE_ATTRIBUTES: + msg->data.attr = ls_parse_attributes(s); + break; + case LS_MSG_TYPE_PREFIX: + msg->data.prefix = ls_parse_prefix(s); + break; + default: + zlog_err("Unsupported Payload"); + goto stream_failure; + } + + if (msg->data.node == NULL || msg->data.attr == NULL + || msg->data.prefix == NULL) + goto stream_failure; + + return msg; + +stream_failure: + zlog_err("LS(%s): Could not parse LS message. 
Abort!", __func__); + XFREE(MTYPE_LS_DB, msg); + return NULL; +} + +static int ls_format_node(struct stream *s, struct ls_node *node) +{ + size_t len; + + /* Push Advertise node information first */ + stream_put(s, &node->adv, sizeof(struct ls_node_id)); + + /* Push Flags & Origin then Node information if there are present */ + stream_putw(s, node->flags); + if (CHECK_FLAG(node->flags, LS_NODE_NAME)) { + len = strlen(node->name); + stream_putc(s, len + 1); + stream_put(s, node->name, len); + stream_putc(s, '\0'); + } + if (CHECK_FLAG(node->flags, LS_NODE_ROUTER_ID)) + stream_put_ipv4(s, node->router_id.s_addr); + if (CHECK_FLAG(node->flags, LS_NODE_ROUTER_ID6)) + stream_put(s, &node->router6_id, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(node->flags, LS_NODE_FLAG)) + stream_putc(s, node->node_flag); + if (CHECK_FLAG(node->flags, LS_NODE_TYPE)) + stream_putc(s, node->type); + if (CHECK_FLAG(node->flags, LS_NODE_AS_NUMBER)) + stream_putl(s, node->as_number); + if (CHECK_FLAG(node->flags, LS_NODE_SR)) { + stream_putl(s, node->srgb.lower_bound); + stream_putl(s, node->srgb.range_size); + stream_putc(s, node->srgb.flag); + stream_put(s, node->algo, 2); + } + if (CHECK_FLAG(node->flags, LS_NODE_SRLB)) { + stream_putl(s, node->srlb.lower_bound); + stream_putl(s, node->srlb.range_size); + } + if (CHECK_FLAG(node->flags, LS_NODE_MSD)) + stream_putc(s, node->msd); + + return 0; +} + +static int ls_format_attributes(struct stream *s, struct ls_attributes *attr) +{ + size_t len; + + /* Push Advertise node information first */ + stream_put(s, &attr->adv, sizeof(struct ls_node_id)); + + /* Push Flags & Origin then LS attributes if there are present */ + stream_putl(s, attr->flags); + if (CHECK_FLAG(attr->flags, LS_ATTR_NAME)) { + len = strlen(attr->name); + stream_putc(s, len + 1); + stream_put(s, attr->name, len); + stream_putc(s, '\0'); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_METRIC)) + stream_putl(s, attr->standard.metric); + if (CHECK_FLAG(attr->flags, LS_ATTR_TE_METRIC)) + stream_putl(s, attr->standard.te_metric); + if (CHECK_FLAG(attr->flags, LS_ATTR_ADM_GRP)) + stream_putl(s, attr->standard.admin_group); + if (CHECK_FLAG(attr->flags, LS_ATTR_LOCAL_ADDR)) + stream_put_ipv4(s, attr->standard.local.s_addr); + if (CHECK_FLAG(attr->flags, LS_ATTR_NEIGH_ADDR)) + stream_put_ipv4(s, attr->standard.remote.s_addr); + if (CHECK_FLAG(attr->flags, LS_ATTR_LOCAL_ADDR6)) + stream_put(s, &attr->standard.local6, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(attr->flags, LS_ATTR_NEIGH_ADDR6)) + stream_put(s, &attr->standard.remote6, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(attr->flags, LS_ATTR_LOCAL_ID)) + stream_putl(s, attr->standard.local_id); + if (CHECK_FLAG(attr->flags, LS_ATTR_NEIGH_ID)) + stream_putl(s, attr->standard.remote_id); + if (CHECK_FLAG(attr->flags, LS_ATTR_MAX_BW)) + stream_putf(s, attr->standard.max_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_MAX_RSV_BW)) + stream_putf(s, attr->standard.max_rsv_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_UNRSV_BW)) + for (len = 0; len < MAX_CLASS_TYPE; len++) + stream_putf(s, attr->standard.unrsv_bw[len]); + if (CHECK_FLAG(attr->flags, LS_ATTR_REMOTE_AS)) + stream_putl(s, attr->standard.remote_as); + if (CHECK_FLAG(attr->flags, LS_ATTR_REMOTE_ADDR)) + stream_put_ipv4(s, attr->standard.remote_addr.s_addr); + if (CHECK_FLAG(attr->flags, LS_ATTR_REMOTE_ADDR6)) + stream_put(s, &attr->standard.remote_addr6, IPV6_MAX_BYTELEN); + if (CHECK_FLAG(attr->flags, LS_ATTR_DELAY)) + stream_putl(s, attr->extended.delay); + if (CHECK_FLAG(attr->flags, LS_ATTR_MIN_MAX_DELAY)) { + stream_putl(s, 
attr->extended.min_delay); + stream_putl(s, attr->extended.max_delay); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_JITTER)) + stream_putl(s, attr->extended.jitter); + if (CHECK_FLAG(attr->flags, LS_ATTR_PACKET_LOSS)) + stream_putl(s, attr->extended.pkt_loss); + if (CHECK_FLAG(attr->flags, LS_ATTR_AVA_BW)) + stream_putf(s, attr->extended.ava_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_RSV_BW)) + stream_putf(s, attr->extended.rsv_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_USE_BW)) + stream_putf(s, attr->extended.used_bw); + if (CHECK_FLAG(attr->flags, LS_ATTR_ADJ_SID)) { + stream_putl(s, attr->adj_sid[0].sid); + stream_putc(s, attr->adj_sid[0].flags); + stream_putc(s, attr->adj_sid[0].weight); + if (attr->adv.origin == ISIS_L1 || attr->adv.origin == ISIS_L2) + stream_put(s, attr->adj_sid[0].neighbor.sysid, + ISO_SYS_ID_LEN); + else if (attr->adv.origin == OSPFv2) + stream_put_ipv4(s, + attr->adj_sid[0].neighbor.addr.s_addr); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_BCK_ADJ_SID)) { + stream_putl(s, attr->adj_sid[1].sid); + stream_putc(s, attr->adj_sid[1].flags); + stream_putc(s, attr->adj_sid[1].weight); + if (attr->adv.origin == ISIS_L1 || attr->adv.origin == ISIS_L2) + stream_put(s, attr->adj_sid[1].neighbor.sysid, + ISO_SYS_ID_LEN); + else if (attr->adv.origin == OSPFv2) + stream_put_ipv4(s, + attr->adj_sid[1].neighbor.addr.s_addr); + } + if (CHECK_FLAG(attr->flags, LS_ATTR_SRLG)) { + stream_putc(s, attr->srlg_len); + for (len = 0; len < attr->srlg_len; len++) + stream_putl(s, attr->srlgs[len]); + } + + return 0; +} + +static int ls_format_prefix(struct stream *s, struct ls_prefix *ls_pref) +{ + size_t len; + + /* Push Advertise node information first */ + stream_put(s, &ls_pref->adv, sizeof(struct ls_node_id)); + + /* Push Flags, Origin & Prefix then information if there are present */ + stream_putw(s, ls_pref->flags); + stream_putc(s, ls_pref->pref.family); + stream_putw(s, ls_pref->pref.prefixlen); + len = prefix_blen(&ls_pref->pref); + stream_put(s, &ls_pref->pref.u.prefix, len); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_IGP_FLAG)) + stream_putc(s, ls_pref->igp_flag); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_ROUTE_TAG)) + stream_putl(s, ls_pref->route_tag); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_EXTENDED_TAG)) + stream_putq(s, ls_pref->extended_tag); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_METRIC)) + stream_putl(s, ls_pref->metric); + if (CHECK_FLAG(ls_pref->flags, LS_PREF_SR)) { + stream_putl(s, ls_pref->sr.sid); + stream_putc(s, ls_pref->sr.sid_flag); + stream_putc(s, ls_pref->sr.algo); + } + + return 0; +} + +static int ls_format_msg(struct stream *s, struct ls_message *msg) +{ + + /* Prepare Link State header */ + stream_putc(s, msg->event); + stream_putc(s, msg->type); + stream_put(s, &msg->remote_id, sizeof(struct ls_node_id)); + + /* Add Message Payload */ + switch (msg->type) { + case LS_MSG_TYPE_NODE: + return ls_format_node(s, msg->data.node); + case LS_MSG_TYPE_ATTRIBUTES: + return ls_format_attributes(s, msg->data.attr); + case LS_MSG_TYPE_PREFIX: + return ls_format_prefix(s, msg->data.prefix); + default: + zlog_warn("Unsupported Payload"); + break; + } + + return -1; +} + +int ls_send_msg(struct zclient *zclient, struct ls_message *msg, + struct zapi_opaque_reg_info *dst) +{ + struct stream *s; + uint16_t flags = 0; + + /* Check buffer size */ + if (STREAM_SIZE(zclient->obuf) < + (ZEBRA_HEADER_SIZE + sizeof(uint32_t) + sizeof(msg))) + return -1; + + s = zclient->obuf; + stream_reset(s); + + zclient_create_header(s, ZEBRA_OPAQUE_MESSAGE, VRF_DEFAULT); + + /* Send sub-type, 
flags and destination for unicast message */ + stream_putl(s, LINK_STATE_UPDATE); + if (dst != NULL) { + SET_FLAG(flags, ZAPI_OPAQUE_FLAG_UNICAST); + stream_putw(s, flags); + /* Send destination client info */ + stream_putc(s, dst->proto); + stream_putw(s, dst->instance); + stream_putl(s, dst->session_id); + } else + stream_putw(s, flags); + + /* Format Link State message */ + if (ls_format_msg(s, msg) < 0) { + stream_reset(s); + return -1; + } + + /* Put length into the header at the start of the stream. */ + stream_putw_at(s, 0, stream_get_endp(s)); + + return zclient_send_message(zclient); +} + +struct ls_message *ls_vertex2msg(struct ls_message *msg, + struct ls_vertex *vertex) +{ + /* Allocate space if needed */ + if (msg == NULL) + msg = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_message)); + else + memset(msg, 0, sizeof(*msg)); + + msg->type = LS_MSG_TYPE_NODE; + msg->data.node = vertex->node; + msg->remote_id.origin = NONE; + + return msg; +} + +struct ls_message *ls_edge2msg(struct ls_message *msg, struct ls_edge *edge) +{ + /* Allocate space if needed */ + if (msg == NULL) + msg = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_message)); + else + memset(msg, 0, sizeof(*msg)); + + msg->type = LS_MSG_TYPE_ATTRIBUTES; + msg->data.attr = edge->attributes; + if (edge->destination != NULL) + msg->remote_id = edge->destination->node->adv; + else + msg->remote_id.origin = NONE; + + return msg; +} + +struct ls_message *ls_subnet2msg(struct ls_message *msg, + struct ls_subnet *subnet) +{ + /* Allocate space if needed */ + if (msg == NULL) + msg = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_message)); + else + memset(msg, 0, sizeof(*msg)); + + msg->type = LS_MSG_TYPE_PREFIX; + msg->data.prefix = subnet->ls_pref; + msg->remote_id.origin = NONE; + + return msg; +} + +void ls_delete_msg(struct ls_message *msg) +{ + if (msg == NULL) + return; + + switch (msg->type) { + case LS_MSG_TYPE_NODE: + if (msg->data.node) + XFREE(MTYPE_LS_DB, msg->data.node); + break; + case LS_MSG_TYPE_ATTRIBUTES: + if (msg->data.attr) + XFREE(MTYPE_LS_DB, msg->data.attr); + break; + case LS_MSG_TYPE_PREFIX: + if (msg->data.prefix) + XFREE(MTYPE_LS_DB, msg->data.prefix); + break; + default: + break; + } + + XFREE(MTYPE_LS_DB, msg); +} + +int ls_sync_ted(struct ls_ted *ted, struct zclient *zclient, + struct zapi_opaque_reg_info *dst) +{ + struct ls_vertex *vertex; + struct ls_edge *edge; + struct ls_subnet *subnet; + struct ls_message msg; + + /* Prepare message */ + msg.event = LS_MSG_EVENT_SYNC; + + /* Loop TED, start sending Node, then Attributes and finally Prefix */ + frr_each(vertices, &ted->vertices, vertex) { + ls_vertex2msg(&msg, vertex); + ls_send_msg(zclient, &msg, dst); + } + frr_each(edges, &ted->edges, edge) { + ls_edge2msg(&msg, edge); + ls_send_msg(zclient, &msg, dst); + } + frr_each(subnets, &ted->subnets, subnet) { + ls_subnet2msg(&msg, subnet); + ls_send_msg(zclient, &msg, dst); + } + return 0; +} + +void ls_dump_ted(struct ls_ted *ted) +{ + struct ls_vertex *vertex; + struct ls_edge *edge; + struct ls_subnet *subnet; + struct ls_message msg; + + zlog_debug("(%s) Ted init", __func__); + /* Prepare message */ + msg.event = LS_MSG_EVENT_SYNC; + + /* Loop TED, start printing Node, then Attributes and finally Prefix */ + frr_each(vertices, &ted->vertices, vertex) { + ls_vertex2msg(&msg, vertex); + zlog_debug(" Ted node (%s %pI4 %s)", + vertex->node->name[0] ? vertex->node->name + : "no name node", + &vertex->node->router_id, + vertex->node->adv.origin == DIRECT ? 
"DIRECT" + : "NO DIRECT"); + struct listnode *lst_node; + struct ls_edge *vertex_edge; + + for (ALL_LIST_ELEMENTS_RO(vertex->incoming_edges, lst_node, + vertex_edge)) { + zlog_debug( + " inc edge key:%lldn attr key:%pI4 loc:(%pI4) rmt:(%pI4)", + vertex_edge->key, + &vertex_edge->attributes->adv.id.ip.addr, + &vertex_edge->attributes->standard.local, + &vertex_edge->attributes->standard.remote); + } + for (ALL_LIST_ELEMENTS_RO(vertex->outgoing_edges, lst_node, + vertex_edge)) { + zlog_debug( + " out edge key:%lld attr key:%pI4 loc:(%pI4) rmt:(%pI4)", + vertex_edge->key, + &vertex_edge->attributes->adv.id.ip.addr, + &vertex_edge->attributes->standard.local, + &vertex_edge->attributes->standard.remote); + } + } + frr_each(edges, &ted->edges, edge) { + ls_edge2msg(&msg, edge); + zlog_debug(" Ted edge key:%lld src:%s dst:%s", edge->key, + edge->source ? edge->source->node->name + : "no_source", + edge->destination ? edge->destination->node->name + : "no_dest"); + } + frr_each(subnets, &ted->subnets, subnet) { + ls_subnet2msg(&msg, subnet); + zlog_debug( + " Ted subnet key:%pFX vertex:%pI4 pfx:%pFX", + &subnet->key, + &subnet->vertex->node->adv.id.ip.addr, + &subnet->ls_pref->pref); + } + zlog_debug("(%s) Ted end", __func__); +} diff --git a/lib/link_state.h b/lib/link_state.h new file mode 100644 index 0000000000..93669f5b23 --- /dev/null +++ b/lib/link_state.h @@ -0,0 +1,780 @@ +/* + * Link State Database definition - ted.h + * + * Author: Olivier Dugeon <olivier.dugeon@orange.com> + * + * Copyright (C) 2020 Orange http://www.orange.com + * + * This file is part of Free Range Routing (FRR). + * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _FRR_LINK_STATE_H_ +#define _FRR_LINK_STATE_H_ + +#include "typesafe.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * This file defines the model used to implement a Link State Database + * suitable to be used by various protocol like RSVP-TE, BGP-LS, PCEP ... + * This database is normally fulfill by the link state routing protocol, + * commonly OSPF or ISIS, carrying Traffic Engineering information within + * Link State Attributes. See, RFC3630.(OSPF-TE) and RFC5305 (ISIS-TE). + * + * At least, 3 types of Link State structure are defined: + * - Link State Node that groups all information related to a node + * - Link State Attributes that groups all information related to a link + * - Link State Prefix that groups all information related to a prefix + * + * These 3 types of structures are those handled by BGP-LS (see RFC7752). + * + * Each structure, in addition to the specific parameters, embed the node + * identifier which advertises the Link State and a bit mask as flags to + * indicates which parameters are valid i.e. for which the value corresponds + * to a Link State information convey by the routing protocol. 
+ * Node identifier is composed of the route id as IPv4 address plus the area + * id for OSPF and the ISO System id plus the IS-IS level for IS-IS. + */ + +/* Link State Common definitions */ +#define MAX_NAME_LENGTH 256 +#define ISO_SYS_ID_LEN 6 + +/* Type of Node */ +enum ls_node_type { + STANDARD, /* a P or PE node */ + ABR, /* an Array Border Node */ + ASBR, /* an Autonomous System Border Node */ + PSEUDO, /* a Pseudo Node */ +}; + +/* Origin of the Link State information */ +enum ls_origin {NONE = 0, ISIS_L1, ISIS_L2, OSPFv2, DIRECT, STATIC}; + +/** + * Link State Node Identifier as: + * - IPv4 address + Area ID for OSPF + * - ISO System ID + ISIS Level for ISIS + */ +struct ls_node_id { + enum ls_origin origin; /* Origin of the LS information */ + union { + struct { + struct in_addr addr; /* OSPF Router IS */ + struct in_addr area_id; /* OSPF Area ID */ + } ip; + struct { + uint8_t sys_id[ISO_SYS_ID_LEN]; /* ISIS System ID */ + uint8_t level; /* ISIS Level */ + uint8_t padding; + } iso; + } id __attribute__((aligned(8))); +}; + +/* Link State flags to indicate which Node parameters are valid */ +#define LS_NODE_UNSET 0x0000 +#define LS_NODE_NAME 0x0001 +#define LS_NODE_ROUTER_ID 0x0002 +#define LS_NODE_ROUTER_ID6 0x0004 +#define LS_NODE_FLAG 0x0008 +#define LS_NODE_TYPE 0x0010 +#define LS_NODE_AS_NUMBER 0x0020 +#define LS_NODE_SR 0x0040 +#define LS_NODE_SRLB 0x0080 +#define LS_NODE_MSD 0x0100 + +/* Link State Node structure */ +struct ls_node { + uint16_t flags; /* Flag for parameters validity */ + struct ls_node_id adv; /* Adv. Router of this Link State */ + char name[MAX_NAME_LENGTH]; /* Name of the Node (IS-IS only) */ + struct in_addr router_id; /* IPv4 Router ID */ + struct in6_addr router6_id; /* IPv6 Router ID */ + uint8_t node_flag; /* IS-IS or OSPF Node flag */ + enum node_type type; /* Type of Node */ + uint32_t as_number; /* Local or neighbor AS number */ + struct { /* Segment Routing Global Block */ + uint32_t lower_bound; /* MPLS label lower bound */ + uint32_t range_size; /* MPLS label range size */ + uint8_t flag; /* IS-IS SRGB flags */ + } srgb; +#define LS_NODE_SRGB_SIZE 9 + struct { /* Segment Routing Local Block */ + uint32_t lower_bound; /* MPLS label lower bound */ + uint32_t range_size; /* MPLS label range size */ + } srlb; +#define LS_NODE_SRLB_SIZE 8 + uint8_t algo[2]; /* Segment Routing Algorithms */ + uint8_t msd; /* Maximum Stack Depth */ +}; + +/* Link State flags to indicate which Attribute parameters are valid */ +#define LS_ATTR_UNSET 0x00000000 +#define LS_ATTR_NAME 0x00000001 +#define LS_ATTR_METRIC 0x00000002 +#define LS_ATTR_TE_METRIC 0x00000004 +#define LS_ATTR_ADM_GRP 0x00000008 +#define LS_ATTR_LOCAL_ADDR 0x00000010 +#define LS_ATTR_NEIGH_ADDR 0x00000020 +#define LS_ATTR_LOCAL_ADDR6 0x00000040 +#define LS_ATTR_NEIGH_ADDR6 0x00000080 +#define LS_ATTR_LOCAL_ID 0x00000100 +#define LS_ATTR_NEIGH_ID 0x00000200 +#define LS_ATTR_MAX_BW 0x00000400 +#define LS_ATTR_MAX_RSV_BW 0x00000800 +#define LS_ATTR_UNRSV_BW 0x00001000 +#define LS_ATTR_REMOTE_AS 0x00002000 +#define LS_ATTR_REMOTE_ADDR 0x00004000 +#define LS_ATTR_REMOTE_ADDR6 0x00008000 +#define LS_ATTR_DELAY 0x00010000 +#define LS_ATTR_MIN_MAX_DELAY 0x00020000 +#define LS_ATTR_JITTER 0x00040000 +#define LS_ATTR_PACKET_LOSS 0x00080000 +#define LS_ATTR_AVA_BW 0x00100000 +#define LS_ATTR_RSV_BW 0x00200000 +#define LS_ATTR_USE_BW 0x00400000 +#define LS_ATTR_ADJ_SID 0x00800000 +#define LS_ATTR_BCK_ADJ_SID 0x01000000 +#define LS_ATTR_SRLG 0x02000000 + +/* Link State Attributes */ +struct ls_attributes { + 
uint32_t flags; /* Flag for parameters validity */ + struct ls_node_id adv; /* Adv. Router of this Link State */ + char name[MAX_NAME_LENGTH]; /* Name of the Edge. Could be null */ + struct { /* Standard TE metrics */ + uint32_t metric; /* IGP standard metric */ + uint32_t te_metric; /* Traffic Engineering metric */ + uint32_t admin_group; /* Administrative Group */ + struct in_addr local; /* Local IPv4 address */ + struct in_addr remote; /* Remote IPv4 address */ + struct in6_addr local6; /* Local IPv6 address */ + struct in6_addr remote6; /* Remote IPv6 address */ + uint32_t local_id; /* Local Identifier */ + uint32_t remote_id; /* Remote Identifier */ + float max_bw; /* Maximum Link Bandwidth */ + float max_rsv_bw; /* Maximum Reservable BW */ + float unrsv_bw[8]; /* Unreserved BW per CT (8) */ + uint32_t remote_as; /* Remote AS number */ + struct in_addr remote_addr; /* Remote IPv4 address */ + struct in6_addr remote_addr6; /* Remote IPv6 address */ + } standard; +#define LS_ATTR_STANDARD_SIZE 124 + struct { /* Extended TE Metrics */ + uint32_t delay; /* Unidirectional average delay */ + uint32_t min_delay; /* Unidirectional minimum delay */ + uint32_t max_delay; /* Unidirectional maximum delay */ + uint32_t jitter; /* Unidirectional delay variation */ + uint32_t pkt_loss; /* Unidirectional packet loss */ + float ava_bw; /* Available Bandwidth */ + float rsv_bw; /* Reserved Bandwidth */ + float used_bw; /* Utilized Bandwidth */ + } extended; +#define LS_ATTR_EXTENDED_SIZE 32 + struct { /* (LAN)-Adjacency SID for OSPF */ + uint32_t sid; /* SID as MPLS label or index */ + uint8_t flags; /* Flags */ + uint8_t weight; /* Administrative weight */ + union { + struct in_addr addr; /* Neighbor @IP for OSPF */ + uint8_t sysid[ISO_SYS_ID_LEN]; /* or Sys-ID for ISIS */ + } neighbor; + } adj_sid[2]; /* Primary & Backup (LAN)-Adj. SID */ +#define LS_ATTR_ADJ_SID_SIZE 120 + uint32_t *srlgs; /* List of Shared Risk Link Group */ + uint8_t srlg_len; /* number of SRLG in the list */ +}; + +/* Link State flags to indicate which Prefix parameters are valid */ +#define LS_PREF_UNSET 0x00 +#define LS_PREF_IGP_FLAG 0x01 +#define LS_PREF_ROUTE_TAG 0x02 +#define LS_PREF_EXTENDED_TAG 0x04 +#define LS_PREF_METRIC 0x08 +#define LS_PREF_SR 0x10 + +/* Link State Prefix */ +struct ls_prefix { + uint8_t flags; /* Flag for parameters validity */ + struct ls_node_id adv; /* Adv. Router of this Link State */ + struct prefix pref; /* IPv4 or IPv6 prefix */ + uint8_t igp_flag; /* IGP Flags associated to the prefix */ + uint32_t route_tag; /* IGP Route Tag */ + uint64_t extended_tag; /* IGP Extended Route Tag */ + uint32_t metric; /* Route metric for this prefix */ + struct { + uint32_t sid; /* Segment Routing ID */ + uint8_t sid_flag; /* Segment Routing Flags */ + uint8_t algo; /* Algorithm for Segment Routing */ + } sr; +}; + +/** + * Create a new Link State Node. Structure is dynamically allocated. + * + * @param adv Mandatory Link State Node ID i.e. advertise router information + * @param rid Router ID as IPv4 address + * @param rid6 Router ID as IPv6 address + * + * @return New Link State Node + */ +extern struct ls_node *ls_node_new(struct ls_node_id adv, struct in_addr rid, + struct in6_addr rid6); + +/** + * Remove Link State Node. Data structure is freed. + * + * @param node Pointer to a valid Link State Node structure + */ +extern void ls_node_del(struct ls_node *node); + +/** + * Check if two Link State Nodes are equal. 
Note that this routine has the same + * return value sense as '==' (which is different from a comparison). + * + * @param n1 First Link State Node to be compare + * @param n2 Second Link State Node to be compare + * + * @return 1 if equal, 0 otherwise + */ +extern int ls_node_same(struct ls_node *n1, struct ls_node *n2); + +/** + * Create a new Link State Attributes. Structure is dynamically allocated. + * At least one of parameters MUST be valid and not equal to 0. + * + * @param adv Mandatory Link State Node ID i.e. advertise router ID + * @param local Local IPv4 address + * @param local6 Local Ipv6 address + * @param local_id Local Identifier + * + * @return New Link State Attributes + */ +extern struct ls_attributes *ls_attributes_new(struct ls_node_id adv, + struct in_addr local, + struct in6_addr local6, + uint32_t local_id); + +/** + * Remove Link State Attributes. Data structure is freed. + * + * @param attr Pointer to a valid Link State Attribute structure + */ +extern void ls_attributes_del(struct ls_attributes *attr); + +/** + * Check if two Link State Attributes are equal. Note that this routine has the + * same return value sense as '==' (which is different from a comparison). + * + * @param a1 First Link State Attributes to be compare + * @param a2 Second Link State Attributes to be compare + * + * @return 1 if equal, 0 otherwise + */ +extern int ls_attributes_same(struct ls_attributes *a1, + struct ls_attributes *a2); + +/** + * In addition a Graph model is defined as an overlay on top of link state + * database in order to ease Path Computation algorithm implementation. + * Denoted G(V, E), a graph is composed by a list of Vertices (V) which + * represents the network Node and a list of Edges (E) which represents node + * Link. An additional list of prefixes (P) is also added. + * A prefix (P) is also attached to the Vertex (V) which advertise it. + * + * Vertex (V) contains the list of outgoing Edges (E) that connect this Vertex + * with its direct neighbors and the list of incoming Edges (E) that connect + * the direct neighbors to this Vertex. Indeed, the Edge (E) is unidirectional, + * thus, it is necessary to add 2 Edges to model a bidirectional relation + * between 2 Vertices. + * + * Edge (E) contains the source and destination Vertex that this Edge + * is connecting. + * + * A unique Key is used to identify both Vertices and Edges within the Graph. + * An easy way to build this key is to used the IP address: i.e. loopback + * address for Vertices and link IP address for Edges. 
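Before the connection diagram that follows, a hedged sketch of how a daemon might populate such a graph. The two nodes and the per-direction attribute sets are assumed to have been built already with ls_node_new() and ls_attributes_new(); the TED key, name and AS number are arbitrary example values:

    #include "link_state.h"

    /* Sketch: populate a tiny TED.  A bidirectional adjacency between two
     * routers is stored as two unidirectional edges, each keyed on its own
     * local link address; ls_edge_add() attaches every edge to its source
     * vertex and cross-links it with the reverse edge once both are known. */
    static void build_small_ted(struct ls_node *node_a, struct ls_node *node_b,
                                struct ls_attributes *attr_a_to_b,
                                struct ls_attributes *attr_b_to_a)
    {
            struct ls_ted *ted;

            ted = ls_ted_new(1, "example-ted", 65000);    /* key, name, AS */
            if (!ted)
                    return;

            ls_vertex_add(ted, node_a);
            ls_vertex_add(ted, node_b);

            ls_edge_add(ted, attr_a_to_b);    /* A -> B direction */
            ls_edge_add(ted, attr_b_to_a);    /* B -> A direction */

            ls_dump_ted(ted);    /* debug dump of the resulting graph */
    }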
+ * + * -------------- --------------------------- -------------- + * | Connected |---->| Connected Edge Va to Vb |--->| Connected | + * --->| Vertex | --------------------------- | Vertex |----> + * | | | | + * | - Key (Va) | | - Key (Vb) | + * <---| - Vertex | --------------------------- | - Vertex |<---- + * | |<----| Connected Edge Vb to Va |<---| | + * -------------- --------------------------- -------------- + * + */ + +/* Link State Vertex structure */ +PREDECL_RBTREE_UNIQ(vertices) +struct ls_vertex { + struct vertices_item entry; /* Entry in RB Tree */ + uint64_t key; /* Unique Key identifier */ + struct ls_node *node; /* Link State Node */ + struct list *incoming_edges; /* List of incoming Link State links */ + struct list *outgoing_edges; /* List of outgoing Link State links */ + struct list *prefixes; /* List of advertised prefix */ +}; + +/* Link State Edge structure */ +PREDECL_RBTREE_UNIQ(edges) +struct ls_edge { + struct edges_item entry; /* Entry in RB tree */ + uint64_t key; /* Unique Key identifier */ + struct ls_attributes *attributes; /* Link State attributes */ + struct ls_vertex *source; /* Pointer to the source Vertex */ + struct ls_vertex *destination; /* Pointer to the destination Vertex */ +}; + +/* Link State Subnet structure */ +PREDECL_RBTREE_UNIQ(subnets) +struct ls_subnet { + struct subnets_item entry; /* Entry in RB tree */ + struct prefix key; /* Unique Key identifier */ + struct ls_vertex *vertex; /* Back pointer to the Vertex owner */ + struct ls_prefix *ls_pref; /* Link State Prefix */ +}; + +/* Declaration of Vertices, Edges and Prefixes RB Trees */ +macro_inline int vertex_cmp(const struct ls_vertex *node1, + const struct ls_vertex *node2) +{ + return (node1->key - node2->key); +} +DECLARE_RBTREE_UNIQ(vertices, struct ls_vertex, entry, vertex_cmp) + +macro_inline int edge_cmp(const struct ls_edge *edge1, + const struct ls_edge *edge2) +{ + return (edge1->key - edge2->key); +} +DECLARE_RBTREE_UNIQ(edges, struct ls_edge, entry, edge_cmp) + +macro_inline int subnet_cmp(const struct ls_subnet *a, + const struct ls_subnet *b) +{ + return prefix_cmp(&a->key, &b->key); +} +DECLARE_RBTREE_UNIQ(subnets, struct ls_subnet, entry, subnet_cmp) + +/* Link State TED Structure */ +struct ls_ted { + uint32_t key; /* Unique identifier */ + char name[MAX_NAME_LENGTH]; /* Name of this graph. Could be null */ + uint32_t as_number; /* AS number of the modeled network */ + struct ls_vertex *self; /* Vertex of the FRR instance */ + struct vertices_head vertices; /* List of Vertices */ + struct edges_head edges; /* List of Edges */ + struct subnets_head subnets; /* List of Subnets */ +}; + +/** + * Create a new Link State Vertex structure and initialize is with the Link + * State Node parameter. + * + * @param node Link State Node + * + * @return New Vertex + */ +extern struct ls_vertex *ls_vertex_new(struct ls_node *node); + +/** + * Delete Link State Vertex. This function clean internal Vertex lists (incoming + * and outgoing Link State Edge and Link State Subnet). Note that referenced + * objects of the different lists (Edges & SubNet) are not removed as they could + * be connected to other Vertices. + * + * @param vertex Link State Vertex to be removed + */ +extern void ls_vertex_del(struct ls_vertex *vertex); + +/** + * Add new vertex to the Link State DB. Vertex is created from the Link State + * Node. Vertex data structure is dynamically allocated. 
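Because vertices, edges and subnets are kept in typesafe RB-trees, walking a TED uses the frr_each iterator, the same pattern ls_sync_ted() and ls_dump_ted() use earlier in this patch. A short sketch, assuming lib/link_state.h:

    #include "link_state.h"

    /* Sketch: count the IS-IS vertices of a TED by walking the vertices
     * RB-tree with the typesafe iterator. */
    static unsigned int count_isis_vertices(struct ls_ted *ted)
    {
            struct ls_vertex *vertex;
            unsigned int count = 0;

            frr_each (vertices, &ted->vertices, vertex) {
                    if (vertex->node->adv.origin == ISIS_L1
                        || vertex->node->adv.origin == ISIS_L2)
                            count++;
            }

            return count;
    }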
+ * + * @param ted Traffic Engineering Database structure + * @param node Link State Node + * + * @return New Vertex or NULL in case of error + */ +extern struct ls_vertex *ls_vertex_add(struct ls_ted *ted, + struct ls_node *node); + +/** + * Update Vertex with the Link State Node. A new vertex is created if no one + * corresponds to the Link State Node. + * + * @param ted Link State Data Base + * @param node Link State Node to be updated + * + * @return Updated Link State Vertex or Null in case of error + */ +extern struct ls_vertex *ls_vertex_update(struct ls_ted *ted, + struct ls_node *node); + +/** + * Remove Vertex from the Link State DB. Vertex Data structure is freed but + * not the Link State Node. Link State DB is not modified if Vertex is NULL or + * not found in the Data Base. + * + * @param ted Link State Data Base + * @param vertex Vertex to be removed + */ +extern void ls_vertex_remove(struct ls_ted *ted, struct ls_vertex *vertex); + +/** + * Find Vertex in the Link State DB by its unique key. + * + * @param ted Link State Data Base + * @param key Vertex Key different from 0 + * + * @return Vertex if found, NULL otherwise + */ +extern struct ls_vertex *ls_find_vertex_by_key(struct ls_ted *ted, + const uint64_t key); + +/** + * Find Vertex in the Link State DB by its Link State Node. + * + * @param ted Link State Data Base + * @param nid Link State Node ID + * + * @return Vertex if found, NULL otherwise + */ +extern struct ls_vertex *ls_find_vertex_by_id(struct ls_ted *ted, + struct ls_node_id nid); + +/** + * Check if two Vertices are equal. Note that this routine has the same return + * value sense as '==' (which is different from a comparison). + * + * @param v1 First vertex to compare + * @param v2 Second vertex to compare + * + * @return 1 if equal, 0 otherwise + */ +extern int ls_vertex_same(struct ls_vertex *v1, struct ls_vertex *v2); + +/** + * Add new Edge to the Link State DB. Edge is created from the Link State + * Attributes. Edge data structure is dynamically allocated. + * + * @param ted Link State Data Base + * @param attributes Link State attributes + * + * @return New Edge or NULL in case of error + */ +extern struct ls_edge *ls_edge_add(struct ls_ted *ted, + struct ls_attributes *attributes); + +/** + * Update the Link State Attributes information of an existing Edge. If there is + * no corresponding Edge in the Link State Data Base, a new Edge is created. + * + * @param ted Link State Data Base + * @param attributes Link State Attributes + * + * @return Updated Link State Edge, or NULL in case of error + */ +extern struct ls_edge *ls_edge_update(struct ls_ted *ted, + struct ls_attributes *attributes); + +/** + * Remove Edge from the Link State DB. Edge data structure is freed but not the + * Link State Attributes data structure. Link State DB is not modified if Edge + * is NULL or not found in the Data Base. + * + * @param ted Link State Data Base + * @param edge Edge to be removed + */ +extern void ls_edge_del(struct ls_ted *ted, struct ls_edge *edge); + +/** + * Find Edge in the Link State Data Base by Edge key. 
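The edge key used by this lookup mirrors the construction in ls_edge_add(): an IPv4 local link address fills the low 32 bits, with the IPv6 local address or the local/remote identifiers used as fallbacks. A hedged helper sketch, assuming lib/link_state.h:

    #include "prefix.h"
    #include "link_state.h"

    /* Sketch: look up the edge advertised with a given IPv4 local link
     * address, rebuilding the key exactly as ls_edge_add() does when an
     * IPv4 local address is present. */
    static struct ls_edge *find_edge_by_local_ipv4(struct ls_ted *ted,
                                                   struct in_addr local)
    {
            uint64_t key = ((uint64_t)local.s_addr) & 0xffffffff;

            if (key == 0)
                    return NULL;    /* local address not set */

            return ls_find_edge_by_key(ted, key);
    }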
+ * + * @param ted Link State Data Base + * @param key Edge key + * + * @return Edge if found, NULL otherwise + */ +extern struct ls_edge *ls_find_edge_by_key(struct ls_ted *ted, + const uint64_t key); + +/** + * Find Edge in the Link State Data Base by the source (local IPv4 or IPv6 + * address or local ID) informations of the Link + * State Attributes + * + * @param ted Link State Data Base + * @param attributes Link State Attributes + * + * @return Edge if found, NULL otherwise + */ +extern struct ls_edge * +ls_find_edge_by_source(struct ls_ted *ted, struct ls_attributes *attributes); + +/** + * Find Edge in the Link State Data Base by the destination (remote IPv4 or IPv6 + * address of remote ID) information of the Link State Attributes + * + * @param ted Link State Data Base + * @param attributes Link State Attributes + * + * @return Edge if found, NULL otherwise + */ +extern struct ls_edge * +ls_find_edge_by_destination(struct ls_ted *ted, + struct ls_attributes *attributes); + +/** + * Add new Subnet to the Link State DB. Subnet is created from the Link State + * prefix. Subnet data structure is dynamically allocated. + * + * @param ted Link State Data Base + * @param pref Link State Prefix + * + * @return New Subnet + */ +extern struct ls_subnet *ls_subnet_add(struct ls_ted *ted, + struct ls_prefix *pref); + +/** + * Remove Subnet from the Link State DB. Subnet data structure is freed but + * not the Link State prefix data structure. Link State DB is not modified + * if Subnet is NULL or not found in the Data Base. + * + * @param ted Link State Data Base + * @param subnet Subnet to be removed + */ +extern void ls_subnet_del(struct ls_ted *ted, struct ls_subnet *subnet); + +/** + * Find Subnet in the Link State Data Base by prefix. + * + * @param ted Link State Data Base + * @param prefix Link State Prefix + * + * @return Subnet if found, NULL otherwise + */ +extern struct ls_subnet *ls_find_subnet(struct ls_ted *ted, + const struct prefix prefix); + +/** + * Create a new Link State Data Base. + * + * @param key Unique key of the data base. Must be different from 0 + * @param name Name of the data base (may be NULL) + * @param asn AS Number for this data base. Must be different from 0 + * + * @return New Link State Database or NULL in case of error + */ +extern struct ls_ted *ls_ted_new(const uint32_t key, const char *name, + uint32_t asn); + +/** + * Delete existing Link State Data Base. + * + * @param ted Link State Data Base + */ +extern void ls_ted_del(struct ls_ted *ted); + +/** + * Connect Source and Destination Vertices by given Edge. Only non NULL source + * and destination vertices are connected. + * + * @param src Link State Source Vertex + * @param dst Link State Destination Vertex + * @param edge Link State Edge. Must not be NULL + */ +extern void ls_connect_vertices(struct ls_vertex *src, struct ls_vertex *dst, + struct ls_edge *edge); + +/** + * Connect Link State Edge to the Link State Vertex which could be a Source or + * a Destination Vertex. + * + * @param vertex Link State Vertex to be connected. Must not be NULL + * @param edge Link State Edge connection. Must not be NULL + * @param source True for a Source, false for a Destination Vertex + */ +extern void ls_connect(struct ls_vertex *vertex, struct ls_edge *edge, + bool source); + +/** + * Disconnect Link State Edge from the Link State Vertex which could be a + * Source or a Destination Vertex. + * + * @param vertex Link State Vertex to be connected. Must not be NULL + * @param edge Link State Edge connection. 
Must not be NULL + * @param source True for a Source, false for a Destination Vertex + */ +extern void ls_disconnect(struct ls_vertex *vertex, struct ls_edge *edge, + bool source); + +/** + * Disconnect Link State Edge from both Source and Destination Vertex. + * + * @param edge Link State Edge to be disconnected + */ +extern void ls_disconnect_edge(struct ls_edge *edge); + + +/** + * The Link State Message is defined to convey Link State parameters from + * the routing protocol (OSPF or IS-IS) to other daemons e.g. BGP. + * + * The structure is composed of: + * - Event of the message: + * - Sync: Send the whole LS DB following a request + * - Add: Send the a new Link State element + * - Update: Send an update of an existing Link State element + * - Delete: Indicate that the given Link State element is removed + * - Type of Link State element: Node, Attribute or Prefix + * - Remote node id when known + * - Data: Node, Attributes or Prefix + * + * A Link State Message can carry only one Link State Element (Node, Attributes + * of Prefix) at once, and only one Link State Message is sent through ZAPI + * Opaque Link State type at once. + */ + +/* ZAPI Opaque Link State Message Event */ +#define LS_MSG_EVENT_SYNC 1 +#define LS_MSG_EVENT_ADD 2 +#define LS_MSG_EVENT_UPDATE 3 +#define LS_MSG_EVENT_DELETE 4 + +/* ZAPI Opaque Link State Message sub-Type */ +#define LS_MSG_TYPE_NODE 1 +#define LS_MSG_TYPE_ATTRIBUTES 2 +#define LS_MSG_TYPE_PREFIX 3 + +/* Link State Message */ +struct ls_message { + uint8_t event; /* Message Event: Sync, Add, Update, Delete */ + uint8_t type; /* Message Data Type: Node, Attribute, Prefix */ + struct ls_node_id remote_id; /* Remote Link State Node ID */ + union { + struct ls_node *node; /* Link State Node */ + struct ls_attributes *attr; /* Link State Attributes */ + struct ls_prefix *prefix; /* Link State Prefix */ + } data; +}; + +/** + * Parse Link State Message from stream. Used this function once receiving a + * new ZAPI Opaque message of type Link State. + * + * @param s Stream buffer. Must not be NULL. + * + * @return New Link State Message or NULL in case of error + */ +extern struct ls_message *ls_parse_msg(struct stream *s); + +/** + * Delete existing message, freeing all substructure. + * + * @param msg Link state message to be deleted + */ +extern void ls_delete_msg(struct ls_message *msg); + +/** + * Send Link State Message as new ZAPI Opaque message of type Link State. + * If destination is not NULL, message is sent as Unicast otherwise it is + * broadcast to all registered daemon. + * + * @param zclient Zebra Client + * @param msg Link State Message to be sent + * @param dst Destination daemon for unicast message, + * NULL for broadcast message + * + * @return 0 on success, -1 otherwise + */ +extern int ls_send_msg(struct zclient *zclient, struct ls_message *msg, + struct zapi_opaque_reg_info *dst); + +/** + * Create a new Link State Message from a Link State Vertex. If Link State + * Message is NULL, a new data structure is dynamically allocated. + * + * @param msg Link State Message to be filled or NULL + * @param vertex Link State Vertex. Must not be NULL + * + * @return New Link State Message msg parameter is NULL or pointer + * to the provided Link State Message + */ +extern struct ls_message *ls_vertex2msg(struct ls_message *msg, + struct ls_vertex *vertex); + +/** + * Create a new Link State Message from a Link State Edge. If Link State + * Message is NULL, a new data structure is dynamically allocated. 
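A hedged sketch of the send path these helpers enable: convert one updated vertex into a Link State Message and broadcast it over the ZAPI opaque channel (a NULL destination means broadcast to all registered daemons, as documented above). The zclient handle is assumed to be the daemon's zebra client:

    #include "zclient.h"
    #include "link_state.h"

    /* Sketch: advertise one updated vertex to every daemon registered for
     * Link State updates. */
    static int flood_vertex_update(struct zclient *zclient,
                                   struct ls_vertex *vertex)
    {
            struct ls_message msg = {};

            /* Fill type, data.node and remote_id from the vertex ... */
            ls_vertex2msg(&msg, vertex);
            /* ... then mark it as an update (ls_vertex2msg() zeroes the
             * message first, so the event is set afterwards). */
            msg.event = LS_MSG_EVENT_UPDATE;

            return ls_send_msg(zclient, &msg, NULL);
    }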
+ * + * @param msg Link State Message to be filled or NULL + * @param edge Link State Edge. Must not be NULL + * + * @return New Link State Message msg parameter is NULL or pointer + * to the provided Link State Message + */ +extern struct ls_message *ls_edge2msg(struct ls_message *msg, + struct ls_edge *edge); + +/** + * Create a new Link State Message from a Link State Subnet. If Link State + * Message is NULL, a new data structure is dynamically allocated. + * + * @param msg Link State Message to be filled or NULL + * @param subnet Link State Subnet. Must not be NULL + * + * @return New Link State Message msg parameter is NULL or pointer + * to the provided Link State Message + */ +extern struct ls_message *ls_subnet2msg(struct ls_message *msg, + struct ls_subnet *subnet); + +/** + * Send all the content of the Link State Data Base to the given destination. + * Link State content is sent is this order: Vertices, Edges, Subnet. + * This function must be used when a daemon request a Link State Data Base + * Synchronization. + * + * @param ted Link State Data Base. Must not be NULL + * @param zclient Zebra Client. Must not be NULL + * @param dst Destination FRR daemon. Must not be NULL + * + * @return 0 on success, -1 otherwise + */ +extern int ls_sync_ted(struct ls_ted *ted, struct zclient *zclient, + struct zapi_opaque_reg_info *dst); + +/** + * Dump all Link State Data Base elements for debugging purposes + * + * @param ted Link State Data Base. Must not be NULL + * + */ +extern void ls_dump_ted(struct ls_ted *ted); + +#ifdef __cplusplus +} +#endif + +#endif /* _FRR_LINK_STATE_H_ */ @@ -161,8 +161,9 @@ void zlog_signal(int signo, const char *action, void *siginfo_v, if (!tc) bprintfrr(&fb, "no thread information available\n"); else - bprintfrr(&fb, "in thread %s scheduled from %s:%d\n", - tc->funcname, tc->schedfrom, tc->schedfrom_line); + bprintfrr(&fb, "in thread %s scheduled from %s:%d %s()\n", + tc->xref->funcname, tc->xref->xref.file, + tc->xref->xref.line, tc->xref->xref.func); zlog_sigsafe(fb.buf, fb.pos - fb.buf); } @@ -179,6 +180,9 @@ void zlog_backtrace_sigsafe(int priority, void *program_counter) unw_word_t ip, off, sp; Dl_info dlinfo; + memset(&uc, 0, sizeof(uc)); + memset(&cursor, 0, sizeof(cursor)); + unw_getcontext(&uc); unw_init_local(&cursor, &uc); while (unw_step(&cursor) > 0) { @@ -300,8 +304,9 @@ void zlog_thread_info(int log_level) if (tc) zlog(log_level, - "Current thread function %s, scheduled from file %s, line %u", - tc->funcname, tc->schedfrom, tc->schedfrom_line); + "Current thread function %s, scheduled from file %s, line %u in %s()", + tc->xref->funcname, tc->xref->xref.file, + tc->xref->xref.line, tc->xref->xref.func); else zlog(log_level, "Current thread not known/applicable"); } @@ -62,16 +62,6 @@ struct message { const char *str; }; -/* For logs which have error codes associated with them */ -#define flog_err(ferr_id, format, ...) \ - zlog_err("[EC %u] " format, ferr_id, ##__VA_ARGS__) -#define flog_err_sys(ferr_id, format, ...) \ - flog_err(ferr_id, format, ##__VA_ARGS__) -#define flog_warn(ferr_id, format, ...) \ - zlog_warn("[EC %u] " format, ferr_id, ##__VA_ARGS__) -#define flog(priority, ferr_id, format, ...) 
\ - zlog(priority, "[EC %u] " format, ferr_id, ##__VA_ARGS__) - extern void zlog_thread_info(int log_level); #define ZLOG_FILTERS_MAX 100 /* Max # of filters at once */ diff --git a/lib/module.c b/lib/module.c index 14d5cfd44f..3d299a6a2e 100644 --- a/lib/module.c +++ b/lib/module.c @@ -43,6 +43,8 @@ union _frrmod_runtime_u frrmod_default = { }, }; +XREF_SETUP() + // if defined(HAVE_SYS_WEAK_ALIAS_ATTRIBUTE) // union _frrmod_runtime_u _frrmod_this_module // __attribute__((weak, alias("frrmod_default"))); diff --git a/lib/module.h b/lib/module.h index 79cf52d75a..5d8d9cfbcc 100644 --- a/lib/module.h +++ b/lib/module.h @@ -20,6 +20,9 @@ #include <stdint.h> #include <stdbool.h> +#include "compiler.h" +#include "xref.h" + #ifdef __cplusplus extern "C" { #endif @@ -75,7 +78,10 @@ extern union _frrmod_runtime_u _frrmod_this_module; DSO_LOCAL union _frrmod_runtime_u _frrmod_this_module = {{ \ NULL, \ &_frrmod_info, \ - }}; + }}; \ + XREF_SETUP() \ + /* end */ + #define FRR_MODULE_SETUP(...) \ FRR_COREMOD_SETUP(__VA_ARGS__) \ DSO_SELF struct frrmod_runtime *frr_module = &_frrmod_this_module.r; diff --git a/lib/network.c b/lib/network.c index d2482bd55e..411661a5e1 100644 --- a/lib/network.c +++ b/lib/network.c @@ -121,21 +121,3 @@ float ntohf(float net) { return htonf(net); } - -/** - * Helper function that returns a random long value. The main purpose of - * this function is to hide a `random()` call that gets flagged by coverity - * scan and put it into one place. - * - * The main usage of this function should be for generating jitter or weak - * random values for simple purposes. - * - * See 'man 3 random' for more information. - * - * \returns random long integer. - */ -long frr_weak_random(void) -{ - /* coverity[dont_call] */ - return random(); -} diff --git a/lib/network.h b/lib/network.h index 83c9e59e76..4a9666984f 100644 --- a/lib/network.h +++ b/lib/network.h @@ -45,7 +45,23 @@ extern int set_cloexec(int fd); extern float htonf(float); extern float ntohf(float); -extern long frr_weak_random(void); +/** + * Helper function that returns a random long value. The main purpose of + * this function is to hide a `random()` call that gets flagged by coverity + * scan and put it into one place. + * + * The main usage of this function should be for generating jitter or weak + * random values for simple purposes. + * + * See 'man 3 random' for more information. + * + * \returns random long integer. + */ +static inline long frr_weak_random(void) +{ + /* coverity[dont_call] */ + return random(); +} #ifdef __cplusplus } diff --git a/lib/northbound.h b/lib/northbound.h index 3f6e4dc46e..8dd6b4c337 100644 --- a/lib/northbound.h +++ b/lib/northbound.h @@ -552,7 +552,7 @@ struct nb_node { * from working properly on shared libraries. For those compilers, use a fixed * size array to work around the problem. */ -#define YANG_MODULE_MAX_NODES 1400 +#define YANG_MODULE_MAX_NODES 2000 struct frr_yang_module_info { /* YANG module name. */ @@ -600,6 +600,7 @@ enum nb_client { NB_CLIENT_CONFD, NB_CLIENT_SYSREPO, NB_CLIENT_GRPC, + NB_CLIENT_PCEP, }; /* Northbound context. 
*/ @@ -621,6 +622,8 @@ struct nb_context { } sysrepo; struct { } grpc; + struct { + } pcep; } client_data; #endif }; diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c index 7048df99fb..ad7dad5cb2 100644 --- a/lib/northbound_cli.c +++ b/lib/northbound_cli.c @@ -693,6 +693,12 @@ static int nb_write_config(struct nb_config *config, enum nb_cfg_format format, __func__, safe_strerror(errno)); return -1; } + if (fchmod(fd, CONFIGFILE_MASK) != 0) { + flog_warn(EC_LIB_SYSTEM_CALL, + "%s: fchmod() failed: %s(%d):", __func__, + safe_strerror(errno), errno); + return -1; + } /* Make vty for configuration file. */ file_vty = vty_new(); @@ -1820,20 +1826,20 @@ static struct cmd_node nb_debug_node = { void nb_cli_install_default(int node) { - install_element(node, &show_config_candidate_section_cmd); + _install_element(node, &show_config_candidate_section_cmd); if (frr_get_cli_mode() != FRR_CLI_TRANSACTIONAL) return; - install_element(node, &config_commit_cmd); - install_element(node, &config_commit_comment_cmd); - install_element(node, &config_commit_check_cmd); - install_element(node, &config_update_cmd); - install_element(node, &config_discard_cmd); - install_element(node, &show_config_running_cmd); - install_element(node, &show_config_candidate_cmd); - install_element(node, &show_config_compare_cmd); - install_element(node, &show_config_transaction_cmd); + _install_element(node, &config_commit_cmd); + _install_element(node, &config_commit_comment_cmd); + _install_element(node, &config_commit_check_cmd); + _install_element(node, &config_update_cmd); + _install_element(node, &config_discard_cmd); + _install_element(node, &show_config_running_cmd); + _install_element(node, &show_config_candidate_cmd); + _install_element(node, &show_config_compare_cmd); + _install_element(node, &show_config_transaction_cmd); } /* YANG module autocomplete. */ diff --git a/lib/printf/printf-pos.c b/lib/printf/printf-pos.c index cc03f7ef9a..ac775bea4e 100644 --- a/lib/printf/printf-pos.c +++ b/lib/printf/printf-pos.c @@ -384,6 +384,7 @@ reswitch: switch (ch) { goto error; break; #endif /* !NO_FLOATING_POINT */ +#ifdef DANGEROUS_PERCENT_N case 'n': if (flags & INTMAXT) error = addtype(&types, TP_INTMAXT); @@ -404,6 +405,7 @@ reswitch: switch (ch) { if (error) goto error; continue; /* no output */ +#endif case 'O': flags |= LONGINT; /*FALLTHROUGH*/ @@ -576,6 +578,7 @@ reswitch: switch (ch) { goto error; break; #endif /* !NO_FLOATING_POINT */ +#ifdef DANGEROUS_PERCENT_N case 'n': if (flags & INTMAXT) error = addtype(&types, TP_INTMAXT); @@ -596,6 +599,7 @@ reswitch: switch (ch) { if (error) goto error; continue; /* no output */ +#endif case 'O': flags |= LONGINT; /*FALLTHROUGH*/ diff --git a/lib/printf/vfprintf.c b/lib/printf/vfprintf.c index 6ffccb3811..a0634cde4b 100644 --- a/lib/printf/vfprintf.c +++ b/lib/printf/vfprintf.c @@ -503,6 +503,11 @@ reswitch: switch (ch) { size = (prec >= 0) ? strnlen(cp, prec) : strlen(cp); sign = '\0'; break; +#ifdef DANGEROUS_PERCENT_N + /* FRR does not use %n in printf formats. 
This is just left + * here in case someone tries to use %n and starts debugging + * why the f* it doesn't work + */ case 'n': /* * Assignment-like behavior is specified if the @@ -526,6 +531,7 @@ reswitch: switch (ch) { else *GETARG(int *) = ret; continue; /* no output */ +#endif case 'O': flags |= LONGINT; /*FALLTHROUGH*/ diff --git a/lib/privs.c b/lib/privs.c index 1bb5d059c8..5ca3c0d886 100644 --- a/lib/privs.c +++ b/lib/privs.c @@ -587,6 +587,8 @@ void zprivs_preinit(struct zebra_privs_t *zprivs) } } +struct zebra_privs_t *lib_privs; + void zprivs_init(struct zebra_privs_t *zprivs) { gid_t groups[NGROUPS_MAX] = {}; @@ -598,6 +600,8 @@ void zprivs_init(struct zebra_privs_t *zprivs) || zprivs->cap_num_i)) return; + lib_privs = zprivs; + if (zprivs->user) { ngroups = array_size(groups); if (getgrouplist(zprivs->user, zprivs_state.zgid, groups, @@ -701,6 +705,8 @@ void zprivs_terminate(struct zebra_privs_t *zprivs) { struct zebra_privs_refs_t *refs; + lib_privs = NULL; + if (!zprivs) { fprintf(stderr, "%s: no privs struct given, terminating", __func__); diff --git a/lib/privs.h b/lib/privs.h index 18ba8e8888..2dcdbe2e6c 100644 --- a/lib/privs.h +++ b/lib/privs.h @@ -100,6 +100,8 @@ struct zprivs_ids_t { gid_t gid_vty; /* vty gid */ }; +extern struct zebra_privs_t *lib_privs; + /* initialise zebra privileges */ extern void zprivs_preinit(struct zebra_privs_t *zprivs); extern void zprivs_init(struct zebra_privs_t *zprivs); diff --git a/lib/resolver.c b/lib/resolver.c index e5caadb2d0..c01284e29e 100644 --- a/lib/resolver.c +++ b/lib/resolver.c @@ -19,6 +19,9 @@ #include "lib_errors.h" #include "resolver.h" #include "command.h" +#include "xref.h" + +XREF_SETUP() struct resolver_state { ares_channel channel; diff --git a/lib/route_types.pl b/lib/route_types.pl index 759e9b4729..8c216e13bd 100755 --- a/lib/route_types.pl +++ b/lib/route_types.pl @@ -130,7 +130,7 @@ sub codelist { $str =~ s/ $//; push @lines, $str . 
"\\n\" \\\n"; push @lines, " \" > - selected route, * - FIB route, q - queued, r - rejected, b - backup\\n\""; - push @lines, " \" t - trapped, o - offload failure\\n\""; + push @lines, " \" t - trapped, o - offload failure\\n\\n\""; return join("", @lines); diff --git a/lib/routemap.c b/lib/routemap.c index 004beb3628..7714086672 100644 --- a/lib/routemap.c +++ b/lib/routemap.c @@ -1347,7 +1347,7 @@ enum rmap_compile_rets route_map_add_match(struct route_map_index *index, get_route_map_delete_event(type); route_map_upd8_dependency( delete_rmap_event_type, - rule_key, + rule->rule_str, index->map->name); } @@ -2240,7 +2240,7 @@ static void route_map_pentry_update(route_map_event_t event, } } -static void route_map_pentry_process_dependency(struct hash_bucket *backet, +static void route_map_pentry_process_dependency(struct hash_bucket *bucket, void *data) { char *rmap_name = NULL; @@ -2253,7 +2253,7 @@ static void route_map_pentry_process_dependency(struct hash_bucket *backet, (struct route_map_pentry_dep *)data; unsigned char family = pentry_dep->pentry->prefix.family; - dep_data = (struct route_map_dep_data *)backet->data; + dep_data = (struct route_map_dep_data *)bucket->data; if (!dep_data) return; @@ -2399,6 +2399,7 @@ route_map_result_t route_map_apply(struct route_map *map, index = route_map_get_index(map, prefix, object, (uint8_t *)&match_ret); if (index) { + index->applied++; if (rmap_debug) zlog_debug( "Best match route-map: %s, sequence: %d for pfx: %pFX, result: %s", @@ -2586,22 +2587,23 @@ static void route_map_clear_reference(struct hash_bucket *bucket, void *arg) struct route_map_dep *dep = bucket->data; struct route_map_dep_data *dep_data = NULL, tmp_dep_data; - if (arg) { - memset(&tmp_dep_data, 0, sizeof(struct route_map_dep_data)); - tmp_dep_data.rname = arg; - dep_data = hash_release(dep->dep_rmap_hash, - &tmp_dep_data); - if (dep_data) { - XFREE(MTYPE_ROUTE_MAP_NAME, dep_data->rname); - XFREE(MTYPE_ROUTE_MAP_DEP_DATA, dep_data); - } - if (!dep->dep_rmap_hash->count) { - dep = hash_release(dep->this_hash, - (void *)dep->dep_name); - hash_free(dep->dep_rmap_hash); - XFREE(MTYPE_ROUTE_MAP_NAME, dep->dep_name); - XFREE(MTYPE_ROUTE_MAP_DEP, dep); - } + memset(&tmp_dep_data, 0, sizeof(struct route_map_dep_data)); + tmp_dep_data.rname = arg; + dep_data = hash_release(dep->dep_rmap_hash, &tmp_dep_data); + if (dep_data) { + if (rmap_debug) + zlog_debug("Clearing reference for %s to %s count: %d", + dep->dep_name, tmp_dep_data.rname, + dep_data->refcnt); + + XFREE(MTYPE_ROUTE_MAP_NAME, dep_data->rname); + XFREE(MTYPE_ROUTE_MAP_DEP_DATA, dep_data); + } + if (!dep->dep_rmap_hash->count) { + dep = hash_release(dep->this_hash, (void *)dep->dep_name); + hash_free(dep->dep_rmap_hash); + XFREE(MTYPE_ROUTE_MAP_NAME, dep->dep_name); + XFREE(MTYPE_ROUTE_MAP_DEP, dep); } } @@ -2609,6 +2611,9 @@ static void route_map_clear_all_references(char *rmap_name) { int i; + if (rmap_debug) + zlog_debug("Clearing references for %s", rmap_name); + for (i = 1; i < ROUTE_MAP_DEP_MAX; i++) { hash_iterate(route_map_dep_hash[i], route_map_clear_reference, (void *)rmap_name); @@ -2763,12 +2768,19 @@ static int route_map_dep_update(struct hash *dephash, const char *dep_name, memset(&tmp_dep_data, 0, sizeof(struct route_map_dep_data)); tmp_dep_data.rname = rname; dep_data = hash_lookup(dep->dep_rmap_hash, &tmp_dep_data); - - if (!dep_data) + /* + * If dep_data is NULL then something has gone seriously + * wrong in route-map handling. Note it and prevent + * the crash. 
+ */ + if (!dep_data) { + zlog_warn( + "route-map dependency for route-map %s: %s is not correct", + rmap_name, dep_name); goto out; + } - if (dep_data->refcnt) - dep_data->refcnt--; + dep_data->refcnt--; if (!dep_data->refcnt) { ret_dep_data = hash_release(dep->dep_rmap_hash, diff --git a/lib/sigevent.c b/lib/sigevent.c index 8d583096f6..64cec1385d 100644 --- a/lib/sigevent.c +++ b/lib/sigevent.c @@ -258,7 +258,7 @@ core_handler(int signo, siginfo_t *siginfo, void *context) static void trap_default_signals(void) { static const int core_signals[] = { - SIGQUIT, SIGILL, + SIGQUIT, SIGILL, SIGABRT, #ifdef SIGEMT SIGEMT, #endif diff --git a/lib/smux.h b/lib/smux.h index 6896f02354..e07df2369f 100644 --- a/lib/smux.h +++ b/lib/smux.h @@ -44,6 +44,29 @@ extern "C" { #define IN_ADDR_SIZE sizeof(struct in_addr) +/* IANAipRouteProtocol */ +#define IANAIPROUTEPROTOCOLOTHER 1 +#define IANAIPROUTEPROTOCOLLOCAL 2 +#define IANAIPROUTEPROTOCOLNETMGMT 3 +#define IANAIPROUTEPROTOCOLICMP 4 +#define IANAIPROUTEPROTOCOLEGP 5 +#define IANAIPROUTEPROTOCOLGGP 6 +#define IANAIPROUTEPROTOCOLHELLO 7 +#define IANAIPROUTEPROTOCOLRIP 8 +#define IANAIPROUTEPROTOCOLISIS 9 +#define IANAIPROUTEPROTOCOLESIS 10 +#define IANAIPROUTEPROTOCOLCISCOIGRP 11 +#define IANAIPROUTEPROTOCOLBBNSPFIGP 12 +#define IANAIPROUTEPROTOCOLOSPF 13 +#define IANAIPROUTEPROTOCOLBGP 14 +#define IANAIPROUTEPROTOCOLIDPR 15 +#define IANAIPROUTEPROTOCOLCISCOEIGRP 16 +#define IANAIPROUTEPROTOCOLDVMRP 17 + +#define INETADDRESSTYPEUNKNOWN 0 +#define INETADDRESSTYPEIPV4 1 +#define INETADDRESSTYPEIPV6 2 + #undef REGISTER_MIB #define REGISTER_MIB(descr, var, vartype, theoid) \ smux_register_mib(descr, (struct variable *)var, \ @@ -56,19 +79,29 @@ struct trap_object { oid name[MAX_OID_LEN]; }; +struct index_oid { + int indexlen; + oid indexname[MAX_OID_LEN]; +}; /* Declare SMUX return value. */ #define SNMP_LOCAL_VARIABLES \ static long snmp_int_val __attribute__((unused)); \ static struct in_addr snmp_in_addr_val __attribute__((unused)); - + static uint8_t snmp_octet_val __attribute__((unused)); #define SNMP_INTEGER(V) \ (*var_len = sizeof(snmp_int_val), snmp_int_val = V, \ (uint8_t *)&snmp_int_val) +#define SNMP_OCTET(V) \ + (*var_len = sizeof(snmp_octet_val), snmp_octet_val = V, \ + (uint8_t *)&snmp_octet_val) + #define SNMP_IPADDRESS(V) \ (*var_len = sizeof(struct in_addr), snmp_in_addr_val = V, \ (uint8_t *)&snmp_in_addr_val) +#define SNMP_IP6ADDRESS(V) (*var_len = sizeof(struct in6_addr), (uint8_t *)&V) + extern void smux_init(struct thread_master *tm); extern void smux_register_mib(const char *, struct variable *, size_t, int, oid[], size_t); @@ -98,14 +131,24 @@ extern int smux_header_table(struct variable *, oid *, size_t *, int, size_t *, The use of the arguments may differ depending on the implementation used. 
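 
   A rough illustration (not from this header) of filling the new
   struct index_oid for the multi-index trap variant declared below;
   "idx", "ifindex" and "nexthop" are hypothetical caller variables:
 
      struct index_oid idx[2];
 
      oid_copy_int(idx[0].indexname, &ifindex);
      idx[0].indexlen = sizeof(int);
      oid_copy_addr(idx[1].indexname, &nexthop, IN_ADDR_SIZE);
      idx[1].indexlen = IN_ADDR_SIZE;
 
   The filled array is then passed as the iname/index_len arguments of
   smux_trap_multi_index().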
*/ -extern int smux_trap(struct variable *, size_t, const oid *, size_t, - const oid *, size_t, const oid *, size_t, - const struct trap_object *, size_t, uint8_t); - +extern void smux_trap(struct variable *, size_t, const oid *, size_t, + const oid *, size_t, const oid *, size_t, + const struct trap_object *, size_t, uint8_t); + +extern int smux_trap_multi_index(struct variable *vp, size_t vp_len, + const oid *ename, size_t enamelen, + const oid *name, size_t namelen, + struct index_oid *iname, size_t index_len, + const struct trap_object *trapobj, + size_t trapobjlen, uint8_t sptrap); extern int oid_compare(const oid *, int, const oid *, int); extern void oid2in_addr(oid[], int, struct in_addr *); +extern void oid2int(oid oid[], int *dest); extern void *oid_copy(void *, const void *, size_t); extern void oid_copy_addr(oid[], const struct in_addr *, int); +extern void oid_copy_int(oid oid[], int *val); +extern void oid2string(oid oid[], int len, char *string); +extern void oid_copy_str(oid oid[], const char *string, int len); #ifdef __cplusplus } diff --git a/lib/snmp.c b/lib/snmp.c index 736a3c62b8..e92f622bb9 100644 --- a/lib/snmp.c +++ b/lib/snmp.c @@ -64,6 +64,19 @@ void oid2in_addr(oid oid[], int len, struct in_addr *addr) *pnt++ = oid[i]; } +void oid2int(oid oid[], int *dest) +{ + uint8_t i; + uint8_t *pnt; + int network_dest; + + pnt = (uint8_t *)&network_dest; + + for (i = 0; i < sizeof(int); i++) + *pnt++ = oid[i]; + *dest = ntohl(network_dest); +} + void oid_copy_addr(oid oid[], const struct in_addr *addr, int len) { int i; @@ -78,6 +91,47 @@ void oid_copy_addr(oid oid[], const struct in_addr *addr, int len) oid[i] = *pnt++; } +void oid_copy_int(oid oid[], int *val) +{ + uint8_t i; + const uint8_t *pnt; + int network_val; + + network_val = htonl(*val); + pnt = (uint8_t *)&network_val; + + for (i = 0; i < sizeof(int); i++) + oid[i] = *pnt++; +} + +void oid2string(oid oid[], int len, char *string) +{ + int i; + uint8_t *pnt; + + if (len == 0) + return; + + pnt = (uint8_t *)string; + + for (i = 0; i < len; i++) + *pnt++ = (uint8_t)oid[i]; +} + +void oid_copy_str(oid oid[], const char *string, int len) +{ + int i; + const uint8_t *pnt; + + if (len == 0) + return; + + pnt = (uint8_t *)string; + + for (i = 0; i < len; i++) + oid[i] = *pnt++; +} + int smux_header_generic(struct variable *v, oid *name, size_t *length, int exact, size_t *var_len, WriteMethod **write_method) { diff --git a/lib/sockunion.c b/lib/sockunion.c index c999845659..1dbf77efa4 100644 --- a/lib/sockunion.c +++ b/lib/sockunion.c @@ -708,3 +708,20 @@ static ssize_t printfrr_psu(char *buf, size_t bsz, const char *fmt, fb.pos[0] = '\0'; return consumed; } + +int sockunion_is_null(const union sockunion *su) +{ + unsigned char null_s6_addr[16] = {0}; + + switch (sockunion_family(su)) { + case AF_UNSPEC: + return 1; + case AF_INET: + return (su->sin.sin_addr.s_addr == 0); + case AF_INET6: + return !memcmp(su->sin6.sin6_addr.s6_addr, null_s6_addr, + sizeof(null_s6_addr)); + default: + return 0; + } +} diff --git a/lib/sockunion.h b/lib/sockunion.h index 72f12b77ca..5e80ba1090 100644 --- a/lib/sockunion.h +++ b/lib/sockunion.h @@ -102,6 +102,7 @@ extern union sockunion *sockunion_getpeername(int); extern union sockunion *sockunion_dup(const union sockunion *); extern void sockunion_free(union sockunion *); extern void sockunion_init(union sockunion *); +extern int sockunion_is_null(const union sockunion *su); #ifdef _FRR_ATTRIBUTE_PRINTFRR #pragma FRR printfrr_ext "%pSU" (union sockunion *) diff --git a/lib/stream.c 
b/lib/stream.c index e4e37b7315..ef73c2fdc9 100644 --- a/lib/stream.c +++ b/lib/stream.c @@ -57,7 +57,7 @@ DEFINE_MTYPE_STATIC(LIB, STREAM_FIFO, "Stream FIFO") #define STREAM_WARN_OFFSETS(S) \ do { \ flog_warn(EC_LIB_STREAM, \ - "&(struct stream): %p, size: %lu, getp: %lu, endp: %lu\n", \ + "&(struct stream): %p, size: %lu, getp: %lu, endp: %lu", \ (void *)(S), (unsigned long)(S)->size, \ (unsigned long)(S)->getp, (unsigned long)(S)->endp); \ zlog_backtrace(LOG_WARNING); \ @@ -93,7 +93,7 @@ DEFINE_MTYPE_STATIC(LIB, STREAM_FIFO, "Stream FIFO") if (((S)->endp + (Z)) > (S)->size) { \ flog_warn( \ EC_LIB_STREAM, \ - "CHECK_SIZE: truncating requested size %lu\n", \ + "CHECK_SIZE: truncating requested size %lu", \ (unsigned long)(Z)); \ STREAM_WARN_OFFSETS(S); \ (Z) = (S)->size - (S)->endp; \ diff --git a/lib/subdir.am b/lib/subdir.am index 038282a99b..d5ffa08546 100644 --- a/lib/subdir.am +++ b/lib/subdir.am @@ -26,6 +26,7 @@ lib_libfrr_la_SOURCES = \ lib/filter_nb.c \ lib/frrcu.c \ lib/frrlua.c \ + lib/frrscript.c \ lib/frr_pthread.c \ lib/frrstr.c \ lib/getopt.c \ @@ -48,6 +49,7 @@ lib_libfrr_la_SOURCES = \ lib/libfrr.c \ lib/libfrr_trace.c \ lib/linklist.c \ + lib/link_state.c \ lib/log.c \ lib/log_filter.c \ lib/log_vty.c \ @@ -101,6 +103,7 @@ lib_libfrr_la_SOURCES = \ lib/vty.c \ lib/wheel.c \ lib/workqueue.c \ + lib/xref.c \ lib/yang.c \ lib/yang_translator.c \ lib/yang_wrappers.c \ @@ -184,6 +187,7 @@ pkginclude_HEADERS += \ lib/filter.h \ lib/freebsd-queue.h \ lib/frrlua.h \ + lib/frrscript.h \ lib/frr_pthread.h \ lib/frratomic.h \ lib/frrcu.h \ @@ -208,6 +212,7 @@ pkginclude_HEADERS += \ lib/libfrr_trace.h \ lib/libospf.h \ lib/linklist.h \ + lib/link_state.h \ lib/log.h \ lib/log_vty.h \ lib/md5.h \ @@ -264,6 +269,7 @@ pkginclude_HEADERS += \ lib/vxlan.h \ lib/wheel.h \ lib/workqueue.h \ + lib/xref.h \ lib/yang.h \ lib/yang_translator.h \ lib/yang_wrappers.h \ @@ -407,6 +413,7 @@ lib_clippy_CFLAGS = $(PYTHON_CFLAGS) lib_clippy_LDADD = $(PYTHON_LIBS) $(UST_LIBS) lib_clippy_LDFLAGS = -export-dynamic lib_clippy_SOURCES = \ + lib/jhash.c \ lib/clippy.c \ lib/command_graph.c \ lib/command_lex.l \ diff --git a/lib/thread.c b/lib/thread.c index c886058355..e0d734a951 100644 --- a/lib/thread.c +++ b/lib/thread.c @@ -119,7 +119,7 @@ static void vty_out_cpu_thread_history(struct vty *vty, a->total_active, a->cpu.total / 1000, a->cpu.total % 1000, a->total_calls, (a->cpu.total / a->total_calls), a->cpu.max, (a->real.total / a->total_calls), a->real.max); - vty_out(vty, " %c%c%c%c%c %s\n", + vty_out(vty, " %c%c%c%c%c %s\n", a->types & (1 << THREAD_READ) ? 'R' : ' ', a->types & (1 << THREAD_WRITE) ? 'W' : ' ', a->types & (1 << THREAD_TIMER) ? 
'T' : ' ', @@ -188,7 +188,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter) name); vty_out(vty, "-------------------------------%s\n", underline); - vty_out(vty, "%21s %18s %18s\n", "", + vty_out(vty, "%30s %18s %18s\n", "", "CPU (user+system):", "Real (wall-clock):"); vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs"); @@ -211,7 +211,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter) vty_out(vty, "\n"); vty_out(vty, "Total thread statistics\n"); vty_out(vty, "-------------------------\n"); - vty_out(vty, "%21s %18s %18s\n", "", + vty_out(vty, "%30s %18s %18s\n", "", "CPU (user+system):", "Real (wall-clock):"); vty_out(vty, "Active Runtime(ms) Invoked Avg uSec Max uSecs"); vty_out(vty, " Avg uSec Max uSecs"); @@ -342,7 +342,7 @@ static void show_thread_poll_helper(struct vty *vty, struct thread_master *m) if (!thread) vty_out(vty, "ERROR "); else - vty_out(vty, "%s ", thread->funcname); + vty_out(vty, "%s ", thread->xref->funcname); } else vty_out(vty, " "); @@ -352,7 +352,7 @@ static void show_thread_poll_helper(struct vty *vty, struct thread_master *m) if (!thread) vty_out(vty, "ERROR\n"); else - vty_out(vty, "%s\n", thread->funcname); + vty_out(vty, "%s\n", thread->xref->funcname); } else vty_out(vty, "\n"); } @@ -633,9 +633,6 @@ unsigned long thread_timer_remain_second(struct thread *thread) return thread_timer_remain_msec(thread) / 1000LL; } -#define debugargdef const char *funcname, const char *schedfrom, int fromln -#define debugargpass funcname, schedfrom, fromln - struct timeval thread_timer_remain(struct thread *thread) { struct timeval remain; @@ -678,7 +675,7 @@ char *thread_timer_to_hhmmss(char *buf, int buf_size, /* Get new thread. */ static struct thread *thread_get(struct thread_master *m, uint8_t type, int (*func)(struct thread *), void *arg, - debugargdef) + const struct xref_threadsched *xref) { struct thread *thread = thread_list_pop(&m->unuse); struct cpu_thread_history tmp; @@ -707,18 +704,17 @@ static struct thread *thread_get(struct thread_master *m, uint8_t type, * This hopefully saves us some serious * hash_get lookups. */ - if (thread->funcname != funcname || thread->func != func) { + if ((thread->xref && thread->xref->funcname != xref->funcname) + || thread->func != func) { tmp.func = func; - tmp.funcname = funcname; + tmp.funcname = xref->funcname; thread->hist = hash_get(m->cpu_record, &tmp, (void *(*)(void *))cpu_record_hash_alloc); } thread->hist->total_active++; thread->func = func; - thread->funcname = funcname; - thread->schedfrom = schedfrom; - thread->schedfrom_line = fromln; + thread->xref = xref; return thread; } @@ -832,21 +828,23 @@ done: } /* Add new read thread. 
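 * The thread_add_read()/thread_add_write() macros in thread.h expand to a
 * static struct xref_threadsched describing the call site and then call this
 * function.  A hedged caller sketch ("handle_read", "ctx" and "sock" are
 * illustrative names only):
 *
 *    thread_add_read(master, handle_read, ctx, sock, &ctx->t_read);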
*/ -struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m, - int (*func)(struct thread *), - void *arg, int fd, - struct thread **t_ptr, - debugargdef) +struct thread *_thread_add_read_write(const struct xref_threadsched *xref, + struct thread_master *m, + int (*func)(struct thread *), + void *arg, int fd, struct thread **t_ptr) { + int dir = xref->thread_type; struct thread *thread = NULL; struct thread **thread_array; if (dir == THREAD_READ) - frrtrace(9, frr_libfrr, schedule_read, m, funcname, schedfrom, - fromln, t_ptr, fd, 0, arg, 0); + frrtrace(9, frr_libfrr, schedule_read, m, + xref->funcname, xref->xref.file, xref->xref.line, + t_ptr, fd, 0, arg, 0); else - frrtrace(9, frr_libfrr, schedule_write, m, funcname, schedfrom, - fromln, t_ptr, fd, 0, arg, 0); + frrtrace(9, frr_libfrr, schedule_write, m, + xref->funcname, xref->xref.file, xref->xref.line, + t_ptr, fd, 0, arg, 0); assert(fd >= 0 && fd < m->fd_limit); frr_with_mutex(&m->mtx) { @@ -882,7 +880,7 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m, /* make sure we have room for this fd + pipe poker fd */ assert(queuepos + 1 < m->handler.pfdsize); - thread = thread_get(m, dir, func, arg, debugargpass); + thread = thread_get(m, dir, func, arg, xref); m->handler.pfds[queuepos].fd = fd; m->handler.pfds[queuepos].events |= @@ -910,10 +908,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m, } static struct thread * -funcname_thread_add_timer_timeval(struct thread_master *m, - int (*func)(struct thread *), int type, - void *arg, struct timeval *time_relative, - struct thread **t_ptr, debugargdef) +_thread_add_timer_timeval(const struct xref_threadsched *xref, + struct thread_master *m, int (*func)(struct thread *), + int type, void *arg, struct timeval *time_relative, + struct thread **t_ptr) { struct thread *thread; @@ -922,7 +920,8 @@ funcname_thread_add_timer_timeval(struct thread_master *m, assert(type == THREAD_TIMER); assert(time_relative); - frrtrace(9, frr_libfrr, schedule_timer, m, funcname, schedfrom, fromln, + frrtrace(9, frr_libfrr, schedule_timer, m, + xref->funcname, xref->xref.file, xref->xref.line, t_ptr, 0, 0, arg, (long)time_relative->tv_sec); frr_with_mutex(&m->mtx) { @@ -930,7 +929,7 @@ funcname_thread_add_timer_timeval(struct thread_master *m, /* thread is already scheduled; don't reschedule */ return NULL; - thread = thread_get(m, type, func, arg, debugargpass); + thread = thread_get(m, type, func, arg, xref); frr_with_mutex(&thread->mtx) { monotime(&thread->u.sands); @@ -951,10 +950,10 @@ funcname_thread_add_timer_timeval(struct thread_master *m, /* Add timer event thread. 
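 * As with the read/write variant, the thread_add_timer*() macros supply the
 * xref argument.  A hedged caller sketch (names are illustrative):
 *
 *    thread_add_timer(master, expire_cb, ctx, 30, &ctx->t_expire);
 *
 * If *t_ptr is already scheduled, _thread_add_timer_timeval() above returns
 * NULL without rescheduling.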
*/ -struct thread *funcname_thread_add_timer(struct thread_master *m, - int (*func)(struct thread *), - void *arg, long timer, - struct thread **t_ptr, debugargdef) +struct thread *_thread_add_timer(const struct xref_threadsched *xref, + struct thread_master *m, + int (*func)(struct thread *), + void *arg, long timer, struct thread **t_ptr) { struct timeval trel; @@ -963,16 +962,16 @@ struct thread *funcname_thread_add_timer(struct thread_master *m, trel.tv_sec = timer; trel.tv_usec = 0; - return funcname_thread_add_timer_timeval(m, func, THREAD_TIMER, arg, - &trel, t_ptr, debugargpass); + return _thread_add_timer_timeval(xref, m, func, THREAD_TIMER, arg, + &trel, t_ptr); } /* Add timer event thread with "millisecond" resolution */ -struct thread *funcname_thread_add_timer_msec(struct thread_master *m, - int (*func)(struct thread *), - void *arg, long timer, - struct thread **t_ptr, - debugargdef) +struct thread *_thread_add_timer_msec(const struct xref_threadsched *xref, + struct thread_master *m, + int (*func)(struct thread *), + void *arg, long timer, + struct thread **t_ptr) { struct timeval trel; @@ -981,29 +980,31 @@ struct thread *funcname_thread_add_timer_msec(struct thread_master *m, trel.tv_sec = timer / 1000; trel.tv_usec = 1000 * (timer % 1000); - return funcname_thread_add_timer_timeval(m, func, THREAD_TIMER, arg, - &trel, t_ptr, debugargpass); + return _thread_add_timer_timeval(xref, m, func, THREAD_TIMER, arg, + &trel, t_ptr); } /* Add timer event thread with "millisecond" resolution */ -struct thread *funcname_thread_add_timer_tv(struct thread_master *m, - int (*func)(struct thread *), - void *arg, struct timeval *tv, - struct thread **t_ptr, debugargdef) +struct thread *_thread_add_timer_tv(const struct xref_threadsched *xref, + struct thread_master *m, + int (*func)(struct thread *), + void *arg, struct timeval *tv, + struct thread **t_ptr) { - return funcname_thread_add_timer_timeval(m, func, THREAD_TIMER, arg, tv, - t_ptr, debugargpass); + return _thread_add_timer_timeval(xref, m, func, THREAD_TIMER, arg, tv, + t_ptr); } /* Add simple event thread. 
*/ -struct thread *funcname_thread_add_event(struct thread_master *m, - int (*func)(struct thread *), - void *arg, int val, - struct thread **t_ptr, debugargdef) +struct thread *_thread_add_event(const struct xref_threadsched *xref, + struct thread_master *m, + int (*func)(struct thread *), + void *arg, int val, struct thread **t_ptr) { struct thread *thread = NULL; - frrtrace(9, frr_libfrr, schedule_event, m, funcname, schedfrom, fromln, + frrtrace(9, frr_libfrr, schedule_event, m, + xref->funcname, xref->xref.file, xref->xref.line, t_ptr, 0, val, arg, 0); assert(m != NULL); @@ -1013,7 +1014,7 @@ struct thread *funcname_thread_add_event(struct thread_master *m, /* thread is already scheduled; don't reschedule */ break; - thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass); + thread = thread_get(m, THREAD_EVENT, func, arg, xref); frr_with_mutex(&thread->mtx) { thread->u.val = val; thread_list_add_tail(&m->event, thread); @@ -1239,8 +1240,9 @@ void thread_cancel(struct thread **thread) master = (*thread)->master; - frrtrace(9, frr_libfrr, thread_cancel, master, (*thread)->funcname, - (*thread)->schedfrom, (*thread)->schedfrom_line, NULL, (*thread)->u.fd, + frrtrace(9, frr_libfrr, thread_cancel, master, + (*thread)->xref->funcname, (*thread)->xref->xref.file, + (*thread)->xref->xref.line, NULL, (*thread)->u.fd, (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec); assert(master->owner == pthread_self()); @@ -1287,8 +1289,8 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread, if (thread && *thread) frrtrace(9, frr_libfrr, thread_cancel_async, master, - (*thread)->funcname, (*thread)->schedfrom, - (*thread)->schedfrom_line, NULL, (*thread)->u.fd, + (*thread)->xref->funcname, (*thread)->xref->xref.file, + (*thread)->xref->xref.line, NULL, (*thread)->u.fd, (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec); else @@ -1363,7 +1365,7 @@ static int thread_process_io_helper(struct thread_master *m, if (!thread) { if ((actual_state & (POLLHUP|POLLIN)) != POLLHUP) flog_err(EC_LIB_NO_THREAD, - "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!\n", + "Attempting to process an I/O event but for fd: %d(%d) no thread to handle this!", m->handler.pfds[pos].fd, actual_state); return 0; } @@ -1673,8 +1675,9 @@ void thread_call(struct thread *thread) GETRUSAGE(&before); thread->real = before.real; - frrtrace(9, frr_libfrr, thread_call, thread->master, thread->funcname, - thread->schedfrom, thread->schedfrom_line, NULL, thread->u.fd, + frrtrace(9, frr_libfrr, thread_call, thread->master, + thread->xref->funcname, thread->xref->xref.file, + thread->xref->xref.line, NULL, thread->u.fd, thread->u.val, thread->arg, thread->u.sands.tv_sec); pthread_setspecific(thread_current, thread); @@ -1724,7 +1727,7 @@ void thread_call(struct thread *thread) flog_warn( EC_LIB_SLOW_THREAD, "SLOW THREAD: task %s (%lx) ran for %lums (cpu time %lums)", - thread->funcname, (unsigned long)thread->func, + thread->xref->funcname, (unsigned long)thread->func, realtime / 1000, cputime / 1000); } #endif /* CONSUMED_TIME_CHECK */ @@ -1732,15 +1735,15 @@ void thread_call(struct thread *thread) } /* Execute thread */ -void funcname_thread_execute(struct thread_master *m, - int (*func)(struct thread *), void *arg, int val, - debugargdef) +void _thread_execute(const struct xref_threadsched *xref, + struct thread_master *m, int (*func)(struct thread *), + void *arg, int val) { struct thread *thread; /* Get or allocate new thread to execute. 
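 * Unlike the thread_add_*() schedulers, thread_execute() runs the callback
 * synchronously before returning; for example, lib/wheel.c further down in
 * this change now simply calls
 *
 *    thread_execute(wheel->master, wheel_timer_thread_helper, wheel, 0);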
*/ frr_with_mutex(&m->mtx) { - thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass); + thread = thread_get(m, THREAD_EVENT, func, arg, xref); /* Set its event value. */ frr_with_mutex(&thread->mtx) { diff --git a/lib/thread.h b/lib/thread.h index eb1b107e7b..6b510fc4c9 100644 --- a/lib/thread.h +++ b/lib/thread.h @@ -27,6 +27,7 @@ #include "monotime.h" #include "frratomic.h" #include "typesafe.h" +#include "xref.h" #ifdef __cplusplus extern "C" { @@ -66,6 +67,14 @@ struct cancel_req { struct thread **threadref; }; +struct xref_threadsched { + struct xref xref; + + const char *funcname; + const char *dest; + uint32_t thread_type; +}; + /* Master of the theads. */ struct thread_master { char *name; @@ -107,9 +116,7 @@ struct thread { struct timeval real; struct cpu_thread_history *hist; /* cache pointer to cpu_history */ unsigned long yield; /* yield time in microseconds */ - const char *funcname; /* name of thread function */ - const char *schedfrom; /* source file thread was scheduled from */ - int schedfrom_line; /* line number of source file */ + const struct xref_threadsched *xref; /* origin location */ pthread_mutex_t mtx; /* mutex for thread.c functions */ }; @@ -156,17 +163,45 @@ struct cpu_thread_history { thread_cancel(&(thread)); \ } while (0) -#define debugargdef const char *funcname, const char *schedfrom, int fromln - -#define thread_add_read(m,f,a,v,t) funcname_thread_add_read_write(THREAD_READ,m,f,a,v,t,#f,__FILE__,__LINE__) -#define thread_add_write(m,f,a,v,t) funcname_thread_add_read_write(THREAD_WRITE,m,f,a,v,t,#f,__FILE__,__LINE__) -#define thread_add_timer(m,f,a,v,t) funcname_thread_add_timer(m,f,a,v,t,#f,__FILE__,__LINE__) -#define thread_add_timer_msec(m,f,a,v,t) funcname_thread_add_timer_msec(m,f,a,v,t,#f,__FILE__,__LINE__) -#define thread_add_timer_tv(m,f,a,v,t) funcname_thread_add_timer_tv(m,f,a,v,t,#f,__FILE__,__LINE__) -#define thread_add_event(m,f,a,v,t) funcname_thread_add_event(m,f,a,v,t,#f,__FILE__,__LINE__) -#define thread_execute(m,f,a,v) funcname_thread_execute(m,f,a,v,#f,__FILE__,__LINE__) -#define thread_execute_name(m, f, a, v, n) \ - funcname_thread_execute(m, f, a, v, n, __FILE__, __LINE__) +/* + * Macro wrappers to generate xrefs for all thread add calls. Includes + * file/line/function info for debugging/tracing. + */ +#include "lib/xref.h" + +#define _xref_t_a(addfn, type, m, f, a, v, t) \ + ({ \ + static const struct xref_threadsched _xref \ + __attribute__((used)) = { \ + .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \ + .funcname = #f, \ + .dest = #t, \ + .thread_type = THREAD_ ## type, \ + }; \ + XREF_LINK(_xref.xref); \ + _thread_add_ ## addfn(&_xref, m, f, a, v, t); \ + }) \ + /* end */ + +#define thread_add_read(m,f,a,v,t) _xref_t_a(read_write, READ, m,f,a,v,t) +#define thread_add_write(m,f,a,v,t) _xref_t_a(read_write, WRITE, m,f,a,v,t) +#define thread_add_timer(m,f,a,v,t) _xref_t_a(timer, TIMER, m,f,a,v,t) +#define thread_add_timer_msec(m,f,a,v,t) _xref_t_a(timer_msec, TIMER, m,f,a,v,t) +#define thread_add_timer_tv(m,f,a,v,t) _xref_t_a(timer_tv, TIMER, m,f,a,v,t) +#define thread_add_event(m,f,a,v,t) _xref_t_a(event, TIMER, m,f,a,v,t) + +#define thread_execute(m,f,a,v) \ + ({ \ + static const struct xref_threadsched _xref \ + __attribute__((used)) = { \ + .xref = XREF_INIT(XREFT_THREADSCHED, NULL, __func__), \ + .funcname = #f, \ + .dest = NULL, \ + .thread_type = THREAD_EXECUTE, \ + }; \ + XREF_LINK(_xref.xref); \ + _thread_execute(&_xref, m, f, a, v); \ + }) /* end */ /* Prototypes. 
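 * The _thread_add_*() declarations below take the xref as their first
 * argument and are not meant to be called directly; use the thread_add_*()
 * macros above, which roughly expand to (sketch only):
 *
 *    static const struct xref_threadsched _xref = { ... };
 *    _thread_add_read_write(&_xref, m, fn, arg, fd, &t);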
*/ extern struct thread_master *thread_master_create(const char *); @@ -174,35 +209,30 @@ void thread_master_set_name(struct thread_master *master, const char *name); extern void thread_master_free(struct thread_master *); extern void thread_master_free_unused(struct thread_master *); -extern struct thread * -funcname_thread_add_read_write(int dir, struct thread_master *, - int (*)(struct thread *), void *, int, - struct thread **, debugargdef); - -extern struct thread *funcname_thread_add_timer(struct thread_master *, - int (*)(struct thread *), - void *, long, struct thread **, - debugargdef); - -extern struct thread * -funcname_thread_add_timer_msec(struct thread_master *, int (*)(struct thread *), - void *, long, struct thread **, debugargdef); - -extern struct thread *funcname_thread_add_timer_tv(struct thread_master *, - int (*)(struct thread *), - void *, struct timeval *, - struct thread **, - debugargdef); - -extern struct thread *funcname_thread_add_event(struct thread_master *, - int (*)(struct thread *), - void *, int, struct thread **, - debugargdef); - -extern void funcname_thread_execute(struct thread_master *, - int (*)(struct thread *), void *, int, - debugargdef); -#undef debugargdef +extern struct thread *_thread_add_read_write( + const struct xref_threadsched *xref, struct thread_master *master, + int (*fn)(struct thread *), void *arg, int fd, struct thread **tref); + +extern struct thread *_thread_add_timer( + const struct xref_threadsched *xref, struct thread_master *master, + int (*fn)(struct thread *), void *arg, long t, struct thread **tref); + +extern struct thread *_thread_add_timer_msec( + const struct xref_threadsched *xref, struct thread_master *master, + int (*fn)(struct thread *), void *arg, long t, struct thread **tref); + +extern struct thread *_thread_add_timer_tv( + const struct xref_threadsched *xref, struct thread_master *master, + int (*fn)(struct thread *), void *arg, struct timeval *tv, + struct thread **tref); + +extern struct thread *_thread_add_event( + const struct xref_threadsched *xref, struct thread_master *master, + int (*fn)(struct thread *), void *arg, int val, struct thread **tref); + +extern void _thread_execute(const struct xref_threadsched *xref, + struct thread_master *master, + int (*fn)(struct thread *), void *arg, int val); extern void thread_cancel(struct thread **event); extern void thread_cancel_async(struct thread_master *, struct thread **, @@ -214,6 +214,53 @@ struct vrf *vrf_get(vrf_id_t vrf_id, const char *name) return vrf; } +/* Update a VRF. If not found, create one. + * Arg: + * name - The name of the vrf. + * vrf_id - The vrf_id of the vrf. + * Description: This function first finds the vrf using its name. If the vrf is + * found and the vrf-id of the existing vrf does not match the new vrf id, it + * will disable the existing vrf and update it with new vrf-id. If the vrf is + * not found, it will create the vrf with given name and the new vrf id. 
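+ *
+ * Rough usage sketch (the VRF name "blue" is illustrative, not from this
+ * change):
+ *
+ *    struct vrf *vrf = vrf_update(new_vrf_id, "blue");
+ *
+ * An existing "blue" keeps its configuration but is disabled and moved to
+ * new_vrf_id in the vrf-id table (it is re-enabled later); an unknown name
+ * falls through to vrf_get() and is created.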
+ */ +struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name) +{ + struct vrf *vrf = NULL; + + /*Treat VRF add for existing vrf as update + * Update VRF ID and also update in VRF ID table + */ + if (name) + vrf = vrf_lookup_by_name(name); + if (vrf && new_vrf_id != VRF_UNKNOWN && vrf->vrf_id != VRF_UNKNOWN + && vrf->vrf_id != new_vrf_id) { + if (debug_vrf) { + zlog_debug( + "Vrf Update event: %s old id: %u, new id: %u", + name, vrf->vrf_id, new_vrf_id); + } + + /*Disable the vrf to simulate implicit delete + * so that all stale routes are deleted + * This vrf will be enabled down the line + */ + vrf_disable(vrf); + + + RB_REMOVE(vrf_id_head, &vrfs_by_id, vrf); + vrf->vrf_id = new_vrf_id; + RB_INSERT(vrf_id_head, &vrfs_by_id, vrf); + + } else { + + /* + * vrf_get is implied creation if it does not exist + */ + vrf = vrf_get(new_vrf_id, name); + } + return vrf; +} + /* Delete a VRF. This is called when the underlying VRF goes away, a * pre-configured VRF is deleted or when shutting down (vrf_terminate()). */ @@ -628,12 +675,12 @@ int vrf_handler_create(struct vty *vty, const char *vrfname, if (strlen(vrfname) > VRF_NAMSIZ) { if (vty) vty_out(vty, - "%% VRF name %s invalid: length exceeds %d bytes\n", + "%% VRF name %s invalid: length exceeds %d bytes", vrfname, VRF_NAMSIZ); else flog_warn( EC_LIB_VRF_LENGTH, - "%% VRF name %s invalid: length exceeds %d bytes\n", + "%% VRF name %s invalid: length exceeds %d bytes", vrfname, VRF_NAMSIZ); return CMD_WARNING_CONFIG_FAILED; } @@ -1064,6 +1111,7 @@ static int lib_vrf_create(struct nb_cb_create_args *args) vrfp = vrf_get(VRF_UNKNOWN, vrfname); + vrf_set_user_cfged(vrfp); nb_running_set_entry(args->dnode, vrfp); return NB_OK; @@ -1089,7 +1137,7 @@ static int lib_vrf_destroy(struct nb_cb_destroy_args *args) vrfp = nb_running_unset_entry(args->dnode); /* Clear configured flag and invoke delete. */ - UNSET_FLAG(vrfp->status, VRF_CONFIGURED); + vrf_reset_user_cfged(vrfp); vrf_delete(vrfp); break; } @@ -114,6 +114,7 @@ extern struct vrf_name_head vrfs_by_name; extern struct vrf *vrf_lookup_by_id(vrf_id_t); extern struct vrf *vrf_lookup_by_name(const char *); extern struct vrf *vrf_get(vrf_id_t, const char *); +extern struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name); extern const char *vrf_id_to_name(vrf_id_t vrf_id); extern vrf_id_t vrf_name_to_id(const char *); @@ -167,6 +168,20 @@ static inline void vrf_reset_user_cfged(struct vrf *vrf) UNSET_FLAG(vrf->status, VRF_CONFIGURED); } +static inline uint32_t vrf_interface_count(struct vrf *vrf) +{ + uint32_t count = 0; + struct interface *ifp; + + RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) { + /* skip the l3mdev */ + if (strncmp(ifp->name, vrf->name, VRF_NAMSIZ) == 0) + continue; + count++; + } + return count; +} + /* * Utilities to obtain the user data */ @@ -73,7 +73,8 @@ enum event { #endif /* VTYSH */ }; -static void vty_event(enum event, int, struct vty *); +static void vty_event_serv(enum event event, int sock); +static void vty_event(enum event, struct vty *); /* Extern host structure from command.c */ extern struct host host; @@ -396,16 +397,6 @@ static void vty_do_window_size(struct vty *vty) vty_out(vty, "%s", cmd); } -#if 0 /* Currently not used. */ -/* Make don't use lflow vty interface. 
*/ -static void -vty_dont_lflow_ahead (struct vty *vty) -{ - unsigned char cmd[] = { IAC, DONT, TELOPT_LFLOW, '\0' }; - vty_out (vty, "%s", cmd); -} -#endif /* 0 */ - /* Authentication of vty */ static void vty_auth(struct vty *vty, char *buf) { @@ -1090,11 +1081,6 @@ static void vty_describe_command(struct vty *vty) vector_free(varcomps); } -#if 0 - vty_out (vty, " %-*s %s\n", width - desc->cmd[0] == '.' ? desc->cmd + 1 : desc->cmd, - desc->str ? desc->str : ""); -#endif /* 0 */ } if ((token = token_cr)) { @@ -1299,6 +1285,7 @@ static int vty_execute(struct vty *vty) #define VTY_NORMAL 0 #define VTY_PRE_ESCAPE 1 #define VTY_ESCAPE 2 +#define VTY_CR 3 /* Escape character command map. */ static void vty_escape_map(unsigned char c, struct vty *vty) @@ -1340,14 +1327,13 @@ static int vty_read(struct thread *thread) int nbytes; unsigned char buf[VTY_READ_BUFSIZ]; - int vty_sock = THREAD_FD(thread); struct vty *vty = THREAD_ARG(thread); /* Read raw data from socket */ if ((nbytes = read(vty->fd, buf, VTY_READ_BUFSIZ)) <= 0) { if (nbytes < 0) { if (ERRNO_IO_RETRY(errno)) { - vty_event(VTY_READ, vty_sock, vty); + vty_event(VTY_READ, vty); return 0; } vty->monitor = 0; /* disable monitoring to avoid @@ -1396,12 +1382,6 @@ static int vty_read(struct thread *thread) case 'Q': vty_buffer_reset(vty); break; -#if 0 /* More line does not work for "show ip bgp". */ - case '\n': - case '\r': - vty->status = VTY_MORELINE; - break; -#endif default: break; } @@ -1444,6 +1424,17 @@ static int vty_read(struct thread *thread) continue; } + if (vty->escape == VTY_CR) { + /* if we get CR+NL, the NL results in an extra empty + * prompt line being printed without this; just drop + * the NL if it immediately follows CR. + */ + vty->escape = VTY_NORMAL; + + if (buf[i] == '\n') + continue; + } + switch (buf[i]) { case CONTROL('A'): vty_beginning_of_line(vty); @@ -1488,9 +1479,12 @@ static int vty_read(struct thread *thread) case CONTROL('Z'): vty_end_config(vty); break; - case '\n': case '\r': + vty->escape = VTY_CR; + /* fallthru */ + case '\n': vty_out(vty, "\n"); + buffer_flush_available(vty->obuf, vty->wfd); vty_execute(vty); break; case '\t': @@ -1521,8 +1515,8 @@ static int vty_read(struct thread *thread) if (vty->status == VTY_CLOSE) vty_close(vty); else { - vty_event(VTY_WRITE, vty->wfd, vty); - vty_event(VTY_READ, vty_sock, vty); + vty_event(VTY_WRITE, vty); + vty_event(VTY_READ, vty); } return 0; } @@ -1532,7 +1526,6 @@ static int vty_flush(struct thread *thread) { int erase; buffer_status_t flushrc; - int vty_sock = THREAD_FD(thread); struct vty *vty = THREAD_ARG(thread); /* Tempolary disable read thread. */ @@ -1544,20 +1537,20 @@ static int vty_flush(struct thread *thread) /* N.B. if width is 0, that means we don't know the window size. */ if ((vty->lines == 0) || (vty->width == 0) || (vty->height == 0)) - flushrc = buffer_flush_available(vty->obuf, vty_sock); + flushrc = buffer_flush_available(vty->obuf, vty->wfd); else if (vty->status == VTY_MORELINE) - flushrc = buffer_flush_window(vty->obuf, vty_sock, vty->width, + flushrc = buffer_flush_window(vty->obuf, vty->wfd, vty->width, 1, erase, 0); else flushrc = buffer_flush_window( - vty->obuf, vty_sock, vty->width, + vty->obuf, vty->wfd, vty->width, vty->lines >= 0 ? 
vty->lines : vty->height, erase, 0); switch (flushrc) { case BUFFER_ERROR: vty->monitor = 0; /* disable monitoring to avoid infinite recursion */ - zlog_info("buffer_flush failed on vty client fd %d, closing", - vty->fd); + zlog_info("buffer_flush failed on vty client fd %d/%d, closing", + vty->fd, vty->wfd); buffer_reset(vty->lbuf); buffer_reset(vty->obuf); vty_close(vty); @@ -1568,14 +1561,14 @@ static int vty_flush(struct thread *thread) else { vty->status = VTY_NORMAL; if (vty->lines == 0) - vty_event(VTY_READ, vty_sock, vty); + vty_event(VTY_READ, vty); } break; case BUFFER_PENDING: /* There is more data waiting to be written. */ vty->status = VTY_MORE; if (vty->lines == 0) - vty_event(VTY_WRITE, vty_sock, vty); + vty_event(VTY_WRITE, vty); break; } @@ -1678,8 +1671,8 @@ static struct vty *vty_create(int vty_sock, union sockunion *su) vty_prompt(vty); /* Add read/write thread. */ - vty_event(VTY_WRITE, vty_sock, vty); - vty_event(VTY_READ, vty_sock, vty); + vty_event(VTY_WRITE, vty); + vty_event(VTY_READ, vty); return vty; } @@ -1735,7 +1728,6 @@ void vty_stdio_resume(void) termios = stdio_orig_termios; termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); - termios.c_oflag &= ~OPOST; termios.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN); termios.c_cflag &= ~(CSIZE | PARENB); termios.c_cflag |= CS8; @@ -1746,8 +1738,8 @@ void vty_stdio_resume(void) vty_prompt(stdio_vty); /* Add read/write thread. */ - vty_event(VTY_WRITE, 1, stdio_vty); - vty_event(VTY_READ, 0, stdio_vty); + vty_event(VTY_WRITE, stdio_vty); + vty_event(VTY_READ, stdio_vty); } void vty_stdio_close(void) @@ -1796,7 +1788,7 @@ static int vty_accept(struct thread *thread) accept_sock = THREAD_FD(thread); /* We continue hearing vty socket. */ - vty_event(VTY_SERV, accept_sock, NULL); + vty_event_serv(VTY_SERV, accept_sock); memset(&su, 0, sizeof(union sockunion)); @@ -1826,7 +1818,7 @@ static int vty_accept(struct thread *thread) close(vty_sock); /* continue accepting connections */ - vty_event(VTY_SERV, accept_sock, NULL); + vty_event_serv(VTY_SERV, accept_sock); return 0; } @@ -1842,7 +1834,7 @@ static int vty_accept(struct thread *thread) close(vty_sock); /* continue accepting connections */ - vty_event(VTY_SERV, accept_sock, NULL); + vty_event_serv(VTY_SERV, accept_sock); return 0; } @@ -1915,7 +1907,7 @@ static void vty_serv_sock_addrinfo(const char *hostname, unsigned short port) continue; } - vty_event(VTY_SERV, sock, NULL); + vty_event_serv(VTY_SERV, sock); } while ((ainfo = ainfo->ai_next) != NULL); freeaddrinfo(ainfo_save); @@ -1993,7 +1985,7 @@ static void vty_serv_un(const char *path) } } - vty_event(VTYSH_SERV, sock, NULL); + vty_event_serv(VTYSH_SERV, sock); } /* #define VTYSH_DEBUG 1 */ @@ -2008,7 +2000,7 @@ static int vtysh_accept(struct thread *thread) accept_sock = THREAD_FD(thread); - vty_event(VTYSH_SERV, accept_sock, NULL); + vty_event_serv(VTYSH_SERV, accept_sock); memset(&client, 0, sizeof(struct sockaddr_un)); client_len = sizeof(struct sockaddr_un); @@ -2042,7 +2034,7 @@ static int vtysh_accept(struct thread *thread) vty->type = VTY_SHELL_SERV; vty->node = VIEW_NODE; - vty_event(VTYSH_READ, sock, vty); + vty_event(VTYSH_READ, vty); return 0; } @@ -2051,7 +2043,7 @@ static int vtysh_flush(struct vty *vty) { switch (buffer_flush_available(vty->obuf, vty->wfd)) { case BUFFER_PENDING: - vty_event(VTYSH_WRITE, vty->wfd, vty); + vty_event(VTYSH_WRITE, vty); break; case BUFFER_ERROR: vty->monitor = @@ -2084,7 +2076,7 @@ static int vtysh_read(struct thread *thread) if 
((nbytes = read(sock, buf, VTY_READ_BUFSIZ)) <= 0) { if (nbytes < 0) { if (ERRNO_IO_RETRY(errno)) { - vty_event(VTYSH_READ, sock, vty); + vty_event(VTYSH_READ, vty); return 0; } vty->monitor = 0; /* disable monitoring to avoid @@ -2150,7 +2142,7 @@ static int vtysh_read(struct thread *thread) if (vty->status == VTY_CLOSE) vty_close(vty); else - vty_event(VTYSH_READ, sock, vty); + vty_event(VTYSH_READ, vty); return 0; } @@ -2657,33 +2649,44 @@ int vty_config_node_exit(struct vty *vty) /* Master of the threads. */ static struct thread_master *vty_master; -static void vty_event(enum event event, int sock, struct vty *vty) +static void vty_event_serv(enum event event, int sock) { struct thread *vty_serv_thread = NULL; switch (event) { case VTY_SERV: - vty_serv_thread = thread_add_read(vty_master, vty_accept, vty, - sock, NULL); + vty_serv_thread = thread_add_read(vty_master, vty_accept, + NULL, sock, NULL); vector_set_index(Vvty_serv_thread, sock, vty_serv_thread); break; #ifdef VTYSH case VTYSH_SERV: - vty_serv_thread = thread_add_read(vty_master, vtysh_accept, vty, - sock, NULL); + vty_serv_thread = thread_add_read(vty_master, vtysh_accept, + NULL, sock, NULL); vector_set_index(Vvty_serv_thread, sock, vty_serv_thread); break; +#endif /* VTYSH */ + default: + assert(!"vty_event_serv() called incorrectly"); + } +} + +static void vty_event(enum event event, struct vty *vty) +{ + switch (event) { +#ifdef VTYSH case VTYSH_READ: - thread_add_read(vty_master, vtysh_read, vty, sock, + thread_add_read(vty_master, vtysh_read, vty, vty->fd, &vty->t_read); break; case VTYSH_WRITE: - thread_add_write(vty_master, vtysh_write, vty, sock, + thread_add_write(vty_master, vtysh_write, vty, vty->wfd, &vty->t_write); break; #endif /* VTYSH */ case VTY_READ: - thread_add_read(vty_master, vty_read, vty, sock, &vty->t_read); + thread_add_read(vty_master, vty_read, vty, vty->fd, + &vty->t_read); /* Time out treatment. */ if (vty->v_timeout) { @@ -2693,7 +2696,7 @@ static void vty_event(enum event event, int sock, struct vty *vty) } break; case VTY_WRITE: - thread_add_write(vty_master, vty_flush, vty, sock, + thread_add_write(vty_master, vty_flush, vty, vty->wfd, &vty->t_write); break; case VTY_TIMEOUT_RESET: @@ -2702,6 +2705,8 @@ static void vty_event(enum event event, int sock, struct vty *vty) thread_add_timer(vty_master, vty_timeout, vty, vty->v_timeout, &vty->t_timeout); break; + default: + assert(!"vty_event() called incorrectly"); } } @@ -2748,7 +2753,7 @@ static int exec_timeout(struct vty *vty, const char *min_str, vty_timeout_val = timeout; vty->v_timeout = timeout; - vty_event(VTY_TIMEOUT_RESET, 0, vty); + vty_event(VTY_TIMEOUT_RESET, vty); return CMD_SUCCESS; diff --git a/lib/wheel.c b/lib/wheel.c index f5e5cc52c3..5bdd6292f9 100644 --- a/lib/wheel.c +++ b/lib/wheel.c @@ -73,8 +73,7 @@ static int wheel_timer_thread(struct thread *t) wheel = THREAD_ARG(t); - thread_execute_name(wheel->master, wheel_timer_thread_helper, - wheel, 0, wheel->name); + thread_execute(wheel->master, wheel_timer_thread_helper, wheel, 0); return 0; } diff --git a/lib/workqueue.c b/lib/workqueue.c index f8e4677220..8eabdf52e7 100644 --- a/lib/workqueue.c +++ b/lib/workqueue.c @@ -377,11 +377,6 @@ stats: if (yielded) wq->yields++; -#if 0 - printf ("%s: cycles %d, new: best %d, worst %d\n", - __func__, cycles, wq->cycles.best, wq->cycles.granularity); -#endif - /* Is the queue done yet? If it is, call the completion callback. 
*/ if (!work_queue_empty(wq)) { if (ret == WQ_RETRY_LATER || diff --git a/lib/xref.c b/lib/xref.c new file mode 100644 index 0000000000..40efe51363 --- /dev/null +++ b/lib/xref.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2017-20 David Lamparter, for NetDEF, Inc. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include <pthread.h> +#include <signal.h> +#include <inttypes.h> + +#include "xref.h" +#include "vty.h" +#include "jhash.h" +#include "sha256.h" +#include "memory.h" +#include "hash.h" + +struct xref_block *xref_blocks; +static struct xref_block **xref_block_last = &xref_blocks; + +static void base32(uint8_t **inpos, int *bitpos, + char *out, size_t n_chars) +{ + static const char base32ch[] = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"; + + char *opos = out; + uint8_t *in = *inpos; + int bp = *bitpos; + + while (opos < out + n_chars) { + uint32_t bits = in[0] | (in[1] << 8); + + if (bp == -1) + bits |= 0x10; + else + bits >>= bp; + + *opos++ = base32ch[bits & 0x1f]; + + bp += 5; + if (bp >= 8) + in++, bp -= 8; + } + *opos = '\0'; + *inpos = in; + *bitpos = bp; +} + +static void xref_add_one(const struct xref *xref) +{ + SHA256_CTX sha; + struct xrefdata *xrefdata; + + const char *filename, *p, *q; + uint8_t hash[32], *h = hash; + uint32_t be_val; + int bitpos; + + if (!xref || !xref->xrefdata) + return; + + xrefdata = xref->xrefdata; + xrefdata->xref = xref; + + if (!xrefdata->hashstr) + return; + + /* as far as the unique ID is concerned, only use the last + * directory name + filename, e.g. "bgpd/bgp_route.c". This + * gives a little leeway in moving things and avoids IDs being + * screwed up by out of tree builds or absolute pathnames. 
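+ *
+ * For illustration: both "/home/frr/bgpd/bgp_route.c" and
+ * "../bgpd/bgp_route.c" reduce to "bgpd/bgp_route.c" here, so the base32
+ * UID of the form "XXXXX-XXXXX" computed below from SHA256 over the
+ * trimmed filename, hashstr and the two hashu32 words stays stable across
+ * build setups.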
+ */ + filename = xref->file; + p = strrchr(filename, '/'); + if (p) { + q = memrchr(filename, '/', p - filename); + if (q) + filename = q + 1; + else + filename = p + 1; + } + + SHA256_Init(&sha); + SHA256_Update(&sha, filename, strlen(filename)); + SHA256_Update(&sha, xrefdata->hashstr, + strlen(xrefdata->hashstr)); + be_val = htonl(xrefdata->hashu32[0]); + SHA256_Update(&sha, &be_val, sizeof(be_val)); + be_val = htonl(xrefdata->hashu32[1]); + SHA256_Update(&sha, &be_val, sizeof(be_val)); + SHA256_Final(hash, &sha); + + bitpos = -1; + base32(&h, &bitpos, &xrefdata->uid[0], 5); + xrefdata->uid[5] = '-'; + base32(&h, &bitpos, &xrefdata->uid[6], 5); +} + +void xref_gcc_workaround(const struct xref *xref) +{ + xref_add_one(xref); +} + +void xref_block_add(struct xref_block *block) +{ + const struct xref * const *xrefp; + + *xref_block_last = block; + xref_block_last = &block->next; + + for (xrefp = block->start; xrefp < block->stop; xrefp++) + xref_add_one(*xrefp); +} diff --git a/lib/xref.h b/lib/xref.h new file mode 100644 index 0000000000..b3243fa058 --- /dev/null +++ b/lib/xref.h @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2017-20 David Lamparter, for NetDEF, Inc. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _FRR_XREF_H +#define _FRR_XREF_H + +#include <stdint.h> +#include <stdlib.h> +#include <limits.h> +#include <errno.h> +#include "compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum xref_type { + XREFT_NONE = 0, + + XREFT_THREADSCHED = 0x100, + + XREFT_LOGMSG = 0x200, + + XREFT_DEFUN = 0x300, + XREFT_INSTALL_ELEMENT = 0x301, +}; + +/* struct xref is the "const" part; struct xrefdata is the writable part. */ +struct xref; +struct xrefdata; + +struct xref { + /* this may be NULL, depending on the type of the xref. + * if it is NULL, the xref has no unique ID and cannot be accessed + * through that mechanism. + */ + struct xrefdata *xrefdata; + + /* type isn't generally needed at runtime */ + enum xref_type type; + + /* code location */ + int line; + const char *file; + const char *func; + + /* -- 32 bytes (on 64bit) -- */ + + /* type-specific bits appended by embedding this struct */ +}; + +struct xrefdata { + /* pointer back to the const part; this will be initialized at + * program startup by xref_block_add(). (Creating structs with + * cyclic pointers to each other is not easily possible for + * function-scoped static variables.) + * + * There is no xrefdata w/o xref, but there are xref w/o xrefdata. + */ + const struct xref *xref; + + /* base32(crockford) of unique ID. not all bytes are used, but + * let's pad to 16 for simplicity + */ + char uid[16]; + + /* hash/uid input + * if hashstr is NULL, no UID is assigned/calculated. Use macro + * string concatenation if multiple values need to be fed in. 
+ * (This is here to not make the UID calculation independent of + * xref type.) + */ + const char *hashstr; + uint32_t hashu32[2]; + + /* -- 32 bytes (on 64bit) -- */ +}; + +/* linker "magic" is used to create an array of pointers to struct xref. + * the result is a contiguous block of pointers, each pointing to an xref + * somewhere in the code. The linker gives us start and end pointers, we + * stuff those into the struct below and hook up a constructor to run at + * program startup with the struct passed. + * + * Placing the xrefs themselves into an array doesn't work because they'd + * need to be constant size, but we're embedding struct xref into other + * container structs with extra data. Also this means that external code + * (like the python xref dumper) can safely ignore extra data at the end of + * xrefs without needing to account for size in iterating the array. + * + * If you're curious, this is also how __attribute__((constructor)) (and + * destructor) are implemented - there are 2 arrays, ".init_array" and + * ".fini_array", containing function pointers. The magic turns out to be + * quite mundane, actually ;) + * + * The slightly tricky bit is that this is a per-object (i.e. per shared + * library & daemon) thing and we need a bit of help (in XREF_SETUP) to + * initialize correctly. + */ + +struct xref_block { + struct xref_block *next; + const struct xref * const *start; + const struct xref * const *stop; +}; + +extern struct xref_block *xref_blocks; +extern void xref_block_add(struct xref_block *block); +extern void xref_gcc_workaround(const struct xref *xref); + +#ifndef HAVE_SECTION_SYMS +/* we have a build system patch to use GNU ld on Solaris; if that doesn't + * work we end up on Solaris ld which doesn't support the section start/end + * symbols. + */ +#define XREF_SETUP() \ + CPP_NOTICE("Missing linker support for section arrays. Solaris ld?") +#else +/* the actual symbols that the linker provides for us. Note these are + * _symbols_ referring to the actual section start/end, i.e. they are very + * much NOT _pointers_, rather the symbol *value* is the pointer. Declaring + * them as size-1 arrays is the "best" / "right" thing. + */ +extern const struct xref * const __start_xref_array[1] DSO_LOCAL; +extern const struct xref * const __stop_xref_array[1] DSO_LOCAL; + +/* this macro is invoked once for each standalone DSO through + * FRR_MODULE_SETUP \ + * }-> FRR_COREMOD_SETUP -> XREF_SETUP + * FRR_DAEMON_INFO / + */ +#define XREF_SETUP() \ + static const struct xref _dummy_xref = { \ + /* .xrefdata = */ NULL, \ + /* .type = */ XREFT_NONE, \ + /* .line = */ __LINE__, \ + /* .file = */ __FILE__, \ + /* .func = */ "dummy", \ + }; \ + static const struct xref * const _dummy_xref_p \ + __attribute__((used, section("xref_array"))) \ + = &_dummy_xref; \ + static void __attribute__((used, _CONSTRUCTOR(1100))) \ + _xref_init(void) { \ + static struct xref_block _xref_block = { \ + .start = __start_xref_array, \ + .stop = __stop_xref_array, \ + }; \ + xref_block_add(&_xref_block); \ + } \ + asm(XREF_NOTE); \ + /* end */ + +/* the following blurb emits an ELF note indicating start and end of the xref + * array in the binary. This is technically the "correct" entry point for + * external tools reading xrefs out of an ELF shared library or executable. + * + * right now, the extraction tools use the section header for "xref_array" + * instead; however, section headers are technically not necessarily preserved + * for fully linked libraries or executables. 
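
The linker trick described above is not FRR-specific: for any ELF section whose name is a valid C identifier, GNU ld (and compatible linkers) synthesizes __start_<section> and __stop_<section> symbols bounding it. A minimal standalone demonstration of the idea follows; every name in it (demo_array, struct item) is invented for the example and none of it is FRR code:

/* Build with gcc/clang on an ELF platform; GNU ld or a compatible
 * linker provides the __start_/__stop_ symbols automatically. */
#include <stdio.h>

struct item {
	const char *name;
};

/* each use drops one pointer into the "demo_array" section */
#define REGISTER_ITEM(var)                                                    \
	static const struct item *const var##_ptr                            \
		__attribute__((used, section("demo_array"))) = &(var)

extern const struct item *const __start_demo_array[];
extern const struct item *const __stop_demo_array[];

static const struct item first = { "first" };
REGISTER_ITEM(first);

static const struct item second = { "second" };
REGISTER_ITEM(second);

int main(void)
{
	const struct item *const *iter;

	for (iter = __start_demo_array; iter < __stop_demo_array; iter++)
		printf("registered: %s\n", (*iter)->name);
	return 0;
}

XREF_SETUP() applies the same idea to the "xref_array" section, handing the start/stop pointers to xref_block_add() from a constructor once per daemon or module.
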
(In practice they are only + * stripped by obfuscation tools.) + * + * conversely, for reading xrefs out of a single relocatable object file (e.g. + * bar.o), section headers are the right thing to look at since the note is + * only emitted for the final binary once. + * + * FRR itself does not need this note to operate correctly, so if you have + * some build issue with it just add -DFRR_XREF_NO_NOTE to your build flags + * to disable it. + */ +#ifdef FRR_XREF_NO_NOTE +#define XREF_NOTE "" +#else + +#if __SIZEOF_POINTER__ == 4 +#define _NOTE_2PTRSIZE "8" +#define _NOTE_PTR ".long" +#elif __SIZEOF_POINTER__ == 8 +#define _NOTE_2PTRSIZE "16" +#define _NOTE_PTR ".quad" +#else +#error unsupported pointer size +#endif + +#ifdef __arm__ +# define asmspecial "%" +#else +# define asmspecial "@" +#endif + +#define XREF_NOTE \ + "" "\n"\ + " .type _frr_xref_note," asmspecial "object" "\n"\ + " .pushsection .note.FRR,\"a\"," asmspecial "note" "\n"\ + " .p2align 2" "\n"\ + "_frr_xref_note:" "\n"\ + " .long 9" "\n"\ + " .long " _NOTE_2PTRSIZE "\n"\ + " .ascii \"XREF\"" "\n"\ + " .ascii \"FRRouting\\0\\0\\0\"" "\n"\ + " " _NOTE_PTR " __start_xref_array-." "\n"\ + " " _NOTE_PTR " __stop_xref_array-." "\n"\ + " .size _frr_xref_note, .-_frr_xref_note" "\n"\ + " .popsection" "\n"\ + "" "\n"\ + /* end */ +#endif + +#endif /* HAVE_SECTION_SYMS */ + +/* emit the array entry / pointer to xref */ +#if defined(__clang__) || !defined(__cplusplus) +#define XREF_LINK(dst) \ + static const struct xref * const NAMECTR(xref_p_) \ + __attribute__((used, section("xref_array"))) \ + = &(dst) \ + /* end */ + +#else /* GCC && C++ */ +/* workaround for GCC bug 41091 (dated 2009), added in 2021... + * + * this breaks extraction of xrefs with xrelfo.py (because the xref_array + * entry will be missing), but provides full runtime functionality. To get + * the proper list of xrefs from C++ code, build with clang... + */ +struct _xref_p { + const struct xref * const ptr; + + _xref_p(const struct xref *_ptr) : ptr(_ptr) + { + xref_gcc_workaround(_ptr); + } +}; + +#define XREF_LINK(dst) \ + static const struct _xref_p __attribute__((used)) \ + NAMECTR(xref_p_)(&(dst)) \ + /* end */ +#endif + +/* initializer for a "struct xref" */ +#define XREF_INIT(type_, xrefdata_, func_) \ + { \ + /* .xrefdata = */ (xrefdata_), \ + /* .type = */ (type_), \ + /* .line = */ __LINE__, \ + /* .file = */ __FILE__, \ + /* .func = */ func_, \ + } \ + /* end */ + +/* use with XREF_INIT when outside of a function, i.e. 
no __func__ */ +#define XREF_NO_FUNC "<global>" + +#ifdef __cplusplus +} +#endif + +#endif /* _FRR_XREF_H */ diff --git a/lib/yang.c b/lib/yang.c index a3e2a395d7..383dc9f5eb 100644 --- a/lib/yang.c +++ b/lib/yang.c @@ -92,6 +92,7 @@ static const char *const frr_native_modules[] = { "frr-isisd", "frr-vrrpd", "frr-zebra", + "frr-pathd", }; /* clang-format on */ diff --git a/lib/zclient.c b/lib/zclient.c index cb4555650d..20c285cf7f 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -40,6 +40,7 @@ #include "nexthop_group.h" #include "lib_errors.h" #include "srte.h" +#include "printfrr.h" DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient") DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs") @@ -995,17 +996,24 @@ done: return ret; } -int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg) +static int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg) { int i; if (cmd != ZEBRA_NHG_DEL && cmd != ZEBRA_NHG_ADD) { flog_err(EC_LIB_ZAPI_ENCODE, - "%s: Specified zapi NHG command (%d) doesn't exist\n", + "%s: Specified zapi NHG command (%d) doesn't exist", __func__, cmd); return -1; } + if (api_nhg->nexthop_num >= MULTIPATH_NUM || + api_nhg->backup_nexthop_num >= MULTIPATH_NUM) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: zapi NHG encode with invalid input", __func__); + return -1; + } + stream_reset(s); zclient_create_header(s, cmd, VRF_DEFAULT); @@ -1023,7 +1031,6 @@ int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg) zapi_nexthop_encode(s, &api_nhg->nexthops[i], 0, 0); /* Backup nexthops */ - stream_putw(s, api_nhg->backup_nexthop_num); for (i = 0; i < api_nhg->backup_nexthop_num; i++) @@ -1058,7 +1065,7 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) if (api->type >= ZEBRA_ROUTE_MAX) { flog_err(EC_LIB_ZAPI_ENCODE, - "%s: Specified route type (%u) is not a legal value\n", + "%s: Specified route type (%u) is not a legal value", __func__, api->type); return -1; } @@ -1070,7 +1077,7 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api) if (api->safi < SAFI_UNICAST || api->safi >= SAFI_MAX) { flog_err(EC_LIB_ZAPI_ENCODE, - "%s: Specified route SAFI (%u) is not a legal value\n", + "%s: Specified route SAFI (%u) is not a legal value", __func__, api->safi); return -1; } @@ -1285,7 +1292,7 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api) STREAM_GETC(s, api->type); if (api->type >= ZEBRA_ROUTE_MAX) { flog_err(EC_LIB_ZAPI_ENCODE, - "%s: Specified route type: %d is not a legal value\n", + "%s: Specified route type: %d is not a legal value", __func__, api->type); return -1; } @@ -1296,7 +1303,7 @@ int zapi_route_decode(struct stream *s, struct zapi_route *api) STREAM_GETC(s, api->safi); if (api->safi < SAFI_UNICAST || api->safi >= SAFI_MAX) { flog_err(EC_LIB_ZAPI_ENCODE, - "%s: Specified route SAFI (%u) is not a legal value\n", + "%s: Specified route SAFI (%u) is not a legal value", __func__, api->safi); return -1; } @@ -3297,7 +3304,7 @@ static void zclient_capability_decode(ZAPI_CALLBACK_ARGS) if (vrf_backend < 0 || vrf_configure_backend(vrf_backend)) { flog_err(EC_LIB_ZAPI_ENCODE, - "%s: Garbage VRF backend type: %d\n", __func__, + "%s: Garbage VRF backend type: %d", __func__, vrf_backend); goto stream_failure; } @@ -4121,3 +4128,51 @@ uint32_t zclient_get_nhg_start(uint32_t proto) return ZEBRA_NHG_PROTO_SPACING * proto; } + +char *zclient_dump_route_flags(uint32_t flags, char *buf, size_t len) +{ + if (flags == 0) { + snprintfrr(buf, len, "None "); + return buf; + } + + 
snprintfrr( + buf, len, "%s%s%s%s%s%s%s%s%s%s", + CHECK_FLAG(flags, ZEBRA_FLAG_ALLOW_RECURSION) ? "Recursion " + : "", + CHECK_FLAG(flags, ZEBRA_FLAG_SELFROUTE) ? "Self " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_IBGP) ? "iBGP " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_SELECTED) ? "Selected " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_FIB_OVERRIDE) ? "Override " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_EVPN_ROUTE) ? "Evpn " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_RR_USE_DISTANCE) ? "RR Distance " + : "", + CHECK_FLAG(flags, ZEBRA_FLAG_TRAPPED) ? "Trapped " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_OFFLOADED) ? "Offloaded " : "", + CHECK_FLAG(flags, ZEBRA_FLAG_OFFLOAD_FAILED) ? "Offload Failed " + : ""); + return buf; +} + +char *zclient_evpn_dump_macip_flags(uint8_t flags, char *buf, size_t len) +{ + if (flags == 0) { + snprintfrr(buf, len, "None "); + return buf; + } + + snprintfrr( + buf, len, "%s%s%s%s%s%s%s", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_STICKY) ? "Sticky MAC " : "", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_GW) ? "Gateway MAC " : "", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_ROUTER_FLAG) ? "Router " + : "", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_OVERRIDE_FLAG) ? "Override " + : "", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_SVI_IP) ? "SVI MAC " : "", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT) ? "Proxy " + : "", + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_SYNC_PATH) ? "Sync " : ""); + + return buf; +} diff --git a/lib/zclient.h b/lib/zclient.h index 2af448a20c..cf52ea91a0 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -479,6 +479,7 @@ struct zapi_route { uint8_t type; unsigned short instance; + /* If you add flags, update zclient_dump_route_flags */ uint32_t flags; /* * Cause Zebra to consider this routes nexthops recursively @@ -580,6 +581,8 @@ struct zapi_route { } opaque; }; +extern char *zclient_dump_route_flags(uint32_t flags, char *buf, size_t len); + struct zapi_labels { uint8_t message; #define ZAPI_LABELS_FTN 0x01 @@ -634,6 +637,52 @@ struct zapi_pw_status { uint32_t status; }; +/* IGP instance data associated to a RLFA. */ +struct zapi_rlfa_igp { + vrf_id_t vrf_id; + int protocol; + union { + struct { + char area_tag[32]; + struct { + int tree_id; + int level; + unsigned int run_id; + } spf; + } isis; + }; +}; + +/* IGP -> LDP RLFA (un)registration message. */ +struct zapi_rlfa_request { + /* IGP instance data. */ + struct zapi_rlfa_igp igp; + + /* Destination prefix. */ + struct prefix destination; + + /* PQ node address. */ + struct in_addr pq_address; +}; + +/* LDP -> IGP RLFA label update. */ +struct zapi_rlfa_response { + /* IGP instance data. */ + struct zapi_rlfa_igp igp; + + /* Destination prefix. */ + struct prefix destination; + + /* Resolved LDP labels. */ + mpls_label_t pq_label; + uint16_t nexthop_num; + struct { + int family; + union g_addr gate; + mpls_label_t label; + } nexthops[MULTIPATH_NUM]; +}; + enum zapi_route_notify_owner { ZAPI_ROUTE_FAIL_INSTALL, ZAPI_ROUTE_BETTER_ADMIN_WON, @@ -722,8 +771,11 @@ zapi_rule_notify_owner2str(enum zapi_rule_notify_owner note) #define ZEBRA_MACIP_TYPE_PROXY_ADVERT 0x20 /* Not locally active */ #define ZEBRA_MACIP_TYPE_SYNC_PATH 0x40 /* sync path */ /* XXX - flags is an u8; that needs to be changed to u32 if you need - * to allocate past 0x80 + * to allocate past 0x80. Additionally touch zclient_evpn_dump_macip_flags */ +#define MACIP_BUF_SIZE 128 +extern char *zclient_evpn_dump_macip_flags(uint8_t flags, char *buf, + size_t len); /* Zebra ES VTEP flags (ZEBRA_REMOTE_ES_VTEP_ADD) */ /* ESR has been rxed from the VTEP. 
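
A small usage sketch for the new flag dumpers added to zclient above; the 256-byte buffer is an arbitrary choice for the example (zclient.h only suggests MACIP_BUF_SIZE, and only for the MAC-IP variant):

#include "zclient.h"	/* struct zapi_route, zclient_dump_route_flags() */
#include "zlog.h"

/* illustrative only: log the decoded flag names for a received route */
static void log_route_flags(const struct zapi_route *api)
{
	char flag_buf[256];

	zlog_debug("route %pFX flags: %s", &api->prefix,
		   zclient_dump_route_flags(api->flags, flag_buf,
					    sizeof(flag_buf)));
}
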
Only VTEPs that have advertised the @@ -969,9 +1021,7 @@ bool zapi_ipset_notify_decode(struct stream *s, uint32_t *unique, enum zapi_ipset_notify_owner *note); - -extern int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg); -extern int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg); +/* Nexthop-group message apis */ extern enum zclient_send_status zclient_nhg_send(struct zclient *zclient, int cmd, struct zapi_nhg *api_nhg); @@ -1091,6 +1141,12 @@ enum zapi_opaque_registry { LDP_IGP_SYNC_IF_STATE_UPDATE = 4, /* Announce that LDP is up */ LDP_IGP_SYNC_ANNOUNCE_UPDATE = 5, + /* Register RLFA with LDP */ + LDP_RLFA_REGISTER = 7, + /* Unregister all RLFAs with LDP */ + LDP_RLFA_UNREGISTER_ALL = 8, + /* Announce LDP labels associated to a previously registered RLFA */ + LDP_RLFA_LABELS = 9, }; /* Send the hello message. diff --git a/lib/zlog.c b/lib/zlog.c index e77feec5f2..51509e24f4 100644 --- a/lib/zlog.c +++ b/lib/zlog.c @@ -94,6 +94,7 @@ struct zlog_msg { const char *fmt; va_list args; + const struct xref_logmsg *xref; char *stackbuf; size_t stackbufsz; @@ -349,12 +350,14 @@ void zlog_tls_buffer_flush(void) } -static void vzlog_notls(int prio, const char *fmt, va_list ap) +static void vzlog_notls(const struct xref_logmsg *xref, int prio, + const char *fmt, va_list ap) { struct zlog_target *zt; struct zlog_msg stackmsg = { .prio = prio & LOG_PRIMASK, .fmt = fmt, + .xref = xref, }, *msg = &stackmsg; char stackbuf[512]; @@ -379,8 +382,8 @@ static void vzlog_notls(int prio, const char *fmt, va_list ap) XFREE(MTYPE_LOG_MESSAGE, msg->text); } -static void vzlog_tls(struct zlog_tls *zlog_tls, int prio, - const char *fmt, va_list ap) +static void vzlog_tls(struct zlog_tls *zlog_tls, const struct xref_logmsg *xref, + int prio, const char *fmt, va_list ap) { struct zlog_target *zt; struct zlog_msg *msg; @@ -413,6 +416,7 @@ static void vzlog_tls(struct zlog_tls *zlog_tls, int prio, msg->stackbufsz = TLS_LOG_BUF_SIZE - zlog_tls->bufpos - 1; msg->fmt = fmt; msg->prio = prio & LOG_PRIMASK; + msg->xref = xref; if (msg->prio < LOG_INFO) immediate = true; @@ -447,7 +451,8 @@ static void vzlog_tls(struct zlog_tls *zlog_tls, int prio, XFREE(MTYPE_LOG_MESSAGE, msg->text); } -void vzlog(int prio, const char *fmt, va_list ap) +void vzlogx(const struct xref_logmsg *xref, int prio, + const char *fmt, va_list ap) { struct zlog_tls *zlog_tls = zlog_tls_get(); @@ -480,9 +485,9 @@ void vzlog(int prio, const char *fmt, va_list ap) #endif if (zlog_tls) - vzlog_tls(zlog_tls, prio, fmt, ap); + vzlog_tls(zlog_tls, xref, prio, fmt, ap); else - vzlog_notls(prio, fmt, ap); + vzlog_notls(xref, prio, fmt, ap); } void zlog_sigsafe(const char *text, size_t len) @@ -516,6 +521,11 @@ int zlog_msg_prio(struct zlog_msg *msg) return msg->prio; } +const struct xref_logmsg *zlog_msg_xref(struct zlog_msg *msg) +{ + return msg->xref; +} + const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen) { if (!msg->text) { diff --git a/lib/zlog.h b/lib/zlog.h index 1c5013746b..bdf59fa68e 100644 --- a/lib/zlog.h +++ b/lib/zlog.h @@ -38,6 +38,20 @@ extern char zlog_prefix[]; extern size_t zlog_prefixsz; extern int zlog_tmpdirfd; +struct xref_logmsg { + struct xref xref; + + const char *fmtstring; + uint32_t priority; + uint32_t ec; +}; + +struct xrefdata_logmsg { + struct xrefdata xrefdata; + + /* nothing more here right now */ +}; + /* These functions are set up to write to stdout/stderr without explicit * initialization and/or before config load. There is no need to call e.g. * fprintf(stderr, ...) 
just because it's "too early" at startup. Depending @@ -45,7 +59,9 @@ extern int zlog_tmpdirfd; * determine wether something is a log message or something else. */ -extern void vzlog(int prio, const char *fmt, va_list ap); +extern void vzlogx(const struct xref_logmsg *xref, int prio, + const char *fmt, va_list ap); +#define vzlog(prio, ...) vzlogx(NULL, prio, __VA_ARGS__) PRINTFRR(2, 3) static inline void zlog(int prio, const char *fmt, ...) @@ -57,11 +73,61 @@ static inline void zlog(int prio, const char *fmt, ...) va_end(ap); } -#define zlog_err(...) zlog(LOG_ERR, __VA_ARGS__) -#define zlog_warn(...) zlog(LOG_WARNING, __VA_ARGS__) -#define zlog_info(...) zlog(LOG_INFO, __VA_ARGS__) -#define zlog_notice(...) zlog(LOG_NOTICE, __VA_ARGS__) -#define zlog_debug(...) zlog(LOG_DEBUG, __VA_ARGS__) +PRINTFRR(2, 3) +static inline void zlog_ref(const struct xref_logmsg *xref, + const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vzlogx(xref, xref->priority, fmt, ap); + va_end(ap); +} + +#define _zlog_ref(prio, msg, ...) do { \ + static struct xrefdata _xrefdata = { \ + .hashstr = (msg), \ + .hashu32 = { (prio), 0 }, \ + }; \ + static const struct xref_logmsg _xref __attribute__((used)) = {\ + .xref = XREF_INIT(XREFT_LOGMSG, &_xrefdata, __func__), \ + .fmtstring = (msg), \ + .priority = (prio), \ + }; \ + XREF_LINK(_xref.xref); \ + zlog_ref(&_xref, (msg), ## __VA_ARGS__); \ + } while (0) + +#define zlog_err(...) _zlog_ref(LOG_ERR, __VA_ARGS__) +#define zlog_warn(...) _zlog_ref(LOG_WARNING, __VA_ARGS__) +#define zlog_info(...) _zlog_ref(LOG_INFO, __VA_ARGS__) +#define zlog_notice(...) _zlog_ref(LOG_NOTICE, __VA_ARGS__) +#define zlog_debug(...) _zlog_ref(LOG_DEBUG, __VA_ARGS__) + +#define _zlog_ecref(ec_, prio, msg, ...) do { \ + static struct xrefdata _xrefdata = { \ + .hashstr = (msg), \ + .hashu32 = { (prio), (ec_) }, \ + }; \ + static const struct xref_logmsg _xref __attribute__((used)) = {\ + .xref = XREF_INIT(XREFT_LOGMSG, &_xrefdata, __func__), \ + .fmtstring = (msg), \ + .priority = (prio), \ + .ec = (ec_), \ + }; \ + XREF_LINK(_xref.xref); \ + zlog_ref(&_xref, "[EC %u] " msg, ec_, ## __VA_ARGS__); \ + } while (0) + +#define flog_err(ferr_id, format, ...) \ + _zlog_ecref(ferr_id, LOG_ERR, format, ## __VA_ARGS__) +#define flog_warn(ferr_id, format, ...) \ + _zlog_ecref(ferr_id, LOG_WARNING, format, ## __VA_ARGS__) + +#define flog_err_sys(ferr_id, format, ...) \ + flog_err(ferr_id, format, ##__VA_ARGS__) +#define flog(priority, ferr_id, format, ...) \ + zlog(priority, "[EC %u] " format, ferr_id, ##__VA_ARGS__) extern void zlog_sigsafe(const char *text, size_t len); @@ -83,6 +149,7 @@ extern void zlog_sigsafe(const char *text, size_t len); struct zlog_msg; extern int zlog_msg_prio(struct zlog_msg *msg); +extern const struct xref_logmsg *zlog_msg_xref(struct zlog_msg *msg); /* pass NULL as textlen if you don't need it. 
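
Since every zlog_msg now carries its xref (when the call went through the macros above), a logging target can retrieve the per-callsite unique ID at output time. A hedged sketch follows; the target registration plumbing is omitted and fprintf merely stands in for a real sink:

#include <stdio.h>
#include "zlog.h"	/* zlog_msg_text(), zlog_msg_xref() */

/* illustrative only: prefix each message with its "XXXXX-XXXXX" UID */
static void example_log_one(struct zlog_msg *msg)
{
	const struct xref_logmsg *xref = zlog_msg_xref(msg);
	size_t textlen;
	const char *text = zlog_msg_text(msg, &textlen);

	if (xref && xref->xref.xrefdata)
		fprintf(stderr, "[%s] %s\n", xref->xref.xrefdata->uid, text);
	else
		fprintf(stderr, "%s\n", text);
}
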
*/ extern const char *zlog_msg_text(struct zlog_msg *msg, size_t *textlen); diff --git a/nhrpd/README.kernel b/nhrpd/README.kernel index 5831316f1f..067ff9838c 100644 --- a/nhrpd/README.kernel +++ b/nhrpd/README.kernel @@ -32,6 +32,7 @@ This list tries to collect them to one source of information: commit "ipv4: introduce ip_dst_mtu_maybe_forward and protect forwarding path against pmtu spoofing" Workaround: Set sysctl net.ipv4.ip_forward_use_pmtu=1 + See: https://marc.info/?t=143636239500003&r=1&w=2 for details (Should fix kernel to have this by default on for tunnel devices) - subtle path mtu mishandling issues diff --git a/nhrpd/nhrp_cache.c b/nhrpd/nhrp_cache.c index 1c8fee8b07..0b5a0427e6 100644 --- a/nhrpd/nhrp_cache.c +++ b/nhrpd/nhrp_cache.c @@ -69,12 +69,13 @@ static void nhrp_cache_free(struct nhrp_cache *c) { struct nhrp_interface *nifp = c->ifp->info; - zassert(c->cur.type == NHRP_CACHE_INVALID && c->cur.peer == NULL); - zassert(c->new.type == NHRP_CACHE_INVALID && c->new.peer == NULL); + debugf(NHRP_DEBUG_COMMON, "Deleting cache entry"); nhrp_cache_counts[c->cur.type]--; notifier_call(&c->notifier_list, NOTIFY_CACHE_DELETE); zassert(!notifier_active(&c->notifier_list)); hash_release(nifp->cache_hash, c); + THREAD_OFF(c->t_timeout); + THREAD_OFF(c->t_auth); XFREE(MTYPE_NHRP_CACHE, c); } @@ -140,6 +141,41 @@ struct nhrp_cache_config *nhrp_cache_config_get(struct interface *ifp, create ? nhrp_cache_config_alloc : NULL); } +static void do_nhrp_cache_free(struct hash_bucket *hb, + void *arg __attribute__((__unused__))) +{ + struct nhrp_cache *c = hb->data; + + nhrp_cache_free(c); +} + +static void do_nhrp_cache_config_free(struct hash_bucket *hb, + void *arg __attribute__((__unused__))) +{ + struct nhrp_cache_config *cc = hb->data; + + nhrp_cache_config_free(cc); +} + +void nhrp_cache_interface_del(struct interface *ifp) +{ + struct nhrp_interface *nifp = ifp->info; + + debugf(NHRP_DEBUG_COMMON, "Cleaning up undeleted cache entries (%lu)", + nifp->cache_hash ? 
nifp->cache_hash->count : 0); + + if (nifp->cache_hash) { + hash_iterate(nifp->cache_hash, do_nhrp_cache_free, NULL); + hash_free(nifp->cache_hash); + } + + if (nifp->cache_config_hash) { + hash_iterate(nifp->cache_config_hash, do_nhrp_cache_config_free, + NULL); + hash_free(nifp->cache_config_hash); + } +} + struct nhrp_cache *nhrp_cache_get(struct interface *ifp, union sockunion *remote_addr, int create) { @@ -164,6 +200,7 @@ struct nhrp_cache *nhrp_cache_get(struct interface *ifp, static int nhrp_cache_do_free(struct thread *t) { struct nhrp_cache *c = THREAD_ARG(t); + c->t_timeout = NULL; nhrp_cache_free(c); return 0; @@ -172,6 +209,7 @@ static int nhrp_cache_do_free(struct thread *t) static int nhrp_cache_do_timeout(struct thread *t) { struct nhrp_cache *c = THREAD_ARG(t); + c->t_timeout = NULL; if (c->cur.type != NHRP_CACHE_INVALID) nhrp_cache_update_binding(c, c->cur.type, -1, NULL, 0, NULL); diff --git a/nhrpd/nhrp_interface.c b/nhrpd/nhrp_interface.c index 7768383e6b..269499cc59 100644 --- a/nhrpd/nhrp_interface.c +++ b/nhrpd/nhrp_interface.c @@ -49,6 +49,21 @@ static int nhrp_if_new_hook(struct interface *ifp) static int nhrp_if_delete_hook(struct interface *ifp) { + struct nhrp_interface *nifp = ifp->info; + + debugf(NHRP_DEBUG_IF, "Deleted interface (%s)", ifp->name); + + nhrp_cache_interface_del(ifp); + nhrp_nhs_interface_del(ifp); + nhrp_peer_interface_del(ifp); + + if (nifp->ipsec_profile) + free(nifp->ipsec_profile); + if (nifp->ipsec_fallback_profile) + free(nifp->ipsec_fallback_profile); + if (nifp->source) + free(nifp->source); + XFREE(MTYPE_NHRP_IF, ifp->info); return 0; } diff --git a/nhrpd/nhrp_main.c b/nhrpd/nhrp_main.c index 9fc13761c8..49a4900bf8 100644 --- a/nhrpd/nhrp_main.c +++ b/nhrpd/nhrp_main.c @@ -144,8 +144,13 @@ int main(int argc, char **argv) nhrp_interface_init(); resolver_init(master); - /* Run with elevated capabilities, as for all netlink activity - * we need privileges anyway. */ + /* + * Run with elevated capabilities, as for all netlink activity + * we need privileges anyway. + * The assert is for clang SA code where it does + * not see the change function being set in lib + */ + assert(nhrpd_privs.change); nhrpd_privs.change(ZPRIVS_RAISE); netlink_init(); diff --git a/nhrpd/nhrp_nhs.c b/nhrpd/nhrp_nhs.c index 085cab347f..540708f1ae 100644 --- a/nhrpd/nhrp_nhs.c +++ b/nhrpd/nhrp_nhs.c @@ -35,6 +35,7 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg) union sockunion cie_nbma, cie_proto, *proto; char buf[64]; int ok = 0, holdtime; + unsigned short mtu = 0; nhrp_reqid_free(&nhrp_packet_reqid, &r->reqid); @@ -57,6 +58,8 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg) || (cie->code == NHRP_CODE_ADMINISTRATIVELY_PROHIBITED && nhs->hub))) ok = 0; + mtu = ntohs(cie->mtu); + debugf(NHRP_DEBUG_COMMON, "NHS: CIE MTU: %d", mtu); } if (!ok) @@ -96,7 +99,7 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg) c = nhrp_cache_get(ifp, &p->dst_proto, 1); if (c) nhrp_cache_update_binding(c, NHRP_CACHE_NHS, holdtime, - nhrp_peer_ref(r->peer), 0, NULL); + nhrp_peer_ref(r->peer), mtu, NULL); } static int nhrp_reg_timeout(struct thread *t) @@ -197,7 +200,8 @@ static int nhrp_reg_send_req(struct thread *t) /* FIXME: push CIE for each local protocol address */ cie = nhrp_cie_push(zb, NHRP_CODE_SUCCESS, NULL, NULL); - cie->prefix_length = 0xff; + /* RFC2332 5.2.1 if unique is set then prefix length must be 0xff */ + cie->prefix_length = (if_ad->flags & NHRP_IFF_REG_NO_UNIQUE) ? 
8 * sockunion_get_addrlen(dst_proto) : 0xff; cie->holding_time = htons(if_ad->holdtime); cie->mtu = htons(if_ad->mtu); @@ -378,6 +382,24 @@ int nhrp_nhs_free(struct nhrp_nhs *nhs) return 0; } +void nhrp_nhs_interface_del(struct interface *ifp) +{ + struct nhrp_interface *nifp = ifp->info; + struct nhrp_nhs *nhs, *tmp; + afi_t afi; + + for (afi = 0; afi < AFI_MAX; afi++) { + debugf(NHRP_DEBUG_COMMON, "Cleaning up nhs entries (%d)", + !list_empty(&nifp->afi[afi].nhslist_head)); + + list_for_each_entry_safe(nhs, tmp, &nifp->afi[afi].nhslist_head, + nhslist_entry) + { + nhrp_nhs_free(nhs); + } + } +} + void nhrp_nhs_terminate(void) { struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT); diff --git a/nhrpd/nhrp_peer.c b/nhrpd/nhrp_peer.c index 2dc019ce65..9aaa9dec1e 100644 --- a/nhrpd/nhrp_peer.c +++ b/nhrpd/nhrp_peer.c @@ -38,11 +38,17 @@ static void nhrp_packet_debug(struct zbuf *zb, const char *dir); static void nhrp_peer_check_delete(struct nhrp_peer *p) { + char buf[2][256]; struct nhrp_interface *nifp = p->ifp->info; if (p->ref || notifier_active(&p->notifier_list)) return; + debugf(NHRP_DEBUG_COMMON, "Deleting peer ref:%d remote:%s local:%s", + p->ref, + sockunion2str(&p->vc->remote.nbma, buf[0], sizeof(buf[0])), + sockunion2str(&p->vc->local.nbma, buf[1], sizeof(buf[1]))); + THREAD_OFF(p->t_fallback); hash_release(nifp->peer_hash, p); nhrp_interface_notify_del(p->ifp, &p->ifp_notifier); @@ -185,6 +191,27 @@ static void *nhrp_peer_create(void *data) return p; } +static void do_peer_hash_free(struct hash_bucket *hb, + void *arg __attribute__((__unused__))) +{ + struct nhrp_peer *p = hb->data; + nhrp_peer_check_delete(p); +} + +void nhrp_peer_interface_del(struct interface *ifp) +{ + struct nhrp_interface *nifp = ifp->info; + + debugf(NHRP_DEBUG_COMMON, "Cleaning up undeleted peer entries (%lu)", + nifp->peer_hash ? 
nifp->peer_hash->count : 0); + + if (nifp->peer_hash) { + hash_iterate(nifp->peer_hash, do_peer_hash_free, NULL); + assert(nifp->peer_hash->count == 0); + hash_free(nifp->peer_hash); + } +} + struct nhrp_peer *nhrp_peer_get(struct interface *ifp, const union sockunion *remote_nbma) { @@ -271,6 +298,8 @@ int nhrp_peer_check(struct nhrp_peer *p, int establish) return 0; if (sockunion_family(&vc->local.nbma) == AF_UNSPEC) return 0; + if (vc->ipsec) + return 1; p->prio = establish > 1; p->requested = 1; diff --git a/nhrpd/nhrp_route.c b/nhrpd/nhrp_route.c index e7d35b90ff..334f468c18 100644 --- a/nhrpd/nhrp_route.c +++ b/nhrpd/nhrp_route.c @@ -56,7 +56,7 @@ static void nhrp_route_update_put(struct route_node *rn) struct route_info *ri = rn->info; if (!ri->ifp && !ri->nhrp_ifp - && sockunion_family(&ri->via) == AF_UNSPEC) { + && sockunion_is_null(&ri->via)) { XFREE(MTYPE_NHRP_ROUTE, rn->info); route_unlock_node(rn); } @@ -70,8 +70,7 @@ static void nhrp_route_update_zebra(const struct prefix *p, struct route_node *rn; struct route_info *ri; - rn = nhrp_route_update_get( - p, (sockunion_family(nexthop) != AF_UNSPEC) || ifp); + rn = nhrp_route_update_get(p, !sockunion_is_null(nexthop) || ifp); if (rn) { ri = rn->info; ri->via = *nexthop; @@ -99,6 +98,7 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type, { struct zapi_route api; struct zapi_nexthop *api_nh; + union sockunion *nexthop_ref = (union sockunion *)nexthop; if (zclient->sock < 0) return; @@ -134,8 +134,14 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type, switch (api.prefix.family) { case AF_INET: - if (nexthop) { - api_nh->gate.ipv4 = nexthop->sin.sin_addr; + if (api.prefix.prefixlen == IPV4_MAX_BITLEN && + nexthop_ref && + memcmp(&nexthop_ref->sin.sin_addr, &api.prefix.u.prefix4, + sizeof(struct in_addr)) == 0) { + nexthop_ref = NULL; + } + if (nexthop_ref) { + api_nh->gate.ipv4 = nexthop_ref->sin.sin_addr; api_nh->type = NEXTHOP_TYPE_IPV4; } if (ifp) { @@ -147,8 +153,14 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type, } break; case AF_INET6: - if (nexthop) { - api_nh->gate.ipv6 = nexthop->sin6.sin6_addr; + if (api.prefix.prefixlen == IPV6_MAX_BITLEN && + nexthop_ref && + memcmp(&nexthop_ref->sin6.sin6_addr, &api.prefix.u.prefix6, + sizeof(struct in6_addr)) == 0) { + nexthop_ref = NULL; + } + if (nexthop_ref) { + api_nh->gate.ipv6 = nexthop_ref->sin6.sin6_addr; api_nh->type = NEXTHOP_TYPE_IPV6; } if (ifp) { @@ -171,8 +183,9 @@ void nhrp_route_announce(int add, enum nhrp_cache_type type, zlog_debug( "Zebra send: route %s %pFX nexthop %s metric %u count %d dev %s", add ? "add" : "del", &api.prefix, - nexthop ? inet_ntop(api.prefix.family, &api_nh->gate, - buf, sizeof(buf)) + nexthop_ref ? inet_ntop(api.prefix.family, + &api_nh->gate, + buf, sizeof(buf)) : "<onlink>", api.metric, api.nexthop_num, ifp ? ifp->name : "none"); } @@ -225,7 +238,7 @@ int nhrp_route_read(ZAPI_CALLBACK_ARGS) sockunion2str(&nexthop_addr, buf, sizeof(buf)), ifp ? ifp->name : "(none)"); - nhrp_route_update_zebra(&api.prefix, &nexthop_addr, ifp); + nhrp_route_update_zebra(&api.prefix, &nexthop_addr, added ? 
ifp : NULL); nhrp_shortcut_prefix_change(&api.prefix, !added); return 0; diff --git a/nhrpd/nhrp_shortcut.c b/nhrpd/nhrp_shortcut.c index 2359cfa4ac..fbb883185a 100644 --- a/nhrpd/nhrp_shortcut.c +++ b/nhrpd/nhrp_shortcut.c @@ -51,18 +51,26 @@ static int nhrp_shortcut_do_expire(struct thread *t) static void nhrp_shortcut_cache_notify(struct notifier_block *n, unsigned long cmd) { + char buf2[PREFIX_STRLEN]; + struct nhrp_shortcut *s = container_of(n, struct nhrp_shortcut, cache_notifier); + struct nhrp_cache *c = s->cache; + if (c) + sockunion2str(&c->remote_addr, buf2, sizeof(buf2)); + else + snprintf(buf2, sizeof(buf2), "(unspec)"); switch (cmd) { case NOTIFY_CACHE_UP: if (!s->route_installed) { debugf(NHRP_DEBUG_ROUTE, - "Shortcut: route install %pFX nh (unspec) dev %s", - s->p, s->cache->ifp->name); + "Shortcut: route install %pFX nh %s dev %s", + s->p, buf2, c && c->ifp ? + c->ifp->name : "<unk>"); - nhrp_route_announce(1, s->type, s->p, s->cache->ifp, - NULL, 0); + nhrp_route_announce(1, s->type, s->p, c ? c->ifp : NULL, + c ? &c->remote_addr : NULL, 0); s->route_installed = 1; } break; @@ -207,6 +215,7 @@ static void nhrp_shortcut_recv_resolution_rep(struct nhrp_reqid *reqid, struct nhrp_extension_header *ext; struct nhrp_cie_header *cie; struct nhrp_cache *c = NULL; + struct nhrp_cache *c_dst_proto = NULL; union sockunion *proto, cie_proto, *nbma, cie_nbma, nat_nbma; struct prefix prefix, route_prefix; struct zbuf extpl; @@ -304,6 +313,22 @@ static void nhrp_shortcut_recv_resolution_rep(struct nhrp_reqid *reqid, debugf(NHRP_DEBUG_COMMON, "Shortcut: no cache for nbma %s", buf[2]); } + + /* Update cache binding for dst_proto as well */ + if (proto != &pp->dst_proto) { + c_dst_proto = nhrp_cache_get(pp->ifp, &pp->dst_proto, 1); + if (c_dst_proto) { + debugf(NHRP_DEBUG_COMMON, + "Shortcut: cache found, update binding"); + nhrp_cache_update_binding(c_dst_proto, NHRP_CACHE_DYNAMIC, + holding_time, + nhrp_peer_get(pp->ifp, nbma), + htons(cie->mtu), nbma); + } else { + debugf(NHRP_DEBUG_COMMON, + "Shortcut: no cache for nbma %s", buf[2]); + } + } } /* Update shortcut entry for subnet to protocol gw binding */ diff --git a/nhrpd/nhrpd.h b/nhrpd/nhrpd.h index 80a365a3c3..a36d0c445d 100644 --- a/nhrpd/nhrpd.h +++ b/nhrpd/nhrpd.h @@ -124,7 +124,7 @@ enum nhrp_notify_type { struct nhrp_vc { struct notifier_list notifier_list; - uint8_t ipsec; + uint32_t ipsec; uint8_t updating; uint8_t abort_migration; @@ -343,6 +343,7 @@ void nhrp_nhs_foreach(struct interface *ifp, afi_t afi, void (*cb)(struct nhrp_nhs *, struct nhrp_registration *, void *), void *ctx); +void nhrp_nhs_interface_del(struct interface *ifp); void nhrp_route_update_nhrp(const struct prefix *p, struct interface *ifp); void nhrp_route_announce(int add, enum nhrp_cache_type type, @@ -366,6 +367,7 @@ void nhrp_shortcut_foreach(afi_t afi, void nhrp_shortcut_purge(struct nhrp_shortcut *s, int force); void nhrp_shortcut_prefix_change(const struct prefix *p, int deleted); +void nhrp_cache_interface_del(struct interface *ifp); void nhrp_cache_config_free(struct nhrp_cache_config *c); struct nhrp_cache_config *nhrp_cache_config_get(struct interface *ifp, union sockunion *remote_addr, @@ -446,6 +448,7 @@ struct nhrp_reqid *nhrp_reqid_lookup(struct nhrp_reqid_pool *, uint32_t reqid); int nhrp_packet_init(void); +void nhrp_peer_interface_del(struct interface *ifp); struct nhrp_peer *nhrp_peer_get(struct interface *ifp, const union sockunion *remote_nbma); struct nhrp_peer *nhrp_peer_ref(struct nhrp_peer *p); diff --git a/ospf6d/ospf6_abr.c 
b/ospf6d/ospf6_abr.c index 8cfdf2642c..abcdb40547 100644 --- a/ospf6d/ospf6_abr.c +++ b/ospf6d/ospf6_abr.c @@ -160,35 +160,22 @@ int ospf6_abr_originate_summary_to_area(struct ospf6_route *route, && route->type != OSPF6_DEST_TYPE_RANGE && ((route->type != OSPF6_DEST_TYPE_ROUTER) || !CHECK_FLAG(route->path.router_bits, OSPF6_ROUTER_BIT_E))) { -#if 0 - zlog_debug( - "Route type is none of network, range nor ASBR, ignore"); -#endif return 0; } /* AS External routes are never considered */ if (route->path.type == OSPF6_PATH_TYPE_EXTERNAL1 || route->path.type == OSPF6_PATH_TYPE_EXTERNAL2) { -#if 0 - zlog_debug("Path type is external, skip"); -#endif return 0; } /* do not generate if the path's area is the same as target area */ if (route->path.area_id == area->area_id) { -#if 0 - zlog_debug("The route is in the area itself, ignore"); -#endif return 0; } /* do not generate if the nexthops belongs to the target area */ if (ospf6_abr_nexthops_belong_to_area(route, area)) { -#if 0 - zlog_debug("The route's nexthop is in the same area, ignore"); -#endif return 0; } @@ -770,6 +757,10 @@ void ospf6_abr_old_path_update(struct ospf6_route *old_route, void ospf6_abr_old_route_remove(struct ospf6_lsa *lsa, struct ospf6_route *old, struct ospf6_route_table *table) { + if (IS_OSPF6_DEBUG_ABR) + zlog_debug("%s: route %pFX, paths %d", __func__, &old->prefix, + listcount(old->paths)); + if (listcount(old->paths) > 1) { struct listnode *anode, *anext, *nnode, *rnode, *rnext; struct ospf6_path *o_path; @@ -778,13 +769,15 @@ void ospf6_abr_old_route_remove(struct ospf6_lsa *lsa, struct ospf6_route *old, for (ALL_LIST_ELEMENTS(old->paths, anode, anext, o_path)) { if (o_path->origin.adv_router != lsa->header->adv_router - && o_path->origin.id != lsa->header->id) + || o_path->origin.id != lsa->header->id) continue; for (ALL_LIST_ELEMENTS_RO(o_path->nh_list, nnode, nh)) { for (ALL_LIST_ELEMENTS(old->nh_list, rnode, rnext, rnh)) { if (!ospf6_nexthop_is_same(rnh, nh)) continue; + if (IS_OSPF6_DEBUG_ABR) + zlog_debug("deleted nexthop"); listnode_delete(old->nh_list, rnh); ospf6_nexthop_delete(rnh); } @@ -847,14 +840,16 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) bool old_entry_updated = false; struct ospf6_path *path, *o_path, *ecmp_path; struct listnode *anode; + bool add_route = false; memset(&prefix, 0, sizeof(prefix)); if (lsa->header->type == htons(OSPF6_LSTYPE_INTER_PREFIX)) { if (IS_OSPF6_DEBUG_EXAMIN(INTER_PREFIX)) { is_debug++; - zlog_debug("%s: Examin %s in area %s", __func__, - lsa->name, oa->name); + zlog_debug("%s: LSA %s age %d in area %s", __func__, + lsa->name, ospf6_lsa_age_current(lsa), + oa->name); } prefix_lsa = @@ -873,8 +868,9 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) } else if (lsa->header->type == htons(OSPF6_LSTYPE_INTER_ROUTER)) { if (IS_OSPF6_DEBUG_EXAMIN(INTER_ROUTER)) { is_debug++; - zlog_debug("%s: Examin %s in area %s", __func__, - lsa->name, oa->name); + zlog_debug("%s: LSA %s age %d in area %s", __func__, + lsa->name, ospf6_lsa_age_current(lsa), + oa->name); } router_lsa = @@ -898,8 +894,12 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) /* Find existing route */ route = ospf6_route_lookup(&prefix, table); - if (route) + if (route) { ospf6_route_lock(route); + if (is_debug) + zlog_debug("%s: route %pFX, paths %d", __func__, + &prefix, listcount(route->paths)); + } while (route && ospf6_route_is_prefix(&prefix, route)) { if (route->path.area_id == oa->area_id && route->path.origin.type 
== lsa->header->type @@ -952,6 +952,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) return; } + /* (2) if the LSA is self-originated, ignore */ if (lsa->header->adv_router == oa->ospf6->router_id) { if (is_debug) @@ -1026,8 +1027,8 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) || !CHECK_FLAG(abr_entry->path.router_bits, OSPF6_ROUTER_BIT_B)) { if (is_debug) zlog_debug( - "%s: ABR router entry does not exist, ignore", - __func__); + "%s: ABR router entry %pFX does not exist, ignore", + __func__, &abr_prefix); if (old) { if (old->type == OSPF6_DEST_TYPE_ROUTER && oa->intra_brouter_calc) { @@ -1040,7 +1041,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) zlog_debug( "%s: remove old entry: %s %p ", __func__, buf, (void *)old); - ospf6_route_remove(old, table); + ospf6_abr_old_route_remove(lsa, old, table); } } return; @@ -1104,7 +1105,11 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) are identical. */ old = ospf6_route_lookup(&prefix, table); - + if (old) { + if (is_debug) + zlog_debug("%s: found old route %pFX, paths %d", + __func__, &prefix, listcount(old->paths)); + } for (old_route = old; old_route; old_route = old_route->next) { if (!ospf6_route_is_same(old_route, route) || (old_route->type != route->type) || @@ -1186,7 +1191,7 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) "%s: Update route: %s %p old cost %u new cost %u nh %u", __func__, buf, (void *)old_route, old_route->path.cost, route->path.cost, - listcount(route->nh_list)); + listcount(old_route->nh_list)); /* For Inter-Prefix route: Update RIB/FIB, * For Inter-Router trigger summary update @@ -1199,10 +1204,19 @@ void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa) break; } + /* If the old entry is not updated and old entry not found or old entry + * does not match with the new entry then add the new route + */ if (old_entry_updated == false) { + if ((old == NULL) || (old->type != route->type) + || (old->path.type != route->path.type)) + add_route = true; + } + + if (add_route) { if (is_debug) { zlog_debug( - "%s: Install route: %s cost %u nh %u adv_router %pI4", + "%s: Install new route: %s cost %u nh %u adv_router %pI4", __func__, buf, route->path.cost, listcount(route->nh_list), &route->path.origin.adv_router); @@ -1296,7 +1310,9 @@ static char *ospf6_inter_area_prefix_lsa_get_prefix_str(struct ospf6_lsa *lsa, } static int ospf6_inter_area_prefix_lsa_show(struct vty *vty, - struct ospf6_lsa *lsa) + struct ospf6_lsa *lsa, + json_object *json_obj, + bool use_json) { struct ospf6_inter_prefix_lsa *prefix_lsa; char buf[INET6_ADDRSTRLEN]; @@ -1304,16 +1320,29 @@ static int ospf6_inter_area_prefix_lsa_show(struct vty *vty, prefix_lsa = (struct ospf6_inter_prefix_lsa *)OSPF6_LSA_HEADER_END( lsa->header); - vty_out(vty, " Metric: %lu\n", - (unsigned long)OSPF6_ABR_SUMMARY_METRIC(prefix_lsa)); + if (use_json) { + json_object_int_add( + json_obj, "metric", + (unsigned long)OSPF6_ABR_SUMMARY_METRIC(prefix_lsa)); + ospf6_prefix_options_printbuf(prefix_lsa->prefix.prefix_options, + buf, sizeof(buf)); + json_object_string_add(json_obj, "prefixOptions", buf); + json_object_string_add( + json_obj, "prefix", + ospf6_inter_area_prefix_lsa_get_prefix_str( + lsa, buf, sizeof(buf), 0)); + } else { + vty_out(vty, " Metric: %lu\n", + (unsigned long)OSPF6_ABR_SUMMARY_METRIC(prefix_lsa)); - ospf6_prefix_options_printbuf(prefix_lsa->prefix.prefix_options, buf, - 
sizeof(buf)); - vty_out(vty, " Prefix Options: %s\n", buf); + ospf6_prefix_options_printbuf(prefix_lsa->prefix.prefix_options, + buf, sizeof(buf)); + vty_out(vty, " Prefix Options: %s\n", buf); - vty_out(vty, " Prefix: %s\n", - ospf6_inter_area_prefix_lsa_get_prefix_str(lsa, buf, - sizeof(buf), 0)); + vty_out(vty, " Prefix: %s\n", + ospf6_inter_area_prefix_lsa_get_prefix_str( + lsa, buf, sizeof(buf), 0)); + } return 0; } @@ -1338,7 +1367,9 @@ static char *ospf6_inter_area_router_lsa_get_prefix_str(struct ospf6_lsa *lsa, } static int ospf6_inter_area_router_lsa_show(struct vty *vty, - struct ospf6_lsa *lsa) + struct ospf6_lsa *lsa, + json_object *json_obj, + bool use_json) { struct ospf6_inter_router_lsa *router_lsa; char buf[64]; @@ -1347,12 +1378,22 @@ static int ospf6_inter_area_router_lsa_show(struct vty *vty, lsa->header); ospf6_options_printbuf(router_lsa->options, buf, sizeof(buf)); - vty_out(vty, " Options: %s\n", buf); - vty_out(vty, " Metric: %lu\n", - (unsigned long)OSPF6_ABR_SUMMARY_METRIC(router_lsa)); + if (use_json) { + json_object_string_add(json_obj, "options", buf); + json_object_int_add( + json_obj, "metric", + (unsigned long)OSPF6_ABR_SUMMARY_METRIC(router_lsa)); + } else { + vty_out(vty, " Options: %s\n", buf); + vty_out(vty, " Metric: %lu\n", + (unsigned long)OSPF6_ABR_SUMMARY_METRIC(router_lsa)); + } inet_ntop(AF_INET, &router_lsa->router_id, buf, sizeof(buf)); - vty_out(vty, " Destination Router ID: %s\n", buf); + if (use_json) + json_object_string_add(json_obj, "destinationRouterId", buf); + else + vty_out(vty, " Destination Router ID: %s\n", buf); return 0; } diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c index 0e419cbff6..3449f48267 100644 --- a/ospf6d/ospf6_asbr.c +++ b/ospf6d/ospf6_asbr.c @@ -210,7 +210,7 @@ void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old, struct ospf6_route *route, struct ospf6 *ospf6) { - struct ospf6_route *old_route; + struct ospf6_route *old_route, *next_route; struct ospf6_path *ecmp_path, *o_path = NULL; struct listnode *anode, *anext; struct listnode *nnode, *rnode, *rnext; @@ -220,9 +220,11 @@ void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old, /* check for old entry match with new route origin, * delete old entry. 
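
The ospf6_asbr change just below saves the successor (next_route) before the loop body runs, apparently so the iteration never advances through an element that ospf6_route_remove() may have just freed, and it keeps the list head (old) valid when the head itself is removed. The same pattern in isolation, with placeholder names (not FRR code):

#include <stdlib.h>

struct node {
	struct node *next;
	int stale;
};

/* remove-while-iterating: capture the successor before the current
 * element may be freed - the same reasoning as next_route below */
static struct node *prune_stale(struct node *head)
{
	struct node **slot = &head, *cur, *next;

	for (cur = head; cur != NULL; cur = next) {
		next = cur->next;
		if (!cur->stale) {
			slot = &cur->next;
			continue;
		}
		*slot = next;	/* unlink */
		free(cur);
	}
	return head;
}
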
*/ - for (old_route = old; old_route; old_route = old_route->next) { + for (old_route = old; old_route; old_route = next_route) { bool route_updated = false; + next_route = old_route->next; + if (!ospf6_route_is_same(old_route, route) || (old_route->path.type != route->path.type)) continue; @@ -315,6 +317,8 @@ void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old, old_route->path.cost, route->path.cost); } + if (old == old_route) + old = next_route; ospf6_route_remove(old_route, ospf6->route_table); } @@ -1439,8 +1443,7 @@ static void ospf6_redistribute_show_config(struct vty *vty, struct ospf6 *ospf6, struct ospf6_redist *red; total = 0; - for (type = 0; type < ZEBRA_ROUTE_MAX; type++) - nroute[type] = 0; + memset(nroute, 0, sizeof(nroute)); for (route = ospf6_route_head(ospf6->external_table); route; route = ospf6_route_next(route)) { info = route->route_option; @@ -1448,12 +1451,11 @@ static void ospf6_redistribute_show_config(struct vty *vty, struct ospf6 *ospf6, total++; } - if (use_json) - json_route = json_object_new_object(); - else + if (!use_json) vty_out(vty, "Redistributing External Routes from:\n"); for (type = 0; type < ZEBRA_ROUTE_MAX; type++) { + red = ospf6_redist_lookup(ospf6, type, 0); if (!red) @@ -1462,6 +1464,7 @@ static void ospf6_redistribute_show_config(struct vty *vty, struct ospf6 *ospf6, continue; if (use_json) { + json_route = json_object_new_object(); json_object_string_add(json_route, "routeType", ZROUTE_NAME(type)); json_object_int_add(json_route, "numberOfRoutes", @@ -1890,7 +1893,8 @@ static char *ospf6_as_external_lsa_get_prefix_str(struct ospf6_lsa *lsa, return (buf); } -static int ospf6_as_external_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +static int ospf6_as_external_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_obj, bool use_json) { struct ospf6_as_external_lsa *external; char buf[64]; @@ -1908,31 +1912,65 @@ static int ospf6_as_external_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T) ? 
'T' : '-')); - vty_out(vty, " Bits: %s\n", buf); - vty_out(vty, " Metric: %5lu\n", - (unsigned long)OSPF6_ASBR_METRIC(external)); - - ospf6_prefix_options_printbuf(external->prefix.prefix_options, buf, - sizeof(buf)); - vty_out(vty, " Prefix Options: %s\n", buf); + if (use_json) { + json_object_string_add(json_obj, "bits", buf); + json_object_int_add(json_obj, "metric", + (unsigned long)OSPF6_ASBR_METRIC(external)); + ospf6_prefix_options_printbuf(external->prefix.prefix_options, + buf, sizeof(buf)); + json_object_string_add(json_obj, "prefixOptions", buf); + json_object_int_add( + json_obj, "referenceLsType", + ntohs(external->prefix.prefix_refer_lstype)); + json_object_string_add(json_obj, "prefix", + ospf6_as_external_lsa_get_prefix_str( + lsa, buf, sizeof(buf), 0)); + + /* Forwarding-Address */ + json_object_boolean_add( + json_obj, "forwardingAddressPresent", + CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)); + if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)) + json_object_string_add( + json_obj, "forwardingAddress", + ospf6_as_external_lsa_get_prefix_str( + lsa, buf, sizeof(buf), 1)); + + /* Tag */ + json_object_boolean_add( + json_obj, "tagPresent", + CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T)); + if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T)) + json_object_int_add(json_obj, "tag", + ospf6_as_external_lsa_get_tag(lsa)); + } else { + vty_out(vty, " Bits: %s\n", buf); + vty_out(vty, " Metric: %5lu\n", + (unsigned long)OSPF6_ASBR_METRIC(external)); - vty_out(vty, " Referenced LSType: %d\n", - ntohs(external->prefix.prefix_refer_lstype)); + ospf6_prefix_options_printbuf(external->prefix.prefix_options, + buf, sizeof(buf)); + vty_out(vty, " Prefix Options: %s\n", buf); - vty_out(vty, " Prefix: %s\n", - ospf6_as_external_lsa_get_prefix_str(lsa, buf, sizeof(buf), 0)); + vty_out(vty, " Referenced LSType: %d\n", + ntohs(external->prefix.prefix_refer_lstype)); - /* Forwarding-Address */ - if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)) { - vty_out(vty, " Forwarding-Address: %s\n", + vty_out(vty, " Prefix: %s\n", ospf6_as_external_lsa_get_prefix_str(lsa, buf, - sizeof(buf), 1)); - } + sizeof(buf), 0)); - /* Tag */ - if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T)) { - vty_out(vty, " Tag: %" ROUTE_TAG_PRI "\n", - ospf6_as_external_lsa_get_tag(lsa)); + /* Forwarding-Address */ + if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)) { + vty_out(vty, " Forwarding-Address: %s\n", + ospf6_as_external_lsa_get_prefix_str( + lsa, buf, sizeof(buf), 1)); + } + + /* Tag */ + if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_T)) { + vty_out(vty, " Tag: %" ROUTE_TAG_PRI "\n", + ospf6_as_external_lsa_get_tag(lsa)); + } } return 0; diff --git a/ospf6d/ospf6_flood.c b/ospf6d/ospf6_flood.c index 0662cfd683..2d896546fa 100644 --- a/ospf6d/ospf6_flood.c +++ b/ospf6d/ospf6_flood.c @@ -452,12 +452,6 @@ void ospf6_flood_area(struct ospf6_neighbor *from, struct ospf6_lsa *lsa, && oi != OSPF6_INTERFACE(lsa->lsdb->data)) continue; -#if 0 - if (OSPF6_LSA_SCOPE (lsa->header->type) == OSPF6_SCOPE_AS && - ospf6_is_interface_virtual_link (oi)) - continue; -#endif /*0*/ - ospf6_flood_interface(from, lsa, oi); } } @@ -527,12 +521,6 @@ static void ospf6_flood_clear_area(struct ospf6_lsa *lsa, struct ospf6_area *oa) && oi != OSPF6_INTERFACE(lsa->lsdb->data)) continue; -#if 0 - if (OSPF6_LSA_SCOPE (lsa->header->type) == OSPF6_SCOPE_AS && - ospf6_is_interface_virtual_link (oi)) - continue; -#endif /*0*/ - ospf6_flood_clear_interface(lsa, oi); } } diff --git 
a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c index 4988cee7d8..0cc3bd2cc9 100644 --- a/ospf6d/ospf6_interface.c +++ b/ospf6d/ospf6_interface.c @@ -250,6 +250,7 @@ void ospf6_interface_delete(struct ospf6_interface *oi) THREAD_OFF(oi->thread_send_lsupdate); THREAD_OFF(oi->thread_send_lsack); THREAD_OFF(oi->thread_sso); + THREAD_OFF(oi->thread_wait_timer); ospf6_lsdb_remove_all(oi->lsdb); ospf6_lsdb_remove_all(oi->lsupdate_list); @@ -304,6 +305,7 @@ void ospf6_interface_disable(struct ospf6_interface *oi) THREAD_OFF(oi->thread_link_lsa); THREAD_OFF(oi->thread_intra_prefix_lsa); THREAD_OFF(oi->thread_as_extern_lsa); + THREAD_OFF(oi->thread_wait_timer); } static struct in6_addr * @@ -793,7 +795,7 @@ int interface_up(struct thread *thread) else { ospf6_interface_state_change(OSPF6_INTERFACE_WAITING, oi); thread_add_timer(master, wait_timer, oi, oi->dead_interval, - NULL); + &oi->thread_wait_timer); } return 0; @@ -1414,7 +1416,7 @@ DEFUN (show_ipv6_ospf6_interface_ifname_prefix, [<\ detail\ |<X:X::X:X|X:X::X:X/M> [<match|detail>]\ - >]", + >] [json]", SHOW_STR IP6_STR OSPF6_STR @@ -1425,12 +1427,14 @@ DEFUN (show_ipv6_ospf6_interface_ifname_prefix, OSPF6_ROUTE_ADDRESS_STR OSPF6_ROUTE_PREFIX_STR OSPF6_ROUTE_MATCH_STR - "Display details of the prefixes\n") + "Display details of the prefixes\n" + JSON_STR) { int idx_ifname = 4; int idx_prefix = 6; struct interface *ifp; struct ospf6_interface *oi; + bool uj = use_json(argc, argv); ifp = if_lookup_by_name(argv[idx_ifname]->arg, VRF_DEFAULT); if (ifp == NULL) { @@ -1445,8 +1449,8 @@ DEFUN (show_ipv6_ospf6_interface_ifname_prefix, return CMD_WARNING; } - ospf6_route_table_show(vty, idx_prefix, argc, argv, - oi->route_connected); + ospf6_route_table_show(vty, idx_prefix, argc, argv, oi->route_connected, + uj); return CMD_SUCCESS; } @@ -1457,7 +1461,7 @@ DEFUN (show_ipv6_ospf6_interface_prefix, [<\ detail\ |<X:X::X:X|X:X::X:X/M> [<match|detail>]\ - >]", + >] [json]", SHOW_STR IP6_STR OSPF6_STR @@ -1467,12 +1471,14 @@ DEFUN (show_ipv6_ospf6_interface_prefix, OSPF6_ROUTE_ADDRESS_STR OSPF6_ROUTE_PREFIX_STR OSPF6_ROUTE_MATCH_STR - "Display details of the prefixes\n") + "Display details of the prefixes\n" + JSON_STR) { struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT); int idx_prefix = 5; struct ospf6_interface *oi; struct interface *ifp; + bool uj = use_json(argc, argv); FOR_ALL_INTERFACES (vrf, ifp) { oi = (struct ospf6_interface *)ifp->info; @@ -1480,7 +1486,7 @@ DEFUN (show_ipv6_ospf6_interface_prefix, continue; ospf6_route_table_show(vty, idx_prefix, argc, argv, - oi->route_connected); + oi->route_connected, uj); } return CMD_SUCCESS; diff --git a/ospf6d/ospf6_interface.h b/ospf6d/ospf6_interface.h index dd7f4d1b1e..6e4692920c 100644 --- a/ospf6d/ospf6_interface.h +++ b/ospf6d/ospf6_interface.h @@ -111,6 +111,7 @@ struct ospf6_interface { struct thread *thread_link_lsa; struct thread *thread_intra_prefix_lsa; struct thread *thread_as_extern_lsa; + struct thread *thread_wait_timer; struct ospf6_route_table *route_connected; diff --git a/ospf6d/ospf6_intra.c b/ospf6d/ospf6_intra.c index 17538c466a..2abe64ac60 100644 --- a/ospf6d/ospf6_intra.c +++ b/ospf6d/ospf6_intra.c @@ -76,7 +76,8 @@ static char *ospf6_router_lsa_get_nbr_id(struct ospf6_lsa *lsa, char *buf, *)(start + pos * (sizeof(struct ospf6_router_lsdesc))); - if ((char *)lsdesc < end) { + if ((char *)lsdesc + sizeof(struct ospf6_router_lsdesc) + <= end) { if (buf && (buflen > INET_ADDRSTRLEN * 2)) { inet_ntop(AF_INET, &lsdesc->neighbor_interface_id, buf1, @@ -84,20 +85,24 @@ static char 
*ospf6_router_lsa_get_nbr_id(struct ospf6_lsa *lsa, char *buf, inet_ntop(AF_INET, &lsdesc->neighbor_router_id, buf2, sizeof(buf2)); sprintf(buf, "%s/%s", buf2, buf1); + + return buf; } - } else - return NULL; + } } - return buf; + return NULL; } -static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_obj, bool use_json) { char *start, *end, *current; char buf[32], name[32], bits[16], options[32]; struct ospf6_router_lsa *router_lsa; struct ospf6_router_lsdesc *lsdesc; + json_object *json_arr; + json_object *json_loop; router_lsa = (struct ospf6_router_lsa *)((char *)lsa->header @@ -105,7 +110,12 @@ static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) ospf6_capability_printbuf(router_lsa->bits, bits, sizeof(bits)); ospf6_options_printbuf(router_lsa->options, options, sizeof(options)); - vty_out(vty, " Bits: %s Options: %s\n", bits, options); + if (use_json) { + json_object_string_add(json_obj, "bits", bits); + json_object_string_add(json_obj, "options", options); + json_arr = json_object_new_array(); + } else + vty_out(vty, " Bits: %s Options: %s\n", bits, options); start = (char *)router_lsa + sizeof(struct ospf6_router_lsa); end = (char *)lsa->header + ntohs(lsa->header->length); @@ -126,18 +136,43 @@ static int ospf6_router_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) snprintf(name, sizeof(name), "Unknown (%#x)", lsdesc->type); - vty_out(vty, " Type: %s Metric: %d\n", name, - ntohs(lsdesc->metric)); - vty_out(vty, " Interface ID: %s\n", - inet_ntop(AF_INET, &lsdesc->interface_id, buf, - sizeof(buf))); - vty_out(vty, " Neighbor Interface ID: %s\n", - inet_ntop(AF_INET, &lsdesc->neighbor_interface_id, buf, - sizeof(buf))); - vty_out(vty, " Neighbor Router ID: %s\n", - inet_ntop(AF_INET, &lsdesc->neighbor_router_id, buf, - sizeof(buf))); + if (use_json) { + json_loop = json_object_new_object(); + json_object_string_add(json_loop, "type", name); + json_object_int_add(json_loop, "metric", + ntohs(lsdesc->metric)); + json_object_string_add(json_loop, "interfaceId", + inet_ntop(AF_INET, + &lsdesc->interface_id, + buf, sizeof(buf))); + json_object_string_add( + json_loop, "neighborInterfaceId", + inet_ntop(AF_INET, + &lsdesc->neighbor_interface_id, buf, + sizeof(buf))); + json_object_string_add( + json_loop, "neighborRouterId", + inet_ntop(AF_INET, &lsdesc->neighbor_router_id, + buf, sizeof(buf))); + json_object_array_add(json_arr, json_loop); + } else { + vty_out(vty, " Type: %s Metric: %d\n", name, + ntohs(lsdesc->metric)); + vty_out(vty, " Interface ID: %s\n", + inet_ntop(AF_INET, &lsdesc->interface_id, buf, + sizeof(buf))); + vty_out(vty, " Neighbor Interface ID: %s\n", + inet_ntop(AF_INET, + &lsdesc->neighbor_interface_id, buf, + sizeof(buf))); + vty_out(vty, " Neighbor Router ID: %s\n", + inet_ntop(AF_INET, &lsdesc->neighbor_router_id, + buf, sizeof(buf))); + } } + if (use_json) + json_object_object_add(json_obj, "lsaDescription", json_arr); + return 0; } @@ -411,39 +446,55 @@ static char *ospf6_network_lsa_get_ar_id(struct ospf6_lsa *lsa, char *buf, if ((current + sizeof(struct ospf6_network_lsdesc)) <= end) { lsdesc = (struct ospf6_network_lsdesc *)current; - if (buf) + if (buf) { inet_ntop(AF_INET, &lsdesc->router_id, buf, buflen); - } else - return NULL; + return buf; + } + } } - return (buf); + return NULL; } -static int ospf6_network_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +static int ospf6_network_lsa_show(struct vty *vty, struct ospf6_lsa 
*lsa, + json_object *json_obj, bool use_json) { char *start, *end, *current; struct ospf6_network_lsa *network_lsa; struct ospf6_network_lsdesc *lsdesc; char buf[128], options[32]; + json_object *json_arr; network_lsa = (struct ospf6_network_lsa *)((caddr_t)lsa->header + sizeof(struct ospf6_lsa_header)); ospf6_options_printbuf(network_lsa->options, options, sizeof(options)); - vty_out(vty, " Options: %s\n", options); + if (use_json) + json_object_string_add(json_obj, "options", options); + else + vty_out(vty, " Options: %s\n", options); start = (char *)network_lsa + sizeof(struct ospf6_network_lsa); end = (char *)lsa->header + ntohs(lsa->header->length); + if (use_json) + json_arr = json_object_new_array(); + for (current = start; current + sizeof(struct ospf6_network_lsdesc) <= end; current += sizeof(struct ospf6_network_lsdesc)) { lsdesc = (struct ospf6_network_lsdesc *)current; inet_ntop(AF_INET, &lsdesc->router_id, buf, sizeof(buf)); - vty_out(vty, " Attached Router: %s\n", buf); + if (use_json) + json_object_array_add(json_arr, + json_object_new_string(buf)); + else + vty_out(vty, " Attached Router: %s\n", buf); } + if (use_json) + json_object_object_add(json_obj, "attachedRouter", json_arr); + return 0; } @@ -602,7 +653,7 @@ static char *ospf6_link_lsa_get_prefix_str(struct ospf6_lsa *lsa, char *buf, end = (char *)lsa->header + ntohs(lsa->header->length); current = start; - do { + while (current + sizeof(struct ospf6_prefix) <= end) { prefix = (struct ospf6_prefix *)current; if (prefix->prefix_length == 0 || current + OSPF6_PREFIX_SIZE(prefix) > end) { @@ -620,12 +671,13 @@ static char *ospf6_link_lsa_get_prefix_str(struct ospf6_lsa *lsa, char *buf, inet_ntop(AF_INET6, &in6, buf, buflen); return (buf); } - } while (current <= end); + } } return NULL; } -static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_obj, bool use_json) { char *start, *end, *current; struct ospf6_link_lsa *link_lsa; @@ -634,6 +686,10 @@ static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) struct ospf6_prefix *prefix; const char *p, *mc, *la, *nu; struct in6_addr in6; + json_object *json_loop; + json_object *json_arr = NULL; + char str[15]; + char prefix_string[133]; link_lsa = (struct ospf6_link_lsa *)((caddr_t)lsa->header + sizeof(struct ospf6_lsa_header)); @@ -642,10 +698,18 @@ static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) inet_ntop(AF_INET6, &link_lsa->linklocal_addr, buf, sizeof(buf)); prefixnum = ntohl(link_lsa->prefix_num); - vty_out(vty, " Priority: %d Options: %s\n", link_lsa->priority, - options); - vty_out(vty, " LinkLocal Address: %s\n", buf); - vty_out(vty, " Number of Prefix: %d\n", prefixnum); + if (use_json) { + json_arr = json_object_new_array(); + json_object_int_add(json_obj, "priority", link_lsa->priority); + json_object_string_add(json_obj, "options", options); + json_object_string_add(json_obj, "linkLocalAddress", buf); + json_object_int_add(json_obj, "numberOfPrefix", prefixnum); + } else { + vty_out(vty, " Priority: %d Options: %s\n", + link_lsa->priority, options); + vty_out(vty, " LinkLocal Address: %s\n", buf); + vty_out(vty, " Number of Prefix: %d\n", prefixnum); + } start = (char *)link_lsa + sizeof(struct ospf6_link_lsa); end = (char *)lsa->header + ntohs(lsa->header->length); @@ -668,16 +732,31 @@ static int ospf6_link_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) nu = (CHECK_FLAG(prefix->prefix_options, OSPF6_PREFIX_OPTION_NU) ? 
"NU" : "--"); - vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p, mc, la, - nu); + if (use_json) { + json_loop = json_object_new_object(); + snprintf(str, sizeof(str), "%s|%s|%s|%s", p, mc, la, + nu); + json_object_string_add(json_loop, "prefixOption", str); + } else + vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p, + mc, la, nu); memset(&in6, 0, sizeof(in6)); memcpy(&in6, OSPF6_PREFIX_BODY(prefix), OSPF6_PREFIX_SPACE(prefix->prefix_length)); inet_ntop(AF_INET6, &in6, buf, sizeof(buf)); - vty_out(vty, " Prefix: %s/%d\n", buf, - prefix->prefix_length); + if (use_json) { + snprintf(prefix_string, sizeof(prefix_string), "%s/%d", + buf, prefix->prefix_length); + json_object_string_add(json_loop, "prefix", + prefix_string); + json_object_array_add(json_arr, json_loop); + } else + vty_out(vty, " Prefix: %s/%d\n", buf, + prefix->prefix_length); } + if (use_json) + json_object_object_add(json_obj, "prefix", json_arr); return 0; } @@ -803,7 +882,7 @@ static char *ospf6_intra_prefix_lsa_get_prefix_str(struct ospf6_lsa *lsa, end = (char *)lsa->header + ntohs(lsa->header->length); current = start; - do { + while (current + sizeof(struct ospf6_prefix) <= end) { prefix = (struct ospf6_prefix *)current; if (prefix->prefix_length == 0 || current + OSPF6_PREFIX_SIZE(prefix) > end) { @@ -823,12 +902,13 @@ static char *ospf6_intra_prefix_lsa_get_prefix_str(struct ospf6_lsa *lsa, prefix->prefix_length); return (buf); } - } while (current <= end); + } } - return (buf); + return NULL; } -static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_obj, bool use_json) { char *start, *end, *current; struct ospf6_intra_prefix_lsa *intra_prefix_lsa; @@ -838,6 +918,10 @@ static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) char id[16], adv_router[16]; const char *p, *mc, *la, *nu; struct in6_addr in6; + json_object *json_loop; + json_object *json_arr = NULL; + char str[15]; + char prefix_string[133]; intra_prefix_lsa = (struct ospf6_intra_prefix_lsa *)((caddr_t)lsa->header @@ -845,13 +929,25 @@ static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) prefixnum = ntohs(intra_prefix_lsa->prefix_num); - vty_out(vty, " Number of Prefix: %d\n", prefixnum); + if (use_json) { + json_arr = json_object_new_array(); + json_object_int_add(json_obj, "numberOfPrefix", prefixnum); + } else + vty_out(vty, " Number of Prefix: %d\n", prefixnum); inet_ntop(AF_INET, &intra_prefix_lsa->ref_id, id, sizeof(id)); inet_ntop(AF_INET, &intra_prefix_lsa->ref_adv_router, adv_router, sizeof(adv_router)); - vty_out(vty, " Reference: %s Id: %s Adv: %s\n", - ospf6_lstype_name(intra_prefix_lsa->ref_type), id, adv_router); + if (use_json) { + json_object_string_add( + json_obj, "reference", + ospf6_lstype_name(intra_prefix_lsa->ref_type)); + json_object_string_add(json_obj, "referenceId", id); + json_object_string_add(json_obj, "referenceAdv", adv_router); + } else + vty_out(vty, " Reference: %s Id: %s Adv: %s\n", + ospf6_lstype_name(intra_prefix_lsa->ref_type), id, + adv_router); start = (char *)intra_prefix_lsa + sizeof(struct ospf6_intra_prefix_lsa); @@ -875,16 +971,31 @@ static int ospf6_intra_prefix_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) nu = (CHECK_FLAG(prefix->prefix_options, OSPF6_PREFIX_OPTION_NU) ? 
"NU" : "--"); - vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p, mc, la, - nu); + if (use_json) { + json_loop = json_object_new_object(); + snprintf(str, sizeof(str), "%s|%s|%s|%s", p, mc, la, + nu); + json_object_string_add(json_loop, "prefixOption", str); + } else + vty_out(vty, " Prefix Options: %s|%s|%s|%s\n", p, + mc, la, nu); memset(&in6, 0, sizeof(in6)); memcpy(&in6, OSPF6_PREFIX_BODY(prefix), OSPF6_PREFIX_SPACE(prefix->prefix_length)); inet_ntop(AF_INET6, &in6, buf, sizeof(buf)); - vty_out(vty, " Prefix: %s/%d\n", buf, - prefix->prefix_length); + if (use_json) { + snprintf(prefix_string, sizeof(prefix_string), "%s/%d", + buf, prefix->prefix_length); + json_object_string_add(json_loop, "prefix", + prefix_string); + json_object_array_add(json_arr, json_loop); + } else + vty_out(vty, " Prefix: %s/%d\n", buf, + prefix->prefix_length); } + if (use_json) + json_object_object_add(json_obj, "prefix", json_arr); return 0; } @@ -1338,6 +1449,8 @@ static void ospf6_intra_prefix_update_route_origin(struct ospf6_route *oa_route, g_route->path.origin.id = h_path->origin.id; g_route->path.origin.adv_router = h_path->origin.adv_router; + if (nroute) + ospf6_route_unlock(nroute); break; } } diff --git a/ospf6d/ospf6_lsa.c b/ospf6d/ospf6_lsa.c index 29141ee7f8..f1b04c9bec 100644 --- a/ospf6d/ospf6_lsa.c +++ b/ospf6d/ospf6_lsa.c @@ -66,7 +66,8 @@ struct ospf6 *ospf6_get_by_lsdb(struct ospf6_lsa *lsa) return ospf6; } -static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_obj, bool use_json) { uint8_t *start, *end, *current; char byte[4]; @@ -74,18 +75,22 @@ static int ospf6_unknown_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) start = (uint8_t *)lsa->header + sizeof(struct ospf6_lsa_header); end = (uint8_t *)lsa->header + ntohs(lsa->header->length); - vty_out(vty, " Unknown contents:\n"); - for (current = start; current < end; current++) { - if ((current - start) % 16 == 0) - vty_out(vty, "\n "); - else if ((current - start) % 4 == 0) - vty_out(vty, " "); + if (use_json) + json_object_string_add(json_obj, "LsaType", "unknown"); + else { + vty_out(vty, " Unknown contents:\n"); + for (current = start; current < end; current++) { + if ((current - start) % 16 == 0) + vty_out(vty, "\n "); + else if ((current - start) % 4 == 0) + vty_out(vty, " "); + + snprintf(byte, sizeof(byte), "%02x", *current); + vty_out(vty, "%s", byte); + } - snprintf(byte, sizeof(byte), "%02x", *current); - vty_out(vty, "%s", byte); + vty_out(vty, "\n\n"); } - - vty_out(vty, "\n\n"); return 0; } @@ -392,13 +397,15 @@ void ospf6_lsa_show_summary_header(struct vty *vty) "AdvRouter", "Age", "SeqNum", "Payload"); } -void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa) +void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_array, bool use_json) { char adv_router[16], id[16]; int type; const struct ospf6_lsa_handler *handler; - char buf[64], tmpbuf[80]; + char buf[64]; int cnt = 0; + json_object *json_obj = NULL; assert(lsa); assert(lsa->header); @@ -409,34 +416,95 @@ void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa) type = ntohs(lsa->header->type); handler = ospf6_get_lsa_handler(lsa->header->type); + + if (use_json) + json_obj = json_object_new_object(); + if ((type == OSPF6_LSTYPE_INTER_PREFIX) || (type == OSPF6_LSTYPE_INTER_ROUTER) || (type == OSPF6_LSTYPE_AS_EXTERNAL)) { - vty_out(vty, "%-4s %-15s%-15s%4hu %8lx %30s\n", - 
ospf6_lstype_short_name(lsa->header->type), id, - adv_router, ospf6_lsa_age_current(lsa), - (unsigned long)ntohl(lsa->header->seqnum), - handler->lh_get_prefix_str(lsa, buf, sizeof(buf), 0)); + if (use_json) { + json_object_string_add( + json_obj, "type", + ospf6_lstype_short_name(lsa->header->type)); + json_object_string_add(json_obj, "lsId", id); + json_object_string_add(json_obj, "advRouter", + adv_router); + json_object_int_add(json_obj, "age", + ospf6_lsa_age_current(lsa)); + json_object_int_add( + json_obj, "seqNum", + (unsigned long)ntohl(lsa->header->seqnum)); + json_object_string_add( + json_obj, "payload", + handler->lh_get_prefix_str(lsa, buf, + sizeof(buf), 0)); + json_object_array_add(json_array, json_obj); + } else + vty_out(vty, "%-4s %-15s%-15s%4hu %8lx %30s\n", + ospf6_lstype_short_name(lsa->header->type), id, + adv_router, ospf6_lsa_age_current(lsa), + (unsigned long)ntohl(lsa->header->seqnum), + handler->lh_get_prefix_str(lsa, buf, + sizeof(buf), 0)); } else if (type != OSPF6_LSTYPE_UNKNOWN) { - snprintf(tmpbuf, sizeof(tmpbuf), "%-4s %-15s%-15s%4hu %8lx", - ospf6_lstype_short_name(lsa->header->type), id, - adv_router, ospf6_lsa_age_current(lsa), - (unsigned long)ntohl(lsa->header->seqnum)); - while (handler->lh_get_prefix_str(lsa, buf, sizeof(buf), cnt) != NULL) { - vty_out(vty, "%s %30s\n", tmpbuf, buf); + if (use_json) { + json_object_string_add( + json_obj, "type", + ospf6_lstype_short_name( + lsa->header->type)); + json_object_string_add(json_obj, "lsId", id); + json_object_string_add(json_obj, "advRouter", + adv_router); + json_object_int_add(json_obj, "age", + ospf6_lsa_age_current(lsa)); + json_object_int_add( + json_obj, "seqNum", + (unsigned long)ntohl( + lsa->header->seqnum)); + json_object_string_add(json_obj, "payload", + buf); + json_object_array_add(json_array, json_obj); + json_obj = json_object_new_object(); + } else + vty_out(vty, "%-4s %-15s%-15s%4hu %8lx %30s\n", + ospf6_lstype_short_name( + lsa->header->type), + id, adv_router, + ospf6_lsa_age_current(lsa), + (unsigned long)ntohl( + lsa->header->seqnum), + buf); cnt++; } + if (use_json) + json_object_free(json_obj); } else { - vty_out(vty, "%-4s %-15s%-15s%4hu %8lx\n", - ospf6_lstype_short_name(lsa->header->type), id, - adv_router, ospf6_lsa_age_current(lsa), - (unsigned long)ntohl(lsa->header->seqnum)); + if (use_json) { + json_object_string_add( + json_obj, "type", + ospf6_lstype_short_name(lsa->header->type)); + json_object_string_add(json_obj, "lsId", id); + json_object_string_add(json_obj, "advRouter", + adv_router); + json_object_int_add(json_obj, "age", + ospf6_lsa_age_current(lsa)); + json_object_int_add( + json_obj, "seqNum", + (unsigned long)ntohl(lsa->header->seqnum)); + json_object_array_add(json_array, json_obj); + } else + vty_out(vty, "%-4s %-15s%-15s%4hu %8lx\n", + ospf6_lstype_short_name(lsa->header->type), id, + adv_router, ospf6_lsa_age_current(lsa), + (unsigned long)ntohl(lsa->header->seqnum)); } } -void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa) +void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_array, bool use_json) { uint8_t *start, *end, *current; char byte[4]; @@ -444,6 +512,9 @@ void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa) start = (uint8_t *)lsa->header; end = (uint8_t *)lsa->header + ntohs(lsa->header->length); + if (use_json) + return; + vty_out(vty, "\n"); vty_out(vty, "%s:\n", lsa->name); @@ -458,12 +529,15 @@ void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa) } vty_out(vty, "\n\n"); + 
return; } -void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa) +void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_array, bool use_json) { char adv_router[64], id[64]; + json_object *json_obj; assert(lsa && lsa->header); @@ -471,30 +545,56 @@ void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa) inet_ntop(AF_INET, &lsa->header->adv_router, adv_router, sizeof(adv_router)); - vty_out(vty, "\n"); - vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa), - ospf6_lstype_name(lsa->header->type)); - vty_out(vty, "Link State ID: %s\n", id); - vty_out(vty, "Advertising Router: %s\n", adv_router); - vty_out(vty, "LS Sequence Number: %#010lx\n", - (unsigned long)ntohl(lsa->header->seqnum)); - vty_out(vty, "CheckSum: %#06hx Length: %hu\n", - ntohs(lsa->header->checksum), ntohs(lsa->header->length)); - vty_out(vty, "Flag: %x \n", lsa->flag); - vty_out(vty, "Lock: %d \n", lsa->lock); - vty_out(vty, "ReTx Count: %d\n", lsa->retrans_count); - vty_out(vty, "Threads: Expire: 0x%p, Refresh: 0x%p \n", - (void *)lsa->expire, (void *)lsa->refresh); - vty_out(vty, "\n"); + if (use_json) { + json_obj = json_object_new_object(); + json_object_int_add(json_obj, "age", + ospf6_lsa_age_current(lsa)); + json_object_string_add(json_obj, "type", + ospf6_lstype_name(lsa->header->type)); + json_object_string_add(json_obj, "linkStateId", id); + json_object_string_add(json_obj, "advertisingRouter", + adv_router); + json_object_int_add(json_obj, "lsSequenceNumber", + (unsigned long)ntohl(lsa->header->seqnum)); + json_object_int_add(json_obj, "checksum", + ntohs(lsa->header->checksum)); + json_object_int_add(json_obj, "length", + ntohs(lsa->header->length)); + json_object_int_add(json_obj, "flag", lsa->flag); + json_object_int_add(json_obj, "lock", lsa->lock); + json_object_int_add(json_obj, "reTxCount", lsa->retrans_count); + + /* Threads Data not added */ + json_object_array_add(json_array, json_obj); + } else { + vty_out(vty, "\n"); + vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa), + ospf6_lstype_name(lsa->header->type)); + vty_out(vty, "Link State ID: %s\n", id); + vty_out(vty, "Advertising Router: %s\n", adv_router); + vty_out(vty, "LS Sequence Number: %#010lx\n", + (unsigned long)ntohl(lsa->header->seqnum)); + vty_out(vty, "CheckSum: %#06hx Length: %hu\n", + ntohs(lsa->header->checksum), + ntohs(lsa->header->length)); + vty_out(vty, "Flag: %x \n", lsa->flag); + vty_out(vty, "Lock: %d \n", lsa->lock); + vty_out(vty, "ReTx Count: %d\n", lsa->retrans_count); + vty_out(vty, "Threads: Expire: 0x%p, Refresh: 0x%p \n", + (void *)lsa->expire, (void *)lsa->refresh); + vty_out(vty, "\n"); + } return; } -void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) +void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json_array, bool use_json) { char adv_router[64], id[64]; const struct ospf6_lsa_handler *handler; struct timeval now, res; char duration[64]; + json_object *json_obj = NULL; assert(lsa && lsa->header); @@ -505,27 +605,47 @@ void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa) monotime(&now); timersub(&now, &lsa->installed, &res); timerstring(&res, duration, sizeof(duration)); - - vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa), - ospf6_lstype_name(lsa->header->type)); - vty_out(vty, "Link State ID: %s\n", id); - vty_out(vty, "Advertising Router: %s\n", adv_router); - vty_out(vty, "LS Sequence Number: %#010lx\n", - (unsigned long)ntohl(lsa->header->seqnum)); - vty_out(vty, 
"CheckSum: %#06hx Length: %hu\n", - ntohs(lsa->header->checksum), ntohs(lsa->header->length)); - vty_out(vty, "Duration: %s\n", duration); + if (use_json) { + json_obj = json_object_new_object(); + json_object_int_add(json_obj, "age", + ospf6_lsa_age_current(lsa)); + json_object_string_add(json_obj, "type", + ospf6_lstype_name(lsa->header->type)); + json_object_string_add(json_obj, "advertisingRouter", + adv_router); + json_object_int_add(json_obj, "lsSequenceNumber", + (unsigned long)ntohl(lsa->header->seqnum)); + json_object_int_add(json_obj, "checkSum", + ntohs(lsa->header->checksum)); + json_object_int_add(json_obj, "length", + ntohs(lsa->header->length)); + json_object_string_add(json_obj, "duration", duration); + } else { + vty_out(vty, "Age: %4hu Type: %s\n", ospf6_lsa_age_current(lsa), + ospf6_lstype_name(lsa->header->type)); + vty_out(vty, "Link State ID: %s\n", id); + vty_out(vty, "Advertising Router: %s\n", adv_router); + vty_out(vty, "LS Sequence Number: %#010lx\n", + (unsigned long)ntohl(lsa->header->seqnum)); + vty_out(vty, "CheckSum: %#06hx Length: %hu\n", + ntohs(lsa->header->checksum), + ntohs(lsa->header->length)); + vty_out(vty, "Duration: %s\n", duration); + } handler = ospf6_get_lsa_handler(lsa->header->type); if (handler->lh_show != NULL) - handler->lh_show(vty, lsa); + handler->lh_show(vty, lsa, json_obj, use_json); else { assert(unknown_handler.lh_show != NULL); - unknown_handler.lh_show(vty, lsa); + unknown_handler.lh_show(vty, lsa, json_obj, use_json); } - vty_out(vty, "\n"); + if (use_json) + json_object_array_add(json_array, json_obj); + else + vty_out(vty, "\n"); } /* OSPFv3 LSA creation/deletion function */ diff --git a/ospf6d/ospf6_lsa.h b/ospf6d/ospf6_lsa.h index 814e276796..7fa9c5fe40 100644 --- a/ospf6d/ospf6_lsa.h +++ b/ospf6d/ospf6_lsa.h @@ -21,6 +21,7 @@ #ifndef OSPF6_LSA_H #define OSPF6_LSA_H #include "ospf6_top.h" +#include "lib/json.h" /* Debug option */ #define OSPF6_LSA_DEBUG 0x01 @@ -141,9 +142,10 @@ struct ospf6_lsa_handler { uint16_t lh_type; /* host byte order */ const char *lh_name; const char *lh_short_name; - int (*lh_show)(struct vty *, struct ospf6_lsa *); - char *(*lh_get_prefix_str)(struct ospf6_lsa *, char *buf, - int buflen, int pos); + int (*lh_show)(struct vty *, struct ospf6_lsa *, json_object *json_obj, + bool use_json); + char *(*lh_get_prefix_str)(struct ospf6_lsa *, char *buf, int buflen, + int pos); uint8_t lh_debug; }; @@ -206,10 +208,14 @@ extern char *ospf6_lsa_printbuf(struct ospf6_lsa *lsa, char *buf, int size); extern void ospf6_lsa_header_print_raw(struct ospf6_lsa_header *header); extern void ospf6_lsa_header_print(struct ospf6_lsa *lsa); extern void ospf6_lsa_show_summary_header(struct vty *vty); -extern void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa); -extern void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa); -extern void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa); -extern void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa); +extern void ospf6_lsa_show_summary(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json, bool use_json); +extern void ospf6_lsa_show_dump(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json, bool use_json); +extern void ospf6_lsa_show_internal(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json, bool use_json); +extern void ospf6_lsa_show(struct vty *vty, struct ospf6_lsa *lsa, + json_object *json, bool use_json); extern struct ospf6_lsa *ospf6_lsa_create(struct ospf6_lsa_header *header); extern struct 
ospf6_lsa * diff --git a/ospf6d/ospf6_lsdb.c b/ospf6d/ospf6_lsdb.c index c136c558cb..9636e1a230 100644 --- a/ospf6d/ospf6_lsdb.c +++ b/ospf6d/ospf6_lsdb.c @@ -346,55 +346,6 @@ int ospf6_lsdb_maxage_remover(struct ospf6_lsdb *lsdb) return (reschedule); } -void ospf6_lsdb_show(struct vty *vty, enum ospf_lsdb_show_level level, - uint16_t *type, uint32_t *id, uint32_t *adv_router, - struct ospf6_lsdb *lsdb) -{ - struct ospf6_lsa *lsa; - const struct route_node *end = NULL; - void (*showfunc)(struct vty *, struct ospf6_lsa *) = NULL; - - switch (level) { - case OSPF6_LSDB_SHOW_LEVEL_DETAIL: - showfunc = ospf6_lsa_show; - break; - case OSPF6_LSDB_SHOW_LEVEL_INTERNAL: - showfunc = ospf6_lsa_show_internal; - break; - case OSPF6_LSDB_SHOW_LEVEL_DUMP: - showfunc = ospf6_lsa_show_dump; - break; - case OSPF6_LSDB_SHOW_LEVEL_NORMAL: - default: - showfunc = ospf6_lsa_show_summary; - } - - if (type && id && adv_router) { - lsa = ospf6_lsdb_lookup(*type, *id, *adv_router, lsdb); - if (lsa) { - if (level == OSPF6_LSDB_SHOW_LEVEL_NORMAL) - ospf6_lsa_show(vty, lsa); - else - (*showfunc)(vty, lsa); - } - return; - } - - if (level == OSPF6_LSDB_SHOW_LEVEL_NORMAL) - ospf6_lsa_show_summary_header(vty); - - end = ospf6_lsdb_head(lsdb, !!type + !!(type && adv_router), - type ? *type : 0, adv_router ? *adv_router : 0, - &lsa); - while (lsa) { - if ((!adv_router || lsa->header->adv_router == *adv_router) - && (!id || lsa->header->id == *id)) - (*showfunc)(vty, lsa); - - lsa = ospf6_lsdb_next(end, lsa); - } -} - uint32_t ospf6_new_ls_id(uint16_t type, uint32_t adv_router, struct ospf6_lsdb *lsdb) { diff --git a/ospf6d/ospf6_lsdb.h b/ospf6d/ospf6_lsdb.h index 457e3dc4e4..7a62c46b02 100644 --- a/ospf6d/ospf6_lsdb.h +++ b/ospf6d/ospf6_lsdb.h @@ -92,7 +92,8 @@ enum ospf_lsdb_show_level { extern void ospf6_lsdb_show(struct vty *vty, enum ospf_lsdb_show_level level, uint16_t *type, uint32_t *id, uint32_t *adv_router, - struct ospf6_lsdb *lsdb); + struct ospf6_lsdb *lsdb, json_object *json, + bool use_json); extern uint32_t ospf6_new_ls_id(uint16_t type, uint32_t adv_router, struct ospf6_lsdb *lsdb); diff --git a/ospf6d/ospf6_route.c b/ospf6d/ospf6_route.c index 60c208437b..b77f968179 100644 --- a/ospf6d/ospf6_route.c +++ b/ospf6d/ospf6_route.c @@ -163,6 +163,10 @@ const char *const ospf6_path_type_substr[OSPF6_PATH_TYPE_MAX] = { "??", "IA", "IE", "E1", "E2", }; +const char *ospf6_path_type_json[OSPF6_PATH_TYPE_MAX] = { + "UnknownRoute", "IntraArea", "InterArea", "External1", "External2", +}; + struct ospf6_nexthop *ospf6_nexthop_create(void) { @@ -1030,7 +1034,8 @@ void ospf6_route_table_delete(struct ospf6_route_table *table) /* VTY commands */ -void ospf6_route_show(struct vty *vty, struct ospf6_route *route) +void ospf6_route_show(struct vty *vty, struct ospf6_route *route, + json_object *json_array_routes, bool use_json) { int i; char destination[PREFIX2STR_BUFFER], nexthop[64]; @@ -1038,6 +1043,9 @@ void ospf6_route_show(struct vty *vty, struct ospf6_route *route) struct timeval now, res; struct listnode *node; struct ospf6_nexthop *nh; + json_object *json_route = NULL; + json_object *json_array_next_hops = NULL; + json_object *json_next_hop; if (om6->ospf6 == NULL) { vty_out(vty, "OSPFv3 is not running\n"); @@ -1058,34 +1066,74 @@ void ospf6_route_show(struct vty *vty, struct ospf6_route *route) else prefix2str(&route->prefix, destination, sizeof(destination)); - i = 0; + if (use_json) { + json_route = json_object_new_object(); + json_object_string_add(json_route, "destination", destination); + 
json_object_boolean_add(json_route, "isBestRoute", + ospf6_route_is_best(route)); + json_object_string_add(json_route, "destinationType", + OSPF6_DEST_TYPE_SUBSTR(route->type)); + json_object_string_add( + json_route, "pathType", + OSPF6_PATH_TYPE_SUBSTR(route->path.type)); + json_object_string_add(json_route, "duration", duration); + } + + /* Nexthops */ + if (use_json) + json_array_next_hops = json_object_new_array(); + else + i = 0; for (ALL_LIST_ELEMENTS_RO(route->nh_list, node, nh)) { struct interface *ifp; /* nexthop */ inet_ntop(AF_INET6, &nh->address, nexthop, sizeof(nexthop)); ifp = if_lookup_by_index_all_vrf(nh->ifindex); - if (!i) { - vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n", - (ospf6_route_is_best(route) ? '*' : ' '), - OSPF6_DEST_TYPE_SUBSTR(route->type), - OSPF6_PATH_TYPE_SUBSTR(route->path.type), - destination, nexthop, IFNAMSIZ, ifp->name, - duration); - i++; - } else - vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n", ' ', - "", "", "", nexthop, IFNAMSIZ, ifp->name, ""); + if (use_json) { + json_next_hop = json_object_new_object(); + json_object_string_add(json_next_hop, "nextHop", + nexthop); + json_object_string_add(json_next_hop, "interfaceName", + ifp->name); + json_object_array_add(json_array_next_hops, + json_next_hop); + } else { + if (!i) { + vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n", + (ospf6_route_is_best(route) ? '*' + : ' '), + OSPF6_DEST_TYPE_SUBSTR(route->type), + OSPF6_PATH_TYPE_SUBSTR( + route->path.type), + destination, nexthop, IFNAMSIZ, + ifp->name, duration); + i++; + } else + vty_out(vty, "%c%1s %2s %-30s %-25s %6.*s %s\n", + ' ', "", "", "", nexthop, IFNAMSIZ, + ifp->name, ""); + } + } + if (use_json) { + json_object_object_add(json_route, "nextHops", + json_array_next_hops); + json_object_array_add(json_array_routes, json_route); } } -void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route) +void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route, + json_object *json_array_routes, bool use_json) { - char destination[PREFIX2STR_BUFFER]; + char destination[PREFIX2STR_BUFFER], nexthop[64]; char area_id[16], id[16], adv_router[16], capa[16], options[16]; struct timeval now, res; char duration[64]; struct listnode *node; struct ospf6_nexthop *nh; + char flag[6]; + json_object *json_route = NULL; + json_object *json_array_next_hops = NULL; + json_object *json_next_hop; if (om6->ospf6 == NULL) { vty_out(vty, "OSPFv3 is not running\n"); @@ -1103,84 +1151,177 @@ void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route) destination, sizeof(destination)); else prefix2str(&route->prefix, destination, sizeof(destination)); - vty_out(vty, "Destination: %s\n", destination); - /* destination type */ - vty_out(vty, "Destination type: %s\n", - OSPF6_DEST_TYPE_NAME(route->type)); + if (use_json) { + json_route = json_object_new_object(); + json_object_string_add(json_route, "destination", destination); + json_object_string_add(json_route, "destinationType", + OSPF6_DEST_TYPE_NAME(route->type)); + } else { + vty_out(vty, "Destination: %s\n", destination); + vty_out(vty, "Destination type: %s\n", + OSPF6_DEST_TYPE_NAME(route->type)); + } /* Time */ timersub(&now, &route->installed, &res); timerstring(&res, duration, sizeof(duration)); - vty_out(vty, "Installed Time: %s ago\n", duration); + if (use_json) + json_object_string_add(json_route, "installedTimeSince", + duration); + else + vty_out(vty, "Installed Time: %s ago\n", duration); timersub(&now, &route->changed, &res); timerstring(&res, duration, 
sizeof(duration)); - vty_out(vty, " Changed Time: %s ago\n", duration); + if (use_json) + json_object_string_add(json_route, "changedTimeSince", + duration); + else + vty_out(vty, "Changed Time: %s ago\n", duration); /* Debugging info */ - vty_out(vty, "Lock: %d Flags: %s%s%s%s\n", route->lock, - (CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST) ? "B" : "-"), - (CHECK_FLAG(route->flag, OSPF6_ROUTE_ADD) ? "A" : "-"), - (CHECK_FLAG(route->flag, OSPF6_ROUTE_REMOVE) ? "R" : "-"), - (CHECK_FLAG(route->flag, OSPF6_ROUTE_CHANGE) ? "C" : "-")); - vty_out(vty, "Memory: prev: %p this: %p next: %p\n", - (void *)route->prev, (void *)route, (void *)route->next); + if (use_json) { + json_object_int_add(json_route, "numberOfLock", route->lock); + snprintf( + flag, sizeof(flag), "%s%s%s%s", + (CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST) ? "B" : "-"), + (CHECK_FLAG(route->flag, OSPF6_ROUTE_ADD) ? "A" : "-"), + (CHECK_FLAG(route->flag, OSPF6_ROUTE_REMOVE) ? "R" + : "-"), + (CHECK_FLAG(route->flag, OSPF6_ROUTE_CHANGE) ? "C" + : "-")); + json_object_string_add(json_route, "flags", flag); + } else { + vty_out(vty, "Lock: %d Flags: %s%s%s%s\n", route->lock, + (CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST) ? "B" : "-"), + (CHECK_FLAG(route->flag, OSPF6_ROUTE_ADD) ? "A" : "-"), + (CHECK_FLAG(route->flag, OSPF6_ROUTE_REMOVE) ? "R" + : "-"), + (CHECK_FLAG(route->flag, OSPF6_ROUTE_CHANGE) ? "C" + : "-")); + vty_out(vty, "Memory: prev: %p this: %p next: %p\n", + (void *)route->prev, (void *)route, + (void *)route->next); + } /* Path section */ /* Area-ID */ inet_ntop(AF_INET, &route->path.area_id, area_id, sizeof(area_id)); - vty_out(vty, "Associated Area: %s\n", area_id); + if (use_json) + json_object_string_add(json_route, "associatedArea", area_id); + else + vty_out(vty, "Associated Area: %s\n", area_id); /* Path type */ - vty_out(vty, "Path Type: %s\n", OSPF6_PATH_TYPE_NAME(route->path.type)); + if (use_json) + json_object_string_add(json_route, "pathType", + OSPF6_PATH_TYPE_NAME(route->path.type)); + else + vty_out(vty, "Path Type: %s\n", + OSPF6_PATH_TYPE_NAME(route->path.type)); /* LS Origin */ inet_ntop(AF_INET, &route->path.origin.id, id, sizeof(id)); inet_ntop(AF_INET, &route->path.origin.adv_router, adv_router, sizeof(adv_router)); - vty_out(vty, "LS Origin: %s Id: %s Adv: %s\n", - ospf6_lstype_name(route->path.origin.type), id, adv_router); + if (use_json) { + json_object_string_add( + json_route, "lsOriginRoutePathType", + ospf6_lstype_name(route->path.origin.type)); + json_object_string_add(json_route, "lsId", id); + json_object_string_add(json_route, "lsAdvertisingRouter", + adv_router); + } else { + vty_out(vty, "LS Origin: %s Id: %s Adv: %s\n", + ospf6_lstype_name(route->path.origin.type), id, + adv_router); + } /* Options */ ospf6_options_printbuf(route->path.options, options, sizeof(options)); - vty_out(vty, "Options: %s\n", options); + if (use_json) + json_object_string_add(json_route, "options", options); + else + vty_out(vty, "Options: %s\n", options); /* Router Bits */ ospf6_capability_printbuf(route->path.router_bits, capa, sizeof(capa)); - vty_out(vty, "Router Bits: %s\n", capa); + if (use_json) + json_object_string_add(json_route, "routerBits", capa); + else + vty_out(vty, "Router Bits: %s\n", capa); /* Prefix Options */ - vty_out(vty, "Prefix Options: xxx\n"); + if (use_json) + json_object_string_add(json_route, "prefixOptions", "xxx"); + else + vty_out(vty, "Prefix Options: xxx\n"); /* Metrics */ - vty_out(vty, "Metric Type: %d\n", route->path.metric_type); - vty_out(vty, "Metric: %d (%d)\n", 
route->path.cost, - route->path.u.cost_e2); + if (use_json) { + json_object_int_add(json_route, "metricType", + route->path.metric_type); + json_object_int_add(json_route, "metricCost", route->path.cost); + json_object_int_add(json_route, "metricCostE2", + route->path.u.cost_e2); + + json_object_int_add(json_route, "pathsCount", + route->paths->count); + json_object_int_add(json_route, "nextHopCount", + route->nh_list->count); + } else { + vty_out(vty, "Metric Type: %d\n", route->path.metric_type); + vty_out(vty, "Metric: %d (%d)\n", route->path.cost, + route->path.u.cost_e2); + + vty_out(vty, "Paths count: %u\n", route->paths->count); + vty_out(vty, "Nexthop count: %u\n", route->nh_list->count); + } - vty_out(vty, "Paths count: %u\n", route->paths->count); - vty_out(vty, "Nexthop count: %u\n", route->nh_list->count); /* Nexthops */ - vty_out(vty, "Nexthop:\n"); + if (use_json) + json_array_next_hops = json_object_new_array(); + else + vty_out(vty, "Nexthop:\n"); + for (ALL_LIST_ELEMENTS_RO(route->nh_list, node, nh)) { struct interface *ifp; - /* nexthop */ - ifp = if_lookup_by_index_all_vrf(nh->ifindex); - vty_out(vty, " %pI6 %.*s\n", &nh->address, IFNAMSIZ, ifp->name); + /* nexthop */ + if (use_json) { + inet_ntop(AF_INET6, &nh->address, nexthop, + sizeof(nexthop)); + json_next_hop = json_object_new_object(); + json_object_string_add(json_next_hop, "nextHop", + nexthop); + json_object_string_add(json_next_hop, "interfaceName", + ifp->name); + json_object_array_add(json_array_next_hops, + json_next_hop); + } else + vty_out(vty, " %pI6 %.*s\n", &nh->address, IFNAMSIZ, + ifp->name); } - vty_out(vty, "\n"); + if (use_json) { + json_object_object_add(json_route, "nextHops", + json_array_next_hops); + json_object_array_add(json_array_routes, json_route); + } else + vty_out(vty, "\n"); } static void ospf6_route_show_table_summary(struct vty *vty, - struct ospf6_route_table *table) + struct ospf6_route_table *table, + json_object *json, bool use_json) { struct ospf6_route *route, *prev = NULL; int i, pathtype[OSPF6_PATH_TYPE_MAX]; unsigned int number = 0; int nh_count = 0, nhinval = 0, ecmp = 0; int alternative = 0, destination = 0; + char path_str[30]; for (i = 0; i < OSPF6_PATH_TYPE_MAX; i++) pathtype[i] = 0; @@ -1203,111 +1344,164 @@ static void ospf6_route_show_table_summary(struct vty *vty, } assert(number == table->count); - - vty_out(vty, "Number of OSPFv3 routes: %d\n", number); - vty_out(vty, "Number of Destination: %d\n", destination); - vty_out(vty, "Number of Alternative routes: %d\n", alternative); - vty_out(vty, "Number of Equal Cost Multi Path: %d\n", ecmp); + if (use_json) { + json_object_int_add(json, "numberOfOspfv3Routes", number); + json_object_int_add(json, "numberOfDestination", destination); + json_object_int_add(json, "numberOfAlternativeRoutes", + alternative); + json_object_int_add(json, "numberOfEcmp", ecmp); + } else { + vty_out(vty, "Number of OSPFv3 routes: %d\n", number); + vty_out(vty, "Number of Destination: %d\n", destination); + vty_out(vty, "Number of Alternative routes: %d\n", alternative); + vty_out(vty, "Number of Equal Cost Multi Path: %d\n", ecmp); + } for (i = OSPF6_PATH_TYPE_INTRA; i <= OSPF6_PATH_TYPE_EXTERNAL2; i++) { - vty_out(vty, "Number of %s routes: %d\n", - OSPF6_PATH_TYPE_NAME(i), pathtype[i]); + if (use_json) { + snprintf(path_str, sizeof(path_str), "numberOf%sRoutes", + OSPF6_PATH_TYPE_JSON(i)); + json_object_int_add(json, path_str, pathtype[i]); + } else + vty_out(vty, "Number of %s routes: %d\n", + OSPF6_PATH_TYPE_NAME(i), pathtype[i]); } } 
static void ospf6_route_show_table_prefix(struct vty *vty, struct prefix *prefix, - struct ospf6_route_table *table) + struct ospf6_route_table *table, + json_object *json, bool use_json) { struct ospf6_route *route; + json_object *json_array_routes = NULL; route = ospf6_route_lookup(prefix, table); if (route == NULL) return; + if (use_json) + json_array_routes = json_object_new_array(); ospf6_route_lock(route); while (route && ospf6_route_is_prefix(prefix, route)) { /* Specifying a prefix will always display details */ - ospf6_route_show_detail(vty, route); + ospf6_route_show_detail(vty, route, json_array_routes, + use_json); route = ospf6_route_next(route); } + + if (use_json) + json_object_object_add(json, "routes", json_array_routes); if (route) ospf6_route_unlock(route); } static void ospf6_route_show_table_address(struct vty *vty, struct prefix *prefix, - struct ospf6_route_table *table) + struct ospf6_route_table *table, + json_object *json, bool use_json) { struct ospf6_route *route; + json_object *json_array_routes = NULL; route = ospf6_route_lookup_bestmatch(prefix, table); if (route == NULL) return; + if (use_json) + json_array_routes = json_object_new_array(); prefix = &route->prefix; ospf6_route_lock(route); while (route && ospf6_route_is_prefix(prefix, route)) { /* Specifying a prefix will always display details */ - ospf6_route_show_detail(vty, route); + ospf6_route_show_detail(vty, route, json_array_routes, + use_json); route = ospf6_route_next(route); } + if (use_json) + json_object_object_add(json, "routes", json_array_routes); if (route) ospf6_route_unlock(route); } static void ospf6_route_show_table_match(struct vty *vty, int detail, struct prefix *prefix, - struct ospf6_route_table *table) + struct ospf6_route_table *table, + json_object *json, bool use_json) { struct ospf6_route *route; + json_object *json_array_routes = NULL; + assert(prefix->family); route = ospf6_route_match_head(prefix, table); + if (use_json) + json_array_routes = json_object_new_array(); while (route) { if (detail) - ospf6_route_show_detail(vty, route); + ospf6_route_show_detail(vty, route, json_array_routes, + use_json); else - ospf6_route_show(vty, route); + ospf6_route_show(vty, route, json_array_routes, + use_json); route = ospf6_route_match_next(prefix, route); } + if (use_json) + json_object_object_add(json, "routes", json_array_routes); } static void ospf6_route_show_table_type(struct vty *vty, int detail, uint8_t type, - struct ospf6_route_table *table) + struct ospf6_route_table *table, + json_object *json, bool use_json) { struct ospf6_route *route; + json_object *json_array_routes = NULL; route = ospf6_route_head(table); + if (use_json) + json_array_routes = json_object_new_array(); while (route) { if (route->path.type == type) { if (detail) - ospf6_route_show_detail(vty, route); + ospf6_route_show_detail(vty, route, + json_array_routes, + use_json); else - ospf6_route_show(vty, route); + ospf6_route_show(vty, route, json_array_routes, + use_json); } route = ospf6_route_next(route); } + if (use_json) + json_object_object_add(json, "routes", json_array_routes); } static void ospf6_route_show_table(struct vty *vty, int detail, - struct ospf6_route_table *table) + struct ospf6_route_table *table, + json_object *json, bool use_json) { struct ospf6_route *route; + json_object *json_array_routes = NULL; route = ospf6_route_head(table); + if (use_json) + json_array_routes = json_object_new_array(); while (route) { if (detail) - ospf6_route_show_detail(vty, route); + 
ospf6_route_show_detail(vty, route, json_array_routes, + use_json); else - ospf6_route_show(vty, route); + ospf6_route_show(vty, route, json_array_routes, + use_json); route = ospf6_route_next(route); } + if (use_json) + json_object_object_add(json, "routes", json_array_routes); } int ospf6_route_table_show(struct vty *vty, int argc_start, int argc, struct cmd_token **argv, - struct ospf6_route_table *table) + struct ospf6_route_table *table, bool use_json) { int summary = 0; int match = 0; @@ -1317,10 +1511,15 @@ int ospf6_route_table_show(struct vty *vty, int argc_start, int argc, int i, ret; struct prefix prefix; uint8_t type = 0; + int arg_end = use_json ? (argc - 1) : argc; + json_object *json = NULL; memset(&prefix, 0, sizeof(struct prefix)); - for (i = argc_start; i < argc; i++) { + if (use_json) + json = json_object_new_object(); + + for (i = argc_start; i < arg_end; i++) { if (strmatch(argv[i]->text, "summary")) { summary++; continue; @@ -1363,14 +1562,24 @@ int ospf6_route_table_show(struct vty *vty, int argc_start, int argc, slash++; continue; } + if (use_json) + json_object_string_add(json, "malformedArgument", + argv[i]->arg); + else + vty_out(vty, "Malformed argument: %s\n", argv[i]->arg); - vty_out(vty, "Malformed argument: %s\n", argv[i]->arg); return CMD_SUCCESS; } /* Give summary of this route table */ if (summary) { - ospf6_route_show_table_summary(vty, table); + ospf6_route_show_table_summary(vty, table, json, use_json); + if (use_json) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } return CMD_SUCCESS; } @@ -1378,20 +1587,36 @@ int ospf6_route_table_show(struct vty *vty, int argc_start, int argc, if (isprefix && !match) { /* If exact address, give best matching route */ if (!slash) - ospf6_route_show_table_address(vty, &prefix, table); + ospf6_route_show_table_address(vty, &prefix, table, + json, use_json); else - ospf6_route_show_table_prefix(vty, &prefix, table); - + ospf6_route_show_table_prefix(vty, &prefix, table, json, + use_json); + + if (use_json) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } return CMD_SUCCESS; } if (match) - ospf6_route_show_table_match(vty, detail, &prefix, table); + ospf6_route_show_table_match(vty, detail, &prefix, table, json, + use_json); else if (type) - ospf6_route_show_table_type(vty, detail, type, table); + ospf6_route_show_table_type(vty, detail, type, table, json, + use_json); else - ospf6_route_show_table(vty, detail, table); + ospf6_route_show_table(vty, detail, table, json, use_json); + if (use_json) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } return CMD_SUCCESS; } @@ -1439,7 +1664,7 @@ static void ospf6_linkstate_show_table_exact(struct vty *vty, ospf6_route_lock(route); while (route && ospf6_route_is_prefix(prefix, route)) { /* Specifying a prefix will always display details */ - ospf6_route_show_detail(vty, route); + ospf6_route_show_detail(vty, route, NULL, false); route = ospf6_route_next(route); } if (route) @@ -1457,7 +1682,7 @@ static void ospf6_linkstate_show_table(struct vty *vty, int detail, route = ospf6_route_head(table); while (route) { if (detail) - ospf6_route_show_detail(vty, route); + ospf6_route_show_detail(vty, route, NULL, false); else ospf6_linkstate_show(vty, route); route = ospf6_route_next(route); diff --git a/ospf6d/ospf6_route.h b/ospf6d/ospf6_route.h index 
e2118003d5..a791a82cd4 100644 --- a/ospf6d/ospf6_route.h +++ b/ospf6d/ospf6_route.h @@ -23,6 +23,7 @@ #include "command.h" #include "zclient.h" +#include "lib/json.h" #define OSPF6_MULTI_PATH_LIMIT 4 @@ -233,6 +234,9 @@ extern const char *const ospf6_path_type_substr[OSPF6_PATH_TYPE_MAX]; #define OSPF6_PATH_TYPE_SUBSTR(x) \ (0 < (x) && (x) < OSPF6_PATH_TYPE_MAX ? ospf6_path_type_substr[(x)] \ : ospf6_path_type_substr[0]) +#define OSPF6_PATH_TYPE_JSON(x) \ + (0 < (x) && (x) < OSPF6_PATH_TYPE_MAX ? ospf6_path_type_json[(x)] \ + : ospf6_path_type_json[0]) #define OSPF6_ROUTE_ADDRESS_STR "Display the route bestmatches the address\n" #define OSPF6_ROUTE_PREFIX_STR "Display the route\n" @@ -326,11 +330,14 @@ extern void ospf6_route_table_delete(struct ospf6_route_table *table); extern void ospf6_route_dump(struct ospf6_route_table *table); -extern void ospf6_route_show(struct vty *vty, struct ospf6_route *route); -extern void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route); +extern void ospf6_route_show(struct vty *vty, struct ospf6_route *route, + json_object *json, bool use_json); +extern void ospf6_route_show_detail(struct vty *vty, struct ospf6_route *route, + json_object *json, bool use_json); + extern int ospf6_route_table_show(struct vty *, int, int, struct cmd_token **, - struct ospf6_route_table *); + struct ospf6_route_table *, bool use_json); extern int ospf6_linkstate_table_show(struct vty *vty, int idx_ipv4, int argc, struct cmd_token **argv, struct ospf6_route_table *table); diff --git a/ospf6d/ospf6_spf.c b/ospf6d/ospf6_spf.c index 70771c6060..f94252991c 100644 --- a/ospf6d/ospf6_spf.c +++ b/ospf6d/ospf6_spf.c @@ -432,9 +432,8 @@ void ospf6_spf_table_finish(struct ospf6_route_table *result_table) } } -static const char *const ospf6_spf_reason_str[] = { - "R+", "R-", "N+", "N-", "L+", "L-", "R*", "N*", -}; +static const char *const ospf6_spf_reason_str[] = {"R+", "R-", "N+", "N-", "L+", + "L-", "R*", "N*", "C"}; void ospf6_spf_reason_string(unsigned int reason, char *buf, int size) { @@ -655,7 +654,7 @@ static int ospf6_spf_calculation_thread(struct thread *t) (long long)runtime.tv_usec); zlog_info( - "SPF processing: # Areas: %d, SPF runtime: %lld sec %lld usec, Reason: %s\n", + "SPF processing: # Areas: %d, SPF runtime: %lld sec %lld usec, Reason: %s", areas_processed, (long long)runtime.tv_sec, (long long)runtime.tv_usec, rbuf); diff --git a/ospf6d/ospf6_spf.h b/ospf6d/ospf6_spf.h index 853ce4de07..253888d8ce 100644 --- a/ospf6d/ospf6_spf.h +++ b/ospf6d/ospf6_spf.h @@ -88,6 +88,7 @@ struct ospf6_vertex { #define OSPF6_SPF_FLAGS_LINK_LSA_REMOVED (1 << 5) #define OSPF6_SPF_FLAGS_ROUTER_LSA_ORIGINATED (1 << 6) #define OSPF6_SPF_FLAGS_NETWORK_LSA_ORIGINATED (1 << 7) +#define OSPF6_SPF_FLAGS_CONFIG_CHANGE (1 << 8) static inline void ospf6_set_spf_reason(struct ospf6 *ospf, unsigned int reason) { diff --git a/ospf6d/ospf6_top.c b/ospf6d/ospf6_top.c index 7b4ed84d53..3f72ec828e 100644 --- a/ospf6d/ospf6_top.c +++ b/ospf6d/ospf6_top.c @@ -51,6 +51,7 @@ #include "ospf6_intra.h" #include "ospf6_spf.h" #include "ospf6d.h" +#include "lib/json.h" DEFINE_QOBJ_TYPE(ospf6) @@ -267,6 +268,8 @@ static struct ospf6 *ospf6_create(const char *name) o->distance_table = route_table_init(); o->fd = -1; + o->max_multipath = MULTIPATH_NUM; + QOBJ_REG(o, ospf6); /* Make ospf protocol socket. 
*/ @@ -718,39 +721,6 @@ DEFUN (no_ospf6_distance_ospf6, return CMD_SUCCESS; } -#if 0 -DEFUN (ospf6_distance_source, - ospf6_distance_source_cmd, - "distance (1-255) X:X::X:X/M [WORD]", - "Administrative distance\n" - "Distance value\n" - "IP source prefix\n" - "Access list name\n") -{ - VTY_DECLVAR_CONTEXT(ospf6, o); - char *alname = (argc == 4) ? argv[3]->arg : NULL; - ospf6_distance_set (vty, o, argv[1]->arg, argv[2]->arg, alname); - - return CMD_SUCCESS; -} - -DEFUN (no_ospf6_distance_source, - no_ospf6_distance_source_cmd, - "no distance (1-255) X:X::X:X/M [WORD]", - NO_STR - "Administrative distance\n" - "Distance value\n" - "IP source prefix\n" - "Access list name\n") -{ - VTY_DECLVAR_CONTEXT(ospf6, o); - char *alname = (argc == 5) ? argv[4]->arg : NULL; - ospf6_distance_unset (vty, o, argv[2]->arg, argv[3]->arg, alname); - - return CMD_SUCCESS; -} -#endif - DEFUN (ospf6_interface_area, ospf6_interface_area_cmd, "interface IFNAME area <A.B.C.D|(0-4294967295)>", @@ -911,54 +881,61 @@ DEFUN (no_ospf6_stub_router_admin, return CMD_SUCCESS; } -#if 0 -DEFUN (ospf6_stub_router_startup, - ospf6_stub_router_startup_cmd, - "stub-router on-startup (5-86400)", - "Make router a stub router\n" - "Advertise inability to be a transit router\n" - "Automatically advertise as stub-router on startup of OSPF6\n" - "Time (seconds) to advertise self as stub-router\n") +/* Restart OSPF SPF algorithm*/ +static void ospf6_restart_spf(struct ospf6 *ospf6) { - return CMD_SUCCESS; + ospf6_route_remove_all(ospf6->route_table); + ospf6_route_remove_all(ospf6->brouter_table); + ospf6_route_remove_all(ospf6->external_table); + + /* Trigger SPF */ + ospf6_spf_schedule(ospf6, OSPF6_SPF_FLAGS_CONFIG_CHANGE); } -DEFUN (no_ospf6_stub_router_startup, - no_ospf6_stub_router_startup_cmd, - "no stub-router on-startup", - NO_STR - "Make router a stub router\n" - "Advertise inability to be a transit router\n" - "Automatically advertise as stub-router on startup of OSPF6\n" - "Time (seconds) to advertise self as stub-router\n") +/* Set the max paths */ +static void ospf6_maxpath_set(struct ospf6 *ospf6, uint16_t paths) { - return CMD_SUCCESS; + if (ospf6->max_multipath == paths) + return; + + ospf6->max_multipath = paths; + + /* Send deletion to zebra to delete all + * ospf specific routes and reinitiate + * SPF to reflect the new max multipath. 
+ */ + ospf6_restart_spf(ospf6); } -DEFUN (ospf6_stub_router_shutdown, - ospf6_stub_router_shutdown_cmd, - "stub-router on-shutdown (5-86400)", - "Make router a stub router\n" - "Advertise inability to be a transit router\n" - "Automatically advertise as stub-router before shutdown\n" - "Time (seconds) to advertise self as stub-router\n") +/* Ospf Maximum-paths config support */ +DEFUN(ospf6_max_multipath, + ospf6_max_multipath_cmd, + "maximum-paths " CMD_RANGE_STR(1, MULTIPATH_NUM), + "Max no of multiple paths for ECMP support\n" + "Number of paths\n") { - return CMD_SUCCESS; + VTY_DECLVAR_CONTEXT(ospf6, ospf6); + int idx_number = 1; + int maximum_paths = strtol(argv[idx_number]->arg, NULL, 10); + + ospf6_maxpath_set(ospf6, maximum_paths); + + return CMD_SUCCESS; } -DEFUN (no_ospf6_stub_router_shutdown, - no_ospf6_stub_router_shutdown_cmd, - "no stub-router on-shutdown", - NO_STR - "Make router a stub router\n" - "Advertise inability to be a transit router\n" - "Automatically advertise as stub-router before shutdown\n" - "Time (seconds) to advertise self as stub-router\n") +DEFUN(no_ospf6_max_multipath, + no_ospf6_max_multipath_cmd, + "no maximum-paths [" CMD_RANGE_STR(1, MULTIPATH_NUM)"]", + NO_STR + "Max no of multiple paths for ECMP support\n" + "Number of paths\n") { - return CMD_SUCCESS; -} -#endif + VTY_DECLVAR_CONTEXT(ospf6, ospf6); + ospf6_maxpath_set(ospf6, MULTIPATH_NUM); + + return CMD_SUCCESS; +} static void ospf6_show(struct vty *vty, struct ospf6 *o, json_object *json, bool use_json) @@ -998,6 +975,7 @@ static void ospf6_show(struct vty *vty, struct ospf6 *o, json_object *json, json_object_int_add(json, "holdTimeMultiplier", o->spf_hold_multiplier); + json_object_int_add(json, "maximumPaths", o->max_multipath); if (o->ts_spf.tv_sec || o->ts_spf.tv_usec) { timersub(&now, &o->ts_spf, &result); @@ -1080,6 +1058,7 @@ static void ospf6_show(struct vty *vty, struct ospf6 *o, json_object *json, vty_out(vty, " LSA minimum arrival %d msecs\n", o->lsa_minarrival); + vty_out(vty, " Maximum-paths %u\n", o->max_multipath); /* Show SPF parameters */ vty_out(vty, @@ -1165,7 +1144,7 @@ DEFUN(show_ipv6_ospf6, DEFUN (show_ipv6_ospf6_route, show_ipv6_ospf6_route_cmd, - "show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>]", + "show ipv6 ospf6 route [<intra-area|inter-area|external-1|external-2|X:X::X:X|X:X::X:X/M|detail|summary>] [json]", SHOW_STR IP6_STR OSPF6_STR @@ -1177,41 +1156,44 @@ DEFUN (show_ipv6_ospf6_route, "Specify IPv6 address\n" "Specify IPv6 prefix\n" "Detailed information\n" - "Summary of route table\n") + "Summary of route table\n" + JSON_STR) { struct ospf6 *ospf6; + bool uj = use_json(argc, argv); ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); - ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table); + ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_route_match, show_ipv6_ospf6_route_match_cmd, - "show ipv6 ospf6 route X:X::X:X/M <match|longer>", + "show ipv6 ospf6 route X:X::X:X/M <match|longer> [json]", SHOW_STR IP6_STR OSPF6_STR ROUTE_STR "Specify IPv6 prefix\n" "Display routes which match the specified route\n" - "Display routes longer than the specified route\n") + "Display routes longer than the specified route\n" + JSON_STR) { struct ospf6 *ospf6; + bool uj = use_json(argc, argv); ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); - ospf6_route_table_show(vty, 4, argc, argv, 
ospf6->route_table); - + ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_route_match_detail, show_ipv6_ospf6_route_match_detail_cmd, - "show ipv6 ospf6 route X:X::X:X/M match detail", + "show ipv6 ospf6 route X:X::X:X/M match detail [json]", SHOW_STR IP6_STR OSPF6_STR @@ -1219,21 +1201,22 @@ DEFUN (show_ipv6_ospf6_route_match_detail, "Specify IPv6 prefix\n" "Display routes which match the specified route\n" "Detailed information\n" - ) + JSON_STR) { struct ospf6 *ospf6; + bool uj = use_json(argc, argv); ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); - ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table); + ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_route_type_detail, show_ipv6_ospf6_route_type_detail_cmd, - "show ipv6 ospf6 route <intra-area|inter-area|external-1|external-2> detail", + "show ipv6 ospf6 route <intra-area|inter-area|external-1|external-2> detail [json]", SHOW_STR IP6_STR OSPF6_STR @@ -1243,14 +1226,15 @@ DEFUN (show_ipv6_ospf6_route_type_detail, "Display Type-1 External routes\n" "Display Type-2 External routes\n" "Detailed information\n" - ) + JSON_STR) { struct ospf6 *ospf6; + bool uj = use_json(argc, argv); ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); - ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table); + ospf6_route_table_show(vty, 4, argc, argv, ospf6->route_table, uj); return CMD_SUCCESS; } @@ -1333,6 +1317,11 @@ static int config_write_ospf6(struct vty *vty) vty_out(vty, " timers lsa min-arrival %d\n", ospf6->lsa_minarrival); + /* ECMP max path config */ + if (ospf6->max_multipath != MULTIPATH_NUM) + vty_out(vty, " maximum-paths %d\n", + ospf6->max_multipath); + ospf6_stub_router_config_write(vty, ospf6); ospf6_redistribute_config_write(vty, ospf6); ospf6_area_config_write(vty, ospf6); @@ -1390,20 +1379,13 @@ void ospf6_top_init(void) install_element(OSPF6_NODE, &no_ospf6_interface_area_cmd); install_element(OSPF6_NODE, &ospf6_stub_router_admin_cmd); install_element(OSPF6_NODE, &no_ospf6_stub_router_admin_cmd); -/* For a later time */ -#if 0 - install_element (OSPF6_NODE, &ospf6_stub_router_startup_cmd); - install_element (OSPF6_NODE, &no_ospf6_stub_router_startup_cmd); - install_element (OSPF6_NODE, &ospf6_stub_router_shutdown_cmd); - install_element (OSPF6_NODE, &no_ospf6_stub_router_shutdown_cmd); -#endif + + /* maximum-paths command */ + install_element(OSPF6_NODE, &ospf6_max_multipath_cmd); + install_element(OSPF6_NODE, &no_ospf6_max_multipath_cmd); install_element(OSPF6_NODE, &ospf6_distance_cmd); install_element(OSPF6_NODE, &no_ospf6_distance_cmd); install_element(OSPF6_NODE, &ospf6_distance_ospf6_cmd); install_element(OSPF6_NODE, &no_ospf6_distance_ospf6_cmd); -#if 0 - install_element (OSPF6_NODE, &ospf6_distance_source_cmd); - install_element (OSPF6_NODE, &no_ospf6_distance_source_cmd); -#endif } diff --git a/ospf6d/ospf6_top.h b/ospf6d/ospf6_top.h index 93e25d7599..75dff86cd7 100644 --- a/ospf6d/ospf6_top.h +++ b/ospf6d/ospf6_top.h @@ -127,6 +127,11 @@ struct ospf6 { * update to neighbors immediatly */ uint8_t inst_shutdown; + /* Max number of multiple paths + * to support ECMP. 
+ */ + uint16_t max_multipath; + QOBJ_FIELDS }; DECLARE_QOBJ_TYPE(ospf6) diff --git a/ospf6d/ospf6_zebra.c b/ospf6d/ospf6_zebra.c index 7a8027a37f..2b7072d34f 100644 --- a/ospf6d/ospf6_zebra.c +++ b/ospf6d/ospf6_zebra.c @@ -343,7 +343,14 @@ static void ospf6_zebra_route_update(int type, struct ospf6_route *request, api.safi = SAFI_UNICAST; api.prefix = *dest; SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP); - api.nexthop_num = MIN(nhcount, MULTIPATH_NUM); + + if (nhcount > ospf6->max_multipath) { + if (IS_OSPF6_DEBUG_ZEBRA(SEND)) + zlog_debug( + " Nexthop count is greater than configured maximum-path, hence ignore the extra nexthops"); + } + api.nexthop_num = MIN(nhcount, ospf6->max_multipath); + ospf6_route_zebra_copy_nexthops(request, api.nexthops, api.nexthop_num, api.vrf_id); SET_FLAG(api.message, ZAPI_MESSAGE_METRIC); diff --git a/ospf6d/ospf6d.c b/ospf6d/ospf6d.c index 4b958e550f..8d9c85fd08 100644 --- a/ospf6d/ospf6d.c +++ b/ospf6d/ospf6d.c @@ -44,6 +44,7 @@ #include "ospf6_flood.h" #include "ospf6d.h" #include "ospf6_bfd.h" +#include "lib/json.h" struct route_node *route_prev(struct route_node *node) { @@ -154,53 +155,264 @@ static uint16_t parse_type_spec(int idx_lsa, int argc, struct cmd_token **argv) return type; } +void ospf6_lsdb_show(struct vty *vty, enum ospf_lsdb_show_level level, + uint16_t *type, uint32_t *id, uint32_t *adv_router, + struct ospf6_lsdb *lsdb, json_object *json_obj, + bool use_json) +{ + struct ospf6_lsa *lsa; + const struct route_node *end = NULL; + void (*showfunc)(struct vty *, struct ospf6_lsa *, json_object *, + bool) = NULL; + json_object *json_array = NULL; + + switch (level) { + case OSPF6_LSDB_SHOW_LEVEL_DETAIL: + showfunc = ospf6_lsa_show; + break; + case OSPF6_LSDB_SHOW_LEVEL_INTERNAL: + showfunc = ospf6_lsa_show_internal; + break; + case OSPF6_LSDB_SHOW_LEVEL_DUMP: + showfunc = ospf6_lsa_show_dump; + break; + case OSPF6_LSDB_SHOW_LEVEL_NORMAL: + default: + showfunc = ospf6_lsa_show_summary; + } + + if (use_json) + json_array = json_object_new_array(); + + if (type && id && adv_router) { + lsa = ospf6_lsdb_lookup(*type, *id, *adv_router, lsdb); + if (lsa) { + if (level == OSPF6_LSDB_SHOW_LEVEL_NORMAL) + ospf6_lsa_show(vty, lsa, json_array, use_json); + else + (*showfunc)(vty, lsa, json_array, use_json); + } + + if (use_json) + json_object_object_add(json_obj, "lsa", json_array); + return; + } + + if ((level == OSPF6_LSDB_SHOW_LEVEL_NORMAL) && !use_json) + ospf6_lsa_show_summary_header(vty); + + end = ospf6_lsdb_head(lsdb, !!type + !!(type && adv_router), + type ? *type : 0, adv_router ? 
*adv_router : 0, + &lsa); + while (lsa) { + if ((!adv_router || lsa->header->adv_router == *adv_router) + && (!id || lsa->header->id == *id)) + (*showfunc)(vty, lsa, json_array, use_json); + lsa = ospf6_lsdb_next(end, lsa); + } + + if (use_json) + json_object_object_add(json_obj, "lsa", json_array); +} + +static void ospf6_lsdb_show_wrapper(struct vty *vty, + enum ospf_lsdb_show_level level, + uint16_t *type, uint32_t *id, + uint32_t *adv_router, bool uj, + struct ospf6 *ospf6) +{ + struct listnode *i, *j; + struct ospf6 *o = ospf6; + struct ospf6_area *oa; + struct ospf6_interface *oi; + json_object *json = NULL; + json_object *json_array = NULL; + json_object *json_obj = NULL; + + if (uj) { + json = json_object_new_object(); + json_array = json_object_new_array(); + } + for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) { + if (uj) { + json_obj = json_object_new_object(); + json_object_string_add(json_obj, "areaId", oa->name); + } else + vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); + ospf6_lsdb_show(vty, level, type, id, adv_router, oa->lsdb, + json_obj, uj); + if (uj) + json_object_array_add(json_array, json_obj); + } + if (uj) + json_object_object_add(json, "areaScopedLinkStateDb", + json_array); + + if (uj) + json_array = json_object_new_array(); + for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) { + for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { + if (uj) { + json_obj = json_object_new_object(); + json_object_string_add(json_obj, "areaId", + oa->name); + json_object_string_add(json_obj, "interface", + oi->interface->name); + } else + vty_out(vty, IF_LSDB_TITLE_FORMAT, + oi->interface->name, oa->name); + ospf6_lsdb_show(vty, level, type, id, adv_router, + oi->lsdb, json_obj, uj); + if (uj) + json_object_array_add(json_array, json_obj); + } + } + if (uj) + json_object_object_add(json, "interfaceScopedLinkStateDb", + json_array); + if (uj) { + json_array = json_object_new_array(); + json_obj = json_object_new_object(); + } else + vty_out(vty, AS_LSDB_TITLE_FORMAT); + + ospf6_lsdb_show(vty, level, type, id, adv_router, o->lsdb, json_obj, + uj); + + if (uj) { + json_object_array_add(json_array, json_obj); + json_object_object_add(json, "asScopedLinkStateDb", json_array); + + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } else + vty_out(vty, "\n"); +} + +static void ospf6_lsdb_type_show_wrapper(struct vty *vty, + enum ospf_lsdb_show_level level, + uint16_t *type, uint32_t *id, + uint32_t *adv_router, bool uj, + struct ospf6 *ospf6) +{ + struct listnode *i, *j; + struct ospf6 *o = ospf6; + struct ospf6_area *oa; + struct ospf6_interface *oi; + json_object *json = NULL; + json_object *json_array = NULL; + json_object *json_obj = NULL; + + if (uj) { + json = json_object_new_object(); + json_array = json_object_new_array(); + } + + switch (OSPF6_LSA_SCOPE(*type)) { + case OSPF6_SCOPE_AREA: + for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) { + if (uj) { + json_obj = json_object_new_object(); + json_object_string_add(json_obj, "areaId", + oa->name); + } else + vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); + + ospf6_lsdb_show(vty, level, type, id, adv_router, + oa->lsdb, json_obj, uj); + if (uj) + json_object_array_add(json_array, json_obj); + } + if (uj) + json_object_object_add(json, "areaScopedLinkStateDb", + json_array); + break; + + case OSPF6_SCOPE_LINKLOCAL: + for (ALL_LIST_ELEMENTS_RO(o->area_list, i, oa)) { + for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { + if (uj) { + json_obj = json_object_new_object(); + 
json_object_string_add( + json_obj, "areaId", oa->name); + json_object_string_add( + json_obj, "interface", + oi->interface->name); + } else + vty_out(vty, IF_LSDB_TITLE_FORMAT, + oi->interface->name, oa->name); + + ospf6_lsdb_show(vty, level, type, id, + adv_router, oi->lsdb, json_obj, + uj); + + if (uj) + json_object_array_add(json_array, + json_obj); + } + } + if (uj) + json_object_object_add( + json, "interfaceScopedLinkStateDb", json_array); + break; + + case OSPF6_SCOPE_AS: + if (uj) + json_obj = json_object_new_object(); + else + vty_out(vty, AS_LSDB_TITLE_FORMAT); + + ospf6_lsdb_show(vty, level, type, id, adv_router, o->lsdb, + json_obj, uj); + if (uj) { + json_object_array_add(json_array, json_obj); + json_object_object_add(json, "asScopedLinkStateDb", + json_array); + } + break; + + default: + assert(0); + break; + } + if (uj) { + vty_out(vty, "%s\n", + json_object_to_json_string_ext( + json, JSON_C_TO_STRING_PRETTY)); + json_object_free(json); + } else + vty_out(vty, "\n"); +} + DEFUN (show_ipv6_ospf6_database, show_ipv6_ospf6_database_cmd, - "show ipv6 ospf6 database [<detail|dump|internal>]", + "show ipv6 ospf6 database [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_level = 4; int level; - struct listnode *i, *j; + bool uj = use_json(argc, argv); struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; - ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); level = parse_show_level(idx_level, argc, argv); - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, NULL, NULL, NULL, oa->lsdb); - } - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name, - oa->name); - ospf6_lsdb_show(vty, level, NULL, NULL, NULL, oi->lsdb); - } - } - - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, NULL, NULL, NULL, ospf6->lsdb); - - vty_out(vty, "\n"); + ospf6_lsdb_show_wrapper(vty, level, NULL, NULL, NULL, uj, ospf6); return CMD_SUCCESS; } -DEFUN (show_ipv6_ospf6_database_type, - show_ipv6_ospf6_database_type_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [<detail|dump|internal>]", +DEFUN (show_ipv6_ospf6_database_type, show_ipv6_ospf6_database_type_cmd, + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -217,16 +429,14 @@ DEFUN (show_ipv6_ospf6_database_type, "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" - ) + JSON_STR) { int idx_lsa = 4; int idx_level = 5; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -235,43 +445,13 @@ DEFUN (show_ipv6_ospf6_database_type, type = parse_type_spec(idx_lsa, argc, argv); level = parse_show_level(idx_level, argc, argv); - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, 
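
For reference, a minimal standalone sketch of the json-c lifecycle that the new ospf6_lsdb_show_wrapper()/ospf6_lsdb_type_show_wrapper() code follows (allocate, populate, attach, pretty-print, free). The "areaId"/"areaScopedLinkStateDb" key names and the JSON_C_TO_STRING_PRETTY flag are taken from the hunks above; FRR's json_object_string_add() and json_object_free() helpers are assumed to be thin wrappers over the json-c primitives used here.

    #include <json-c/json.h>
    #include <stdio.h>

    static void lsdb_json_sketch(const char *area_name)
    {
            struct json_object *root = json_object_new_object();
            struct json_object *areas = json_object_new_array();
            struct json_object *area = json_object_new_object();

            /* populate one per-area object and attach it to the array */
            json_object_object_add(area, "areaId",
                                   json_object_new_string(area_name));
            json_object_array_add(areas, area);
            json_object_object_add(root, "areaScopedLinkStateDb", areas);

            /* print, then drop the root reference; children are freed with it */
            printf("%s\n", json_object_to_json_string_ext(
                                   root, JSON_C_TO_STRING_PRETTY));
            json_object_put(root);
    }
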
oa->name); - ospf6_lsdb_show(vty, level, &type, NULL, NULL, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, NULL, NULL, - oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, NULL, NULL, ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, NULL, NULL, uj, ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_id, show_ipv6_ospf6_database_id_cmd, - "show ipv6 ospf6 database <*|linkstate-id> A.B.C.D [<detail|dump|internal>]", + "show ipv6 ospf6 database <*|linkstate-id> A.B.C.D [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -281,16 +461,15 @@ DEFUN (show_ipv6_ospf6_database_id, "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_ipv4 = 5; int idx_level = 6; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint32_t id = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -300,30 +479,14 @@ DEFUN (show_ipv6_ospf6_database_id, inet_pton(AF_INET, argv[idx_ipv4]->arg, &id); level = parse_show_level(idx_level, argc, argv); + ospf6_lsdb_show_wrapper(vty, level, NULL, &id, NULL, uj, ospf6); - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, NULL, &id, NULL, oa->lsdb); - } - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name, - oa->name); - ospf6_lsdb_show(vty, level, NULL, &id, NULL, oi->lsdb); - } - } - - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, NULL, &id, NULL, ospf6->lsdb); - - vty_out(vty, "\n"); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_router, show_ipv6_ospf6_database_router_cmd, - "show ipv6 ospf6 database <*|adv-router> * A.B.C.D <detail|dump|internal>", + "show ipv6 ospf6 database <*|adv-router> * A.B.C.D <detail|dump|internal> [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -334,16 +497,15 @@ DEFUN (show_ipv6_ospf6_database_router, "Specify Advertising Router as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_ipv4 = 6; int idx_level = 7; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -351,24 +513,7 @@ DEFUN (show_ipv6_ospf6_database_router, inet_pton(AF_INET, argv[idx_ipv4]->arg, &adv_router); level = parse_show_level(idx_level, argc, argv); - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, oa->lsdb); - } - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, 
oi->interface->name, - oa->name); - ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, - oi->lsdb); - } - } - - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, ospf6->lsdb); - - vty_out(vty, "\n"); + ospf6_lsdb_show_wrapper(vty, level, NULL, NULL, &adv_router, uj, ospf6); return CMD_SUCCESS; } @@ -408,7 +553,7 @@ DEFUN_HIDDEN (show_ipv6_ospf6_database_aggr_router, return CMD_SUCCESS; } ospf6_lsdb_show(vty, level, &type, NULL, NULL, - oa->temp_router_lsa_lsdb); + oa->temp_router_lsa_lsdb, NULL, false); /* Remove the temp cache */ ospf6_remove_temp_router_lsa(oa); } @@ -420,7 +565,7 @@ DEFUN_HIDDEN (show_ipv6_ospf6_database_aggr_router, DEFUN (show_ipv6_ospf6_database_type_id, show_ipv6_ospf6_database_type_id_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> linkstate-id A.B.C.D [<detail|dump|internal>]", + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> linkstate-id A.B.C.D [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -439,18 +584,16 @@ DEFUN (show_ipv6_ospf6_database_type_id, "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" - ) + JSON_STR) { int idx_lsa = 4; int idx_ipv4 = 6; int idx_level = 7; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t id = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -460,42 +603,13 @@ DEFUN (show_ipv6_ospf6_database_type_id, inet_pton(AF_INET, argv[idx_ipv4]->arg, &id); level = parse_show_level(idx_level, argc, argv); - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, NULL, oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, NULL, - oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, &id, NULL, ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, NULL, uj, ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_type_router, show_ipv6_ospf6_database_type_router_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> <*|adv-router> A.B.C.D [<detail|dump|internal>]", + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> <*|adv-router> A.B.C.D [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -515,18 +629,16 @@ DEFUN (show_ipv6_ospf6_database_type_router, "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" - ) + JSON_STR) { int idx_lsa = 4; int idx_ipv4 = 6; int idx_level = 7; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = 
ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -535,45 +647,15 @@ DEFUN (show_ipv6_ospf6_database_type_router, inet_pton(AF_INET, argv[idx_ipv4]->arg, &adv_router); level = parse_show_level(idx_level, argc, argv); - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, NULL, &adv_router, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, NULL, - &adv_router, oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, NULL, &adv_router, - ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, NULL, &adv_router, uj, + ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_id_router, show_ipv6_ospf6_database_id_router_cmd, - "show ipv6 ospf6 database * A.B.C.D A.B.C.D [<detail|dump|internal>]", + "show ipv6 ospf6 database * A.B.C.D A.B.C.D [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -584,18 +666,16 @@ DEFUN (show_ipv6_ospf6_database_id_router, "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" - ) + JSON_STR) { int idx_ls_id = 5; int idx_adv_rtr = 6; int idx_level = 7; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint32_t id = 0; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); @@ -603,31 +683,14 @@ DEFUN (show_ipv6_ospf6_database_id_router, inet_pton(AF_INET, argv[idx_adv_rtr]->arg, &adv_router); level = parse_show_level(idx_level, argc, argv); - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, oa->lsdb); - } - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name, - oa->name); - ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, - oi->lsdb); - } - } - - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, ospf6->lsdb); - - vty_out(vty, "\n"); + ospf6_lsdb_show_wrapper(vty, level, NULL, &id, &adv_router, uj, ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id, show_ipv6_ospf6_database_adv_router_linkstate_id_cmd, - "show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [<detail|dump|internal>]", + "show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -638,18 +701,17 @@ DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id, "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_adv_rtr = 5; int idx_ls_id = 7; int idx_level = 8; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint32_t id = 0; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 
*ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -658,30 +720,13 @@ DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id, inet_pton(AF_INET, argv[idx_ls_id]->arg, &id); level = parse_show_level(idx_level, argc, argv); - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, oa->lsdb); - } - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name, - oa->name); - ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, - oi->lsdb); - } - } - - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, NULL, &id, &adv_router, ospf6->lsdb); - - vty_out(vty, "\n"); + ospf6_lsdb_show_wrapper(vty, level, NULL, &id, &adv_router, uj, ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_type_id_router, show_ipv6_ospf6_database_type_id_router_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D A.B.C.D [<dump|internal>]", + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D A.B.C.D [<dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -698,20 +743,19 @@ DEFUN (show_ipv6_ospf6_database_type_id_router, "Specify Link state ID as IPv4 address notation\n" "Specify Advertising Router as IPv4 address notation\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_lsa = 4; int idx_ls_id = 5; int idx_adv_rtr = 6; int idx_level = 7; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t id = 0; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -722,45 +766,15 @@ DEFUN (show_ipv6_ospf6_database_type_id_router, inet_pton(AF_INET, argv[idx_adv_rtr]->arg, &adv_router); level = parse_show_level(idx_level, argc, argv); - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, - &adv_router, oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj, + ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id, show_ipv6_ospf6_database_type_adv_router_linkstate_id_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> adv-router A.B.C.D linkstate-id A.B.C.D [<dump|internal>]", + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> adv-router A.B.C.D linkstate-id A.B.C.D [<dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ 
-779,20 +793,19 @@ DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id, "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_lsa = 4; int idx_adv_rtr = 6; int idx_ls_id = 8; int idx_level = 9; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t id = 0; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); @@ -803,44 +816,14 @@ DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id, inet_pton(AF_INET, argv[idx_ls_id]->arg, &id); level = parse_show_level(idx_level, argc, argv); - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, - &adv_router, oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj, + ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_self_originated, show_ipv6_ospf6_database_self_originated_cmd, - "show ipv6 ospf6 database self-originated [<detail|dump|internal>]", + "show ipv6 ospf6 database self-originated [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -848,46 +831,28 @@ DEFUN (show_ipv6_ospf6_database_self_originated, "Display Self-originated LSAs\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_level = 5; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); level = parse_show_level(idx_level, argc, argv); adv_router = ospf6->router_id; - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, oa->lsdb); - } - - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, oi->interface->name, - oa->name); - ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, - oi->lsdb); - } - } - - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, NULL, NULL, &adv_router, ospf6->lsdb); - - vty_out(vty, "\n"); + ospf6_lsdb_show_wrapper(vty, level, NULL, NULL, &adv_router, uj, ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_type_self_originated, show_ipv6_ospf6_database_type_self_originated_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated [<detail|dump|internal>]", + "show ipv6 ospf6 database 
<router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -904,17 +869,16 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated, "Display Self-originated LSAs\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_lsa = 4; int idx_level = 6; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t adv_router = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); @@ -923,44 +887,14 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated, adv_router = ospf6->router_id; - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, NULL, &adv_router, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, NULL, - &adv_router, oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, NULL, &adv_router, - ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, NULL, &adv_router, uj, + ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id, show_ipv6_ospf6_database_type_self_originated_linkstate_id_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated linkstate-id A.B.C.D [<detail|dump|internal>]", + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> self-originated linkstate-id A.B.C.D [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -979,19 +913,18 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id, "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_lsa = 4; int idx_ls_id = 7; int idx_level = 8; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t adv_router = 0; uint32_t id = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); @@ -1000,44 +933,14 @@ DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id, level = parse_show_level(idx_level, argc, argv); adv_router = ospf6->router_id; - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - 
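
Taken together, these DEFUN changes add an optional trailing "json" token to every "show ipv6 ospf6 database ..." variant. A hypothetical invocation and the output skeleton the wrappers produce (top-level key names come from the wrapper code above; per-LSA fields depend on the ospf6_lsa_show_* callbacks and are elided; area and interface names are placeholders):

    vtysh -c 'show ipv6 ospf6 database json'
    {
      "areaScopedLinkStateDb": [
        { "areaId": "0.0.0.0", "lsa": [ ... ] }
      ],
      "interfaceScopedLinkStateDb": [
        { "areaId": "0.0.0.0", "interface": "eth1", "lsa": [ ... ] }
      ],
      "asScopedLinkStateDb": [
        { "lsa": [ ... ] }
      ]
    }
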
ospf6_lsdb_show(vty, level, &type, &id, - &adv_router, oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj, + ospf6); return CMD_SUCCESS; } DEFUN (show_ipv6_ospf6_database_type_id_self_originated, show_ipv6_ospf6_database_type_id_self_originated_cmd, - "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D self-originated [<detail|dump|internal>]", + "show ipv6 ospf6 database <router|network|inter-prefix|inter-router|as-external|group-membership|type-7|link|intra-prefix> A.B.C.D self-originated [<detail|dump|internal>] [json]", SHOW_STR IPV6_STR OSPF6_STR @@ -1055,19 +958,18 @@ DEFUN (show_ipv6_ospf6_database_type_id_self_originated, "Display Self-originated LSAs\n" "Display details of LSAs\n" "Dump LSAs\n" - "Display LSA's internal information\n") + "Display LSA's internal information\n" + JSON_STR) { int idx_lsa = 4; int idx_ls_id = 5; int idx_level = 7; int level; - struct listnode *i, *j; - struct ospf6 *ospf6; - struct ospf6_area *oa; - struct ospf6_interface *oi; uint16_t type = 0; uint32_t adv_router = 0; uint32_t id = 0; + bool uj = use_json(argc, argv); + struct ospf6 *ospf6; ospf6 = ospf6_lookup_by_vrf_name(VRF_DEFAULT_NAME); OSPF6_CMD_CHECK_RUNNING(ospf6); @@ -1076,38 +978,8 @@ DEFUN (show_ipv6_ospf6_database_type_id_self_originated, level = parse_show_level(idx_level, argc, argv); adv_router = ospf6->router_id; - switch (OSPF6_LSA_SCOPE(type)) { - case OSPF6_SCOPE_AREA: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - vty_out(vty, AREA_LSDB_TITLE_FORMAT, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - oa->lsdb); - } - break; - - case OSPF6_SCOPE_LINKLOCAL: - for (ALL_LIST_ELEMENTS_RO(ospf6->area_list, i, oa)) { - for (ALL_LIST_ELEMENTS_RO(oa->if_list, j, oi)) { - vty_out(vty, IF_LSDB_TITLE_FORMAT, - oi->interface->name, oa->name); - ospf6_lsdb_show(vty, level, &type, &id, - &adv_router, oi->lsdb); - } - } - break; - - case OSPF6_SCOPE_AS: - vty_out(vty, AS_LSDB_TITLE_FORMAT); - ospf6_lsdb_show(vty, level, &type, &id, &adv_router, - ospf6->lsdb); - break; - - default: - assert(0); - break; - } - - vty_out(vty, "\n"); + ospf6_lsdb_type_show_wrapper(vty, level, &type, &id, &adv_router, uj, + ospf6); return CMD_SUCCESS; } @@ -1134,7 +1006,7 @@ DEFUN (show_ipv6_ospf6_border_routers, if (strmatch(argv[idx_ipv4]->text, "detail")) { for (ro = ospf6_route_head(ospf6->brouter_table); ro; ro = ospf6_route_next(ro)) - ospf6_route_show_detail(vty, ro); + ospf6_route_show_detail(vty, ro, NULL, false); } else { inet_pton(AF_INET, argv[idx_ipv4]->arg, &adv_router); @@ -1147,7 +1019,7 @@ DEFUN (show_ipv6_ospf6_border_routers, return CMD_SUCCESS; } - ospf6_route_show_detail(vty, ro); + ospf6_route_show_detail(vty, ro, NULL, false); return CMD_SUCCESS; } } else { diff --git a/ospfclient/ospf_apiclient.c b/ospfclient/ospf_apiclient.c index d4f0dc953c..1b9b66d745 100644 --- a/ospfclient/ospf_apiclient.c +++ b/ospfclient/ospf_apiclient.c @@ -34,6 +34,7 @@ #include "stream.h" #include "log.h" #include "memory.h" +#include "xref.h" /* work around gcc bug 69981, disable MTYPEs in libospf */ #define _QUAGGA_OSPF_MEMORY_H @@ -57,6 +58,8 @@ #include "ospfd/ospf_dump_api.c" #include "ospfd/ospf_api.c" +XREF_SETUP() + DEFINE_MGROUP(OSPFCLIENT, 
"libospfapiclient") DEFINE_MTYPE_STATIC(OSPFCLIENT, OSPF_APICLIENT, "OSPF-API client") diff --git a/ospfd/ospf_asbr.c b/ospfd/ospf_asbr.c index 94fa1b5b44..0b4e5d7762 100644 --- a/ospfd/ospf_asbr.c +++ b/ospfd/ospf_asbr.c @@ -517,7 +517,7 @@ struct ospf_external_aggr_rt *ospf_external_aggr_match(struct ospf *ospf, struct ospf_external_aggr_rt *ag = node->info; zlog_debug( - "%s: Matching aggregator found.prefix:%pI4/%d Aggregator %pI4/%d\n", + "%s: Matching aggregator found.prefix:%pI4/%d Aggregator %pI4/%d", __func__, &p->prefix, p->prefixlen, &ag->p.prefix, ag->p.prefixlen); } @@ -956,7 +956,7 @@ static void ospf_handle_external_aggr_update(struct ospf *ospf) struct route_node *rn = NULL; if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR)) - zlog_debug("%s: Process modified aggregators.\n", __func__); + zlog_debug("%s: Process modified aggregators.", __func__); for (rn = route_top(ospf->rt_aggr_tbl); rn; rn = route_next(rn)) { struct ospf_external_aggr_rt *aggr; @@ -1047,7 +1047,7 @@ static int ospf_asbr_external_aggr_process(struct thread *thread) operation = ospf->aggr_action; if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR)) - zlog_debug("%s: operation:%d\n", __func__, operation); + zlog_debug("%s: operation:%d", __func__, operation); switch (operation) { case OSPF_ROUTE_AGGR_ADD: diff --git a/ospfd/ospf_ase.c b/ospfd/ospf_ase.c index e99653f918..51da4a55a7 100644 --- a/ospfd/ospf_ase.c +++ b/ospfd/ospf_ase.c @@ -156,84 +156,6 @@ static int ospf_ase_forward_address_check(struct ospf *ospf, return 1; } -#if 0 -/* Calculate ASBR route. */ -static struct ospf_route * -ospf_ase_calculate_asbr_route (struct ospf *ospf, - struct route_table *rt_network, - struct route_table *rt_router, - struct as_external_lsa *al) -{ - struct prefix_ipv4 asbr; - struct ospf_route *asbr_route; - struct route_node *rn; - - /* Find ASBR route from Router routing table. */ - asbr.family = AF_INET; - asbr.prefix = al->header.adv_router; - asbr.prefixlen = IPV4_MAX_BITLEN; - apply_mask_ipv4 (&asbr); - - asbr_route = ospf_find_asbr_route (ospf, rt_router, &asbr); - - if (asbr_route == NULL) - { - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Route to ASBR %pI4 not found", - &asbr.prefix); - return NULL; - } - - if (!(asbr_route->u.std.flags & ROUTER_LSA_EXTERNAL)) - { - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Originating router is not an ASBR"); - return NULL; - } - - if (al->e[0].fwd_addr.s_addr != INADDR_ANY) - { - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Forwarding address is not 0.0.0.0."); - - if (! ospf_ase_forward_address_check (ospf, al->e[0].fwd_addr)) - { - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Forwarding address is one of our addresses, Ignore."); - return NULL; - } - - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Looking up in the Network Routing Table."); - - /* Looking up the path to the fwd_addr from Network route. 
*/ - asbr.family = AF_INET; - asbr.prefix = al->e[0].fwd_addr; - asbr.prefixlen = IPV4_MAX_BITLEN; - - rn = route_node_match (rt_network, (struct prefix *) &asbr); - - if (rn == NULL) - { - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Couldn't find a route to the forwarding address."); - return NULL; - } - - route_unlock_node (rn); - - if ((asbr_route = rn->info) == NULL) - { - if (IS_DEBUG_OSPF (lsa, LSA)) - zlog_debug ("ospf_ase_calculate(): Somehow OSPF route to ASBR is lost"); - return NULL; - } - } - - return asbr_route; -} -#endif - static struct ospf_route * ospf_ase_calculate_new_route(struct ospf_lsa *lsa, struct ospf_route *asbr_route, uint32_t metric) @@ -748,8 +670,13 @@ void ospf_ase_unregister_external_lsa(struct ospf_lsa *lsa, struct ospf *top) if (rn) { lst = rn->info; - listnode_delete(lst, lsa); - ospf_lsa_unlock(&lsa); /* external_lsas list */ + struct listnode *node = listnode_lookup(lst, lsa); + /* Unlock lsa only if node is present in the list */ + if (node) { + listnode_delete(lst, lsa); + ospf_lsa_unlock(&lsa); /* external_lsas list */ + } + route_unlock_node(rn); } } diff --git a/ospfd/ospf_bfd.c b/ospfd/ospf_bfd.c index 4640720952..a9bc9069d2 100644 --- a/ospfd/ospf_bfd.c +++ b/ospfd/ospf_bfd.c @@ -205,14 +205,14 @@ static int ospf_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS) struct ospf_neighbor *nbr = NULL; struct route_node *node; struct route_node *n_node; - struct prefix p; + struct prefix p, src_p; int status; int old_status; struct bfd_info *bfd_info; struct timeval tv; - ifp = bfd_get_peer_info(zclient->ibuf, &p, NULL, &status, - NULL, vrf_id); + ifp = bfd_get_peer_info(zclient->ibuf, &p, &src_p, &status, NULL, + vrf_id); if ((ifp == NULL) || (p.family != AF_INET)) return 0; diff --git a/ospfd/ospf_dump.c b/ospfd/ospf_dump.c index e15c9c42c7..b98852eeee 100644 --- a/ospfd/ospf_dump.c +++ b/ospfd/ospf_dump.c @@ -56,6 +56,7 @@ unsigned long conf_debug_ospf_nssa = 0; unsigned long conf_debug_ospf_te = 0; unsigned long conf_debug_ospf_ext = 0; unsigned long conf_debug_ospf_sr = 0; +unsigned long conf_debug_ospf_ti_lfa = 0; unsigned long conf_debug_ospf_defaultinfo = 0; unsigned long conf_debug_ospf_ldp_sync = 0; unsigned long conf_debug_ospf_gr = 0; @@ -71,6 +72,7 @@ unsigned long term_debug_ospf_nssa = 0; unsigned long term_debug_ospf_te = 0; unsigned long term_debug_ospf_ext = 0; unsigned long term_debug_ospf_sr = 0; +unsigned long term_debug_ospf_ti_lfa = 0; unsigned long term_debug_ospf_defaultinfo; unsigned long term_debug_ospf_ldp_sync; unsigned long term_debug_ospf_gr = 0; @@ -1470,6 +1472,24 @@ DEFUN (no_debug_ospf_sr, return CMD_SUCCESS; } +DEFUN(debug_ospf_ti_lfa, debug_ospf_ti_lfa_cmd, "debug ospf ti-lfa", + DEBUG_STR OSPF_STR "OSPF-SR TI-LFA information\n") +{ + if (vty->node == CONFIG_NODE) + CONF_DEBUG_ON(ti_lfa, TI_LFA); + TERM_DEBUG_ON(ti_lfa, TI_LFA); + return CMD_SUCCESS; +} + +DEFUN(no_debug_ospf_ti_lfa, no_debug_ospf_ti_lfa_cmd, "no debug ospf ti-lfa", + NO_STR DEBUG_STR OSPF_STR "OSPF-SR TI-LFA information\n") +{ + if (vty->node == CONFIG_NODE) + CONF_DEBUG_OFF(ti_lfa, TI_LFA); + TERM_DEBUG_OFF(ti_lfa, TI_LFA); + return CMD_SUCCESS; +} + DEFUN (debug_ospf_default_info, debug_ospf_default_info_cmd, "debug ospf default-information", @@ -1891,6 +1911,12 @@ static int config_write_debug(struct vty *vty) write = 1; } + /* debug ospf sr ti-lfa */ + if (IS_CONF_DEBUG_OSPF(sr, TI_LFA) == OSPF_DEBUG_TI_LFA) { + vty_out(vty, "debug ospf%s ti-lfa\n", str); + write = 1; + } + /* debug ospf ldp-sync */ if 
(IS_CONF_DEBUG_OSPF(ldp_sync, LDP_SYNC) == OSPF_DEBUG_LDP_SYNC) { vty_out(vty, "debug ospf%s ldp-sync\n", str); @@ -1920,6 +1946,7 @@ void ospf_debug_init(void) install_element(ENABLE_NODE, &debug_ospf_nssa_cmd); install_element(ENABLE_NODE, &debug_ospf_te_cmd); install_element(ENABLE_NODE, &debug_ospf_sr_cmd); + install_element(ENABLE_NODE, &debug_ospf_ti_lfa_cmd); install_element(ENABLE_NODE, &debug_ospf_default_info_cmd); install_element(ENABLE_NODE, &debug_ospf_ldp_sync_cmd); install_element(ENABLE_NODE, &no_debug_ospf_ism_cmd); @@ -1930,6 +1957,7 @@ void ospf_debug_init(void) install_element(ENABLE_NODE, &no_debug_ospf_nssa_cmd); install_element(ENABLE_NODE, &no_debug_ospf_te_cmd); install_element(ENABLE_NODE, &no_debug_ospf_sr_cmd); + install_element(ENABLE_NODE, &no_debug_ospf_ti_lfa_cmd); install_element(ENABLE_NODE, &no_debug_ospf_default_info_cmd); install_element(ENABLE_NODE, &no_debug_ospf_ldp_sync_cmd); install_element(ENABLE_NODE, &debug_ospf_gr_cmd); @@ -1962,6 +1990,7 @@ void ospf_debug_init(void) install_element(CONFIG_NODE, &debug_ospf_nssa_cmd); install_element(CONFIG_NODE, &debug_ospf_te_cmd); install_element(CONFIG_NODE, &debug_ospf_sr_cmd); + install_element(CONFIG_NODE, &debug_ospf_ti_lfa_cmd); install_element(CONFIG_NODE, &debug_ospf_default_info_cmd); install_element(CONFIG_NODE, &debug_ospf_ldp_sync_cmd); install_element(CONFIG_NODE, &no_debug_ospf_nsm_cmd); @@ -1971,6 +2000,7 @@ void ospf_debug_init(void) install_element(CONFIG_NODE, &no_debug_ospf_nssa_cmd); install_element(CONFIG_NODE, &no_debug_ospf_te_cmd); install_element(CONFIG_NODE, &no_debug_ospf_sr_cmd); + install_element(CONFIG_NODE, &no_debug_ospf_ti_lfa_cmd); install_element(CONFIG_NODE, &no_debug_ospf_default_info_cmd); install_element(CONFIG_NODE, &no_debug_ospf_ldp_sync_cmd); install_element(CONFIG_NODE, &debug_ospf_gr_cmd); diff --git a/ospfd/ospf_dump.h b/ospfd/ospf_dump.h index ea607fef7c..c4c5606663 100644 --- a/ospfd/ospf_dump.h +++ b/ospfd/ospf_dump.h @@ -60,6 +60,7 @@ #define OSPF_DEBUG_TE 0x04 #define OSPF_DEBUG_EXT 0x08 #define OSPF_DEBUG_SR 0x10 +#define OSPF_DEBUG_TI_LFA 0x11 #define OSPF_DEBUG_DEFAULTINFO 0x20 #define OSPF_DEBUG_LDP_SYNC 0x40 @@ -110,6 +111,8 @@ #define IS_DEBUG_OSPF_SR IS_DEBUG_OSPF(sr, SR) +#define IS_DEBUG_OSPF_TI_LFA IS_DEBUG_OSPF(ti_lfa, TI_LFA) + #define IS_DEBUG_OSPF_DEFAULT_INFO IS_DEBUG_OSPF(defaultinfo, DEFAULTINFO) #define IS_DEBUG_OSPF_LDP_SYNC IS_DEBUG_OSPF(ldp_sync, LDP_SYNC) @@ -133,6 +136,7 @@ extern unsigned long term_debug_ospf_nssa; extern unsigned long term_debug_ospf_te; extern unsigned long term_debug_ospf_ext; extern unsigned long term_debug_ospf_sr; +extern unsigned long term_debug_ospf_ti_lfa; extern unsigned long term_debug_ospf_defaultinfo; extern unsigned long term_debug_ospf_ldp_sync; extern unsigned long term_debug_ospf_gr; diff --git a/ospfd/ospf_ldp_sync.c b/ospfd/ospf_ldp_sync.c index b574e2cac8..dbd45635b2 100644 --- a/ospfd/ospf_ldp_sync.c +++ b/ospfd/ospf_ldp_sync.c @@ -107,6 +107,7 @@ void ospf_ldp_sync_state_req_msg(struct interface *ifp) ols_debug("ldp_sync: send state request to LDP for %s", ifp->name); + memset(&request, 0, sizeof(request)); strlcpy(request.name, ifp->name, sizeof(ifp->name)); request.proto = LDP_IGP_SYNC_IF_STATE_REQUEST; request.ifindex = ifp->ifindex; diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index d5eba74fd4..5a48eebe49 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -337,7 +337,7 @@ void lsa_header_set(struct stream *s, uint8_t options, uint8_t type, /* router-LSA related functions. 
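
The hunks above register a new TI-LFA debug toggle in both ENABLE and CONFIG nodes and teach config_write_debug() to persist it. A hypothetical vtysh session using only the command strings defined above (the prompt name is a placeholder):

    frr# debug ospf ti-lfa
    frr# no debug ospf ti-lfa
    frr# configure terminal
    frr(config)# debug ospf ti-lfa

From the config node the same command additionally sets the persistent flag via CONF_DEBUG_ON(ti_lfa, TI_LFA), per the DEFUN bodies above.
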
*/ /* Get router-LSA flags. */ -static uint8_t router_lsa_flags(struct ospf_area *area) +uint8_t router_lsa_flags(struct ospf_area *area) { uint8_t flags; @@ -420,9 +420,8 @@ static uint16_t ospf_link_cost(struct ospf_interface *oi) } /* Set a link information. */ -static char link_info_set(struct stream **s, struct in_addr id, - struct in_addr data, uint8_t type, uint8_t tos, - uint16_t cost) +char link_info_set(struct stream **s, struct in_addr id, struct in_addr data, + uint8_t type, uint8_t tos, uint16_t cost) { /* LSA stream is initially allocated to OSPF_MAX_LSA_SIZE, suits * vast majority of cases. Some rare routers with lots of links need @@ -679,7 +678,7 @@ static int router_lsa_link_set(struct stream **s, struct ospf_area *area) } /* Set router-LSA body. */ -static void ospf_router_lsa_body_set(struct stream **s, struct ospf_area *area) +void ospf_router_lsa_body_set(struct stream **s, struct ospf_area *area) { unsigned long putp; uint16_t cnt; @@ -1756,8 +1755,8 @@ static struct ospf_lsa *ospf_lsa_translated_nssa_new(struct ospf *ospf, == NULL) { if (IS_DEBUG_OSPF_NSSA) zlog_debug( - "ospf_nssa_translate_originate(): Could not originate Translated Type-5 for %pI4", - &ei.p.prefix); + "%s: Could not originate Translated Type-5 for %pI4", + __func__, &ei.p.prefix); return NULL; } @@ -1790,24 +1789,22 @@ struct ospf_lsa *ospf_translated_nssa_originate(struct ospf *ospf, if ((new = ospf_lsa_translated_nssa_new(ospf, type7)) == NULL) { if (IS_DEBUG_OSPF_NSSA) zlog_debug( - "ospf_translated_nssa_originate(): Could not translate Type-7, Id %pI4, to Type-5", - &type7->data->id); + "%s: Could not translate Type-7, Id %pI4, to Type-5", + __func__, &type7->data->id); return NULL; } extnew = (struct as_external_lsa *)new->data; if ((new = ospf_lsa_install(ospf, NULL, new)) == NULL) { - flog_warn( - EC_OSPF_LSA_INSTALL_FAILURE, - "ospf_lsa_translated_nssa_originate(): Could not install LSA id %pI4", - &type7->data->id); + flog_warn(EC_OSPF_LSA_INSTALL_FAILURE, + "%s: Could not install LSA id %pI4", __func__, + &type7->data->id); return NULL; } if (IS_DEBUG_OSPF_NSSA) { - zlog_debug( - "ospf_translated_nssa_originate(): translated Type 7, installed:"); + zlog_debug("%s: translated Type 7, installed", __func__); ospf_lsa_header_dump(new->data); zlog_debug(" Network mask: %d", ip_masklen(extnew->mask)); zlog_debug(" Forward addr: %pI4", @@ -2166,12 +2163,6 @@ void ospf_external_lsa_flush(struct ospf *ospf, uint8_t type, /* Sweep LSA from Link State Retransmit List. */ ospf_ls_retransmit_delete_nbr_as(ospf, lsa); -/* There must be no self-originated LSA in rtrs_external. */ -#if 0 - /* Remove External route from Zebra. */ - ospf_zebra_delete ((struct prefix_ipv4 *) p, &nexthop); -#endif - if (!IS_LSA_MAXAGE(lsa)) { /* Unregister LSA from Refresh queue. */ ospf_refresher_unregister_lsa(ospf, lsa); @@ -2443,12 +2434,7 @@ ospf_summary_lsa_install(struct ospf *ospf, struct ospf_lsa *new, int rt_recalc) necessary to re-examine all the AS-external-LSAs. */ -#if 0 - /* This doesn't exist yet... */ - ospf_summary_incremental_update(new); */ -#else /* #if 0 */ ospf_spf_calculate_schedule(ospf, SPF_FLAG_SUMMARY_LSA_INSTALL); -#endif /* #if 0 */ } if (IS_LSA_SELF(new)) @@ -2469,16 +2455,8 @@ static struct ospf_lsa *ospf_summary_asbr_lsa_install(struct ospf *ospf, destination is an AS boundary router, it may also be necessary to re-examine all the AS-external-LSAs. */ -#if 0 - /* These don't exist yet... */ - ospf_summary_incremental_update(new); - /* Isn't this done by the above call? 
- - RFC 2328 Section 16.5 implies it should be */ - /* ospf_ase_calculate_schedule(); */ -#else /* #if 0 */ ospf_spf_calculate_schedule(ospf, SPF_FLAG_ASBR_SUMMARY_LSA_INSTALL); -#endif /* #if 0 */ } /* register LSA to refresh-list. */ @@ -2667,7 +2645,7 @@ struct ospf_lsa *ospf_lsa_install(struct ospf *ospf, struct ospf_interface *oi, } else { if (IS_DEBUG_OSPF(lsa, LSA_GENERATE)) { zlog_debug( - "ospf_lsa_install() got an lsa with seq 0x80000000 that was not self originated. Ignoring\n"); + "ospf_lsa_install() got an lsa with seq 0x80000000 that was not self originated. Ignoring"); ospf_lsa_header_dump(lsa->data); } return old; @@ -2789,7 +2767,7 @@ int ospf_check_nbr_status(struct ospf *ospf) static int ospf_maxage_lsa_remover(struct thread *thread) { struct ospf *ospf = THREAD_ARG(thread); - struct ospf_lsa *lsa; + struct ospf_lsa *lsa, *old; struct route_node *rn; int reschedule = 0; @@ -2851,6 +2829,18 @@ static int ospf_maxage_lsa_remover(struct thread *thread) /* Remove from lsdb. */ if (lsa->lsdb) { + old = ospf_lsdb_lookup(lsa->lsdb, lsa); + /* The max age LSA here must be the same + * as the LSA in LSDB + */ + if (old != lsa) { + flog_err(EC_OSPF_LSA_MISSING, + "%s: LSA[Type%d:%pI4]: LSA not in LSDB", + __func__, lsa->data->type, + &lsa->data->id); + + continue; + } ospf_discard_from_db(ospf, lsa->lsdb, lsa); ospf_lsdb_delete(lsa->lsdb, lsa); } else { diff --git a/ospfd/ospf_lsa.h b/ospfd/ospf_lsa.h index c5de287948..f2a0d36e7e 100644 --- a/ospfd/ospf_lsa.h +++ b/ospfd/ospf_lsa.h @@ -260,6 +260,8 @@ extern struct lsa_header *ospf_lsa_data_dup(struct lsa_header *); extern void ospf_lsa_data_free(struct lsa_header *); /* Prototype for various LSAs */ +extern void ospf_router_lsa_body_set(struct stream **s, struct ospf_area *area); +extern uint8_t router_lsa_flags(struct ospf_area *area); extern int ospf_router_lsa_update(struct ospf *); extern int ospf_router_lsa_update_area(struct ospf_area *); @@ -333,6 +335,10 @@ extern int is_prefix_default(struct prefix_ipv4 *); extern int metric_type(struct ospf *, uint8_t, unsigned short); extern int metric_value(struct ospf *, uint8_t, unsigned short); +extern char link_info_set(struct stream **s, struct in_addr id, + struct in_addr data, uint8_t type, uint8_t tos, + uint16_t cost); + extern struct in_addr ospf_get_nssa_ip(struct ospf_area *); extern int ospf_translated_nssa_compare(struct ospf_lsa *, struct ospf_lsa *); extern struct ospf_lsa *ospf_translated_nssa_refresh(struct ospf *, diff --git a/ospfd/ospf_memory.c b/ospfd/ospf_memory.c index ae22cec414..f4fb68cbdf 100644 --- a/ospfd/ospf_memory.c +++ b/ospfd/ospf_memory.c @@ -58,3 +58,5 @@ DEFINE_MTYPE(OSPFD, OSPF_EXT_PARAMS, "OSPF Extended parameters") DEFINE_MTYPE(OSPFD, OSPF_SR_PARAMS, "OSPF Segment Routing parameters") DEFINE_MTYPE(OSPFD, OSPF_GR_HELPER, "OSPF Graceful Restart Helper") DEFINE_MTYPE(OSPFD, OSPF_EXTERNAL_RT_AGGR, "OSPF External Route Summarisation") +DEFINE_MTYPE(OSPFD, OSPF_P_SPACE, "OSPF TI-LFA P-Space") +DEFINE_MTYPE(OSPFD, OSPF_Q_SPACE, "OSPF TI-LFA Q-Space") diff --git a/ospfd/ospf_memory.h b/ospfd/ospf_memory.h index 624b1d3306..42bc8d7b77 100644 --- a/ospfd/ospf_memory.h +++ b/ospfd/ospf_memory.h @@ -57,5 +57,7 @@ DECLARE_MTYPE(OSPF_SR_PARAMS) DECLARE_MTYPE(OSPF_EXT_PARAMS) DECLARE_MTYPE(OSPF_GR_HELPER) DECLARE_MTYPE(OSPF_EXTERNAL_RT_AGGR) +DECLARE_MTYPE(OSPF_P_SPACE) +DECLARE_MTYPE(OSPF_Q_SPACE) #endif /* _QUAGGA_OSPF_MEMORY_H */ diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c index ef39b6c2f6..343e406f28 100644 --- a/ospfd/ospf_packet.c +++ 
b/ospfd/ospf_packet.c @@ -2987,6 +2987,16 @@ static enum ospf_read_return_enum ospf_read_helper(struct ospf *ospf) } } + if (ospf->vrf_id == VRF_DEFAULT && ospf->vrf_id != ifp->vrf_id) { + /* + * We may have a situation where l3mdev_accept == 1 + * let's just kindly drop the packet and move on. + * ospf really really really does not like when + * we receive the same packet multiple times. + */ + return OSPF_READ_CONTINUE; + } + /* Self-originated packet should be discarded silently. */ if (ospf_if_lookup_by_local_addr(ospf, NULL, iph->ip_src)) { if (IS_DEBUG_OSPF_PACKET(0, RECV)) { diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c index 3145d16161..4083ea9332 100644 --- a/ospfd/ospf_ri.c +++ b/ospfd/ospf_ri.c @@ -1451,9 +1451,7 @@ static uint16_t show_vty_sr_range(struct vty *vty, struct tlv_header *tlvh) GET_LABEL(ntohl(range->lower.value))); } else { zlog_debug( - " Segment Routing %s Range TLV:\n" - " Range Size = %d\n" - " SID Label = %d\n\n", + " Segment Routing %s Range TLV: Range Size = %d SID Label = %d", ntohs(range->header.type) == RI_SR_TLV_SRGB_LABEL_RANGE ? "Global" : "Local", @@ -1476,8 +1474,7 @@ static uint16_t show_vty_sr_msd(struct vty *vty, struct tlv_header *tlvh) msd->value); } else { zlog_debug( - " Segment Routing MSD TLV:\n" - " Node Maximum Stack Depth = %d\n", + " Segment Routing MSD TLV: Node Maximum Stack Depth = %d", msd->value); } diff --git a/ospfd/ospf_route.c b/ospfd/ospf_route.c index 590122e223..7cfcaf14be 100644 --- a/ospfd/ospf_route.c +++ b/ospfd/ospf_route.c @@ -71,15 +71,31 @@ struct ospf_path *ospf_path_new(void) static struct ospf_path *ospf_path_dup(struct ospf_path *path) { struct ospf_path *new; + int memsize; new = ospf_path_new(); memcpy(new, path, sizeof(struct ospf_path)); + /* optional TI-LFA backup paths */ + if (path->srni.backup_label_stack) { + memsize = sizeof(struct mpls_label_stack) + + (sizeof(mpls_label_t) + * path->srni.backup_label_stack->num_labels); + new->srni.backup_label_stack = + XCALLOC(MTYPE_OSPF_PATH, memsize); + memcpy(new->srni.backup_label_stack, + path->srni.backup_label_stack, memsize); + } + return new; } void ospf_path_free(struct ospf_path *op) { + /* optional TI-LFA backup paths */ + if (op->srni.backup_label_stack) + XFREE(MTYPE_OSPF_PATH, op->srni.backup_label_stack); + XFREE(MTYPE_OSPF_PATH, op); } @@ -140,6 +156,35 @@ static int ospf_route_exist_new_table(struct route_table *rt, return 1; } +static int ospf_route_backup_path_same(struct sr_nexthop_info *srni1, + struct sr_nexthop_info *srni2) +{ + struct mpls_label_stack *ls1, *ls2; + uint8_t label_count; + + ls1 = srni1->backup_label_stack; + ls2 = srni2->backup_label_stack; + + if (!ls1 && !ls2) + return 1; + + if ((ls1 && !ls2) || (!ls1 && ls2)) + return 0; + + if (ls1->num_labels != ls2->num_labels) + return 0; + + for (label_count = 0; label_count < ls1->num_labels; label_count++) { + if (ls1->label[label_count] != ls2->label[label_count]) + return 0; + } + + if (!IPV4_ADDR_SAME(&srni1->backup_nexthop, &srni2->backup_nexthop)) + return 0; + + return 1; +} + /* If a prefix and a nexthop match any route in the routing table, then return 1, otherwise return 0. 
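
The ospf_path_dup()/ospf_path_free() changes above carry the optional TI-LFA backup label stack across path copies using the usual flexible-array sizing idiom (header size plus num_labels times the per-label size). A minimal self-contained sketch of that idiom; the struct layout here is an assumption standing in for FRR's mpls_label_stack, not a copy of it:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef uint32_t mpls_label_t;

    struct label_stack {
            uint8_t num_labels;
            mpls_label_t label[];   /* flexible array member */
    };

    static struct label_stack *label_stack_dup(const struct label_stack *orig)
    {
            size_t memsize;
            struct label_stack *copy;

            if (!orig)
                    return NULL;    /* the backup stack is optional */

            /* header plus one mpls_label_t per label, as in ospf_path_dup() */
            memsize = sizeof(*orig) + orig->num_labels * sizeof(mpls_label_t);
            copy = calloc(1, memsize);
            if (copy)
                    memcpy(copy, orig, memsize);
            return copy;
    }
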
*/ int ospf_route_match_same(struct route_table *rt, struct prefix_ipv4 *prefix, @@ -180,6 +225,11 @@ int ospf_route_match_same(struct route_table *rt, struct prefix_ipv4 *prefix, return 0; if (op->ifindex != newop->ifindex) return 0; + + /* check TI-LFA backup paths */ + if (!ospf_route_backup_path_same(&op->srni, + &newop->srni)) + return 0; } return 1; } else if (prefix_same(&rn->p, (struct prefix *)prefix)) @@ -664,38 +714,6 @@ void ospf_route_table_dump(struct route_table *rt) zlog_debug("========================================"); } -void ospf_route_table_print(struct vty *vty, struct route_table *rt) -{ - struct route_node *rn; - struct ospf_route * or ; - struct listnode *pnode; - struct ospf_path *path; - - vty_out(vty, "========== OSPF routing table ==========\n"); - for (rn = route_top(rt); rn; rn = route_next(rn)) - if ((or = rn->info) != NULL) { - if (or->type == OSPF_DESTINATION_NETWORK) { - vty_out(vty, "N %-18pFX %-15pI4 %s %d\n", - &rn->p, & or->u.std.area_id, - ospf_path_type_str[or->path_type], - or->cost); - for (ALL_LIST_ELEMENTS_RO(or->paths, pnode, - path)) - if (path->nexthop.s_addr != INADDR_ANY) - vty_out(vty, " -> %pI4\n", - &path->nexthop); - else - vty_out(vty, " -> %s\n", - "directly connected"); - } else - vty_out(vty, "R %-18pI4 %-15pI4 %s %d\n", - &rn->p.u.prefix4, & or->u.std.area_id, - ospf_path_type_str[or->path_type], - or->cost); - } - vty_out(vty, "========================================\n"); -} - /* This is 16.4.1 implementation. o Intra-area paths using non-backbone areas are always the most preferred. o The other paths, intra-area backbone paths and inter-area paths, @@ -802,6 +820,7 @@ void ospf_route_copy_nexthops_from_vertex(struct ospf_area *area, || area->spf_dry_run) { path = ospf_path_new(); path->nexthop = nexthop->router; + path->adv_router = v->id; if (oi) { path->ifindex = oi->ifp->ifindex; diff --git a/ospfd/ospf_route.h b/ospfd/ospf_route.h index c3fa5954d5..811581c0d3 100644 --- a/ospfd/ospf_route.h +++ b/ospfd/ospf_route.h @@ -42,6 +42,10 @@ struct sr_nexthop_info { * or NULL if next hop is the destination of the prefix */ struct sr_node *nexthop; + + /* TI-LFA */ + struct mpls_label_stack *backup_label_stack; + struct in_addr backup_nexthop; }; /* OSPF Path. */ @@ -132,7 +136,6 @@ extern void ospf_route_table_free(struct route_table *); extern void ospf_route_install(struct ospf *, struct route_table *); extern void ospf_route_table_dump(struct route_table *); -extern void ospf_route_table_print(struct vty *vty, struct route_table *rt); extern void ospf_intra_add_router(struct route_table *, struct vertex *, struct ospf_area *); diff --git a/ospfd/ospf_snmp.c b/ospfd/ospf_snmp.c index 66dd9c7ca4..3f4ca44b05 100644 --- a/ospfd/ospf_snmp.c +++ b/ospfd/ospf_snmp.c @@ -1236,7 +1236,6 @@ static struct ospf_nbr_nbma *ospfHostLookup(struct variable *v, oid *name, size_t *length, struct in_addr *addr, int exact) { - int len; struct ospf_nbr_nbma *nbr_nbma; struct ospf *ospf; @@ -1258,28 +1257,8 @@ static struct ospf_nbr_nbma *ospfHostLookup(struct variable *v, oid *name, nbr_nbma = ospf_nbr_nbma_lookup(ospf, *addr); return nbr_nbma; - } else { - len = *length - v->namelen; - if (len > 4) - len = 4; - - oid2in_addr(name + v->namelen, len, addr); - - nbr_nbma = - ospf_nbr_nbma_lookup_next(ospf, addr, len == 0 ? 1 : 0); - - if (nbr_nbma == NULL) - return NULL; - - oid_copy_addr(name + v->namelen, addr, IN_ADDR_SIZE); - - /* Set TOS 0. 
*/ - name[v->namelen + IN_ADDR_SIZE] = 0; - - *length = v->namelen + IN_ADDR_SIZE + 1; - - return nbr_nbma; } + return NULL; } diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c index 4665f53edb..82acd0829c 100644 --- a/ospfd/ospf_spf.c +++ b/ospfd/ospf_spf.c @@ -46,6 +46,7 @@ #include "ospfd/ospf_abr.h" #include "ospfd/ospf_dump.h" #include "ospfd/ospf_sr.h" +#include "ospfd/ospf_ti_lfa.h" #include "ospfd/ospf_errors.h" /* Variables to ensure a SPF scheduled log message is printed only once */ @@ -141,11 +142,16 @@ static void ospf_canonical_nexthops_free(struct vertex *root) ospf_canonical_nexthops_free(child); /* Free child nexthops pointing back to this root vertex */ - for (ALL_LIST_ELEMENTS(child->parents, n2, nn2, vp)) + for (ALL_LIST_ELEMENTS(child->parents, n2, nn2, vp)) { if (vp->parent == root && vp->nexthop) { vertex_nexthop_free(vp->nexthop); vp->nexthop = NULL; + if (vp->local_nexthop) { + vertex_nexthop_free(vp->local_nexthop); + vp->local_nexthop = NULL; + } } + } } } @@ -154,7 +160,8 @@ static void ospf_canonical_nexthops_free(struct vertex *root) * vertex_nexthop, with refcounts. */ static struct vertex_parent *vertex_parent_new(struct vertex *v, int backlink, - struct vertex_nexthop *hop) + struct vertex_nexthop *hop, + struct vertex_nexthop *lhop) { struct vertex_parent *new; @@ -163,6 +170,7 @@ static struct vertex_parent *vertex_parent_new(struct vertex *v, int backlink, new->parent = v; new->backlink = backlink; new->nexthop = hop; + new->local_nexthop = lhop; return new; } @@ -172,7 +180,7 @@ static void vertex_parent_free(void *p) XFREE(MTYPE_OSPF_VERTEX_PARENT, p); } -static int vertex_parent_cmp(void *aa, void *bb) +int vertex_parent_cmp(void *aa, void *bb) { struct vertex_parent *a = aa, *b = bb; return IPV4_ADDR_CMP(&a->nexthop->router, &b->nexthop->router); @@ -284,6 +292,257 @@ static void ospf_vertex_add_parent(struct vertex *v) } } +/* Find a vertex according to its router id */ +struct vertex *ospf_spf_vertex_find(struct in_addr id, struct list *vertex_list) +{ + struct listnode *node; + struct vertex *found; + + for (ALL_LIST_ELEMENTS_RO(vertex_list, node, found)) { + if (found->id.s_addr == id.s_addr) + return found; + } + + return NULL; +} + +/* Find a vertex parent according to its router id */ +struct vertex_parent *ospf_spf_vertex_parent_find(struct in_addr id, + struct vertex *vertex) +{ + struct listnode *node; + struct vertex_parent *found; + + for (ALL_LIST_ELEMENTS_RO(vertex->parents, node, found)) { + if (found->parent->id.s_addr == id.s_addr) + return found; + } + + return NULL; +} + +struct vertex *ospf_spf_vertex_by_nexthop(struct vertex *root, + struct in_addr *nexthop) +{ + struct listnode *node; + struct vertex *child; + struct vertex_parent *vertex_parent; + + for (ALL_LIST_ELEMENTS_RO(root->children, node, child)) { + vertex_parent = ospf_spf_vertex_parent_find(root->id, child); + if (vertex_parent->nexthop->router.s_addr == nexthop->s_addr) + return child; + } + + return NULL; +} + +/* Create a deep copy of a SPF vertex without children and parents */ +static struct vertex *ospf_spf_vertex_copy(struct vertex *vertex) +{ + struct vertex *copy; + + copy = XCALLOC(MTYPE_OSPF_VERTEX, sizeof(struct vertex)); + + memcpy(copy, vertex, sizeof(struct vertex)); + copy->parents = list_new(); + copy->parents->del = vertex_parent_free; + copy->parents->cmp = vertex_parent_cmp; + copy->children = list_new(); + + return copy; +} + +/* Create a deep copy of a SPF vertex_parent */ +static struct vertex_parent * +ospf_spf_vertex_parent_copy(struct 
vertex_parent *vertex_parent) +{ + struct vertex_parent *vertex_parent_copy; + struct vertex_nexthop *nexthop_copy, *local_nexthop_copy; + + vertex_parent_copy = + XCALLOC(MTYPE_OSPF_VERTEX, sizeof(struct vertex_parent)); + + nexthop_copy = vertex_nexthop_new(); + local_nexthop_copy = vertex_nexthop_new(); + + memcpy(vertex_parent_copy, vertex_parent, sizeof(struct vertex_parent)); + memcpy(nexthop_copy, vertex_parent->nexthop, + sizeof(struct vertex_nexthop)); + memcpy(local_nexthop_copy, vertex_parent->local_nexthop, + sizeof(struct vertex_nexthop)); + + vertex_parent_copy->nexthop = nexthop_copy; + vertex_parent_copy->local_nexthop = local_nexthop_copy; + + return vertex_parent_copy; +} + +/* Create a deep copy of a SPF tree */ +void ospf_spf_copy(struct vertex *vertex, struct list *vertex_list) +{ + struct listnode *node; + struct vertex *vertex_copy, *child, *child_copy, *parent_copy; + struct vertex_parent *vertex_parent, *vertex_parent_copy; + + /* First check if the node is already in the vertex list */ + vertex_copy = ospf_spf_vertex_find(vertex->id, vertex_list); + if (!vertex_copy) { + vertex_copy = ospf_spf_vertex_copy(vertex); + listnode_add(vertex_list, vertex_copy); + } + + /* Copy all parents, create parent nodes if necessary */ + for (ALL_LIST_ELEMENTS_RO(vertex->parents, node, vertex_parent)) { + parent_copy = ospf_spf_vertex_find(vertex_parent->parent->id, + vertex_list); + if (!parent_copy) { + parent_copy = + ospf_spf_vertex_copy(vertex_parent->parent); + listnode_add(vertex_list, parent_copy); + } + vertex_parent_copy = ospf_spf_vertex_parent_copy(vertex_parent); + vertex_parent_copy->parent = parent_copy; + listnode_add(vertex_copy->parents, vertex_parent_copy); + } + + /* Copy all children, create child nodes if necessary */ + for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) { + child_copy = ospf_spf_vertex_find(child->id, vertex_list); + if (!child_copy) { + child_copy = ospf_spf_vertex_copy(child); + listnode_add(vertex_list, child_copy); + } + listnode_add(vertex_copy->children, child_copy); + } + + /* Finally continue copying with child nodes */ + for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) + ospf_spf_copy(child, vertex_list); +} + +static void ospf_spf_remove_branch(struct vertex_parent *vertex_parent, + struct vertex *child, + struct list *vertex_list) +{ + struct listnode *node, *nnode, *inner_node, *inner_nnode; + struct vertex *grandchild; + struct vertex_parent *vertex_parent_found; + bool has_more_links = false; + + /* + * First check if there are more nexthops for that parent to that child + */ + for (ALL_LIST_ELEMENTS_RO(child->parents, node, vertex_parent_found)) { + if (vertex_parent_found->parent->id.s_addr + == vertex_parent->parent->id.s_addr + && vertex_parent_found->nexthop->router.s_addr + != vertex_parent->nexthop->router.s_addr) + has_more_links = true; + } + + /* + * No more links from that parent? Then delete the child from its + * children list. + */ + if (!has_more_links) + listnode_delete(vertex_parent->parent->children, child); + + /* + * Delete the vertex_parent from the child parents list, this needs to + * be done anyway. + */ + listnode_delete(child->parents, vertex_parent); + + /* + * Are there actually more parents left? If not, then delete the child! + * This is done by recursively removing the links to the grandchildren, + * such that finally the child can be removed without leaving unused + * partial branches. 
+ */ + if (child->parents->count == 0) { + for (ALL_LIST_ELEMENTS(child->children, node, nnode, + grandchild)) { + for (ALL_LIST_ELEMENTS(grandchild->parents, inner_node, + inner_nnode, + vertex_parent_found)) { + ospf_spf_remove_branch(vertex_parent_found, + grandchild, vertex_list); + } + } + listnode_delete(vertex_list, child); + ospf_vertex_free(child); + } +} + +static int ospf_spf_remove_link(struct vertex *vertex, struct list *vertex_list, + struct router_lsa_link *link) +{ + struct listnode *node, *inner_node; + struct vertex *child; + struct vertex_parent *vertex_parent; + + /* + * Identify the node who shares a subnet (given by the link) with a + * child and remove the branch of this particular child. + */ + for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) { + for (ALL_LIST_ELEMENTS_RO(child->parents, inner_node, + vertex_parent)) { + if ((vertex_parent->local_nexthop->router.s_addr + & link->link_data.s_addr) + == (link->link_id.s_addr + & link->link_data.s_addr)) { + ospf_spf_remove_branch(vertex_parent, child, + vertex_list); + return 0; + } + } + } + + /* No link found yet, move on recursively */ + for (ALL_LIST_ELEMENTS_RO(vertex->children, node, child)) { + if (ospf_spf_remove_link(child, vertex_list, link) == 0) + return 0; + } + + /* link was not removed yet */ + return 1; +} + +void ospf_spf_remove_resource(struct vertex *vertex, struct list *vertex_list, + struct protected_resource *resource) +{ + struct listnode *node, *nnode; + struct vertex *found; + struct vertex_parent *vertex_parent; + + switch (resource->type) { + case OSPF_TI_LFA_LINK_PROTECTION: + ospf_spf_remove_link(vertex, vertex_list, resource->link); + break; + case OSPF_TI_LFA_NODE_PROTECTION: + found = ospf_spf_vertex_find(resource->router_id, vertex_list); + if (!found) + break; + + /* + * Remove the node by removing all links from its parents. Note + * that the child is automatically removed here with the last + * link from a parent, hence no explicit removal of the node. 
+ */ + for (ALL_LIST_ELEMENTS(found->parents, node, nnode, + vertex_parent)) + ospf_spf_remove_branch(vertex_parent, found, + vertex_list); + + break; + default: + /* do nothing */ + break; + } +} + static void ospf_spf_init(struct ospf_area *area, struct ospf_lsa *root_lsa, bool is_dry_run, bool is_root_node) { @@ -427,6 +686,7 @@ static void ospf_spf_flush_parents(struct vertex *w) */ static void ospf_spf_add_parent(struct vertex *v, struct vertex *w, struct vertex_nexthop *newhop, + struct vertex_nexthop *newlhop, unsigned int distance) { struct vertex_parent *vp, *wp; @@ -482,7 +742,8 @@ static void ospf_spf_add_parent(struct vertex *v, struct vertex *w, } } - vp = vertex_parent_new(v, ospf_lsa_has_link(w->lsa, v->lsa), newhop); + vp = vertex_parent_new(v, ospf_lsa_has_link(w->lsa, v->lsa), newhop, + newlhop); listnode_add_sort(w->parents, vp); return; @@ -541,7 +802,7 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, unsigned int distance, int lsa_pos) { struct listnode *node, *nnode; - struct vertex_nexthop *nh; + struct vertex_nexthop *nh, *lnh; struct vertex_parent *vp; unsigned int added = 0; char buf1[BUFSIZ]; @@ -586,17 +847,22 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, struct ospf_interface *oi = NULL; struct in_addr nexthop = {.s_addr = 0}; - oi = ospf_if_lookup_by_lsa_pos(area, lsa_pos); - if (!oi) { - zlog_debug( - "%s: OI not found in LSA: lsa_pos: %d link_id:%s link_data:%s", - __func__, lsa_pos, - inet_ntop(AF_INET, &l->link_id, - buf1, BUFSIZ), - inet_ntop(AF_INET, - &l->link_data, buf2, - BUFSIZ)); - return 0; + if (area->spf_root_node) { + oi = ospf_if_lookup_by_lsa_pos(area, + lsa_pos); + if (!oi) { + zlog_debug( + "%s: OI not found in LSA: lsa_pos: %d link_id:%s link_data:%s", + __func__, lsa_pos, + inet_ntop(AF_INET, + &l->link_id, + buf1, BUFSIZ), + inet_ntop(AF_INET, + &l->link_data, + buf2, + BUFSIZ)); + return 0; + } } /* @@ -644,7 +910,21 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, * as described above using a reverse lookup to * figure out the nexthop. */ - if (oi->type == OSPF_IFTYPE_POINTOPOINT) { + + /* + * HACK: we don't know (yet) how to distinguish + * between P2P and P2MP interfaces by just + * looking at LSAs, which is important for + * TI-LFA since you want to do SPF calculations + * from the perspective of other nodes. Since + * TI-LFA is currently not implemented for P2MP + * we just check here if it is enabled and then + * blindly assume that P2P is used. Ultimately + * the interface code needs to be removed + * somehow. + */ + if (area->ospf->ti_lfa_enabled + || (oi && oi->type == OSPF_IFTYPE_POINTOPOINT)) { struct ospf_neighbor *nbr_w = NULL; /* Calculating node is root node, link @@ -673,7 +953,7 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, } } } - } else if (oi->type + } else if (oi && oi->type == OSPF_IFTYPE_POINTOMULTIPOINT) { struct prefix_ipv4 la; @@ -703,12 +983,22 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, nh = vertex_nexthop_new(); nh->router = nexthop; nh->lsa_pos = lsa_pos; - ospf_spf_add_parent(v, w, nh, distance); + + /* + * Since v is the root the nexthop and + * local nexthop are the same. + */ + lnh = vertex_nexthop_new(); + memcpy(lnh, nh, + sizeof(struct vertex_nexthop)); + + ospf_spf_add_parent(v, w, nh, lnh, + distance); return 1; } else zlog_info( "%s: could not determine nexthop for link %s", - __func__, oi->ifp->name); + __func__, oi ? 
oi->ifp->name : ""); } /* end point-to-point link from V to W */ else if (l->m[0].type == LSA_LINK_TYPE_VIRTUALLINK) { /* @@ -733,7 +1023,17 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, nh = vertex_nexthop_new(); nh->router = vl_data->nexthop.router; nh->lsa_pos = vl_data->nexthop.lsa_pos; - ospf_spf_add_parent(v, w, nh, distance); + + /* + * Since v is the root the nexthop and + * local nexthop are the same. + */ + lnh = vertex_nexthop_new(); + memcpy(lnh, nh, + sizeof(struct vertex_nexthop)); + + ospf_spf_add_parent(v, w, nh, lnh, + distance); return 1; } else zlog_info( @@ -747,7 +1047,15 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, nh = vertex_nexthop_new(); nh->router.s_addr = 0; /* Nexthop not required */ nh->lsa_pos = lsa_pos; - ospf_spf_add_parent(v, w, nh, distance); + + /* + * Since v is the root the nexthop and + * local nexthop are the same. + */ + lnh = vertex_nexthop_new(); + memcpy(lnh, nh, sizeof(struct vertex_nexthop)); + + ospf_spf_add_parent(v, w, nh, lnh, distance); return 1; } } /* end V is the root */ @@ -780,8 +1088,18 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, nh = vertex_nexthop_new(); nh->router = l->link_data; nh->lsa_pos = vp->nexthop->lsa_pos; + + /* + * Since v is the root the nexthop and + * local nexthop are the same. + */ + lnh = vertex_nexthop_new(); + memcpy(lnh, nh, + sizeof(struct vertex_nexthop)); + added = 1; - ospf_spf_add_parent(v, w, nh, distance); + ospf_spf_add_parent(v, w, nh, lnh, + distance); } /* * Note lack of return is deliberate. See next @@ -829,12 +1147,154 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, for (ALL_LIST_ELEMENTS(v->parents, node, nnode, vp)) { added = 1; - ospf_spf_add_parent(v, w, vp->nexthop, distance); + + /* + * The nexthop is inherited, but the local nexthop still needs + * to be created. + */ + if (l) { + lnh = vertex_nexthop_new(); + lnh->router = l->link_data; + lnh->lsa_pos = lsa_pos; + } else { + lnh = NULL; + } + + ospf_spf_add_parent(v, w, vp->nexthop, lnh, distance); } return added; } +static int ospf_spf_is_protected_resource(struct ospf_area *area, + struct router_lsa_link *link, + struct lsa_header *lsa) +{ + uint8_t *p, *lim; + struct router_lsa_link *p_link; + struct router_lsa_link *l = NULL; + struct in_addr router_id; + int link_type; + + if (!area->spf_protected_resource) + return 0; + + link_type = link->m[0].type; + + switch (area->spf_protected_resource->type) { + case OSPF_TI_LFA_LINK_PROTECTION: + p_link = area->spf_protected_resource->link; + if (!p_link) + return 0; + + /* For P2P: check if the link belongs to the same subnet */ + if (link_type == LSA_LINK_TYPE_POINTOPOINT + && (p_link->link_id.s_addr & p_link->link_data.s_addr) + == (link->link_data.s_addr + & p_link->link_data.s_addr)) + return 1; + + /* For stub: check if this the same subnet */ + if (link_type == LSA_LINK_TYPE_STUB + && (p_link->link_id.s_addr == link->link_id.s_addr) + && (p_link->link_data.s_addr == link->link_data.s_addr)) + return 1; + + break; + case OSPF_TI_LFA_NODE_PROTECTION: + router_id = area->spf_protected_resource->router_id; + if (router_id.s_addr == INADDR_ANY) + return 0; + + /* For P2P: check if the link leads to the protected node */ + if (link_type == LSA_LINK_TYPE_POINTOPOINT + && link->link_id.s_addr == router_id.s_addr) + return 1; + + /* The rest is about stub links! 
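+ * A stub link is treated as protected as well if it describes
+ * the subnet of a P2P interface leading to the protected node;
+ * the scan below looks for such a P2P link in this router LSA.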
*/ + if (link_type != LSA_LINK_TYPE_STUB) + return 0; + + /* + * Check if there's a P2P link in the router LSA with the + * corresponding link data in the same subnet. + */ + + p = ((uint8_t *)lsa) + OSPF_LSA_HEADER_SIZE + 4; + lim = ((uint8_t *)lsa) + ntohs(lsa->length); + + while (p < lim) { + l = (struct router_lsa_link *)p; + p += (OSPF_ROUTER_LSA_LINK_SIZE + + (l->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE)); + + /* We only care about P2P with the proper link id */ + if ((l->m[0].type != LSA_LINK_TYPE_POINTOPOINT) + || (l->link_id.s_addr != router_id.s_addr)) + continue; + + /* Link data in the subnet given by the link? */ + if ((link->link_id.s_addr & link->link_data.s_addr) + == (l->link_data.s_addr & link->link_data.s_addr)) + return 1; + } + + break; + case OSPF_TI_LFA_UNDEFINED_PROTECTION: + break; + } + + return 0; +} + +/* + * For TI-LFA we need the reverse SPF for Q spaces. The reverse SPF is created + * by honoring the weight of the reverse 'edge', e.g. the edge from W to V, and + * NOT the weight of the 'edge' from V to W as usual. Hence we need to find the + * corresponding link in the LSA of W and extract the particular weight. + * + * TODO: Only P2P supported by now! + */ +static uint16_t get_reverse_distance(struct vertex *v, + struct router_lsa_link *l, + struct ospf_lsa *w_lsa) +{ + uint8_t *p, *lim; + struct router_lsa_link *w_link; + uint16_t distance = 0; + + assert(w_lsa && w_lsa->data); + + p = ((uint8_t *)w_lsa->data) + OSPF_LSA_HEADER_SIZE + 4; + lim = ((uint8_t *)w_lsa->data) + ntohs(w_lsa->data->length); + + while (p < lim) { + w_link = (struct router_lsa_link *)p; + p += (OSPF_ROUTER_LSA_LINK_SIZE + + (w_link->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE)); + + /* Only care about P2P with link ID equal to V's router id */ + if (w_link->m[0].type == LSA_LINK_TYPE_POINTOPOINT + && w_link->link_id.s_addr == v->id.s_addr) { + distance = ntohs(w_link->m[0].metric); + break; + } + } + + /* + * This might happen if the LSA for W is not complete yet. In this + * case we take the weight of the 'forward' link from V. When the LSA + * for W is completed the reverse SPF is run again anyway. + */ + if (distance == 0) + distance = ntohs(l->m[0].metric); + + if (IS_DEBUG_OSPF_EVENT) + zlog_debug("%s: reversed distance is %u", __func__, distance); + + return distance; +} + /* * RFC2328 16.1 (2). * v is on the SPF tree. Examine the links in v's LSA. Update the list of @@ -850,6 +1310,7 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area, struct router_lsa_link *l = NULL; struct in_addr *r; int type = 0, lsa_pos = -1, lsa_pos_next = 0; + uint16_t link_distance; /* * If this is a router-LSA, and bit V of the router-LSA (see Section @@ -892,6 +1353,16 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area, continue; /* + * Don't process TI-LFA protected resources. + * + * TODO: Replace this by a proper solution, e.g. remove + * corresponding links from the LSDB and run the SPF + * algo with the stripped-down LSDB. + */ + if (ospf_spf_is_protected_resource(area, l, v->lsa)) + continue; + + /* * (b) Otherwise, W is a transit vertex (router or * transit network). Look up the vertex W's LSA * (router-LSA or network-LSA) in Area A's link state @@ -928,8 +1399,19 @@ static void ospf_spf_next(struct vertex *v, struct ospf_area *area, continue; } + /* + * For TI-LFA we might need the reverse SPF. + * Currently only works with P2P! 
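+ * For illustration (hypothetical metrics): if the link V->W is
+ * advertised with metric 10 but W->V with metric 20, the
+ * reverse SPF uses 20 for this edge, taken from W's LSA.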
+ */ + if (type == LSA_LINK_TYPE_POINTOPOINT + && area->spf_reversed) + link_distance = + get_reverse_distance(v, l, w_lsa); + else + link_distance = ntohs(l->m[0].metric); + /* step (d) below */ - distance = v->distance + ntohs(l->m[0].metric); + distance = v->distance + link_distance; } else { /* In case of V is Network-LSA. */ r = (struct in_addr *)p; @@ -1069,8 +1551,7 @@ void ospf_spf_print(struct vty *vty, struct vertex *v, int i) struct vertex_parent *parent; if (v->type == OSPF_VERTEX_ROUTER) { - vty_out(vty, "SPF Result: depth %d [R] %pI4\n", i, - &v->lsa->id); + vty_out(vty, "SPF Result: depth %d [R] %pI4\n", i, &v->lsa->id); } else { struct network_lsa *lsa = (struct network_lsa *)v->lsa; vty_out(vty, "SPF Result: depth %d [N] %pI4/%d\n", i, @@ -1078,9 +1559,11 @@ void ospf_spf_print(struct vty *vty, struct vertex *v, int i) } for (ALL_LIST_ELEMENTS_RO(v->parents, nnode, parent)) { - vty_out(vty, " nexthop %pI4 lsa pos %d\n", - &parent->nexthop->router, - parent->nexthop->lsa_pos); + vty_out(vty, + " nexthop %pI4 lsa pos %d -- local nexthop %pI4 lsa pos %d\n", + &parent->nexthop->router, parent->nexthop->lsa_pos, + &parent->local_nexthop->router, + parent->local_nexthop->lsa_pos); } i++; @@ -1128,7 +1611,9 @@ static void ospf_spf_process_stubs(struct ospf_area *area, struct vertex *v, p += (OSPF_ROUTER_LSA_LINK_SIZE + (l->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE)); - if (l->m[0].type == LSA_LINK_TYPE_STUB) + /* Don't process TI-LFA protected resources */ + if (l->m[0].type == LSA_LINK_TYPE_STUB + && !ospf_spf_is_protected_resource(area, l, v->lsa)) ospf_intra_add_stub(rt, l, v, area, parent_is_root, lsa_pos); lsa_pos++; @@ -1190,73 +1675,13 @@ void ospf_spf_cleanup(struct vertex *spf, struct list *vertex_list) * attached the first level of router vertices attached to the * root vertex, see ospf_nexthop_calculation. */ - ospf_canonical_nexthops_free(spf); + if (spf) + ospf_canonical_nexthops_free(spf); /* Free SPF vertices list with deconstructor ospf_vertex_free. */ - list_delete(&vertex_list); -} - -#if 0 -static void -ospf_rtrs_print (struct route_table *rtrs) -{ - struct route_node *rn; - struct list *or_list; - struct listnode *ln; - struct listnode *pnode; - struct ospf_route *or; - struct ospf_path *path; - char buf1[BUFSIZ]; - char buf2[BUFSIZ]; - - if (IS_DEBUG_OSPF_EVENT) - zlog_debug ("ospf_rtrs_print() start"); - - for (rn = route_top (rtrs); rn; rn = route_next (rn)) - if ((or_list = rn->info) != NULL) - for (ALL_LIST_ELEMENTS_RO (or_list, ln, or)) - { - switch (or->path_type) - { - case OSPF_PATH_INTRA_AREA: - if (IS_DEBUG_OSPF_EVENT) - zlog_debug ("%s [%d] area: %s", - inet_ntop (AF_INET, &or->id, buf1, BUFSIZ), - or->cost, inet_ntop (AF_INET, &or->u.std.area_id, - buf2, BUFSIZ)); - break; - case OSPF_PATH_INTER_AREA: - if (IS_DEBUG_OSPF_EVENT) - zlog_debug ("%s IA [%d] area: %s", - inet_ntop (AF_INET, &or->id, buf1, BUFSIZ), - or->cost, inet_ntop (AF_INET, &or->u.std.area_id, - buf2, BUFSIZ)); - break; - default: - break; - } - - for (ALL_LIST_ELEMENTS_RO (or->paths, pnode, path)) - { - if (path->nexthop.s_addr == INADDR_ANY) - { - if (IS_DEBUG_OSPF_EVENT) - zlog_debug (" directly attached to %s\r", - ifindex2ifname (path->ifindex), VRF_DEFAULT); - } - else - { - if (IS_DEBUG_OSPF_EVENT) - zlog_debug (" via %pI4, %s\r", - &path->nexthop, - ifindex2ifname (path->ifindex), VRF_DEFAULT); - } - } - } - - zlog_debug ("ospf_rtrs_print() end"); + if (vertex_list) + list_delete(&vertex_list); } -#endif /* Calculating the shortest-path tree for an area, see RFC2328 16.1. 
*/ void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa, @@ -1359,19 +1784,27 @@ void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa, if (IS_DEBUG_OSPF_EVENT) zlog_debug("ospf_spf_calculate: Stop. %zd vertices", mtype_stats_alloc(MTYPE_OSPF_VERTEX)); +} + +void ospf_spf_calculate_area(struct ospf *ospf, struct ospf_area *area, + struct route_table *new_table, + struct route_table *new_rtrs) +{ + ospf_spf_calculate(area, area->router_lsa_self, new_table, new_rtrs, + false, true); - /* If this is a dry run then keep the SPF data in place */ - if (!area->spf_dry_run) - ospf_spf_cleanup(area->spf, area->spf_vertex_list); + if (ospf->ti_lfa_enabled) + ospf_ti_lfa_compute(area, new_table, + ospf->ti_lfa_protection_type); + + ospf_spf_cleanup(area->spf, area->spf_vertex_list); } -int ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table, - struct route_table *new_rtrs, bool is_dry_run, - bool is_root_node) +void ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table, + struct route_table *new_rtrs) { struct ospf_area *area; struct listnode *node, *nnode; - int areas_processed = 0; /* Calculate SPF for each area. */ for (ALL_LIST_ELEMENTS(ospf->areas, node, nnode, area)) { @@ -1380,20 +1813,13 @@ int ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table, if (ospf->backbone && ospf->backbone == area) continue; - ospf_spf_calculate(area, area->router_lsa_self, new_table, - new_rtrs, is_dry_run, is_root_node); - areas_processed++; + ospf_spf_calculate_area(ospf, area, new_table, new_rtrs); } /* SPF for backbone, if required */ - if (ospf->backbone) { - area = ospf->backbone; - ospf_spf_calculate(area, area->router_lsa_self, new_table, - new_rtrs, is_dry_run, is_root_node); - areas_processed++; - } - - return areas_processed; + if (ospf->backbone) + ospf_spf_calculate_area(ospf, ospf->backbone, new_table, + new_rtrs); } /* Worker for SPF calculation scheduler. 
*/ @@ -1402,7 +1828,6 @@ static int ospf_spf_calculate_schedule_worker(struct thread *thread) struct ospf *ospf = THREAD_ARG(thread); struct route_table *new_table, *new_rtrs; struct timeval start_time, spf_start_time; - int areas_processed; unsigned long ia_time, prune_time, rt_time; unsigned long abr_time, total_spf_time, spf_time; char rbuf[32]; /* reason_buf */ @@ -1418,8 +1843,7 @@ static int ospf_spf_calculate_schedule_worker(struct thread *thread) monotime(&spf_start_time); new_table = route_table_init(); /* routing table */ new_rtrs = route_table_init(); /* ABR/ASBR routing table */ - areas_processed = ospf_spf_calculate_areas(ospf, new_table, new_rtrs, - false, true); + ospf_spf_calculate_areas(ospf, new_table, new_rtrs); spf_time = monotime_since(&spf_start_time, NULL); ospf_vl_shut_unapproved(ospf); @@ -1512,7 +1936,7 @@ static int ospf_spf_calculate_schedule_worker(struct thread *thread) zlog_info(" RouteInstall: %ld", rt_time); if (IS_OSPF_ABR(ospf)) zlog_info(" ABR: %ld (%d areas)", - abr_time, areas_processed); + abr_time, ospf->areas->count); zlog_info("Reason(s) for SPF: %s", rbuf); } diff --git a/ospfd/ospf_spf.h b/ospfd/ospf_spf.h index 2dc0f8b886..66555be4b7 100644 --- a/ospfd/ospf_spf.h +++ b/ospfd/ospf_spf.h @@ -47,15 +47,15 @@ struct vertex { struct list *children; /* list of children in SPF tree*/ }; -/* A nexthop taken on the root node to get to this (parent) vertex */ struct vertex_nexthop { struct in_addr router; /* router address to send to */ int lsa_pos; /* LSA position for resolving the interface */ }; struct vertex_parent { - struct vertex_nexthop *nexthop; /* nexthop address for this parent */ - struct vertex *parent; /* parent vertex */ + struct vertex_nexthop *nexthop; /* nexthop taken on the root node */ + struct vertex_nexthop *local_nexthop; /* local nexthop of the parent */ + struct vertex *parent; /* parent vertex */ int backlink; /* index back to parent for router-lsa's */ }; @@ -77,12 +77,25 @@ extern void ospf_spf_calculate(struct ospf_area *area, struct route_table *new_table, struct route_table *new_rtrs, bool is_dry_run, bool is_root_node); -extern int ospf_spf_calculate_areas(struct ospf *ospf, +extern void ospf_spf_calculate_area(struct ospf *ospf, struct ospf_area *area, struct route_table *new_table, - struct route_table *new_rtrs, - bool is_dry_run, bool is_root_node); + struct route_table *new_rtrs); +extern void ospf_spf_calculate_areas(struct ospf *ospf, + struct route_table *new_table, + struct route_table *new_rtrs); extern void ospf_rtrs_free(struct route_table *); extern void ospf_spf_cleanup(struct vertex *spf, struct list *vertex_list); +extern void ospf_spf_copy(struct vertex *vertex, struct list *vertex_list); +extern void ospf_spf_remove_resource(struct vertex *vertex, + struct list *vertex_list, + struct protected_resource *resource); +extern struct vertex *ospf_spf_vertex_find(struct in_addr id, + struct list *vertex_list); +extern struct vertex *ospf_spf_vertex_by_nexthop(struct vertex *root, + struct in_addr *nexthop); +extern struct vertex_parent *ospf_spf_vertex_parent_find(struct in_addr id, + struct vertex *vertex); +extern int vertex_parent_cmp(void *aa, void *bb); extern void ospf_spf_print(struct vty *vty, struct vertex *v, int i); diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c index e2218957d2..7b2d794214 100644 --- a/ospfd/ospf_sr.c +++ b/ospfd/ospf_sr.c @@ -159,6 +159,16 @@ static struct sr_node *sr_node_new(struct in_addr *rid) return new; } +/* Supposed to be used for testing */ +struct sr_node 
*ospf_sr_node_create(struct in_addr *rid) +{ + struct sr_node *srn; + + srn = hash_get(OspfSR.neighbors, (void *)rid, (void *)sr_node_new); + + return srn; +} + /* Delete Segment Routing node */ static void sr_node_del(struct sr_node *srn) { @@ -588,11 +598,6 @@ int ospf_sr_init(void) if (OspfSR.neighbors == NULL) return rc; - /* Initialize Route Table for prefix */ - OspfSR.prefix = route_table_init(); - if (OspfSR.prefix == NULL) - return rc; - /* Register Segment Routing VTY command */ ospf_sr_register_vty(); @@ -616,9 +621,6 @@ void ospf_sr_term(void) if (OspfSR.neighbors) hash_free(OspfSR.neighbors); - /* Clear Prefix Table */ - if (OspfSR.prefix) - route_table_finish(OspfSR.prefix); } /* @@ -653,6 +655,56 @@ static mpls_label_t index2label(uint32_t index, struct sr_block srgb) return label; } +/* Get the prefix sid for a specific router id */ +mpls_label_t ospf_sr_get_prefix_sid_by_id(struct in_addr *id) +{ + struct sr_node *srn; + struct sr_prefix *srp; + mpls_label_t label; + + srn = (struct sr_node *)hash_lookup(OspfSR.neighbors, id); + + if (srn) { + /* + * TODO: Here we assume that the SRGBs are the same, + * and that the node's prefix SID is at the head of + * the list, probably needs tweaking. + */ + srp = listnode_head(srn->ext_prefix); + label = index2label(srp->sid, srn->srgb); + } else { + label = MPLS_INVALID_LABEL; + } + + return label; +} + +/* Get the adjacency sid for a specific 'root' id and 'neighbor' id */ +mpls_label_t ospf_sr_get_adj_sid_by_id(struct in_addr *root_id, + struct in_addr *neighbor_id) +{ + struct sr_node *srn; + struct sr_link *srl; + mpls_label_t label; + struct listnode *node; + + srn = (struct sr_node *)hash_lookup(OspfSR.neighbors, root_id); + + label = MPLS_INVALID_LABEL; + + if (srn) { + for (ALL_LIST_ELEMENTS_RO(srn->ext_link, node, srl)) { + if (srl->type == ADJ_SID + && srl->remote_id.s_addr == neighbor_id->s_addr) { + label = srl->sid[0]; + break; + } + } + } + + return label; +} + /* Get neighbor full structure from address */ static struct ospf_neighbor *get_neighbor_by_addr(struct ospf *top, struct in_addr addr) @@ -853,8 +905,13 @@ static int compute_prefix_nhlfe(struct sr_prefix *srp) * be received before corresponding Router Information LSA */ if (srnext == NULL || srnext->srgb.lower_bound == 0 - || srnext->srgb.range_size == 0) + || srnext->srgb.range_size == 0) { + osr_debug( + " |- SR-Node %pI4 not ready. 
Stop process", + &srnext->adv_router); + path->srni.label_out = MPLS_INVALID_LABEL; continue; + } osr_debug(" |- Found SRGB %u/%u for next hop SR-Node %pI4", srnext->srgb.range_size, srnext->srgb.lower_bound, @@ -1213,7 +1270,7 @@ static void update_in_nhlfe(struct hash_bucket *bucket, void *args) /* * When SRGB has changed, update NHLFE Output Label for all Extended Prefix - * with SID index which use the given SR-Node as nexthop though hash_iterate() + * with SID index which use the given SR-Node as nexthop through hash_iterate() */ static void update_out_nhlfe(struct hash_bucket *bucket, void *args) { @@ -1223,21 +1280,29 @@ static void update_out_nhlfe(struct hash_bucket *bucket, void *args) struct sr_prefix *srp; struct ospf_path *path; + /* Skip Self SR-Node */ + if (srn == OspfSR.self) + return; + + osr_debug("SR (%s): Update Out NHLFE for neighbor SR-Node %pI4", + __func__, &srn->adv_router); + for (ALL_LIST_ELEMENTS_RO(srn->ext_prefix, node, srp)) { - /* Process only SID Index with valid route */ + /* Skip Prefix that has not yet a valid route */ if (srp->route == NULL) continue; for (ALL_LIST_ELEMENTS_RO(srp->route->paths, pnode, path)) { - /* Process only SID Index for next hop without PHP */ - if ((path->srni.nexthop == srp->srn) - && (!CHECK_FLAG(srp->flags, - EXT_SUBTLV_PREFIX_SID_NPFLG))) + /* Skip path that has not next SR-Node as nexthop */ + if (path->srni.nexthop != srnext) continue; - path->srni.label_out = - index2label(srp->sid, srnext->srgb); - ospf_zebra_update_prefix_sid(srp); + + /* Compute new Output Label */ + path->srni.label_out = sr_prefix_out_label(srp, srnext); } + + /* Finally update MPLS table */ + ospf_zebra_update_prefix_sid(srp); } } @@ -1378,11 +1443,6 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) srn->srlb.lower_bound = GET_LABEL(ntohl(ri_srlb->lower.value)); } - osr_debug(" |- Update SR-Node[%pI4], SRGB[%u/%u], SRLB[%u/%u], Algo[%u], MSD[%u]", - &srn->adv_router, srn->srgb.lower_bound, srn->srgb.range_size, - srn->srlb.lower_bound, srn->srlb.range_size, srn->algo[0], - srn->msd); - /* Check if SRGB has changed */ if ((srn->srgb.range_size == srgb.range_size) && (srn->srgb.lower_bound == srgb.lower_bound)) @@ -1392,6 +1452,11 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) srn->srgb.range_size = srgb.range_size; srn->srgb.lower_bound = srgb.lower_bound; + osr_debug(" |- Update SR-Node[%pI4], SRGB[%u/%u], SRLB[%u/%u], Algo[%u], MSD[%u]", + &srn->adv_router, srn->srgb.lower_bound, srn->srgb.range_size, + srn->srlb.lower_bound, srn->srlb.range_size, srn->algo[0], + srn->msd); + /* ... 
and NHLFE if it is a neighbor SR node */ if (srn->neighbor == OspfSR.self) hash_iterate(OspfSR.neighbors, update_out_nhlfe, srn); @@ -1562,6 +1627,7 @@ void ospf_sr_ext_itf_add(struct ext_itf *exti) srl->itf_addr = exti->link.link_data; srl->instance = SET_OPAQUE_LSID(OPAQUE_TYPE_EXTENDED_LINK_LSA, exti->instance); + srl->remote_id = exti->link.link_id; switch (exti->stype) { case ADJ_SID: srl->type = ADJ_SID; diff --git a/ospfd/ospf_sr.h b/ospfd/ospf_sr.h index 222675944d..ce13457484 100644 --- a/ospfd/ospf_sr.h +++ b/ospfd/ospf_sr.h @@ -233,9 +233,6 @@ struct ospf_sr_db { /* List of neighbour SR nodes */ struct hash *neighbors; - /* List of SR prefix */ - struct route_table *prefix; - /* Local SR info announced in Router Info LSA */ /* Algorithms supported by the node */ @@ -290,6 +287,9 @@ struct sr_link { /* 24-bit Opaque-ID field value according to RFC 7684 specification */ uint32_t instance; + /* Addressed (remote) router id */ + struct in_addr remote_id; + /* Interface address */ struct in_addr itf_addr; @@ -361,4 +361,11 @@ extern void ospf_sr_update_local_prefix(struct interface *ifp, struct prefix *p); /* Segment Routing re-routing function */ extern void ospf_sr_update_task(struct ospf *ospf); + +/* Support for TI-LFA */ +extern mpls_label_t ospf_sr_get_prefix_sid_by_id(struct in_addr *id); +extern mpls_label_t ospf_sr_get_adj_sid_by_id(struct in_addr *root_id, + struct in_addr *neighbor_id); +extern struct sr_node *ospf_sr_node_create(struct in_addr *rid); + #endif /* _FRR_OSPF_SR_H */ diff --git a/ospfd/ospf_ti_lfa.c b/ospfd/ospf_ti_lfa.c new file mode 100644 index 0000000000..4a0186bfb9 --- /dev/null +++ b/ospfd/ospf_ti_lfa.c @@ -0,0 +1,1114 @@ +/* + * OSPF TI-LFA + * Copyright (C) 2020 NetDEF, Inc. + * Sascha Kattelmann + * + * This file is part of FRR. + * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "prefix.h" +#include "table.h" +#include "printfrr.h" + +#include "ospfd/ospfd.h" +#include "ospfd/ospf_asbr.h" +#include "ospfd/ospf_lsa.h" +#include "ospfd/ospf_spf.h" +#include "ospfd/ospf_sr.h" +#include "ospfd/ospf_route.h" +#include "ospfd/ospf_ti_lfa.h" +#include "ospfd/ospf_dump.h" + + +DECLARE_RBTREE_UNIQ(p_spaces, struct p_space, p_spaces_item, + p_spaces_compare_func) +DECLARE_RBTREE_UNIQ(q_spaces, struct q_space, q_spaces_item, + q_spaces_compare_func) + +static void +ospf_ti_lfa_generate_p_space(struct ospf_area *area, struct vertex *child, + struct protected_resource *protected_resource, + bool recursive, struct list *pc_path); + +void ospf_print_protected_resource( + struct protected_resource *protected_resource, char *buf) +{ + struct router_lsa_link *link; + + switch (protected_resource->type) { + case OSPF_TI_LFA_LINK_PROTECTION: + link = protected_resource->link; + snprintfrr(buf, PROTECTED_RESOURCE_STRLEN, + "protected link: %pI4 %pI4", &link->link_id, + &link->link_data); + break; + case OSPF_TI_LFA_NODE_PROTECTION: + snprintfrr(buf, PROTECTED_RESOURCE_STRLEN, + "protected node: %pI4", + &protected_resource->router_id); + break; + case OSPF_TI_LFA_UNDEFINED_PROTECTION: + snprintfrr(buf, PROTECTED_RESOURCE_STRLEN, + "undefined protected resource"); + break; + } +} + +static enum ospf_ti_lfa_p_q_space_adjacency +ospf_ti_lfa_find_p_node(struct vertex *pc_node, struct p_space *p_space, + struct q_space *q_space) +{ + struct listnode *curr_node; + struct vertex *p_node = NULL, *pc_node_parent, *p_node_pc_parent; + struct vertex_parent *pc_vertex_parent; + + curr_node = listnode_lookup(q_space->pc_path, pc_node); + pc_node_parent = listgetdata(curr_node->next); + + q_space->p_node_info->type = OSPF_TI_LFA_UNDEFINED_NODE; + + p_node = ospf_spf_vertex_find(pc_node_parent->id, p_space->vertex_list); + + if (p_node) { + q_space->p_node_info->node = p_node; + q_space->p_node_info->type = OSPF_TI_LFA_P_NODE; + + if (curr_node->next->next) { + p_node_pc_parent = listgetdata(curr_node->next->next); + pc_vertex_parent = ospf_spf_vertex_parent_find( + p_node_pc_parent->id, pc_node_parent); + q_space->p_node_info->nexthop = + pc_vertex_parent->nexthop->router; + } else { + /* + * It can happen that the P node is the root node itself + * (hence there can be no parents). In this case we + * don't need to set a nexthop. + */ + q_space->p_node_info->nexthop.s_addr = INADDR_ANY; + } + + return OSPF_TI_LFA_P_Q_SPACE_ADJACENT; + } + + ospf_ti_lfa_find_p_node(pc_node_parent, p_space, q_space); + return OSPF_TI_LFA_P_Q_SPACE_NON_ADJACENT; +} + +static void ospf_ti_lfa_find_q_node(struct vertex *pc_node, + struct p_space *p_space, + struct q_space *q_space) +{ + struct listnode *curr_node, *next_node; + struct vertex *p_node, *q_node, *q_space_parent = NULL, *pc_node_parent; + struct vertex_parent *pc_vertex_parent; + + curr_node = listnode_lookup(q_space->pc_path, pc_node); + next_node = curr_node->next; + pc_node_parent = listgetdata(next_node); + pc_vertex_parent = + ospf_spf_vertex_parent_find(pc_node_parent->id, pc_node); + + p_node = ospf_spf_vertex_find(pc_node->id, p_space->vertex_list); + q_node = ospf_spf_vertex_find(pc_node->id, q_space->vertex_list); + + /* The Q node is always present. 
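+ * The walk starts at the Q space root (the destination) and only
+ * recurses to nodes already found among the Q space children, so
+ * the lookup above should not fail.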
*/ + assert(q_node); + + q_space->q_node_info->type = OSPF_TI_LFA_UNDEFINED_NODE; + + if (p_node && q_node) { + q_space->q_node_info->node = pc_node; + q_space->q_node_info->type = OSPF_TI_LFA_PQ_NODE; + q_space->q_node_info->nexthop = + pc_vertex_parent->nexthop->router; + return; + } + + /* + * Note that the Q space has the 'reverse' direction of the PC + * SPF. Hence compare PC SPF parent to Q space children. + */ + q_space_parent = + ospf_spf_vertex_find(pc_node_parent->id, q_node->children); + + /* + * If the Q space parent doesn't exist we 'hit' the border to the P + * space and hence got our Q node. + */ + if (!q_space_parent) { + q_space->q_node_info->node = pc_node; + q_space->q_node_info->type = OSPF_TI_LFA_Q_NODE; + q_space->q_node_info->nexthop = + pc_vertex_parent->nexthop->router; + return; + } + + return ospf_ti_lfa_find_q_node(pc_node_parent, p_space, q_space); +} + +static void ospf_ti_lfa_append_label_stack(struct mpls_label_stack *label_stack, + mpls_label_t labels[], + uint32_t num_labels) +{ + int i, offset, limit; + + limit = label_stack->num_labels + num_labels; + offset = label_stack->num_labels; + + for (i = label_stack->num_labels; i < limit; i++) { + label_stack->label[i] = labels[i - offset]; + label_stack->num_labels++; + } +} + + +static struct mpls_label_stack * +ospf_ti_lfa_create_label_stack(mpls_label_t labels[], uint32_t num_labels) +{ + struct mpls_label_stack *label_stack; + uint32_t i; + + /* Sanity check */ + for (i = 0; i < num_labels; i++) { + if (labels[i] == MPLS_INVALID_LABEL) + return NULL; + } + + label_stack = XCALLOC(MTYPE_OSPF_Q_SPACE, + sizeof(struct mpls_label_stack) + + MPLS_MAX_LABELS * sizeof(mpls_label_t)); + label_stack->num_labels = num_labels; + + for (i = 0; i < num_labels; i++) + label_stack->label[i] = labels[i]; + + return label_stack; +} + +static struct list * +ospf_ti_lfa_map_path_to_pc_vertices(struct list *path, + struct list *pc_vertex_list) +{ + struct listnode *node; + struct vertex *vertex, *pc_vertex; + struct list *pc_path; + + pc_path = list_new(); + + for (ALL_LIST_ELEMENTS_RO(path, node, vertex)) { + pc_vertex = ospf_spf_vertex_find(vertex->id, pc_vertex_list); + listnode_add(pc_path, pc_vertex); + } + + return pc_path; +} + +static struct list *ospf_ti_lfa_cut_out_pc_path(struct list *pc_vertex_list, + struct list *pc_path, + struct vertex *p_node, + struct vertex *q_node) +{ + struct list *inner_pc_path; + struct vertex *current_vertex; + struct listnode *current_listnode; + + inner_pc_path = list_new(); + current_vertex = ospf_spf_vertex_find(q_node->id, pc_vertex_list); + current_listnode = listnode_lookup(pc_path, current_vertex); + + /* Note that the post-convergence paths are reversed. 
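+ * They are built from the destination back towards the root, so
+ * following the 'next' pointers here moves from the Q node
+ * towards the P node.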
*/ + for (;;) { + current_vertex = listgetdata(current_listnode); + listnode_add(inner_pc_path, current_vertex); + + if (current_vertex->id.s_addr == p_node->id.s_addr) + break; + + current_listnode = current_listnode->next; + } + + return inner_pc_path; +} + +static void ospf_ti_lfa_generate_inner_label_stack( + struct ospf_area *area, struct p_space *p_space, + struct q_space *q_space, + struct ospf_ti_lfa_inner_backup_path_info *inner_backup_path_info) +{ + struct route_table *new_table, *new_rtrs; + struct vertex *q_node; + struct vertex *start_vertex, *end_vertex; + struct vertex_parent *vertex_parent; + struct listnode *pc_p_node, *pc_q_node; + struct vertex *spf_orig; + struct list *vertex_list_orig; + struct p_spaces_head *p_spaces_orig; + struct p_space *inner_p_space; + struct q_space *inner_q_space; + struct ospf_ti_lfa_node_info *p_node_info, *q_node_info; + struct protected_resource *protected_resource; + struct list *inner_pc_path; + mpls_label_t start_label, end_label; + + p_node_info = q_space->p_node_info; + q_node_info = q_space->q_node_info; + protected_resource = p_space->protected_resource; + + start_vertex = p_node_info->node; + end_vertex = q_node_info->node; + + /* + * It can happen that the P node and/or the Q node are the root or + * the destination, therefore we need to force one step forward (resp. + * backward) using an Adjacency-SID. + */ + start_label = MPLS_INVALID_LABEL; + end_label = MPLS_INVALID_LABEL; + if (p_node_info->node->id.s_addr == p_space->root->id.s_addr) { + pc_p_node = listnode_lookup(q_space->pc_path, p_space->pc_spf); + start_vertex = listgetdata(pc_p_node->prev); + start_label = ospf_sr_get_adj_sid_by_id(&p_node_info->node->id, + &start_vertex->id); + } + if (q_node_info->node->id.s_addr == q_space->root->id.s_addr) { + pc_q_node = listnode_lookup(q_space->pc_path, + listnode_head(q_space->pc_path)); + end_vertex = listgetdata(pc_q_node->next); + end_label = ospf_sr_get_adj_sid_by_id(&end_vertex->id, + &q_node_info->node->id); + } + + /* Corner case: inner path is just one node */ + if (start_vertex->id.s_addr == end_vertex->id.s_addr) { + inner_backup_path_info->label_stack = + ospf_ti_lfa_create_label_stack(&start_label, 1); + inner_backup_path_info->q_node_info.node = end_vertex; + inner_backup_path_info->q_node_info.type = OSPF_TI_LFA_PQ_NODE; + inner_backup_path_info->p_node_info.type = + OSPF_TI_LFA_UNDEFINED_NODE; + vertex_parent = ospf_spf_vertex_parent_find(p_space->root->id, + end_vertex); + inner_backup_path_info->p_node_info.nexthop = + vertex_parent->nexthop->router; + return; + } + + inner_pc_path = ospf_ti_lfa_cut_out_pc_path(p_space->pc_vertex_list, + q_space->pc_path, + start_vertex, end_vertex); + + new_table = route_table_init(); + new_rtrs = route_table_init(); + + /* Copy the current state ... 
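+ * area->spf, area->spf_vertex_list and area->p_spaces are
+ * overwritten by the nested SPF and P/Q space computation below,
+ * so they are saved here and restored afterwards.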
*/ + spf_orig = area->spf; + vertex_list_orig = area->spf_vertex_list; + p_spaces_orig = area->p_spaces; + + area->p_spaces = + XCALLOC(MTYPE_OSPF_P_SPACE, sizeof(struct p_spaces_head)); + + /* dry run true, root node false */ + ospf_spf_calculate(area, start_vertex->lsa_p, new_table, new_rtrs, true, + false); + + q_node = ospf_spf_vertex_find(end_vertex->id, area->spf_vertex_list); + + ospf_ti_lfa_generate_p_space(area, q_node, protected_resource, false, + inner_pc_path); + + /* There's just one P and Q space */ + inner_p_space = p_spaces_pop(area->p_spaces); + inner_q_space = q_spaces_pop(inner_p_space->q_spaces); + + /* Copy over inner backup path information from the inner q_space */ + + /* In case the outer P node is also the root of the P space */ + if (start_label != MPLS_INVALID_LABEL) { + inner_backup_path_info->label_stack = + ospf_ti_lfa_create_label_stack(&start_label, 1); + ospf_ti_lfa_append_label_stack( + inner_backup_path_info->label_stack, + inner_q_space->label_stack->label, + inner_q_space->label_stack->num_labels); + inner_backup_path_info->p_node_info.node = start_vertex; + inner_backup_path_info->p_node_info.type = OSPF_TI_LFA_P_NODE; + vertex_parent = ospf_spf_vertex_parent_find(p_space->root->id, + start_vertex); + inner_backup_path_info->p_node_info.nexthop = + vertex_parent->nexthop->router; + } else { + memcpy(inner_backup_path_info->label_stack, + inner_q_space->label_stack, + sizeof(struct mpls_label_stack) + + sizeof(mpls_label_t) + * inner_q_space->label_stack + ->num_labels); + memcpy(&inner_backup_path_info->p_node_info, + inner_q_space->p_node_info, + sizeof(struct ospf_ti_lfa_node_info)); + } + + /* In case the outer Q node is also the root of the Q space */ + if (end_label != MPLS_INVALID_LABEL) { + inner_backup_path_info->q_node_info.node = end_vertex; + inner_backup_path_info->q_node_info.type = OSPF_TI_LFA_Q_NODE; + } else { + memcpy(&inner_backup_path_info->q_node_info, + inner_q_space->q_node_info, + sizeof(struct ospf_ti_lfa_node_info)); + } + + /* Cleanup */ + ospf_ti_lfa_free_p_spaces(area); + ospf_spf_cleanup(area->spf, area->spf_vertex_list); + + /* ... and copy the current state back. */ + area->spf = spf_orig; + area->spf_vertex_list = vertex_list_orig; + area->p_spaces = p_spaces_orig; +} + +static void ospf_ti_lfa_generate_label_stack(struct ospf_area *area, + struct p_space *p_space, + struct q_space *q_space) +{ + enum ospf_ti_lfa_p_q_space_adjacency adjacency_result; + mpls_label_t labels[MPLS_MAX_LABELS]; + struct vertex *pc_node; + struct ospf_ti_lfa_inner_backup_path_info inner_backup_path_info; + + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: Generating Label stack for src %pI4 and dest %pI4.", + __func__, &p_space->root->id, &q_space->root->id); + + pc_node = listnode_head(q_space->pc_path); + + if (!pc_node) { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: There seems to be no post convergence path (yet).", + __func__); + return; + } + + ospf_ti_lfa_find_q_node(pc_node, p_space, q_space); + if (q_space->q_node_info->type == OSPF_TI_LFA_UNDEFINED_NODE) { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug("%s: Q node not found!", __func__); + return; + } + + /* Found a PQ node? Then we are done here. */ + if (q_space->q_node_info->type == OSPF_TI_LFA_PQ_NODE) { + /* + * If the PQ node is a child of the root, then we can use an + * adjacency SID instead of a prefix SID for the backup path. 
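+ * For illustration (hypothetical topology): if the PQ node N is a
+ * direct neighbor of root R, a single Adj-SID(R->N) is enough;
+ * otherwise the Prefix-SID of N carries the packet to it.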
+ */ + if (ospf_spf_vertex_parent_find(p_space->root->id, + q_space->q_node_info->node)) + labels[0] = ospf_sr_get_adj_sid_by_id( + &p_space->root->id, + &q_space->q_node_info->node->id); + else + labels[0] = ospf_sr_get_prefix_sid_by_id( + &q_space->q_node_info->node->id); + + q_space->label_stack = + ospf_ti_lfa_create_label_stack(labels, 1); + q_space->nexthop = q_space->q_node_info->nexthop; + + return; + } + + /* Otherwise find a (hopefully adjacent) P node. */ + pc_node = ospf_spf_vertex_find(q_space->q_node_info->node->id, + p_space->pc_vertex_list); + adjacency_result = ospf_ti_lfa_find_p_node(pc_node, p_space, q_space); + + if (q_space->p_node_info->type == OSPF_TI_LFA_UNDEFINED_NODE) { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug("%s: P node not found!", __func__); + return; + } + + /* + * This should be the regular case: P and Q space are adjacent or even + * overlapping. This is guaranteed for link protection when used with + * symmetric weights. + */ + if (adjacency_result == OSPF_TI_LFA_P_Q_SPACE_ADJACENT) { + /* + * It can happen that the P node is the root itself, therefore + * we don't need a label for it. So just one adjacency SID for + * the Q node. + */ + if (q_space->p_node_info->node->id.s_addr + == p_space->root->id.s_addr) { + labels[0] = ospf_sr_get_adj_sid_by_id( + &p_space->root->id, + &q_space->q_node_info->node->id); + q_space->label_stack = + ospf_ti_lfa_create_label_stack(labels, 1); + q_space->nexthop = q_space->q_node_info->nexthop; + return; + } + + /* + * Otherwise we have a P and also a Q node (which are adjacent). + * + * It can happen that the P node is a child of the root, + * therefore we might just need the adjacency SID for the P node + * instead of the prefix SID. For the Q node always take the + * adjacency SID. + */ + if (ospf_spf_vertex_parent_find(p_space->root->id, + q_space->p_node_info->node)) + labels[0] = ospf_sr_get_adj_sid_by_id( + &p_space->root->id, + &q_space->p_node_info->node->id); + else + labels[0] = ospf_sr_get_prefix_sid_by_id( + &q_space->p_node_info->node->id); + + labels[1] = ospf_sr_get_adj_sid_by_id( + &q_space->p_node_info->node->id, + &q_space->q_node_info->node->id); + + q_space->label_stack = + ospf_ti_lfa_create_label_stack(labels, 2); + q_space->nexthop = q_space->p_node_info->nexthop; + + } else { + /* + * It can happen that the P and Q space are not adjacent when + * e.g. node protection or asymmetric weights are used. In this + * case the found P and Q nodes are used as a reference for + * another run of the algorithm! + * + * After having found the inner label stack it is stitched + * together with the outer labels. + */ + inner_backup_path_info.label_stack = XCALLOC( + MTYPE_OSPF_PATH, + sizeof(struct mpls_label_stack) + + sizeof(mpls_label_t) * MPLS_MAX_LABELS); + ospf_ti_lfa_generate_inner_label_stack(area, p_space, q_space, + &inner_backup_path_info); + + /* + * First stitch together the outer P node label with the inner + * label stack. + */ + if (q_space->p_node_info->node->id.s_addr + == p_space->root->id.s_addr) { + /* + * It can happen that the P node is the root itself, + * therefore we don't need a label for it. Just take + * the inner label stack first. 
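+ * The nexthop is then taken from the inner P or Q node
+ * below, since the outer P node (the root itself) does not
+ * contribute a label.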
+ */ + q_space->label_stack = ospf_ti_lfa_create_label_stack( + inner_backup_path_info.label_stack->label, + inner_backup_path_info.label_stack->num_labels); + + /* Use the inner P or Q node for the nexthop */ + if (inner_backup_path_info.p_node_info.type + != OSPF_TI_LFA_UNDEFINED_NODE) + q_space->nexthop = inner_backup_path_info + .p_node_info.nexthop; + else + q_space->nexthop = inner_backup_path_info + .q_node_info.nexthop; + + } else if (ospf_spf_vertex_parent_find( + p_space->root->id, + q_space->p_node_info->node)) { + /* + * It can happen that the outer P node is a child of + * the root, therefore we might just need the + * adjacency SID for the outer P node instead of the + * prefix SID. Then just append the inner label stack. + */ + labels[0] = ospf_sr_get_adj_sid_by_id( + &p_space->root->id, + &q_space->p_node_info->node->id); + q_space->label_stack = + ospf_ti_lfa_create_label_stack(labels, 1); + ospf_ti_lfa_append_label_stack( + q_space->label_stack, + inner_backup_path_info.label_stack->label, + inner_backup_path_info.label_stack->num_labels); + q_space->nexthop = q_space->p_node_info->nexthop; + } else { + /* The outer P node needs a Prefix-SID here */ + labels[0] = ospf_sr_get_prefix_sid_by_id( + &q_space->p_node_info->node->id); + q_space->label_stack = + ospf_ti_lfa_create_label_stack(labels, 1); + ospf_ti_lfa_append_label_stack( + q_space->label_stack, + inner_backup_path_info.label_stack->label, + inner_backup_path_info.label_stack->num_labels); + q_space->nexthop = q_space->p_node_info->nexthop; + } + + /* Now the outer Q node needs to be considered */ + if (ospf_spf_vertex_parent_find( + inner_backup_path_info.q_node_info.node->id, + q_space->q_node_info->node)) { + /* + * The outer Q node can be a child of the inner Q node, + * hence just add an Adjacency-SID. + */ + labels[0] = ospf_sr_get_adj_sid_by_id( + &inner_backup_path_info.q_node_info.node->id, + &q_space->q_node_info->node->id); + ospf_ti_lfa_append_label_stack(q_space->label_stack, + labels, 1); + } else { + /* Otherwise a Prefix-SID is needed */ + labels[0] = ospf_sr_get_prefix_sid_by_id( + &q_space->q_node_info->node->id); + ospf_ti_lfa_append_label_stack(q_space->label_stack, + labels, 1); + } + /* + * Note that there's also the case where the inner and outer Q + * node are the same, but then there's nothing to do! 
+ */ + } +} + +static struct list * +ospf_ti_lfa_generate_post_convergence_path(struct list *pc_vertex_list, + struct vertex *dest) +{ + struct list *pc_path; + struct vertex *current_vertex; + struct vertex_parent *parent; + + current_vertex = ospf_spf_vertex_find(dest->id, pc_vertex_list); + if (!current_vertex) { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: There seems to be no post convergence path (yet).", + __func__); + return NULL; + } + + pc_path = list_new(); + listnode_add(pc_path, current_vertex); + + /* Generate a backup path in reverse order */ + for (;;) { + parent = listnode_head(current_vertex->parents); + if (!parent) + break; + + listnode_add(pc_path, parent->parent); + current_vertex = parent->parent; + } + + return pc_path; +} + +static void ospf_ti_lfa_generate_q_spaces(struct ospf_area *area, + struct p_space *p_space, + struct vertex *dest, bool recursive, + struct list *pc_path) +{ + struct listnode *node; + struct vertex *child; + struct route_table *new_table, *new_rtrs; + struct q_space *q_space, q_space_search; + char label_buf[MPLS_LABEL_STRLEN]; + char res_buf[PROTECTED_RESOURCE_STRLEN]; + bool node_protected; + + ospf_print_protected_resource(p_space->protected_resource, res_buf); + node_protected = + p_space->protected_resource->type == OSPF_TI_LFA_NODE_PROTECTION + && dest->id.s_addr + == p_space->protected_resource->router_id.s_addr; + + /* + * If node protection is used, don't build a Q space for the protected + * node of that particular P space. Move on with children instead. + */ + if (node_protected) { + if (recursive) { + /* Recursively generate Q spaces for all children */ + for (ALL_LIST_ELEMENTS_RO(dest->children, node, child)) + ospf_ti_lfa_generate_q_spaces(area, p_space, + child, recursive, + pc_path); + } + return; + } + + /* Check if we already have a Q space for this destination */ + q_space_search.root = dest; + if (q_spaces_find(p_space->q_spaces, &q_space_search)) + return; + + q_space = XCALLOC(MTYPE_OSPF_Q_SPACE, sizeof(struct q_space)); + q_space->p_node_info = XCALLOC(MTYPE_OSPF_Q_SPACE, + sizeof(struct ospf_ti_lfa_node_info)); + q_space->q_node_info = XCALLOC(MTYPE_OSPF_Q_SPACE, + sizeof(struct ospf_ti_lfa_node_info)); + + new_table = route_table_init(); + new_rtrs = route_table_init(); + + /* + * Generate a new (reversed!) SPF tree for this vertex, + * dry run true, root node false + */ + area->spf_reversed = true; + ospf_spf_calculate(area, dest->lsa_p, new_table, new_rtrs, true, false); + + /* Reset the flag for reverse SPF */ + area->spf_reversed = false; + + q_space->root = area->spf; + q_space->vertex_list = area->spf_vertex_list; + q_space->label_stack = NULL; + + if (pc_path) + q_space->pc_path = ospf_ti_lfa_map_path_to_pc_vertices( + pc_path, p_space->pc_vertex_list); + else + q_space->pc_path = ospf_ti_lfa_generate_post_convergence_path( + p_space->pc_vertex_list, q_space->root); + + /* If there's no backup path available then we are done here. */ + if (!q_space->pc_path) { + zlog_info( + "%s: NO backup path found for root %pI4 and destination %pI4 for %s, aborting ...", + __func__, &p_space->root->id, &q_space->root->id, + res_buf); + return; + } + + /* 'Cut' the protected resource out of the new SPF tree */ + ospf_spf_remove_resource(q_space->root, q_space->vertex_list, + p_space->protected_resource); + + /* + * Generate the smallest possible label stack from the root of the P + * space to the root of the Q space. 
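+ * For adjacent P and Q spaces this is typically just one or two
+ * labels (Prefix- and/or Adj-SIDs); non-adjacent spaces are
+ * handled by stitching in an inner label stack, see above.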
+ */ + ospf_ti_lfa_generate_label_stack(area, p_space, q_space); + + if (q_space->label_stack) { + mpls_label2str(q_space->label_stack->num_labels, + q_space->label_stack->label, label_buf, + MPLS_LABEL_STRLEN, true); + zlog_info( + "%s: Generated label stack %s for root %pI4 and destination %pI4 for %s", + __func__, label_buf, &p_space->root->id, + &q_space->root->id, res_buf); + } else { + zlog_info( + "%s: NO label stack generated for root %pI4 and destination %pI4 for %s", + __func__, &p_space->root->id, &q_space->root->id, + res_buf); + } + + /* We are finished, store the new Q space in the P space struct */ + q_spaces_add(p_space->q_spaces, q_space); + + /* Recursively generate Q spaces for all children */ + if (recursive) { + for (ALL_LIST_ELEMENTS_RO(dest->children, node, child)) + ospf_ti_lfa_generate_q_spaces(area, p_space, child, + recursive, pc_path); + } +} + +static void ospf_ti_lfa_generate_post_convergence_spf(struct ospf_area *area, + struct p_space *p_space) +{ + struct route_table *new_table, *new_rtrs; + + new_table = route_table_init(); + new_rtrs = route_table_init(); + + area->spf_protected_resource = p_space->protected_resource; + + /* + * The 'post convergence' SPF tree is generated here + * dry run true, root node false + * + * So how does this work? During the SPF calculation the algorithm + * checks if a link belongs to a protected resource and then just + * ignores it. + * This is actually _NOT_ a good way to calculate the post + * convergence SPF tree. The preferred way would be to delete the + * relevant links (and nodes) from a copy of the LSDB and then just run + * the SPF algorithm on that as usual. + * However, removing links from router LSAs appears to be its own + * endeavour (because LSAs are stored as a 'raw' stream), so we go with + * this rather hacky way for now. 
+ */ + ospf_spf_calculate(area, area->router_lsa_self, new_table, new_rtrs, + true, false); + + p_space->pc_spf = area->spf; + p_space->pc_vertex_list = area->spf_vertex_list; + + area->spf_protected_resource = NULL; +} + +static void +ospf_ti_lfa_generate_p_space(struct ospf_area *area, struct vertex *child, + struct protected_resource *protected_resource, + bool recursive, struct list *pc_path) +{ + struct vertex *spf_orig; + struct list *vertex_list, *vertex_list_orig; + struct p_space *p_space; + + p_space = XCALLOC(MTYPE_OSPF_P_SPACE, sizeof(struct p_space)); + vertex_list = list_new(); + + /* The P-space will get its own SPF tree, so copy the old one */ + ospf_spf_copy(area->spf, vertex_list); + p_space->root = listnode_head(vertex_list); + p_space->vertex_list = vertex_list; + p_space->protected_resource = protected_resource; + + /* Initialize the Q spaces for this P space and protected resource */ + p_space->q_spaces = + XCALLOC(MTYPE_OSPF_Q_SPACE, sizeof(struct q_spaces_head)); + q_spaces_init(p_space->q_spaces); + + /* 'Cut' the protected resource out of the new SPF tree */ + ospf_spf_remove_resource(p_space->root, p_space->vertex_list, + p_space->protected_resource); + + /* + * Since we are going to calculate more SPF trees for Q spaces, keep the + * 'original' one here temporarily + */ + spf_orig = area->spf; + vertex_list_orig = area->spf_vertex_list; + + /* Generate the post convergence SPF as a blueprint for backup paths */ + ospf_ti_lfa_generate_post_convergence_spf(area, p_space); + + /* Generate the relevant Q spaces for this particular P space */ + ospf_ti_lfa_generate_q_spaces(area, p_space, child, recursive, pc_path); + + /* Put the 'original' SPF tree back in place */ + area->spf = spf_orig; + area->spf_vertex_list = vertex_list_orig; + + /* We are finished, store the new P space */ + p_spaces_add(area->p_spaces, p_space); +} + +void ospf_ti_lfa_generate_p_spaces(struct ospf_area *area, + enum protection_type protection_type) +{ + struct listnode *node, *inner_node; + struct vertex *root, *child; + struct vertex_parent *vertex_parent; + uint8_t *p, *lim; + struct router_lsa_link *l = NULL; + struct prefix stub_prefix, child_prefix; + struct protected_resource *protected_resource; + + area->p_spaces = + XCALLOC(MTYPE_OSPF_P_SPACE, sizeof(struct p_spaces_head)); + p_spaces_init(area->p_spaces); + + root = area->spf; + + /* Root or its router LSA was not created yet? */ + if (!root || !root->lsa) + return; + + stub_prefix.family = AF_INET; + child_prefix.family = AF_INET; + child_prefix.prefixlen = IPV4_MAX_PREFIXLEN; + + p = ((uint8_t *)root->lsa) + OSPF_LSA_HEADER_SIZE + 4; + lim = ((uint8_t *)root->lsa) + ntohs(root->lsa->length); + + zlog_info("%s: Generating P spaces for area %pI4", __func__, + &area->area_id); + + /* + * Iterate over all stub networks which target other OSPF neighbors. + * Check the nexthop of the child vertex if a stub network is relevant. 
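+ * For illustration (hypothetical addresses): a stub for
+ * 10.0.1.0/24 is relevant if some child router is reached via a
+ * nexthop inside 10.0.1.0/24; the link described by that stub is
+ * then taken as the protected resource.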
+ */ + while (p < lim) { + l = (struct router_lsa_link *)p; + p += (OSPF_ROUTER_LSA_LINK_SIZE + + (l->m[0].tos_count * OSPF_ROUTER_LSA_TOS_SIZE)); + + /* First comes node protection */ + if (protection_type == OSPF_TI_LFA_NODE_PROTECTION) { + if (l->m[0].type == LSA_LINK_TYPE_POINTOPOINT) { + protected_resource = XCALLOC( + MTYPE_OSPF_P_SPACE, + sizeof(struct protected_resource)); + protected_resource->type = protection_type; + protected_resource->router_id = l->link_id; + child = ospf_spf_vertex_find( + protected_resource->router_id, + root->children); + if (child) + ospf_ti_lfa_generate_p_space( + area, child, protected_resource, + true, NULL); + } + + continue; + } + + /* The rest is about link protection */ + if (protection_type != OSPF_TI_LFA_LINK_PROTECTION) + continue; + + if (l->m[0].type != LSA_LINK_TYPE_STUB) + continue; + + stub_prefix.prefixlen = ip_masklen(l->link_data); + stub_prefix.u.prefix4 = l->link_id; + + for (ALL_LIST_ELEMENTS_RO(root->children, node, child)) { + + if (child->type != OSPF_VERTEX_ROUTER) + continue; + + for (ALL_LIST_ELEMENTS_RO(child->parents, inner_node, + vertex_parent)) { + + child_prefix.u.prefix4 = + vertex_parent->nexthop->router; + + /* + * If there's a link for that stub network then + * we will protect it. Hence generate a P space + * for that particular link including the + * Q spaces so we can later on generate a + * backup path for the link. + */ + if (prefix_match(&stub_prefix, &child_prefix)) { + zlog_info( + "%s: Generating P space for %pI4", + __func__, &l->link_id); + + protected_resource = XCALLOC( + MTYPE_OSPF_P_SPACE, + sizeof(struct + protected_resource)); + protected_resource->type = + protection_type; + protected_resource->link = l; + + ospf_ti_lfa_generate_p_space( + area, child, protected_resource, + true, NULL); + } + } + } + } +} + +static struct p_space *ospf_ti_lfa_get_p_space_by_path(struct ospf_area *area, + struct ospf_path *path) +{ + struct p_space *p_space; + struct router_lsa_link *link; + struct vertex *child; + int type; + + frr_each(p_spaces, area->p_spaces, p_space) { + type = p_space->protected_resource->type; + + if (type == OSPF_TI_LFA_LINK_PROTECTION) { + link = p_space->protected_resource->link; + if ((path->nexthop.s_addr & link->link_data.s_addr) + == (link->link_id.s_addr & link->link_data.s_addr)) + return p_space; + } + + if (type == OSPF_TI_LFA_NODE_PROTECTION) { + child = ospf_spf_vertex_by_nexthop(area->spf, + &path->nexthop); + if (child + && p_space->protected_resource->router_id.s_addr + == child->id.s_addr) + return p_space; + } + } + + return NULL; +} + +void ospf_ti_lfa_insert_backup_paths(struct ospf_area *area, + struct route_table *new_table) +{ + struct route_node *rn; + struct ospf_route *or; + struct ospf_path *path; + struct listnode *node; + struct p_space *p_space; + struct q_space *q_space, q_space_search; + struct vertex root_search; + char label_buf[MPLS_LABEL_STRLEN]; + + for (rn = route_top(new_table); rn; rn = route_next(rn)) { + or = rn->info; + if (or == NULL) + continue; + + /* Insert a backup path for all OSPF paths */ + for (ALL_LIST_ELEMENTS_RO(or->paths, node, path)) { + + if (path->adv_router.s_addr == INADDR_ANY + || path->nexthop.s_addr == INADDR_ANY) + continue; + + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: attempting to insert backup path for prefix %pFX, router id %pI4 and nexthop %pI4.", + __func__, &rn->p, &path->adv_router, + &path->nexthop); + + p_space = ospf_ti_lfa_get_p_space_by_path(area, path); + if (!p_space) { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( 
+ "%s: P space not found for router id %pI4 and nexthop %pI4.", + __func__, &path->adv_router, + &path->nexthop); + continue; + } + + root_search.id = path->adv_router; + q_space_search.root = &root_search; + q_space = q_spaces_find(p_space->q_spaces, + &q_space_search); + if (!q_space) { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: Q space not found for advertising router %pI4.", + __func__, &path->adv_router); + continue; + } + + /* If there's a backup label stack, insert it*/ + if (q_space->label_stack) { + /* Init the backup path data in path */ + path->srni.backup_label_stack = XCALLOC( + MTYPE_OSPF_PATH, + sizeof(struct mpls_label_stack) + + sizeof(mpls_label_t) + * q_space->label_stack + ->num_labels); + + /* Copy over the label stack */ + path->srni.backup_label_stack->num_labels = + q_space->label_stack->num_labels; + memcpy(path->srni.backup_label_stack->label, + q_space->label_stack->label, + sizeof(mpls_label_t) + * q_space->label_stack + ->num_labels); + + /* Set the backup nexthop too */ + path->srni.backup_nexthop = q_space->nexthop; + } + + if (path->srni.backup_label_stack) { + mpls_label2str( + path->srni.backup_label_stack + ->num_labels, + path->srni.backup_label_stack->label, + label_buf, MPLS_LABEL_STRLEN, true); + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: inserted backup path %s for prefix %pFX, router id %pI4 and nexthop %pI4.", + __func__, label_buf, &rn->p, + &path->adv_router, + &path->nexthop); + } else { + if (IS_DEBUG_OSPF_TI_LFA) + zlog_debug( + "%s: inserted NO backup path for prefix %pFX, router id %pI4 and nexthop %pI4.", + __func__, &rn->p, + &path->adv_router, + &path->nexthop); + } + } + } +} + +void ospf_ti_lfa_free_p_spaces(struct ospf_area *area) +{ + struct p_space *p_space; + struct q_space *q_space; + + while ((p_space = p_spaces_pop(area->p_spaces))) { + while ((q_space = q_spaces_pop(p_space->q_spaces))) { + ospf_spf_cleanup(q_space->root, q_space->vertex_list); + + if (q_space->pc_path) + list_delete(&q_space->pc_path); + + XFREE(MTYPE_OSPF_Q_SPACE, q_space->p_node_info); + XFREE(MTYPE_OSPF_Q_SPACE, q_space->q_node_info); + XFREE(MTYPE_OSPF_Q_SPACE, q_space->label_stack); + XFREE(MTYPE_OSPF_Q_SPACE, q_space); + } + + ospf_spf_cleanup(p_space->root, p_space->vertex_list); + ospf_spf_cleanup(p_space->pc_spf, p_space->pc_vertex_list); + XFREE(MTYPE_OSPF_P_SPACE, p_space->protected_resource); + + q_spaces_fini(p_space->q_spaces); + XFREE(MTYPE_OSPF_Q_SPACE, p_space->q_spaces); + } + + p_spaces_fini(area->p_spaces); + XFREE(MTYPE_OSPF_P_SPACE, area->p_spaces); +} + +void ospf_ti_lfa_compute(struct ospf_area *area, struct route_table *new_table, + enum protection_type protection_type) +{ + /* + * Generate P spaces per protected link/node and their respective Q + * spaces, generate backup paths (MPLS label stacks) by finding P/Q + * nodes. + */ + ospf_ti_lfa_generate_p_spaces(area, protection_type); + + /* Insert the generated backup paths into the routing table. */ + ospf_ti_lfa_insert_backup_paths(area, new_table); + + /* Cleanup P spaces and related datastructures including Q spaces. */ + ospf_ti_lfa_free_p_spaces(area); +} diff --git a/ospfd/ospf_ti_lfa.h b/ospfd/ospf_ti_lfa.h new file mode 100644 index 0000000000..bc8f19b98f --- /dev/null +++ b/ospfd/ospf_ti_lfa.h @@ -0,0 +1,41 @@ +/* + * OSPF calculation. + * Copyright (C) 2020 NetDEF, Inc. + * Sascha Kattelmann + * + * This file is part of FRR. 
+ * + * FRR is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * FRR is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _OSPF_TI_LFA_H +#define _OSPF_TI_LFA_H + +#define PROTECTED_RESOURCE_STRLEN 100 + +extern void ospf_ti_lfa_compute(struct ospf_area *area, + struct route_table *new_table, + enum protection_type protection_type); + +/* unit testing */ +extern void ospf_ti_lfa_generate_p_spaces(struct ospf_area *area, + enum protection_type protection_type); +extern void ospf_ti_lfa_insert_backup_paths(struct ospf_area *area, + struct route_table *new_table); +extern void ospf_ti_lfa_free_p_spaces(struct ospf_area *area); +void ospf_print_protected_resource( + struct protected_resource *protected_resource, char *buf); + +#endif /* _OSPF_TI_LFA_H */ diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 0408462110..4132452069 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -280,7 +280,7 @@ DEFPY (ospf_router_id, for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) if (area->full_nbrs) { vty_out(vty, - "For this router-id change to take effect, save config and restart ospfd\n"); + "For this router-id change to take effect, use “clear ip ospf process” command\n"); return CMD_SUCCESS; } @@ -313,7 +313,7 @@ DEFUN_HIDDEN (ospf_router_id_old, for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) if (area->full_nbrs) { vty_out(vty, - "For this router-id change to take effect, save config and restart ospfd\n"); + "For this router-id change to take effect, use “clear ip ospf process” command\n"); return CMD_SUCCESS; } @@ -346,7 +346,7 @@ DEFPY (no_ospf_router_id, for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) if (area->full_nbrs) { vty_out(vty, - "For this router-id change to take effect, save config and restart ospfd\n"); + "For this router-id change to take effect, use “clear ip ospf process” command\n"); return CMD_SUCCESS; } @@ -2602,6 +2602,43 @@ ALIAS(no_ospf_write_multiplier, no_write_multiplier_cmd, "Write multiplier\n" "Maximum number of interface serviced per write\n") +DEFUN(ospf_ti_lfa, ospf_ti_lfa_cmd, "fast-reroute ti-lfa [node-protection]", + "Fast Reroute for MPLS and IP resilience\n" + "Topology Independent LFA (Loop-Free Alternate)\n" + "TI-LFA node protection (default is link protection)\n") +{ + VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf); + + ospf->ti_lfa_enabled = true; + + if (argc == 3) + ospf->ti_lfa_protection_type = OSPF_TI_LFA_NODE_PROTECTION; + else + ospf->ti_lfa_protection_type = OSPF_TI_LFA_LINK_PROTECTION; + + ospf_spf_calculate_schedule(ospf, SPF_FLAG_CONFIG_CHANGE); + + return CMD_SUCCESS; +} + +DEFUN(no_ospf_ti_lfa, no_ospf_ti_lfa_cmd, + "no fast-reroute ti-lfa [node-protection]", + NO_STR + "Fast Reroute for MPLS and IP resilience\n" + "Topology Independent LFA (Loop-Free Alternate)\n" + "TI-LFA node protection (default is link protection)\n") +{ + VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf); + + ospf->ti_lfa_enabled = false; + + ospf->ti_lfa_protection_type 
= OSPF_TI_LFA_UNDEFINED_PROTECTION; + + ospf_spf_calculate_schedule(ospf, SPF_FLAG_CONFIG_CHANGE); + + return CMD_SUCCESS; +} + static const char *const ospf_abr_type_descr_str[] = { "Unknown", "Standard (RFC2328)", "Alternative IBM", "Alternative Cisco", "Alternative Shortcut" @@ -3365,6 +3402,54 @@ DEFUN (show_ip_ospf_instance, return ret; } +static void ospf_interface_auth_show(struct vty *vty, struct ospf_interface *oi, + json_object *json, bool use_json) +{ + int auth_type; + + auth_type = OSPF_IF_PARAM(oi, auth_type); + + switch (auth_type) { + case OSPF_AUTH_NULL: + if (use_json) + json_object_string_add(json, "authentication", + "authenticationNone"); + else + vty_out(vty, " Authentication NULL is enabled\n"); + break; + case OSPF_AUTH_SIMPLE: { + if (use_json) + json_object_string_add(json, "authentication", + "authenticationSimplePassword"); + else + vty_out(vty, + " Simple password authentication enabled\n"); + break; + } + case OSPF_AUTH_CRYPTOGRAPHIC: { + struct crypt_key *ckey; + + if (list_isempty(OSPF_IF_PARAM(oi, auth_crypt))) + return; + + ckey = listgetdata(listtail(OSPF_IF_PARAM(oi, auth_crypt))); + if (ckey) { + if (use_json) { + json_object_string_add(json, "authentication", + "authenticationMessageDigest"); + } else { + vty_out(vty, + " Cryptographic authentication enabled\n"); + vty_out(vty, " Algorithm:MD5\n"); + } + } + break; + } + default: + break; + } +} + static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf, struct interface *ifp, json_object *json_interface_sub, @@ -3686,6 +3771,9 @@ static void show_ip_ospf_interface_sub(struct vty *vty, struct ospf *ospf, ospf_nbr_count(oi, 0), ospf_nbr_count(oi, NSM_Full)); ospf_bfd_interface_show(vty, ifp, json_interface_sub, use_json); + + /* OSPF Authentication information */ + ospf_interface_auth_show(vty, oi, json_interface_sub, use_json); } } @@ -6318,31 +6406,7 @@ static int show_as_external_lsa_detail(struct vty *vty, struct ospf_lsa *lsa, return 0; } -#if 0 -static int -show_as_external_lsa_stdvty (struct ospf_lsa *lsa) -{ - struct as_external_lsa *al = (struct as_external_lsa *) lsa->data; - - /* show_ip_ospf_database_header (vty, lsa); */ - - zlog_debug( " Network Mask: /%d%s", - ip_masklen (al->mask), "\n"); - zlog_debug( " Metric Type: %s%s", - IS_EXTERNAL_METRIC (al->e[0].tos) ? - "2 (Larger than any link state path)" : "1", "\n"); - zlog_debug( " TOS: 0%s", "\n"); - zlog_debug( " Metric: %d%s", - GET_METRIC (al->e[0].metric), "\n"); - zlog_debug( " Forward Address: %pI4%s", - &al->e[0].fwd_addr, "\n"); - zlog_debug( " External Route Tag: %"ROUTE_TAG_PRI"%s%s", - (route_tag_t)ntohl (al->e[0].route_tag), "\n", "\n"); - - return 0; -} -#endif /* Show AS-NSSA-LSA detail information. */ static int show_as_nssa_lsa_detail(struct vty *vty, struct ospf_lsa *lsa, json_object *json) @@ -6630,8 +6694,8 @@ static void show_lsa_detail_adv_router(struct vty *vty, struct ospf *ospf, json_lstype); } -static void show_ip_ospf_database_summary(struct vty *vty, struct ospf *ospf, - int self, json_object *json) +void show_ip_ospf_database_summary(struct vty *vty, struct ospf *ospf, int self, + json_object *json) { struct ospf_lsa *lsa; struct route_node *rn; @@ -7316,8 +7380,8 @@ DEFUN (show_ip_ospf_instance_database_type_adv_router, continue; ospf_output = true; ret = show_ip_ospf_database_type_adv_router_common( - vty, ospf, idx ? 
1 : 0, argc, argv, - use_vrf, json, uj); + vty, ospf, 2, argc, argv, use_vrf, json, + uj); } if (!ospf_output) vty_out(vty, "%% OSPF instance not found\n"); @@ -7329,8 +7393,7 @@ DEFUN (show_ip_ospf_instance_database_type_adv_router, } ret = show_ip_ospf_database_type_adv_router_common( - vty, ospf, idx ? 1 : 0, argc, argv, use_vrf, - json, uj); + vty, ospf, 2, argc, argv, use_vrf, json, uj); } } else { /* Display default ospf (instance 0) info */ @@ -8871,6 +8934,7 @@ DEFUN (no_ip_ospf_area, struct ospf_if_params *params; unsigned short instance = 0; struct in_addr addr; + struct in_addr area_id; if (argv_find(argv, argc, "(1-65535)", &idx)) instance = strtol(argv[idx]->arg, NULL, 10); @@ -8898,6 +8962,7 @@ DEFUN (no_ip_ospf_area, } else params = IF_DEF_PARAMS(ifp); + area_id = params->if_area; if (!OSPF_IF_PARAM_CONFIGURED(params, if_area)) { vty_out(vty, "Can't find specified interface area configuration.\n"); @@ -8913,6 +8978,7 @@ DEFUN (no_ip_ospf_area, if (ospf) { ospf_interface_area_unset(ospf, ifp); ospf->if_ospf_cli_count--; + ospf_area_check_free(ospf, area_id); } return CMD_SUCCESS; @@ -9377,78 +9443,6 @@ DEFUN (ospf_distance_ospf, return CMD_SUCCESS; } -#if 0 -DEFUN (ospf_distance_source, - ospf_distance_source_cmd, - "distance (1-255) A.B.C.D/M", - "Administrative distance\n" - "Distance value\n" - "IP source prefix\n") -{ - VTY_DECLVAR_CONTEXT(ospf, ospf); - int idx_number = 1; - int idx_ipv4_prefixlen = 2; - - ospf_distance_set (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, NULL); - - return CMD_SUCCESS; -} - -DEFUN (no_ospf_distance_source, - no_ospf_distance_source_cmd, - "no distance (1-255) A.B.C.D/M", - NO_STR - "Administrative distance\n" - "Distance value\n" - "IP source prefix\n") -{ - VTY_DECLVAR_CONTEXT(ospf, ospf); - int idx_number = 2; - int idx_ipv4_prefixlen = 3; - - ospf_distance_unset (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, NULL); - - return CMD_SUCCESS; -} - -DEFUN (ospf_distance_source_access_list, - ospf_distance_source_access_list_cmd, - "distance (1-255) A.B.C.D/M WORD", - "Administrative distance\n" - "Distance value\n" - "IP source prefix\n" - "Access list name\n") -{ - VTY_DECLVAR_CONTEXT(ospf, ospf); - int idx_number = 1; - int idx_ipv4_prefixlen = 2; - int idx_word = 3; - - ospf_distance_set (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, argv[idx_word]->arg); - - return CMD_SUCCESS; -} - -DEFUN (no_ospf_distance_source_access_list, - no_ospf_distance_source_access_list_cmd, - "no distance (1-255) A.B.C.D/M WORD", - NO_STR - "Administrative distance\n" - "Distance value\n" - "IP source prefix\n" - "Access list name\n") -{ - VTY_DECLVAR_CONTEXT(ospf, ospf); - int idx_number = 2; - int idx_ipv4_prefixlen = 3; - int idx_word = 4; - - ospf_distance_unset (vty, ospf, argv[idx_number]->arg, argv[idx_ipv4_prefixlen]->arg, argv[idx_word]->arg); - - return CMD_SUCCESS; -} -#endif - DEFUN (ip_ospf_mtu_ignore, ip_ospf_mtu_ignore_addr_cmd, "ip ospf mtu-ignore [A.B.C.D]", @@ -9928,10 +9922,10 @@ DEFPY(no_ospf_gr_helper_planned_only, return CMD_SUCCESS; } -static int ospf_print_vty_helper_dis_rtr_walkcb(struct hash_bucket *backet, +static int ospf_print_vty_helper_dis_rtr_walkcb(struct hash_bucket *bucket, void *arg) { - struct advRtr *rtr = backet->data; + struct advRtr *rtr = bucket->data; struct vty *vty = (struct vty *)arg; static unsigned int count; @@ -11177,6 +11171,70 @@ DEFUN (show_ip_ospf_vrfs, return CMD_SUCCESS; } +DEFPY (clear_ip_ospf_neighbor, + clear_ip_ospf_neighbor_cmd, + "clear ip 
ospf [(1-65535)]$instance neighbor [A.B.C.D$nbr_id]", + CLEAR_STR + IP_STR + "OSPF information\n" + "Instance ID\n" + "Reset OSPF Neighbor\n" + "Neighbor ID\n") +{ + struct listnode *node; + struct ospf *ospf = NULL; + + /* If user does not specify the arguments, + * instance = 0 and nbr_id = 0.0.0.0 + */ + if (instance != 0) { + /* This means clear only the particular ospf process */ + ospf = ospf_lookup_instance(instance); + if (ospf == NULL) + return CMD_NOT_MY_INSTANCE; + } + + /* Clear all the ospf processes */ + for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) { + if (!ospf->oi_running) + continue; + + ospf_neighbor_reset(ospf, nbr_id, nbr_id_str); + } + + return CMD_SUCCESS; +} + +DEFPY (clear_ip_ospf_process, + clear_ip_ospf_process_cmd, + "clear ip ospf [(1-65535)]$instance process", + CLEAR_STR + IP_STR + "OSPF information\n" + "Instance ID\n" + "Reset OSPF Process\n") +{ + struct listnode *node; + struct ospf *ospf = NULL; + + /* Check if instance is not passed as an argument */ + if (instance != 0) { + /* This means clear only the particular ospf process */ + ospf = ospf_lookup_instance(instance); + if (ospf == NULL) + return CMD_NOT_MY_INSTANCE; + } + + /* Clear all the ospf processes */ + for (ALL_LIST_ELEMENTS_RO(om->ospf, node, ospf)) { + if (!ospf->oi_running) + continue; + + ospf_process_reset(ospf); + } + + return CMD_SUCCESS; +} static const char *const ospf_abr_type_str[] = { "unknown", "standard", "ibm", "cisco", "shortcut" @@ -11185,10 +11243,10 @@ static const char *const ospf_abr_type_str[] = { static const char *const ospf_shortcut_mode_str[] = { "default", "enable", "disable" }; -static int ospf_vty_external_rt_walkcb(struct hash_bucket *backet, +static int ospf_vty_external_rt_walkcb(struct hash_bucket *bucket, void *arg) { - struct external_info *ei = backet->data; + struct external_info *ei = bucket->data; struct vty *vty = (struct vty *)arg; static unsigned int count; @@ -11204,10 +11262,10 @@ static int ospf_vty_external_rt_walkcb(struct hash_bucket *backet, return HASHWALK_CONTINUE; } -static int ospf_json_external_rt_walkcb(struct hash_bucket *backet, +static int ospf_json_external_rt_walkcb(struct hash_bucket *bucket, void *arg) { - struct external_info *ei = backet->data; + struct external_info *ei = bucket->data; struct json_object *json = (struct json_object *)arg; char buf[PREFIX2STR_BUFFER]; char exnalbuf[20]; @@ -11976,10 +12034,10 @@ static int config_write_ospf_redistribute(struct vty *vty, struct ospf *ospf) return 0; } -static int ospf_cfg_write_helper_dis_rtr_walkcb(struct hash_bucket *backet, +static int ospf_cfg_write_helper_dis_rtr_walkcb(struct hash_bucket *bucket, void *arg) { - struct advRtr *rtr = backet->data; + struct advRtr *rtr = bucket->data; struct vty *vty = (struct vty *)arg; vty_out(vty, " graceful-restart helper-only %pI4\n", @@ -12247,6 +12305,14 @@ static int ospf_config_write_one(struct vty *vty, struct ospf *ospf) oi->ifp->name, &oi->address->u.prefix4); } + /* TI-LFA print. */ + if (ospf->ti_lfa_enabled) { + if (ospf->ti_lfa_protection_type == OSPF_TI_LFA_NODE_PROTECTION) + vty_out(vty, " fast-reroute ti-lfa node-protection\n"); + else + vty_out(vty, " fast-reroute ti-lfa\n"); + } + /* Network area print. 
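/*
 * Illustrative sketch, not part of the patch: "clear ip ospf process" above
 * first validates an explicitly given instance number and then walks every
 * running process, resetting each one.  Below is a reduced, array-based
 * version of that control flow; all names are hypothetical and
 * example_process_reset() merely stands in for ospf_process_reset().
 */
#include <stdbool.h>
#include <stddef.h>

struct example_ospf {
	unsigned short instance;
	bool running;
	unsigned int generation;
};

/* Stand-in for ospf_process_reset(): flush state, bump a generation count. */
static void example_process_reset(struct example_ospf *p)
{
	p->generation++;
}

static int example_clear_process(struct example_ospf *procs, size_t n,
				 unsigned short instance)
{
	bool found = (instance == 0); /* 0 means "no instance given" */
	size_t i;

	for (i = 0; i < n; i++)
		if (procs[i].instance == instance)
			found = true;

	if (!found)
		return -1; /* mirrors the CMD_NOT_MY_INSTANCE error path */

	for (i = 0; i < n; i++)
		if (procs[i].running)
			example_process_reset(&procs[i]);

	return 0;
}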
*/ config_write_network_area(vty, ospf); @@ -12491,13 +12557,6 @@ static void ospf_vty_zebra_init(void) &no_ospf_external_route_aggregation_no_adrvertise_cmd); install_element(OSPF_NODE, &ospf_route_aggregation_timer_cmd); install_element(OSPF_NODE, &no_ospf_route_aggregation_timer_cmd); - -#if 0 - install_element (OSPF_NODE, &ospf_distance_source_cmd); - install_element (OSPF_NODE, &no_ospf_distance_source_cmd); - install_element (OSPF_NODE, &ospf_distance_source_access_list_cmd); - install_element (OSPF_NODE, &no_ospf_distance_source_access_list_cmd); -#endif /* 0 */ } static int ospf_config_write(struct vty *vty); @@ -12573,6 +12632,8 @@ DEFUN (clear_ip_ospf_interface, void ospf_vty_clear_init(void) { install_element(ENABLE_NODE, &clear_ip_ospf_interface_cmd); + install_element(ENABLE_NODE, &clear_ip_ospf_process_cmd); + install_element(ENABLE_NODE, &clear_ip_ospf_neighbor_cmd); } @@ -12709,6 +12770,10 @@ void ospf_vty_init(void) install_element(OSPF_NODE, &ospf_proactive_arp_cmd); install_element(OSPF_NODE, &no_ospf_proactive_arp_cmd); + /* TI-LFA commands */ + install_element(OSPF_NODE, &ospf_ti_lfa_cmd); + install_element(OSPF_NODE, &no_ospf_ti_lfa_cmd); + /* Init interface related vty commands. */ ospf_vty_if_init(); diff --git a/ospfd/ospf_vty.h b/ospfd/ospf_vty.h index 79aabe7b4e..bf9c971710 100644 --- a/ospfd/ospf_vty.h +++ b/ospfd/ospf_vty.h @@ -54,4 +54,8 @@ extern void ospf_vty_show_init(void); extern void ospf_vty_clear_init(void); extern int str2area_id(const char *, struct in_addr *, int *); +/* unit tests */ +void show_ip_ospf_database_summary(struct vty *vty, struct ospf *ospf, int self, + json_object *json); + #endif /* _QUAGGA_OSPF_VTY_H */ diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index 2d02619ae3..aaab274570 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -198,15 +198,70 @@ static int ospf_interface_vrf_update(ZAPI_CALLBACK_ARGS) return 0; } +/* Nexthop, ifindex, distance and metric information. 
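/*
 * Illustrative sketch, not part of the patch: the helper that follows picks
 * the zebra nexthop type from what is known about the OSPF path -- gateway
 * plus interface, gateway only, or interface only (the real code additionally
 * allows unnumbered interfaces when built with netlink).  The enum and struct
 * below are hypothetical, reduced versions of the zapi types.
 */
#include <stdint.h>
#include <netinet/in.h>

enum example_nh_type {
	EX_NH_IPV4_IFINDEX, /* gateway and outgoing interface */
	EX_NH_IPV4,         /* gateway only */
	EX_NH_IFINDEX,      /* interface only */
};

struct example_path {
	struct in_addr nexthop;
	uint32_t ifindex;
};

static enum example_nh_type example_pick_nh_type(const struct example_path *p)
{
	if (p->nexthop.s_addr != INADDR_ANY && p->ifindex != 0)
		return EX_NH_IPV4_IFINDEX;
	if (p->nexthop.s_addr != INADDR_ANY)
		return EX_NH_IPV4;
	return EX_NH_IFINDEX;
}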
*/ +static void ospf_zebra_add_nexthop(struct ospf *ospf, struct ospf_path *path, + struct zapi_route *api) +{ + struct zapi_nexthop *api_nh; + struct zapi_nexthop *api_nh_backup; + + /* TI-LFA backup path label stack comes first, if present */ + if (path->srni.backup_label_stack) { + api_nh_backup = &api->backup_nexthops[api->backup_nexthop_num]; + api_nh_backup->vrf_id = ospf->vrf_id; + + api_nh_backup->type = NEXTHOP_TYPE_IPV4; + api_nh_backup->gate.ipv4 = path->srni.backup_nexthop; + + api_nh_backup->label_num = + path->srni.backup_label_stack->num_labels; + memcpy(api_nh_backup->labels, + path->srni.backup_label_stack->label, + sizeof(mpls_label_t) * api_nh_backup->label_num); + + api->backup_nexthop_num++; + } + + /* And here comes the primary nexthop */ + api_nh = &api->nexthops[api->nexthop_num]; +#ifdef HAVE_NETLINK + if (path->unnumbered + || (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0)) { +#else /* HAVE_NETLINK */ + if (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0) { +#endif /* HAVE_NETLINK */ + api_nh->gate.ipv4 = path->nexthop; + api_nh->ifindex = path->ifindex; + api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX; + } else if (path->nexthop.s_addr != INADDR_ANY) { + api_nh->gate.ipv4 = path->nexthop; + api_nh->type = NEXTHOP_TYPE_IPV4; + } else { + api_nh->ifindex = path->ifindex; + api_nh->type = NEXTHOP_TYPE_IFINDEX; + } + api_nh->vrf_id = ospf->vrf_id; + + /* Set TI-LFA backup nexthop info if present */ + if (path->srni.backup_label_stack) { + SET_FLAG(api->message, ZAPI_MESSAGE_BACKUP_NEXTHOPS); + SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP); + + /* Just care about a single TI-LFA backup path for now */ + api_nh->backup_num = 1; + api_nh->backup_idx[0] = api->backup_nexthop_num - 1; + } + + api->nexthop_num++; +} + void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p, struct ospf_route * or) { struct zapi_route api; - struct zapi_nexthop *api_nh; uint8_t distance; struct ospf_path *path; struct listnode *node; - int count = 0; memset(&api, 0, sizeof(api)); api.vrf_id = ospf->vrf_id; @@ -241,29 +296,11 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p, api.distance = distance; } - /* Nexthop, ifindex, distance and metric information. */ for (ALL_LIST_ELEMENTS_RO(or->paths, node, path)) { - if (count >= MULTIPATH_NUM) + if (api.nexthop_num >= MULTIPATH_NUM) break; - api_nh = &api.nexthops[count]; -#ifdef HAVE_NETLINK - if (path->unnumbered || (path->nexthop.s_addr != INADDR_ANY - && path->ifindex != 0)) { -#else /* HAVE_NETLINK */ - if (path->nexthop.s_addr != INADDR_ANY && path->ifindex != 0) { -#endif /* HAVE_NETLINK */ - api_nh->gate.ipv4 = path->nexthop; - api_nh->ifindex = path->ifindex; - api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX; - } else if (path->nexthop.s_addr != INADDR_ANY) { - api_nh->gate.ipv4 = path->nexthop; - api_nh->type = NEXTHOP_TYPE_IPV4; - } else { - api_nh->ifindex = path->ifindex; - api_nh->type = NEXTHOP_TYPE_IFINDEX; - } - api_nh->vrf_id = ospf->vrf_id; - count++; + + ospf_zebra_add_nexthop(ospf, path, &api); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) { struct interface *ifp; @@ -276,7 +313,6 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p, ifp ? 
ifp->name : " "); } } - api.nexthop_num = count; zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); } @@ -501,12 +537,10 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp) { struct zapi_labels zl; struct zapi_nexthop *znh; + struct zapi_nexthop *znh_backup; struct listnode *node; struct ospf_path *path; - osr_debug("SR (%s): Update Labels %u for Prefix %pFX", __func__, - srp->label_in, (struct prefix *)&srp->prefv4); - /* Prepare message. */ memset(&zl, 0, sizeof(zl)); zl.type = ZEBRA_LSP_OSPF_SR; @@ -520,6 +554,11 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp) znh->ifindex = srp->nhlfe.ifindex; znh->label_num = 1; znh->labels[0] = srp->nhlfe.label_out; + + osr_debug("SR (%s): Configure Prefix %pFX with labels %u/%u", + __func__, (struct prefix *)&srp->prefv4, + srp->label_in, srp->nhlfe.label_out); + break; case PREF_SID: @@ -535,6 +574,10 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp) if (srp->route == NULL) { return; } + + osr_debug("SR (%s): Configure Prefix %pFX with", + __func__, (struct prefix *)&srp->prefv4); + for (ALL_LIST_ELEMENTS_RO(srp->route->paths, node, path)) { if (path->srni.label_out == MPLS_INVALID_LABEL) continue; @@ -542,12 +585,56 @@ void ospf_zebra_update_prefix_sid(const struct sr_prefix *srp) if (zl.nexthop_num >= MULTIPATH_NUM) break; + /* + * TI-LFA backup path label stack comes first, if + * present. + */ + if (path->srni.backup_label_stack) { + znh_backup = &zl.backup_nexthops + [zl.backup_nexthop_num++]; + znh_backup->type = NEXTHOP_TYPE_IPV4; + znh_backup->gate.ipv4 = + path->srni.backup_nexthop; + + memcpy(znh_backup->labels, + path->srni.backup_label_stack->label, + sizeof(mpls_label_t) + * path->srni.backup_label_stack + ->num_labels); + + znh_backup->label_num = + path->srni.backup_label_stack + ->num_labels; + if (path->srni.label_out + != MPLS_LABEL_IPV4_EXPLICIT_NULL + && path->srni.label_out + != MPLS_LABEL_IMPLICIT_NULL) + znh_backup->labels + [znh_backup->label_num++] = + path->srni.label_out; + } + znh = &zl.nexthops[zl.nexthop_num++]; znh->type = NEXTHOP_TYPE_IPV4_IFINDEX; znh->gate.ipv4 = path->nexthop; znh->ifindex = path->ifindex; znh->label_num = 1; znh->labels[0] = path->srni.label_out; + + osr_debug(" |- labels %u/%u", srp->label_in, + srp->nhlfe.label_out); + + /* Set TI-LFA backup nexthop info if present */ + if (path->srni.backup_label_stack) { + SET_FLAG(zl.message, ZAPI_LABELS_HAS_BACKUPS); + SET_FLAG(znh->flags, + ZAPI_NEXTHOP_FLAG_HAS_BACKUP); + + /* Just care about a single TI-LFA backup path + * for now */ + znh->backup_num = 1; + znh->backup_idx[0] = zl.backup_nexthop_num - 1; + } } break; default: diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c index 0adf8a7b41..04397d50a5 100644 --- a/ospfd/ospfd.c +++ b/ospfd/ospfd.c @@ -87,13 +87,39 @@ static void ospf_finish_final(struct ospf *); #define OSPF_EXTERNAL_LSA_ORIGINATE_DELAY 1 -void ospf_router_id_update(struct ospf *ospf) +int p_spaces_compare_func(const struct p_space *a, const struct p_space *b) +{ + if (a->protected_resource->type == OSPF_TI_LFA_LINK_PROTECTION + && b->protected_resource->type == OSPF_TI_LFA_LINK_PROTECTION) + return (a->protected_resource->link->link_id.s_addr + - b->protected_resource->link->link_id.s_addr); + + if (a->protected_resource->type == OSPF_TI_LFA_NODE_PROTECTION + && b->protected_resource->type == OSPF_TI_LFA_NODE_PROTECTION) + return (a->protected_resource->router_id.s_addr + - b->protected_resource->router_id.s_addr); + + /* This should not happen */ + return 0; +} + +int 
q_spaces_compare_func(const struct q_space *a, const struct q_space *b) +{ + return (a->root->id.s_addr - b->root->id.s_addr); +} + +DECLARE_RBTREE_UNIQ(p_spaces, struct p_space, p_spaces_item, + p_spaces_compare_func) + +void ospf_process_refresh_data(struct ospf *ospf, bool reset) { struct vrf *vrf = vrf_lookup_by_id(ospf->vrf_id); struct in_addr router_id, router_id_old; struct ospf_interface *oi; struct interface *ifp; - struct listnode *node; + struct listnode *node, *nnode; + struct ospf_area *area; + bool rid_change = false; if (!ospf->oi_running) { if (IS_DEBUG_OSPF_EVENT) @@ -126,8 +152,8 @@ void ospf_router_id_update(struct ospf *ospf) zlog_debug("Router-ID[OLD:%pI4]: Update to %pI4", &ospf->router_id, &router_id); - if (!IPV4_ADDR_SAME(&router_id_old, &router_id)) { - + rid_change = !(IPV4_ADDR_SAME(&router_id_old, &router_id)); + if (rid_change || (reset)) { for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi)) { /* Some nbrs are identified by router_id, these needs * to be rebuilt. Possible optimization would be to do @@ -149,16 +175,8 @@ void ospf_router_id_update(struct ospf *ospf) ospf_if_up(oi); } - /* Flush (inline) all external LSAs based on the OSPF_LSA_SELF - * flag */ - if (ospf->lsdb) { - struct route_node *rn; - struct ospf_lsa *lsa; - - LSDB_LOOP (EXTERNAL_LSDB(ospf), rn, lsa) - if (IS_LSA_SELF(lsa)) - ospf_lsa_flush_schedule(ospf, lsa); - } + /* Flush (inline) all the self originated LSAs */ + ospf_flush_self_originated_lsas_now(ospf); ospf->router_id = router_id; if (IS_DEBUG_OSPF_EVENT) @@ -183,24 +201,81 @@ void ospf_router_id_update(struct ospf *ospf) LSDB_LOOP (EXTERNAL_LSDB(ospf), rn, lsa) { /* AdvRouter and Router ID is the same. */ if (IPV4_ADDR_SAME(&lsa->data->adv_router, - &ospf->router_id)) { + &ospf->router_id) && rid_change) { SET_FLAG(lsa->flags, OSPF_LSA_SELF_CHECKED); SET_FLAG(lsa->flags, OSPF_LSA_SELF); ospf_lsa_flush_schedule(ospf, lsa); } + /* The above flush will send immediately + * So discard the LSA to originate new + */ + ospf_discard_from_db(ospf, ospf->lsdb, lsa); } + + LSDB_LOOP (OPAQUE_AS_LSDB(ospf), rn, lsa) + ospf_discard_from_db(ospf, ospf->lsdb, lsa); + + ospf_lsdb_delete_all(ospf->lsdb); } + /* Delete the LSDB */ + for (ALL_LIST_ELEMENTS(ospf->areas, node, nnode, area)) + ospf_area_lsdb_discard_delete(area); + /* update router-lsa's for each area */ ospf_router_lsa_update(ospf); /* update ospf_interface's */ - FOR_ALL_INTERFACES (vrf, ifp) - ospf_if_update(ospf, ifp); + FOR_ALL_INTERFACES (vrf, ifp) { + if (reset) + ospf_if_reset(ifp); + else + ospf_if_update(ospf, ifp); + } ospf_external_lsa_rid_change(ospf); } + + ospf->inst_shutdown = 0; +} + +void ospf_router_id_update(struct ospf *ospf) +{ + ospf_process_refresh_data(ospf, false); +} + +void ospf_process_reset(struct ospf *ospf) +{ + ospf_process_refresh_data(ospf, true); +} + +void ospf_neighbor_reset(struct ospf *ospf, struct in_addr nbr_id, + const char *nbr_str) +{ + struct route_node *rn; + struct ospf_neighbor *nbr; + struct ospf_interface *oi; + struct listnode *node; + + /* Clear only a particular nbr with nbr router id as nbr_id */ + if (nbr_str != NULL) { + for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi)) { + nbr = ospf_nbr_lookup_by_routerid(oi->nbrs, &nbr_id); + if (nbr) + OSPF_NSM_EVENT_EXECUTE(nbr, NSM_KillNbr); + } + return; + } + + /* send Neighbor event KillNbr to all associated neighbors. 
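/*
 * Illustrative sketch, not part of the patch: the neighbor reset above has two
 * modes -- kill only the neighbor whose router id was given (handled earlier
 * in ospf_neighbor_reset()), or cycle every neighbor on every interface, which
 * is what the loop following this comment does.  Below is a reduced,
 * array-based version of that choice; all names are hypothetical and
 * example_kill_nbr() stands in for the NSM_KillNbr event.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_nbr {
	uint32_t router_id;
	bool adjacency_up;
};

static void example_kill_nbr(struct example_nbr *nbr)
{
	nbr->adjacency_up = false;
}

static void example_neighbor_reset(struct example_nbr *nbrs, size_t n,
				   const uint32_t *only_router_id)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (only_router_id != NULL
		    && nbrs[i].router_id != *only_router_id)
			continue;
		example_kill_nbr(&nbrs[i]);
	}
}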
*/ + for (ALL_LIST_ELEMENTS_RO(ospf->oiflist, node, oi)) { + for (rn = route_top(oi->nbrs); rn; rn = route_next(rn)) { + nbr = rn->info; + if (nbr && (nbr != oi->nbr_self)) + OSPF_NSM_EVENT_EXECUTE(nbr, NSM_KillNbr); + } + } } /* For OSPF area sort by area id. */ @@ -213,8 +288,7 @@ static int ospf_area_id_cmp(struct ospf_area *a1, struct ospf_area *a2) return 0; } -/* Allocate new ospf structure. */ -static struct ospf *ospf_new(unsigned short instance, const char *name) +struct ospf *ospf_new_alloc(unsigned short instance, const char *name) { int i; struct vrf *vrf = NULL; @@ -289,8 +363,6 @@ static struct ospf *ospf_new(unsigned short instance, const char *name) new->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT; new->maxage_lsa = route_table_init(); new->t_maxage_walker = NULL; - thread_add_timer(master, ospf_lsa_maxage_walker, new, - OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker); /* Distance table init. */ new->distance_table = route_table_init(); @@ -298,8 +370,6 @@ static struct ospf *ospf_new(unsigned short instance, const char *name) new->lsa_refresh_queue.index = 0; new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT; new->t_lsa_refresher = NULL; - thread_add_timer(master, ospf_lsa_refresh_walker, new, - new->lsa_refresh_interval, &new->t_lsa_refresher); new->lsa_refresher_started = monotime(NULL); new->ibuf = stream_new(OSPF_MAX_PACKET_SIZE + 1); @@ -317,6 +387,17 @@ static struct ospf *ospf_new(unsigned short instance, const char *name) QOBJ_REG(new, ospf); new->fd = -1; + + return new; +} + +/* Allocate new ospf structure. */ +static struct ospf *ospf_new(unsigned short instance, const char *name) +{ + struct ospf *new; + + new = ospf_new_alloc(instance, name); + if ((ospf_sock_init(new)) < 0) { if (new->vrf_id != VRF_UNKNOWN) flog_err( @@ -325,6 +406,12 @@ static struct ospf *ospf_new(unsigned short instance, const char *name) __func__); return new; } + + thread_add_timer(master, ospf_lsa_maxage_walker, new, + OSPF_LSA_MAXAGE_CHECK_INTERVAL, &new->t_maxage_walker); + thread_add_timer(master, ospf_lsa_refresh_walker, new, + new->lsa_refresh_interval, &new->t_lsa_refresher); + thread_add_read(master, ospf_read, new, new->fd, &new->t_read); return new; @@ -836,8 +923,7 @@ static void ospf_finish_final(struct ospf *ospf) /* allocate new OSPF Area object */ -static struct ospf_area *ospf_area_new(struct ospf *ospf, - struct in_addr area_id) +struct ospf_area *ospf_area_new(struct ospf *ospf, struct in_addr area_id) { struct ospf_area *new; @@ -870,14 +956,11 @@ static struct ospf_area *ospf_area_new(struct ospf *ospf, return new; } -static void ospf_area_free(struct ospf_area *area) +void ospf_area_lsdb_discard_delete(struct ospf_area *area) { struct route_node *rn; struct ospf_lsa *lsa; - ospf_opaque_type10_lsa_term(area); - - /* Free LSDBs. */ LSDB_LOOP (ROUTER_LSDB(area), rn, lsa) ospf_discard_from_db(area->ospf, area->lsdb, lsa); LSDB_LOOP (NETWORK_LSDB(area), rn, lsa) @@ -895,6 +978,15 @@ static void ospf_area_free(struct ospf_area *area) ospf_discard_from_db(area->ospf, area->lsdb, lsa); ospf_lsdb_delete_all(area->lsdb); +} + +static void ospf_area_free(struct ospf_area *area) +{ + ospf_opaque_type10_lsa_term(area); + + /* Free LSDBs. 
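/*
 * Illustrative sketch, not part of the patch: ospf_new() above is now split
 * into a pure allocation step (ospf_new_alloc) and a second step that starts
 * the maxage/refresh timers and the packet-read job only after the raw socket
 * was opened.  Below is a reduced version of that two-phase shape; the names
 * are hypothetical and the boolean stands in for the thread_add_timer() and
 * thread_add_read() calls.
 */
#include <stdbool.h>
#include <stdlib.h>

struct example_proc {
	int fd;
	bool jobs_scheduled;
};

/* Phase 1: allocate and set defaults only -- no I/O, no timers. */
static struct example_proc *example_proc_alloc(void)
{
	struct example_proc *p = calloc(1, sizeof(*p));

	if (p != NULL)
		p->fd = -1;
	return p;
}

/* Phase 2: open the socket; schedule periodic work only on success. */
static struct example_proc *example_proc_new(int (*open_sock)(void))
{
	struct example_proc *p = example_proc_alloc();

	if (p == NULL)
		return NULL;

	p->fd = open_sock();
	if (p->fd < 0)
		return p; /* the patch also returns the object on failure */

	p->jobs_scheduled = true;
	return p;
}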
*/ + ospf_area_lsdb_discard_delete(area); + ospf_lsdb_free(area->lsdb); ospf_lsa_unlock(&area->router_lsa_self); @@ -978,7 +1070,8 @@ void ospf_area_del_if(struct ospf_area *area, struct ospf_interface *oi) } -static void add_ospf_interface(struct connected *co, struct ospf_area *area) +struct ospf_interface *add_ospf_interface(struct connected *co, + struct ospf_area *area) { struct ospf_interface *oi; @@ -1015,6 +1108,8 @@ static void add_ospf_interface(struct connected *co, struct ospf_area *area) if ((area->ospf->router_id.s_addr != INADDR_ANY) && if_is_operative(co->ifp)) ospf_if_up(oi); + + return oi; } static void update_redistributed(struct ospf *ospf, int add_to_ospf) @@ -1644,26 +1739,6 @@ int ospf_area_nssa_translator_role_set(struct ospf *ospf, return 1; } -#if 0 -/* XXX: unused? Leave for symmetry? */ -static int -ospf_area_nssa_translator_role_unset (struct ospf *ospf, - struct in_addr area_id) -{ - struct ospf_area *area; - - area = ospf_area_lookup_by_area_id (ospf, area_id); - if (area == NULL) - return 0; - - area->NSSATranslatorRole = OSPF_NSSA_ROLE_CANDIDATE; - - ospf_area_check_free (ospf, area_id); - - return 1; -} -#endif - int ospf_area_export_list_set(struct ospf *ospf, struct ospf_area *area, const char *list_name) { @@ -1906,35 +1981,6 @@ struct ospf_nbr_nbma *ospf_nbr_nbma_lookup(struct ospf *ospf, return NULL; } -struct ospf_nbr_nbma *ospf_nbr_nbma_lookup_next(struct ospf *ospf, - struct in_addr *addr, int first) -{ -#if 0 - struct ospf_nbr_nbma *nbr_nbma; - struct listnode *node; -#endif - - if (ospf == NULL) - return NULL; - -#if 0 - for (ALL_LIST_ELEMENTS_RO (ospf->nbr_nbma, node, nbr_nbma)) - { - if (first) - { - *addr = nbr_nbma->addr; - return nbr_nbma; - } - else if (ntohl (nbr_nbma->addr.s_addr) > ntohl (addr->s_addr)) - { - *addr = nbr_nbma->addr; - return nbr_nbma; - } - } -#endif - return NULL; -} - int ospf_nbr_nbma_set(struct ospf *ospf, struct in_addr nbr_addr) { struct ospf_nbr_nbma *nbr_nbma; diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h index 3087b735ae..954a469b68 100644 --- a/ospfd/ospfd.h +++ b/ospfd/ospfd.h @@ -23,6 +23,7 @@ #define _ZEBRA_OSPFD_H #include <zebra.h> +#include "typesafe.h" #include "qobj.h" #include "libospf.h" #include "ldp_sync.h" @@ -126,6 +127,13 @@ enum { OSPF_LOG_ADJACENCY_DETAIL = (1 << 4), }; +/* TI-LFA */ +enum protection_type { + OSPF_TI_LFA_UNDEFINED_PROTECTION, + OSPF_TI_LFA_LINK_PROTECTION, + OSPF_TI_LFA_NODE_PROTECTION, +}; + /* OSPF instance structure. */ struct ospf { /* OSPF's running state based on the '[no] router ospf [<instance>]' @@ -374,10 +382,71 @@ struct ospf { /* MPLS LDP-IGP Sync */ struct ldp_sync_info_cmd ldp_sync_cmd; + /* TI-LFA support for all interfaces. 
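/*
 * Illustrative sketch, not part of the patch: the LSDB teardown above was
 * pulled out into ospf_area_lsdb_discard_delete() so that both
 * ospf_area_free() and the per-area loop in the process-reset path can reuse
 * it.  Below is a reduced version of that "extract and share" shape; the
 * struct and helper names are hypothetical stand-ins.
 */
#include <stdlib.h>

struct example_area {
	int *lsdb_entries;
	int lsdb_count;
};

/* Shared helper: drop the LSDB contents but keep the area object itself. */
static void example_area_lsdb_discard(struct example_area *area)
{
	free(area->lsdb_entries);
	area->lsdb_entries = NULL;
	area->lsdb_count = 0;
}

static void example_area_free(struct example_area *area)
{
	example_area_lsdb_discard(area); /* reused when freeing an area ... */
	free(area);
}

static void example_reset_all_areas(struct example_area *areas, int n)
{
	int i;

	for (i = 0; i < n; i++)
		example_area_lsdb_discard(&areas[i]); /* ... and on reset */
}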
*/ + bool ti_lfa_enabled; + enum protection_type ti_lfa_protection_type; + QOBJ_FIELDS }; DECLARE_QOBJ_TYPE(ospf) +enum ospf_ti_lfa_p_q_space_adjacency { + OSPF_TI_LFA_P_Q_SPACE_ADJACENT, + OSPF_TI_LFA_P_Q_SPACE_NON_ADJACENT, +}; + +enum ospf_ti_lfa_node_type { + OSPF_TI_LFA_UNDEFINED_NODE, + OSPF_TI_LFA_PQ_NODE, + OSPF_TI_LFA_P_NODE, + OSPF_TI_LFA_Q_NODE, +}; + +struct ospf_ti_lfa_node_info { + struct vertex *node; + enum ospf_ti_lfa_node_type type; + struct in_addr nexthop; +}; + +struct ospf_ti_lfa_inner_backup_path_info { + struct ospf_ti_lfa_node_info p_node_info; + struct ospf_ti_lfa_node_info q_node_info; + struct mpls_label_stack *label_stack; +}; + +struct protected_resource { + enum protection_type type; + + /* Link Protection */ + struct router_lsa_link *link; + + /* Node Protection */ + struct in_addr router_id; +}; + +PREDECL_RBTREE_UNIQ(q_spaces) +struct q_space { + struct vertex *root; + struct list *vertex_list; + struct mpls_label_stack *label_stack; + struct in_addr nexthop; + struct list *pc_path; + struct ospf_ti_lfa_node_info *p_node_info; + struct ospf_ti_lfa_node_info *q_node_info; + struct q_spaces_item q_spaces_item; +}; + +PREDECL_RBTREE_UNIQ(p_spaces) +struct p_space { + struct vertex *root; + struct protected_resource *protected_resource; + struct q_spaces_head *q_spaces; + struct list *vertex_list; + struct vertex *pc_spf; + struct list *pc_vertex_list; + struct p_spaces_item p_spaces_item; +}; + /* OSPF area structure. */ struct ospf_area { /* OSPF instance. */ @@ -475,6 +544,12 @@ struct ospf_area { bool spf_root_node; /* flag for checking if the calculating node is the root node of the SPF tree */ + /* TI-LFA protected link for SPF calculations */ + struct protected_resource *spf_protected_resource; + + /* P/Q spaces for TI-LFA */ + struct p_spaces_head *p_spaces; + /* Threads. */ struct thread *t_stub_router; /* Stub-router timer */ struct thread *t_opaque_lsa_self; /* Type-10 Opaque-LSAs origin. */ @@ -482,6 +557,9 @@ struct ospf_area { /* Statistics field. */ uint32_t spf_calculation; /* SPF Calculation Count. */ + /* reverse SPF (used for TI-LFA Q spaces) */ + bool spf_reversed; + /* Time stamps. */ struct timeval ts_spf; /* SPF calculation time stamp. 
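/*
 * Illustrative sketch, not part of the patch: struct protected_resource above
 * is effectively a tagged record -- the type field decides whether the link
 * (link protection) or the router id (node protection) is the meaningful
 * part.  Below is a reduced stand-alone version with hypothetical names,
 * plus how a consumer would branch on the tag, in the spirit of the
 * ospf_print_protected_resource() helper declared in ospf_ti_lfa.h.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

enum example_protection_type {
	EX_PROTECT_UNDEFINED,
	EX_PROTECT_LINK,
	EX_PROTECT_NODE,
};

struct example_protected_resource {
	enum example_protection_type type;
	struct in_addr link_id;   /* valid when type == EX_PROTECT_LINK */
	struct in_addr router_id; /* valid when type == EX_PROTECT_NODE */
};

static void
example_print_protected(const struct example_protected_resource *res,
			char *buf, size_t len)
{
	char addr[INET_ADDRSTRLEN];

	switch (res->type) {
	case EX_PROTECT_LINK:
		inet_ntop(AF_INET, &res->link_id, addr, sizeof(addr));
		snprintf(buf, len, "link %s", addr);
		break;
	case EX_PROTECT_NODE:
		inet_ntop(AF_INET, &res->router_id, addr, sizeof(addr));
		snprintf(buf, len, "node %s", addr);
		break;
	default:
		snprintf(buf, len, "undefined");
		break;
	}
}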
*/ @@ -566,12 +644,17 @@ extern const char *ospf_redist_string(unsigned int route_type); extern struct ospf *ospf_lookup_instance(unsigned short); extern struct ospf *ospf_get(unsigned short instance, const char *name, bool *created); +extern struct ospf *ospf_new_alloc(unsigned short instance, const char *name); extern struct ospf *ospf_get_instance(unsigned short, bool *created); extern struct ospf *ospf_lookup_by_inst_name(unsigned short instance, const char *name); extern struct ospf *ospf_lookup_by_vrf_id(vrf_id_t vrf_id); extern void ospf_finish(struct ospf *); +extern void ospf_process_refresh_data(struct ospf *ospf, bool reset); extern void ospf_router_id_update(struct ospf *ospf); +extern void ospf_process_reset(struct ospf *ospf); +extern void ospf_neighbor_reset(struct ospf *ospf, struct in_addr nbr_id, + const char *nbr_str); extern int ospf_network_set(struct ospf *, struct prefix_ipv4 *, struct in_addr, int); extern int ospf_network_unset(struct ospf *, struct prefix_ipv4 *, @@ -596,6 +679,7 @@ extern int ospf_area_shortcut_set(struct ospf *, struct ospf_area *, int); extern int ospf_area_shortcut_unset(struct ospf *, struct ospf_area *); extern int ospf_timers_refresh_set(struct ospf *, int); extern int ospf_timers_refresh_unset(struct ospf *); +void ospf_area_lsdb_discard_delete(struct ospf_area *area); extern int ospf_nbr_nbma_set(struct ospf *, struct in_addr); extern int ospf_nbr_nbma_unset(struct ospf *, struct in_addr); extern int ospf_nbr_nbma_priority_set(struct ospf *, struct in_addr, uint8_t); @@ -610,10 +694,10 @@ extern void ospf_terminate(void); extern void ospf_nbr_nbma_if_update(struct ospf *, struct ospf_interface *); extern struct ospf_nbr_nbma *ospf_nbr_nbma_lookup(struct ospf *, struct in_addr); -extern struct ospf_nbr_nbma *ospf_nbr_nbma_lookup_next(struct ospf *, - struct in_addr *, int); extern int ospf_oi_count(struct interface *); +extern struct ospf_area *ospf_area_new(struct ospf *ospf, + struct in_addr area_id); extern struct ospf_area *ospf_area_get(struct ospf *, struct in_addr); extern void ospf_area_check_free(struct ospf *, struct in_addr); extern struct ospf_area *ospf_area_lookup_by_area_id(struct ospf *, @@ -635,4 +719,11 @@ const char *ospf_vrf_id_to_name(vrf_id_t vrf_id); int ospf_area_nssa_no_summary_set(struct ospf *, struct in_addr); const char *ospf_get_name(const struct ospf *ospf); +extern struct ospf_interface *add_ospf_interface(struct connected *co, + struct ospf_area *area); + +extern int p_spaces_compare_func(const struct p_space *a, + const struct p_space *b); +extern int q_spaces_compare_func(const struct q_space *a, + const struct q_space *b); #endif /* _ZEBRA_OSPFD_H */ diff --git a/ospfd/subdir.am b/ospfd/subdir.am index 1a807ea12b..28d58452df 100644 --- a/ospfd/subdir.am +++ b/ospfd/subdir.am @@ -52,6 +52,7 @@ ospfd_libfrrospf_a_SOURCES = \ ospfd/ospf_route.c \ ospfd/ospf_routemap.c \ ospfd/ospf_spf.c \ + ospfd/ospf_ti_lfa.c \ ospfd/ospf_sr.c \ ospfd/ospf_te.c \ ospfd/ospf_vty.c \ @@ -100,6 +101,7 @@ noinst_HEADERS += \ ospfd/ospf_ri.h \ ospfd/ospf_route.h \ ospfd/ospf_spf.h \ + ospfd/ospf_ti_lfa.h \ ospfd/ospf_sr.h \ ospfd/ospf_te.h \ ospfd/ospf_vty.h \ diff --git a/pathd/.gitignore b/pathd/.gitignore new file mode 100644 index 0000000000..95f4a99794 --- /dev/null +++ b/pathd/.gitignore @@ -0,0 +1,2 @@ +libpath.a +pathd diff --git a/pathd/Makefile b/pathd/Makefile new file mode 100644 index 0000000000..b681a9ab1c --- /dev/null +++ b/pathd/Makefile @@ -0,0 +1,10 @@ +all: ALWAYS + @$(MAKE) -s -C .. 
pathd/pathd +%: ALWAYS + @$(MAKE) -s -C .. pathd/$@ + +Makefile: + #nothing +ALWAYS: +.PHONY: ALWAYS makefiles +.SUFFIXES: diff --git a/pathd/path_cli.c b/pathd/path_cli.c new file mode 100644 index 0000000000..8beb428135 --- /dev/null +++ b/pathd/path_cli.c @@ -0,0 +1,1108 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <float.h> +#include <math.h> +#include <zebra.h> + +#include "log.h" +#include "command.h" +#include "mpls.h" +#include "northbound_cli.h" +#include "termtable.h" + +#include "pathd/pathd.h" +#include "pathd/path_nb.h" +#include "pathd/path_memory.h" +#ifndef VTYSH_EXTRACT_PL +#include "pathd/path_cli_clippy.c" +#endif + +#define XPATH_MAXATTRSIZE 64 +#define XPATH_MAXKEYSIZE 42 +#define XPATH_POLICY_BASELEN 100 +#define XPATH_POLICY_MAXLEN (XPATH_POLICY_BASELEN + XPATH_MAXATTRSIZE) +#define XPATH_CANDIDATE_BASELEN (XPATH_POLICY_BASELEN + XPATH_MAXKEYSIZE) +#define XPATH_CANDIDATE_MAXLEN (XPATH_CANDIDATE_BASELEN + XPATH_MAXATTRSIZE) + + +static int config_write_segment_routing(struct vty *vty); +static int config_write_traffic_eng(struct vty *vty); +static int config_write_segment_lists(struct vty *vty); +static int config_write_sr_policies(struct vty *vty); + +DEFINE_MTYPE_STATIC(PATHD, PATH_CLI, "Client") + +/* Vty node structures. */ +static struct cmd_node segment_routing_node = { + .name = "segment-routing", + .node = SEGMENT_ROUTING_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-sr)# ", + .config_write = config_write_segment_routing, +}; + +static struct cmd_node sr_traffic_eng_node = { + .name = "sr traffic-eng", + .node = SR_TRAFFIC_ENG_NODE, + .parent_node = SEGMENT_ROUTING_NODE, + .prompt = "%s(config-sr-te)# ", + .config_write = config_write_traffic_eng, +}; + +static struct cmd_node srte_segment_list_node = { + .name = "srte segment-list", + .node = SR_SEGMENT_LIST_NODE, + .parent_node = SR_TRAFFIC_ENG_NODE, + .prompt = "%s(config-sr-te-segment-list)# ", + .config_write = config_write_segment_lists, +}; + +static struct cmd_node srte_policy_node = { + .name = "srte policy", + .node = SR_POLICY_NODE, + .parent_node = SR_TRAFFIC_ENG_NODE, + .prompt = "%s(config-sr-te-policy)# ", + .config_write = config_write_sr_policies, +}; + +static struct cmd_node srte_candidate_dyn_node = { + .name = "srte candidate-dyn", + .node = SR_CANDIDATE_DYN_NODE, + .parent_node = SR_POLICY_NODE, + .prompt = "%s(config-sr-te-candidate)# ", +}; + + +/* + * Show SR-TE info + */ +DEFPY(show_srte_policy, + show_srte_policy_cmd, + "show sr-te policy", + SHOW_STR + "SR-TE info\n" + "SR-TE Policy\n") +{ + struct ttable *tt; + struct srte_policy *policy; + char *table; + + if (RB_EMPTY(srte_policy_head, &srte_policies)) { + vty_out(vty, "No SR Policies to display.\n\n"); + return CMD_SUCCESS; + } + + /* Prepare table. 
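/*
 * Illustrative sketch, not part of the patch: the policy summary that follows
 * is rendered with FRR's termtable helper (ttable_*).  A bare-bones printf()
 * stand-in for one row with the same columns, using made-up sample data,
 * could look like this.
 */
#include <stdio.h>

static void example_print_policy_row(const char *endpoint, unsigned int color,
				     const char *name, const char *bsid,
				     const char *status)
{
	printf("%-40s %-6u %-16s %-10s %s\n",
	       endpoint, color, name, bsid, status);
}

/* e.g. example_print_policy_row("192.0.2.1", 1, "POL1", "1111", "Active"); */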
*/ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(tt, "Endpoint|Color|Name|BSID|Status"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); + ttable_rowseps(tt, 0, BOTTOM, true, '-'); + + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + char endpoint[46]; + char binding_sid[16] = "-"; + + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + if (policy->binding_sid != MPLS_LABEL_NONE) + snprintf(binding_sid, sizeof(binding_sid), "%u", + policy->binding_sid); + + ttable_add_row(tt, "%s|%u|%s|%s|%s", endpoint, policy->color, + policy->name, binding_sid, + policy->status == SRTE_POLICY_STATUS_UP + ? "Active" + : "Inactive"); + } + + /* Dump the generated table. */ + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + + ttable_del(tt); + + return CMD_SUCCESS; +} + +/* + * Show detailed SR-TE info + */ +DEFPY(show_srte_policy_detail, + show_srte_policy_detail_cmd, + "show sr-te policy detail", + SHOW_STR + "SR-TE info\n" + "SR-TE Policy\n" + "Show a detailed summary\n") +{ + struct srte_policy *policy; + + if (RB_EMPTY(srte_policy_head, &srte_policies)) { + vty_out(vty, "No SR Policies to display.\n\n"); + return CMD_SUCCESS; + } + + vty_out(vty, "\n"); + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + struct srte_candidate *candidate; + char endpoint[46]; + char binding_sid[16] = "-"; + char *segment_list_info; + static char undefined_info[] = "(undefined)"; + static char created_by_pce_info[] = "(created by PCE)"; + + + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + if (policy->binding_sid != MPLS_LABEL_NONE) + snprintf(binding_sid, sizeof(binding_sid), "%u", + policy->binding_sid); + vty_out(vty, + "Endpoint: %s Color: %u Name: %s BSID: %s Status: %s\n", + endpoint, policy->color, policy->name, binding_sid, + policy->status == SRTE_POLICY_STATUS_UP ? "Active" + : "Inactive"); + + RB_FOREACH (candidate, srte_candidate_head, + &policy->candidate_paths) { + struct srte_segment_list *segment_list; + + segment_list = candidate->lsp->segment_list; + if (segment_list == NULL) + segment_list_info = undefined_info; + else if (segment_list->protocol_origin + == SRTE_ORIGIN_PCEP) + segment_list_info = created_by_pce_info; + else + segment_list_info = + candidate->lsp->segment_list->name; + + vty_out(vty, + " %s Preference: %d Name: %s Type: %s Segment-List: %s Protocol-Origin: %s\n", + CHECK_FLAG(candidate->flags, F_CANDIDATE_BEST) + ? "*" + : " ", + candidate->preference, candidate->name, + candidate->type == SRTE_CANDIDATE_TYPE_EXPLICIT + ? 
"explicit" + : "dynamic", + segment_list_info, + srte_origin2str( + candidate->lsp->protocol_origin)); + } + + vty_out(vty, "\n"); + } + + return CMD_SUCCESS; +} + +DEFPY_NOSH( + segment_routing_list, + segment_routing_cmd, + "segment-routing", + "Configure segment routing\n") +{ + VTY_PUSH_CONTEXT_NULL(SEGMENT_ROUTING_NODE); + return CMD_SUCCESS; +} + +DEFPY_NOSH( + sr_traffic_eng_list, + sr_traffic_eng_cmd, + "traffic-eng", + "Configure SR traffic engineering\n") +{ + VTY_PUSH_CONTEXT_NULL(SR_TRAFFIC_ENG_NODE); + return CMD_SUCCESS; +} + +/* + * XPath: /frr-pathd:pathd/srte/segment-list + */ +DEFPY_NOSH( + srte_segment_list, + srte_segment_list_cmd, + "segment-list WORD$name", + "Segment List\n" + "Segment List Name\n") +{ + char xpath[XPATH_MAXLEN]; + int ret; + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/segment-list[name='%s']", name); + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/segment-list[name='%s']/protocol-origin", + name); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, "local"); + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/segment-list[name='%s']/originator", name); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, "config"); + + ret = nb_cli_apply_changes(vty, NULL); + if (ret == CMD_SUCCESS) { + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/segment-list[name='%s']", name); + VTY_PUSH_XPATH(SR_SEGMENT_LIST_NODE, xpath); + } + + return ret; +} + +DEFPY(srte_no_segment_list, + srte_no_segment_list_cmd, + "no segment-list WORD$name", + NO_STR + "Segment List\n" + "Segment List Name\n") +{ + char xpath[XPATH_MAXLEN]; + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/segment-list[name='%s']", name); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_srte_segment_list(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " segment-list %s\n", + yang_dnode_get_string(dnode, "./name")); +} + +/* + * XPath: /frr-pathd:pathd/srte/segment-list/segment + */ +DEFPY(srte_segment_list_segment, + srte_segment_list_segment_cmd, + "index (0-4294967295)$index mpls label (16-1048575)$label " + "[nai$has_nai <" + "node <A.B.C.D$node_ipv4|X:X::X:X$node_ipv6>" + ">]", + "Index\n" + "Index Value\n" + "MPLS or IP Label\n" + "Label\n" + "Label Value\n" + "Segment NAI\n" + "NAI node identifier\n" + "NAI IPv4 node identifier\n" + "NAI IPv6 node identifier\n") +{ + char xpath[XPATH_MAXLEN]; + const char *node_id; + + snprintf(xpath, sizeof(xpath), "./segment[index='%s']", index_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + + snprintf(xpath, sizeof(xpath), "./segment[index='%s']/sid-value", + index_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, label_str); + + if (has_nai != NULL) { + snprintf(xpath, sizeof(xpath), "./segment[index='%s']/nai/type", + index_str); + if (node_ipv4_str != NULL) { + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, + "ipv4_node"); + node_id = node_ipv4_str; + } else if (node_ipv6_str != NULL) { + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, + "ipv6_node"); + node_id = node_ipv6_str; + } else { + return CMD_ERR_NO_MATCH; + } + snprintf(xpath, sizeof(xpath), + "./segment[index='%s']/nai/local-address", index_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, node_id); + } else { + snprintf(xpath, sizeof(xpath), "./segment[index='%s']/nai", + index_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + } + + return 
nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_segment_list_no_segment, + srte_segment_list_no_segment_cmd, + "no index (0-4294967295)$index", + NO_STR + "Index\n" + "Index Value\n") +{ + char xpath[XPATH_MAXLEN]; + + snprintf(xpath, sizeof(xpath), "./segment[index='%s']", index_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_srte_segment_list_segment(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " index %s mpls label %s", + yang_dnode_get_string(dnode, "./index"), + yang_dnode_get_string(dnode, "./sid-value")); + if (yang_dnode_exists(dnode, "./nai")) { + struct ipaddr addr; + switch (yang_dnode_get_enum(dnode, "./nai/type")) { + case SRTE_SEGMENT_NAI_TYPE_IPV4_NODE: + yang_dnode_get_ip(&addr, dnode, "./nai/local-address"); + vty_out(vty, " nai node %pI4", &addr.ipaddr_v4); + break; + case SRTE_SEGMENT_NAI_TYPE_IPV6_NODE: + yang_dnode_get_ip(&addr, dnode, "./nai/local-address"); + vty_out(vty, " nai node %pI6", &addr.ipaddr_v6); + break; + default: + break; + } + } + vty_out(vty, "\n"); +} + +/* + * XPath: /frr-pathd:pathd/policy + */ +DEFPY_NOSH( + srte_policy, + srte_policy_cmd, + "policy color (0-4294967295)$num endpoint <A.B.C.D|X:X::X:X>$endpoint", + "Segment Routing Policy\n" + "SR Policy color\n" + "SR Policy color value\n" + "SR Policy endpoint\n" + "SR Policy endpoint IPv4 address\n" + "SR Policy endpoint IPv6 address\n") +{ + char xpath[XPATH_POLICY_BASELEN]; + int ret; + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/policy[color='%s'][endpoint='%s']", + num_str, endpoint_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + + ret = nb_cli_apply_changes(vty, NULL); + if (ret == CMD_SUCCESS) + VTY_PUSH_XPATH(SR_POLICY_NODE, xpath); + + return ret; +} + +DEFPY(srte_no_policy, + srte_no_policy_cmd, + "no policy color (0-4294967295)$num endpoint <A.B.C.D|X:X::X:X>$endpoint", + NO_STR + "Segment Routing Policy\n" + "SR Policy color\n" + "SR Policy color value\n" + "SR Policy endpoint\n" + "SR Policy endpoint IPv4 address\n" + "SR Policy endpoint IPv6 address\n") +{ + char xpath[XPATH_POLICY_BASELEN]; + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/policy[color='%s'][endpoint='%s']", + num_str, endpoint_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_srte_policy(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " policy color %s endpoint %s\n", + yang_dnode_get_string(dnode, "./color"), + yang_dnode_get_string(dnode, "./endpoint")); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/name + */ +DEFPY(srte_policy_name, + srte_policy_name_cmd, + "name WORD$name", + "Segment Routing Policy name\n" + "SR Policy name value\n") +{ + nb_cli_enqueue_change(vty, "./name", NB_OP_CREATE, name); + + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_policy_no_name, + srte_policy_no_name_cmd, + "no name [WORD]", + NO_STR + "Segment Routing Policy name\n" + "SR Policy name value\n") +{ + nb_cli_enqueue_change(vty, "./name", NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + + +void cli_show_srte_policy_name(struct vty *vty, struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " name %s\n", yang_dnode_get_string(dnode, NULL)); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/binding-sid + */ +DEFPY(srte_policy_binding_sid, + srte_policy_binding_sid_cmd, + "binding-sid (16-1048575)$label", + "Segment 
Routing Policy Binding-SID\n" + "SR Policy Binding-SID label\n") +{ + nb_cli_enqueue_change(vty, "./binding-sid", NB_OP_CREATE, label_str); + + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_policy_no_binding_sid, + srte_policy_no_binding_sid_cmd, + "no binding-sid [(16-1048575)]", + NO_STR + "Segment Routing Policy Binding-SID\n" + "SR Policy Binding-SID label\n") +{ + nb_cli_enqueue_change(vty, "./binding-sid", NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_srte_policy_binding_sid(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " binding-sid %s\n", yang_dnode_get_string(dnode, NULL)); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path + */ +DEFPY(srte_policy_candidate_exp, + srte_policy_candidate_exp_cmd, + "candidate-path preference (0-4294967295)$preference name WORD$name \ + explicit segment-list WORD$list_name", + "Segment Routing Policy Candidate Path\n" + "Segment Routing Policy Candidate Path Preference\n" + "Administrative Preference\n" + "Segment Routing Policy Candidate Path Name\n" + "Symbolic Name\n" + "Explicit Path\n" + "List of SIDs\n" + "Name of the Segment List\n") +{ + nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, preference_str); + nb_cli_enqueue_change(vty, "./name", NB_OP_MODIFY, name); + nb_cli_enqueue_change(vty, "./protocol-origin", NB_OP_MODIFY, "local"); + nb_cli_enqueue_change(vty, "./originator", NB_OP_MODIFY, "config"); + nb_cli_enqueue_change(vty, "./type", NB_OP_MODIFY, "explicit"); + nb_cli_enqueue_change(vty, "./segment-list-name", NB_OP_MODIFY, + list_name); + return nb_cli_apply_changes(vty, "./candidate-path[preference='%s']", + preference_str); +} + +DEFPY_NOSH( + srte_policy_candidate_dyn, + srte_policy_candidate_dyn_cmd, + "candidate-path preference (0-4294967295)$preference name WORD$name dynamic", + "Segment Routing Policy Candidate Path\n" + "Segment Routing Policy Candidate Path Preference\n" + "Administrative Preference\n" + "Segment Routing Policy Candidate Path Name\n" + "Symbolic Name\n" + "Dynamic Path\n") +{ + char xpath[XPATH_MAXLEN + XPATH_CANDIDATE_BASELEN]; + int ret; + + snprintf(xpath, sizeof(xpath), "%s/candidate-path[preference='%s']", + VTY_CURR_XPATH, preference_str); + + nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, preference_str); + nb_cli_enqueue_change(vty, "./name", NB_OP_MODIFY, name); + nb_cli_enqueue_change(vty, "./protocol-origin", NB_OP_MODIFY, "local"); + nb_cli_enqueue_change(vty, "./originator", NB_OP_MODIFY, "config"); + nb_cli_enqueue_change(vty, "./type", NB_OP_MODIFY, "dynamic"); + ret = nb_cli_apply_changes(vty, "./candidate-path[preference='%s']", + preference_str); + + if (ret == CMD_SUCCESS) + VTY_PUSH_XPATH(SR_CANDIDATE_DYN_NODE, xpath); + + return ret; +} + +DEFPY(srte_candidate_bandwidth, + srte_candidate_bandwidth_cmd, + "bandwidth BANDWIDTH$value [required$required]", + "Define a bandwidth constraint\n" + "Bandwidth value\n" + "Required constraint\n") +{ + nb_cli_enqueue_change(vty, "./constraints/bandwidth/required", + NB_OP_MODIFY, required ? 
"true" : "false"); + nb_cli_enqueue_change(vty, "./constraints/bandwidth/value", + NB_OP_MODIFY, value); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_candidate_no_bandwidth, + srte_candidate_no_bandwidth_cmd, + "no bandwidth [BANDWIDTH$value] [required$required]", + NO_STR + "Remove a bandwidth constraint\n" + "Bandwidth value\n" + "Required constraint\n") +{ + nb_cli_enqueue_change(vty, "./constraints/bandwidth", NB_OP_DESTROY, + NULL); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_candidate_affinity_filter, + srte_candidate_affinity_filter_cmd, + "affinity {exclude-any|include-any|include-all}$type BITPATTERN$value", + "Affinity constraint\n" + "Exclude any matching link\n" + "Include any matching link\n" + "Include all matching links\n" + "Attribute filter bit pattern as an hexadecimal value from 0x00000000 to 0xFFFFFFFF\n") +{ + uint32_t filter; + char xpath[XPATH_CANDIDATE_MAXLEN]; + char decimal_value[11]; + + if (sscanf(value, "0x%x", &filter) != 1) { + vty_out(vty, "affinity type: fscanf: %s\n", + safe_strerror(errno)); + return CMD_WARNING_CONFIG_FAILED; + } + snprintf(decimal_value, sizeof(decimal_value), "%u", filter); + snprintf(xpath, sizeof(xpath), "./constraints/affinity/%s", type); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, decimal_value); + + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_candidate_no_affinity_filter, + srte_candidate_no_affinity_filter_cmd, + "no affinity {exclude-any|include-any|include-all}$type [BITPATTERN$value]", + NO_STR + "Affinity constraint\n" + "Exclude any matching link\n" + "Include any matching link\n" + "Include all matching links\n" + "Attribute filter bit pattern as an hexadecimal value from 0x00000000 to 0xFFFFFFFF\n") +{ + char xpath[XPATH_CANDIDATE_MAXLEN]; + + snprintf(xpath, sizeof(xpath), "./constraints/affinity/%s", type); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_candidate_metric, + srte_candidate_metric_cmd, + "metric [bound$bound] <igp|te|hc|abc|lmll|cigp|cte|pigp|pte|phc|msd|pd|pdv|pl|ppd|ppdv|ppl|nap|nlp|dc|bnc>$type METRIC$value [required$required]", + "Define a metric constraint\n" + "If the metric is bounded\n" + "IGP metric\n" + "TE metric\n" + "Hop Counts\n" + "Aggregate bandwidth consumption\n" + "Load of the most loaded link\n" + "Cumulative IGP cost\n" + "Cumulative TE cost\n" + "P2MP IGP metric\n" + "P2MP TE metric\n" + "P2MP hop count metric\n" + "Segment-ID (SID) Depth.\n" + "Path Delay metric\n" + "Path Delay Variation metric\n" + "Path Loss metric\n" + "P2MP Path Delay metric\n" + "P2MP Path Delay variation metric\n" + "P2MP Path Loss metric\n" + "Number of adaptations on a path\n" + "Number of layers on a path\n" + "Domain Count metric\n" + "Border Node Count metric\n" + "Metric value\n" + "Required constraint\n") +{ + char xpath[XPATH_CANDIDATE_MAXLEN]; + snprintf(xpath, sizeof(xpath), "./constraints/metrics[type='%s']/value", + type); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, value); + snprintf(xpath, sizeof(xpath), + "./constraints/metrics[type='%s']/is-bound", type); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, + (bound != NULL) ? "true" : "false"); + snprintf(xpath, sizeof(xpath), + "./constraints/metrics[type='%s']/required", type); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, + required ? 
"true" : "false"); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_candidate_no_metric, + srte_candidate_no_metric_cmd, + "no metric [bound] <igp|te|hc|abc|lmll|cigp|cte|pigp|pte|phc|msd|pd|pdv|pl|ppd|ppdv|ppl|nap|nlp|dc|bnc>$type [METRIC$value] [required$required]", + NO_STR + "Remove a metric constraint\n" + "If the metric is bounded\n" + "IGP metric\n" + "TE metric\n" + "Hop Counts\n" + "Aggregate bandwidth consumption\n" + "Load of the most loaded link\n" + "Cumulative IGP cost\n" + "Cumulative TE cost\n" + "P2MP IGP metric\n" + "P2MP TE metric\n" + "P2MP hop count metric\n" + "Segment-ID (SID) Depth.\n" + "Path Delay metric\n" + "Path Delay Variation metric\n" + "Path Loss metric\n" + "P2MP Path Delay metric\n" + "P2MP Path Delay variation metric\n" + "P2MP Path Loss metric\n" + "Number of adaptations on a path\n" + "Number of layers on a path\n" + "Domain Count metric\n" + "Border Node Count metric\n" + "Metric value\n" + "Required constraint\n") +{ + char xpath[XPATH_CANDIDATE_MAXLEN]; + snprintf(xpath, sizeof(xpath), "./constraints/metrics[type='%s']", + type); + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_policy_no_candidate, + srte_policy_no_candidate_cmd, + "no candidate-path\ + preference (0-4294967295)$preference\ + [name WORD\ + <\ + explicit segment-list WORD\ + |dynamic\ + >]", + NO_STR + "Segment Routing Policy Candidate Path\n" + "Segment Routing Policy Candidate Path Preference\n" + "Administrative Preference\n" + "Segment Routing Policy Candidate Path Name\n" + "Symbolic Name\n" + "Explicit Path\n" + "List of SIDs\n" + "Name of the Segment List\n" + "Dynamic Path\n") +{ + nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); + + return nb_cli_apply_changes(vty, "./candidate-path[preference='%s']", + preference_str); +} + +DEFPY(srte_candidate_objfun, + srte_candidate_objfun_cmd, + "objective-function <mcp|mlp|mbp|mbc|mll|mcc|spt|mct|mplp|mup|mrup|mtd|mbn|mctd|msl|mss|msn>$type [required$required]", + "Define an objective function constraint\n" + "Minimum Cost Path\n" + "Minimum Load Path\n" + "Maximum residual Bandwidth Path\n" + "Minimize aggregate Bandwidth Consumption\n" + "Minimize the Load of the most loaded Link\n" + "Minimize the Cumulative Cost of a set of paths\n" + "Shortest Path Tree\n" + "Minimum Cost Tree\n" + "Minimum Packet Loss Path\n" + "Maximum Under-Utilized Path\n" + "Maximum Reserved Under-Utilized Path\n" + "Minimize the number of Transit Domains\n" + "Minimize the number of Border Nodes\n" + "Minimize the number of Common Transit Domains\n" + "Minimize the number of Shared Links\n" + "Minimize the number of Shared SRLGs\n" + "Minimize the number of Shared Nodes\n" + "Required constraint\n") +{ + char xpath[XPATH_CANDIDATE_MAXLEN]; + nb_cli_enqueue_change(vty, "./constraints/objective-function", + NB_OP_DESTROY, NULL); + snprintf(xpath, sizeof(xpath), + "./constraints/objective-function/required"); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, + required ? 
"true" : "false"); + nb_cli_enqueue_change(vty, "./constraints/objective-function/type", + NB_OP_MODIFY, type); + return nb_cli_apply_changes(vty, NULL); +} + +DEFPY(srte_candidate_no_objfun, + srte_candidate_no_objfun_cmd, + "no objective-function [<mcp|mlp|mbp|mbc|mll|mcc|spt|mct|mplp|mup|mrup|mtd|mbn|mctd|msl|mss|msn>] [required$required]", + NO_STR + "Remove an objective function constraint\n" + "Minimum Cost Path\n" + "Minimum Load Path\n" + "Maximum residual Bandwidth Path\n" + "Minimize aggregate Bandwidth Consumption\n" + "Minimize the Load of the most loaded Link\n" + "Minimize the Cumulative Cost of a set of paths\n" + "Shortest Path Tree\n" + "Minimum Cost Tree\n" + "Minimum Packet Loss Path\n" + "Maximum Under-Utilized Path\n" + "Maximum Reserved Under-Utilized Path\n" + "Minimize the number of Transit Domains\n" + "Minimize the number of Border Nodes\n" + "Minimize the number of Common Transit Domains\n" + "Minimize the number of Shared Links\n" + "Minimize the number of Shared SRLGs\n" + "Minimize the number of Shared Nodes\n" + "Required constraint\n") +{ + nb_cli_enqueue_change(vty, "./constraints/objective-function", + NB_OP_DESTROY, NULL); + return nb_cli_apply_changes(vty, NULL); +} + +static const char *objfun_type_name(enum objfun_type type) +{ + switch (type) { + case OBJFUN_MCP: + return "mcp"; + case OBJFUN_MLP: + return "mlp"; + case OBJFUN_MBP: + return "mbp"; + case OBJFUN_MBC: + return "mbc"; + case OBJFUN_MLL: + return "mll"; + case OBJFUN_MCC: + return "mcc"; + case OBJFUN_SPT: + return "spt"; + case OBJFUN_MCT: + return "mct"; + case OBJFUN_MPLP: + return "mplp"; + case OBJFUN_MUP: + return "mup"; + case OBJFUN_MRUP: + return "mrup"; + case OBJFUN_MTD: + return "mtd"; + case OBJFUN_MBN: + return "mbn"; + case OBJFUN_MCTD: + return "mctd"; + case OBJFUN_MSL: + return "msl"; + case OBJFUN_MSS: + return "mss"; + case OBJFUN_MSN: + return "msn"; + default: + return NULL; + } +} + +DEFPY_NOSH(show_debugging_pathd, show_debugging_pathd_cmd, + "show debugging [pathd]", + SHOW_STR + "State of each debugging option\n" + "pathd module debugging\n") +{ + /* nothing to do here */ + return CMD_SUCCESS; +} + +static const char *metric_type_name(enum srte_candidate_metric_type type) +{ + switch (type) { + case SRTE_CANDIDATE_METRIC_TYPE_IGP: + return "igp"; + case SRTE_CANDIDATE_METRIC_TYPE_TE: + return "te"; + case SRTE_CANDIDATE_METRIC_TYPE_HC: + return "hc"; + case SRTE_CANDIDATE_METRIC_TYPE_ABC: + return "abc"; + case SRTE_CANDIDATE_METRIC_TYPE_LMLL: + return "lmll"; + case SRTE_CANDIDATE_METRIC_TYPE_CIGP: + return "cigp"; + case SRTE_CANDIDATE_METRIC_TYPE_CTE: + return "cte"; + case SRTE_CANDIDATE_METRIC_TYPE_PIGP: + return "pigp"; + case SRTE_CANDIDATE_METRIC_TYPE_PTE: + return "pte"; + case SRTE_CANDIDATE_METRIC_TYPE_PHC: + return "phc"; + case SRTE_CANDIDATE_METRIC_TYPE_MSD: + return "msd"; + case SRTE_CANDIDATE_METRIC_TYPE_PD: + return "pd"; + case SRTE_CANDIDATE_METRIC_TYPE_PDV: + return "pdv"; + case SRTE_CANDIDATE_METRIC_TYPE_PL: + return "pl"; + case SRTE_CANDIDATE_METRIC_TYPE_PPD: + return "ppd"; + case SRTE_CANDIDATE_METRIC_TYPE_PPDV: + return "ppdv"; + case SRTE_CANDIDATE_METRIC_TYPE_PPL: + return "ppl"; + case SRTE_CANDIDATE_METRIC_TYPE_NAP: + return "nap"; + case SRTE_CANDIDATE_METRIC_TYPE_NLP: + return "nlp"; + case SRTE_CANDIDATE_METRIC_TYPE_DC: + return "dc"; + case SRTE_CANDIDATE_METRIC_TYPE_BNC: + return "bnc"; + default: + return NULL; + } +} + +static void config_write_float(struct vty *vty, float value) +{ + if (fabs(truncf(value) - value) < 
FLT_EPSILON) { + vty_out(vty, " %d", (int)value); + return; + } else { + vty_out(vty, " %f", value); + } +} + +static void config_write_metric(struct vty *vty, + enum srte_candidate_metric_type type, + float value, bool required, bool is_bound) +{ + const char *name = metric_type_name(type); + if (name == NULL) + return; + vty_out(vty, " metric %s%s", is_bound ? "bound " : "", + metric_type_name(type)); + config_write_float(vty, value); + vty_out(vty, required ? " required" : ""); + vty_out(vty, "\n"); +} + +static int config_write_metric_cb(const struct lyd_node *dnode, void *arg) +{ + struct vty *vty = arg; + enum srte_candidate_metric_type type; + bool required, is_bound = false; + float value; + + type = yang_dnode_get_enum(dnode, "./type"); + value = (float)yang_dnode_get_dec64(dnode, "./value"); + required = yang_dnode_get_bool(dnode, "./required"); + if (yang_dnode_exists(dnode, "./is-bound")) + is_bound = yang_dnode_get_bool(dnode, "./is-bound"); + + config_write_metric(vty, type, value, required, is_bound); + return YANG_ITER_CONTINUE; +} + +void cli_show_srte_policy_candidate_path(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults) +{ + float bandwidth; + uint32_t affinity; + bool required; + enum objfun_type objfun_type; + const char *type = yang_dnode_get_string(dnode, "./type"); + + vty_out(vty, " candidate-path preference %s name %s %s", + yang_dnode_get_string(dnode, "./preference"), + yang_dnode_get_string(dnode, "./name"), type); + if (strmatch(type, "explicit")) + vty_out(vty, " segment-list %s", + yang_dnode_get_string(dnode, "./segment-list-name")); + vty_out(vty, "\n"); + + if (strmatch(type, "dynamic")) { + if (yang_dnode_exists(dnode, "./constraints/bandwidth")) { + bandwidth = (float)yang_dnode_get_dec64( + dnode, "./constraints/bandwidth/value"); + required = yang_dnode_get_bool( + dnode, "./constraints/bandwidth/required"); + vty_out(vty, " %sbandwidth", + required ? "required " : ""); + config_write_float(vty, bandwidth); + vty_out(vty, "\n"); + } + if (yang_dnode_exists(dnode, + "./constraints/affinity/exclude-any")) { + affinity = yang_dnode_get_uint32( + dnode, "./constraints/affinity/exclude-any"); + vty_out(vty, " affinity exclude-any 0x%08x\n", + affinity); + } + if (yang_dnode_exists(dnode, + "./constraints/affinity/include-any")) { + affinity = yang_dnode_get_uint32( + dnode, "./constraints/affinity/include-any"); + vty_out(vty, " affinity include-any 0x%08x\n", + affinity); + } + if (yang_dnode_exists(dnode, + "./constraints/affinity/include-all")) { + affinity = yang_dnode_get_uint32( + dnode, "./constraints/affinity/include-all"); + vty_out(vty, " affinity include-all 0x%08x\n", + affinity); + } + yang_dnode_iterate(config_write_metric_cb, vty, dnode, + "./constraints/metrics"); + if (yang_dnode_exists(dnode, + "./constraints/objective-function")) { + objfun_type = yang_dnode_get_enum(dnode, + "./constraints/objective-function/type"); + required = yang_dnode_get_bool(dnode, + "./constraints/objective-function/required"); + vty_out(vty, " objective-function %s%s\n", + objfun_type_name(objfun_type), + required ? 
" required" : ""); + } + } +} + +static int config_write_dnode(const struct lyd_node *dnode, void *arg) +{ + struct vty *vty = arg; + + nb_cli_show_dnode_cmds(vty, (struct lyd_node *)dnode, false); + + return YANG_ITER_CONTINUE; +} + +int config_write_segment_routing(struct vty *vty) +{ + vty_out(vty, "segment-routing\n"); + return 1; +} + +int config_write_traffic_eng(struct vty *vty) +{ + vty_out(vty, " traffic-eng\n"); + return 1; +} + +int config_write_segment_lists(struct vty *vty) +{ + yang_dnode_iterate(config_write_dnode, vty, running_config->dnode, + "/frr-pathd:pathd/srte/segment-list"); + + return 1; +} + +int config_write_sr_policies(struct vty *vty) +{ + yang_dnode_iterate(config_write_dnode, vty, running_config->dnode, + "/frr-pathd:pathd/srte/policy"); + + return 1; +} + +void path_cli_init(void) +{ + install_node(&segment_routing_node); + install_node(&sr_traffic_eng_node); + install_node(&srte_segment_list_node); + install_node(&srte_policy_node); + install_node(&srte_candidate_dyn_node); + install_default(SEGMENT_ROUTING_NODE); + install_default(SR_TRAFFIC_ENG_NODE); + install_default(SR_SEGMENT_LIST_NODE); + install_default(SR_POLICY_NODE); + install_default(SR_CANDIDATE_DYN_NODE); + + install_element(ENABLE_NODE, &show_debugging_pathd_cmd); + install_element(ENABLE_NODE, &show_srte_policy_cmd); + install_element(ENABLE_NODE, &show_srte_policy_detail_cmd); + + install_element(CONFIG_NODE, &segment_routing_cmd); + install_element(SEGMENT_ROUTING_NODE, &sr_traffic_eng_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &srte_segment_list_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &srte_no_segment_list_cmd); + install_element(SR_SEGMENT_LIST_NODE, + &srte_segment_list_segment_cmd); + install_element(SR_SEGMENT_LIST_NODE, + &srte_segment_list_no_segment_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &srte_policy_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &srte_no_policy_cmd); + install_element(SR_POLICY_NODE, &srte_policy_name_cmd); + install_element(SR_POLICY_NODE, &srte_policy_no_name_cmd); + install_element(SR_POLICY_NODE, &srte_policy_binding_sid_cmd); + install_element(SR_POLICY_NODE, &srte_policy_no_binding_sid_cmd); + install_element(SR_POLICY_NODE, &srte_policy_candidate_exp_cmd); + install_element(SR_POLICY_NODE, &srte_policy_candidate_dyn_cmd); + install_element(SR_POLICY_NODE, &srte_policy_no_candidate_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_bandwidth_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_no_bandwidth_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_affinity_filter_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_no_affinity_filter_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_metric_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_no_metric_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_objfun_cmd); + install_element(SR_CANDIDATE_DYN_NODE, + &srte_candidate_no_objfun_cmd); +} diff --git a/pathd/path_debug.c b/pathd/path_debug.c new file mode 100644 index 0000000000..df0550715a --- /dev/null +++ b/pathd/path_debug.c @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <string.h> +#include <stdbool.h> +#include <time.h> +#include <libyang/libyang.h> + +#include "printfrr.h" +#include "ipaddr.h" + +#include "pathd/path_debug.h" + +THREAD_DATA char _debug_buff[DEBUG_BUFF_SIZE]; + +/** + * Gives the string representation of an srte_protocol_origin enum value. + * + * @param origin The enum value to convert to string + * @return a constant string representation of the enum value + */ +const char *srte_protocol_origin_name(enum srte_protocol_origin origin) +{ + switch (origin) { + case SRTE_ORIGIN_UNDEFINED: + return "UNDEFINED"; + case SRTE_ORIGIN_PCEP: + return "PCEP"; + case SRTE_ORIGIN_BGP: + return "BGP"; + case SRTE_ORIGIN_LOCAL: + return "LOCAL"; + default: + return "UNKNOWN"; + } +} + +/** + * Gives the string representation of an srte_candidate_type enum value. + * + * @param type The enum value to convert to string + * @return a constant string representation of the enum value + */ +const char *srte_candidate_type_name(enum srte_candidate_type type) +{ + switch (type) { + case SRTE_CANDIDATE_TYPE_EXPLICIT: + return "EXPLICIT"; + case SRTE_CANDIDATE_TYPE_DYNAMIC: + return "DYNAMIC"; + case SRTE_CANDIDATE_TYPE_UNDEFINED: + return "UNDEFINED"; + default: + return "UNKNOWN"; + } +} + +/** + * Gives the string representation of an objfun_type enum value. + * + * @param type The enum value to convert to string + * @return a constant string representation of the enum value + */ +const char *objfun_type_name(enum objfun_type type) +{ + switch (type) { + case OBJFUN_UNDEFINED: + return "UNDEFINED"; + case OBJFUN_MCP: + return "MCP"; + case OBJFUN_MLP: + return "MLP"; + case OBJFUN_MBP: + return "MBP"; + case OBJFUN_MBC: + return "MBC"; + case OBJFUN_MLL: + return "MLL"; + case OBJFUN_MCC: + return "MCC"; + case OBJFUN_SPT: + return "SPT"; + case OBJFUN_MCT: + return "MCT"; + case OBJFUN_MPLP: + return "MPLP"; + case OBJFUN_MUP: + return "MUP"; + case OBJFUN_MRUP: + return "MRUP"; + case OBJFUN_MTD: + return "MTD"; + case OBJFUN_MBN: + return "MBN"; + case OBJFUN_MCTD: + return "MCTD"; + case OBJFUN_MSL: + return "MSL"; + case OBJFUN_MSS: + return "MSS"; + case OBJFUN_MSN: + return "MSN"; + default: + return "UNKNOWN"; + } +} diff --git a/pathd/path_debug.h b/pathd/path_debug.h new file mode 100644 index 0000000000..d9cfcb6ba2 --- /dev/null +++ b/pathd/path_debug.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_DEBUG_H_ +#define _PATH_DEBUG_H_ + +#include "pathd/pathd.h" + +#ifdef __GNUC__ +#define THREAD_DATA __thread +#else +#define THREAD_DATA +#endif + +#define DEBUG_IDENT_SIZE 4 +#define DEBUG_BUFF_SIZE 4096 +#define TUP(A, B) ((((uint32_t)(A)) << 16) | ((uint32_t)(B))) +#define PATHD_FORMAT_INIT() _debug_buff[0] = 0 +#define PATHD_FORMAT(fmt, ...) \ + csnprintfrr(_debug_buff, DEBUG_BUFF_SIZE, fmt, ##__VA_ARGS__) +#define PATHD_FORMAT_FINI() _debug_buff + +extern THREAD_DATA char _debug_buff[DEBUG_BUFF_SIZE]; + +const char *srte_protocol_origin_name(enum srte_protocol_origin origin); +const char *srte_candidate_type_name(enum srte_candidate_type type); +const char *objfun_type_name(enum objfun_type type); + +#endif // _PATH_DEBUG_H_
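For reference, a minimal usage sketch of the PATHD_FORMAT_* macros declared above, assuming only what this header exposes; the helper name below is hypothetical and not part of this change. PATHD_FORMAT_INIT() clears the thread-local buffer, each PATHD_FORMAT() call appends to it, and PATHD_FORMAT_FINI() yields the accumulated string:

static const char *objfun_debug_string(enum objfun_type type, bool required)
{
	/* Reset the thread-local debug buffer. */
	PATHD_FORMAT_INIT();
	/* Successive calls append to the same buffer. */
	PATHD_FORMAT("objective function %s", objfun_type_name(type));
	if (required)
		PATHD_FORMAT(" (required)");
	/* Return the accumulated string, e.g. for zlog_debug("%s", ...). */
	return PATHD_FORMAT_FINI();
}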
\ No newline at end of file diff --git a/pathd/path_errors.c b/pathd/path_errors.c new file mode 100644 index 0000000000..f8560a848c --- /dev/null +++ b/pathd/path_errors.c @@ -0,0 +1,134 @@ +/* + * pathd-specific error messages. + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "lib/ferr.h" +#include "path_errors.h" + +/* clang-format off */ +static struct log_ref ferr_path_err[] = { + { + .code = EC_PATH_SYSTEM_CALL, + .title = "Thread setup error", + .description = "A system call for creating or setting up the PCEP module's pthread failed", + .suggestion = "Open an Issue with all relevant log files and restart FRR" + }, + { + .code = EC_PATH_PCEP_PCC_INIT, + .title = "PCC initialization error", + .description = "pceplib PCC initialization call failed", + .suggestion = "Open an Issue with all relevant log files and restart FRR" + }, + { + .code = EC_PATH_PCEP_PCC_FINI, + .title = "PCC finalization error", + .description = "pceplib PCC finalization call failed", + .suggestion = "Open an Issue with all relevant log files and restart FRR" + }, + { + .code = EC_PATH_PCEP_PCC_CONF_UPDATE, + .title = "PCC configuration update error", + .description = "The update of the PCC configuration failed", + .suggestion = "Open an Issue with all relevant log files and restart FRR" + }, + { + .code = END_FERR, + } +}; + +static struct log_ref ferr_path_warn[] = { + { + .code = EC_PATH_PCEP_LIB_CONNECT, + .title = "PCC connection error", + .description = "The PCEP module failed to connect to the configured PCE", + .suggestion = "Check the connectivity between the PCC and the PCE" + }, + { + .code = EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + .title = "PCC connection error", + .description = "The PCEP module did not try to connect because it is missing a source address", + .suggestion = "Wait for the router ID to be defined or set the PCC source address in the configuration" + }, + { + .code = EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + .title = "Recoverable internal error", + .description = "Some recoverable internal error", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE, + .title = "Unsupported PCEP feature", + .description = "Received an unsupported PCEP message", + .suggestion = "The PCC and PCE are probably not compatible. 
Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE, + .title = "Unexpected PCEP message", + .description = "The PCEP module received an unexpected PCEP message", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT, + .title = "Unexpected pceplib event", + .description = "The PCEP module received an unexpected event from pceplib", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNEXPECTED_PCEP_OBJECT, + .title = "Unexpected PCEP object", + .description = "The PCEP module received an unexpected PCEP object from a PCE", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNEXPECTED_PCEP_TLV, + .title = "Unexpected PCEP TLV", + .description = "The PCEP module received an unexpected PCEP TLV from a PCE", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNEXPECTED_PCEP_ERO_SUBOBJ, + .title = "Unexpected PCEP ERO sub-object", + .description = "The PCEP module received an unexpected PCEP ERO sub-object from a PCE", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_UNEXPECTED_SR_NAI, + .title = "Unexpected PCEP SR segment NAI", + .description = "The PCEP module received an SR segment with an unsupported NAI specification from the PCE", + .suggestion = "Open an Issue with all relevant log files" + }, + { + .code = EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT, + .title = "Computation request timeout", + .description = "The PCE did not respond in time to the PCC computation request", + .suggestion = "The PCE is overloaded or incompatible with the PCC, try with a different PCE" + }, + { + .code = END_FERR, + } + +}; +/* clang-format on */ + +void path_error_init(void) +{ + log_ref_add(ferr_path_err); + log_ref_add(ferr_path_warn); +} diff --git a/pathd/path_errors.h b/pathd/path_errors.h new file mode 100644 index 0000000000..72e127f26b --- /dev/null +++ b/pathd/path_errors.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __PATH_ERRORS_H__ +#define __PATH_ERRORS_H__ + +#include "lib/ferr.h" + +enum path_log_refs { + EC_PATH_PCEP_INIT = PATH_FERR_START, + EC_PATH_SYSTEM_CALL, + EC_PATH_PCEP_PCC_INIT, + EC_PATH_PCEP_PCC_FINI, + EC_PATH_PCEP_PCC_CONF_UPDATE, + EC_PATH_PCEP_LIB_CONNECT, + EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE, + EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE, + EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT, + EC_PATH_PCEP_UNEXPECTED_PCEP_OBJECT, + EC_PATH_PCEP_UNEXPECTED_PCEP_TLV, + EC_PATH_PCEP_UNEXPECTED_PCEP_ERO_SUBOBJ, + EC_PATH_PCEP_UNEXPECTED_SR_NAI, + EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT +}; + +extern void path_error_init(void); + +#endif diff --git a/pathd/path_main.c b/pathd/path_main.c new file mode 100644 index 0000000000..8b7d4aba48 --- /dev/null +++ b/pathd/path_main.c @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <zebra.h> + +#include <lib/version.h> +#include "getopt.h" +#include "thread.h" +#include "command.h" +#include "log.h" +#include "memory.h" +#include "privs.h" +#include "sigevent.h" +#include "libfrr.h" +#include "vrf.h" +#include "filter.h" + +#include "pathd.h" +#include "path_nb.h" +#include "path_zebra.h" +#include "path_errors.h" + +char backup_config_file[256]; + +zebra_capabilities_t _caps_p[] = {}; + +struct zebra_privs_t pathd_privs = { +#if defined(FRR_USER) && defined(FRR_GROUP) + .user = FRR_USER, + .group = FRR_GROUP, +#endif +#if defined(VTY_GROUP) + .vty_group = VTY_GROUP, +#endif + .caps_p = _caps_p, + .cap_num_p = array_size(_caps_p), + .cap_num_i = 0}; + +struct option longopts[] = {{0}}; + +/* Master of threads. */ +struct thread_master *master; + +static struct frr_daemon_info pathd_di; + +/* SIGHUP handler. */ +static void sighup(void) +{ + zlog_info("SIGHUP received"); + + /* Reload config file. */ + vty_read_config(NULL, pathd_di.config_file, config_default); +} + +/* SIGINT / SIGTERM handler. */ +static void sigint(void) +{ + zlog_notice("Terminating on signal"); + + exit(0); +} + +/* SIGUSR1 handler. 
*/ +static void sigusr1(void) +{ + zlog_rotate(); +} + +struct quagga_signal_t path_signals[] = { + { + .signal = SIGHUP, + .handler = &sighup, + }, + { + .signal = SIGUSR1, + .handler = &sigusr1, + }, + { + .signal = SIGINT, + .handler = &sigint, + }, + { + .signal = SIGTERM, + .handler = &sigint, + }, +}; + +static const struct frr_yang_module_info *pathd_yang_modules[] = { + &frr_filter_info, + &frr_interface_info, + &frr_pathd_info, +}; + +#define PATH_VTY_PORT 2621 + +FRR_DAEMON_INFO(pathd, PATH, .vty_port = PATH_VTY_PORT, + + .proghelp = "Implementation of PATH.", + + .signals = path_signals, .n_signals = array_size(path_signals), + + .privs = &pathd_privs, .yang_modules = pathd_yang_modules, + .n_yang_modules = array_size(pathd_yang_modules), ) + +int main(int argc, char **argv, char **envp) +{ + frr_preinit(&pathd_di, argc, argv); + frr_opt_add("", longopts, ""); + + while (1) { + int opt; + + opt = frr_getopt(argc, argv, NULL); + + if (opt == EOF) + break; + + switch (opt) { + case 0: + break; + default: + frr_help_exit(1); + break; + } + } + + master = frr_init(); + + access_list_init(); + + path_error_init(); + path_zebra_init(master); + path_cli_init(); + + frr_config_fork(); + frr_run(master); + + /* Not reached. */ + return 0; +} diff --git a/pathd/path_memory.c b/pathd/path_memory.c new file mode 100644 index 0000000000..ad4904a298 --- /dev/null +++ b/pathd/path_memory.c @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include <memory.h> + +#include "pathd/path_memory.h" + +DEFINE_MGROUP(PATHD, "pathd") diff --git a/pathd/path_memory.h b/pathd/path_memory.h new file mode 100644 index 0000000000..e2f6787f66 --- /dev/null +++ b/pathd/path_memory.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _FRR_PATH_MEMORY_H_ +#define _FRR_PATH_MEMORY_H_ + +#include "memory.h" + +DECLARE_MGROUP(PATHD) + +#endif /* _FRR_PATH_MEMORY_H_ */ diff --git a/pathd/path_nb.c b/pathd/path_nb.c new file mode 100644 index 0000000000..a210e31b9c --- /dev/null +++ b/pathd/path_nb.c @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "northbound.h" +#include "libfrr.h" + +#include "pathd/path_nb.h" + +static int iter_objfun_cb(const struct lyd_node *dnode, void *arg); +static int dummy_create(struct nb_cb_create_args *args); +static int dummy_modify(struct nb_cb_modify_args *args); +static int dummy_destroy(struct nb_cb_destroy_args *args); + +struct of_cb_pref { + uint32_t index; + enum objfun_type type; + struct of_cb_pref *next; +}; + +struct of_cb_args { + struct of_cb_pref *first; + uint32_t free_slot; + struct of_cb_pref prefs[MAX_OBJFUN_TYPE]; +}; + +/* clang-format off */ +const struct frr_yang_module_info frr_pathd_info = { + .name = "frr-pathd", + .nodes = { + { + .xpath = "/frr-pathd:pathd", + .cbs = { + .apply_finish = pathd_apply_finish, + }, + .priority = NB_DFLT_PRIORITY + 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list", + .cbs = { + .create = pathd_srte_segment_list_create, + .cli_show = cli_show_srte_segment_list, + .destroy = pathd_srte_segment_list_destroy, + .get_next = pathd_srte_segment_list_get_next, + .get_keys = pathd_srte_segment_list_get_keys, + .lookup_entry = pathd_srte_segment_list_lookup_entry, + }, + .priority = NB_DFLT_PRIORITY - 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/protocol-origin", + .cbs = { + .modify = pathd_srte_segment_list_protocol_origin_modify, + }, + .priority = NB_DFLT_PRIORITY - 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/originator", + .cbs = { + .modify = pathd_srte_segment_list_originator_modify, + }, + .priority = NB_DFLT_PRIORITY - 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment", + .cbs = { + .create = pathd_srte_segment_list_segment_create, + .cli_show = cli_show_srte_segment_list_segment, + .destroy = pathd_srte_segment_list_segment_destroy, + }, + .priority = NB_DFLT_PRIORITY - 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/sid-value", + .cbs = { + .modify = pathd_srte_segment_list_segment_sid_value_modify, + }, + .priority = NB_DFLT_PRIORITY - 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai", + .cbs = { + .create = dummy_create, + .destroy = pathd_srte_segment_list_segment_nai_destroy, + .apply_finish = 
pathd_srte_segment_list_segment_nai_apply_finish + }, + .priority = NB_DFLT_PRIORITY - 1 + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai/type", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai/local-address", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai/local-interface", + .cbs = {.modify = dummy_modify, .destroy = dummy_destroy} + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai/remote-address", + .cbs = {.modify = dummy_modify, .destroy = dummy_destroy} + }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai/remote-interface", + .cbs = {.modify = dummy_modify, .destroy = dummy_destroy} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy", + .cbs = { + .create = pathd_srte_policy_create, + .cli_show = cli_show_srte_policy, + .destroy = pathd_srte_policy_destroy, + .get_next = pathd_srte_policy_get_next, + .get_keys = pathd_srte_policy_get_keys, + .lookup_entry = pathd_srte_policy_lookup_entry, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/name", + .cbs = { + .modify = pathd_srte_policy_name_modify, + .cli_show = cli_show_srte_policy_name, + .destroy = pathd_srte_policy_name_destroy, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/binding-sid", + .cbs = { + .modify = pathd_srte_policy_binding_sid_modify, + .cli_show = cli_show_srte_policy_binding_sid, + .destroy = pathd_srte_policy_binding_sid_destroy, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/is-operational", + .cbs = { + .get_elem = pathd_srte_policy_is_operational_get_elem + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path", + .cbs = { + .create = pathd_srte_policy_candidate_path_create, + .cli_show = cli_show_srte_policy_candidate_path, + .destroy = pathd_srte_policy_candidate_path_destroy, + .get_next = pathd_srte_policy_candidate_path_get_next, + .get_keys = pathd_srte_policy_candidate_path_get_keys, + .lookup_entry = pathd_srte_policy_candidate_path_lookup_entry, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/name", + .cbs = { + .modify = pathd_srte_policy_candidate_path_name_modify, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/is-best-candidate-path", + .cbs = { + .get_elem = pathd_srte_policy_candidate_path_is_best_candidate_path_get_elem, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/protocol-origin", + .cbs = { + .modify = pathd_srte_policy_candidate_path_protocol_origin_modify, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/originator", + .cbs = { + .modify = pathd_srte_policy_candidate_path_originator_modify, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/discriminator", + .cbs = { + .get_elem = pathd_srte_policy_candidate_path_discriminator_get_elem, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/type", + .cbs = { + .modify = pathd_srte_policy_candidate_path_type_modify, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/segment-list-name", + .cbs = { + .destroy = pathd_srte_policy_candidate_path_segment_list_name_destroy, + .modify = pathd_srte_policy_candidate_path_segment_list_name_modify, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/bandwidth", + .cbs = { + .create = dummy_create, + .destroy = pathd_srte_policy_candidate_path_bandwidth_destroy, + .apply_finish = pathd_srte_policy_candidate_path_bandwidth_apply_finish + } + }, + { + .xpath 
= "/frr-pathd:pathd/srte/policy/candidate-path/constraints/bandwidth/required", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/bandwidth/value", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/affinity/exclude-any", + .cbs = { + .modify = pathd_srte_policy_candidate_path_exclude_any_modify, + .destroy = pathd_srte_policy_candidate_path_exclude_any_destroy, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/affinity/include-any", + .cbs = { + .modify = pathd_srte_policy_candidate_path_include_any_modify, + .destroy = pathd_srte_policy_candidate_path_include_any_destroy, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/affinity/include-all", + .cbs = { + .modify = pathd_srte_policy_candidate_path_include_all_modify, + .destroy = pathd_srte_policy_candidate_path_include_all_destroy, + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/metrics", + .cbs = { + .create = dummy_create, + .destroy = pathd_srte_policy_candidate_path_metrics_destroy, + .apply_finish = pathd_srte_policy_candidate_path_metrics_apply_finish + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/metrics/value", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/metrics/required", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/metrics/is-bound", + .cbs = {.modify = dummy_modify, .destroy = dummy_destroy} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/metrics/is-computed", + .cbs = {.modify = dummy_modify, .destroy = dummy_destroy} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/objective-function", + .cbs = { + .create = dummy_create, + .destroy = pathd_srte_policy_candidate_path_objfun_destroy, + .apply_finish = pathd_srte_policy_candidate_path_objfun_apply_finish + } + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/objective-function/required", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/objective-function/type", + .cbs = {.modify = dummy_modify} + }, + { + .xpath = NULL, + }, + } +}; + +void iter_objfun_prefs(const struct lyd_node *dnode, const char* path, + of_pref_cp_t fun, void *arg) +{ + struct of_cb_args args = {0}; + struct of_cb_pref *p; + + yang_dnode_iterate(iter_objfun_cb, &args, dnode, path); + for (p = args.first; p != NULL; p = p->next) + fun(p->type, arg); +} + +int iter_objfun_cb(const struct lyd_node *dnode, void *arg) +{ + struct of_cb_args *of_arg = arg; + struct of_cb_pref *pref; + struct of_cb_pref **p; + + if (of_arg->free_slot >= MAX_OBJFUN_TYPE) + return YANG_ITER_STOP; + + pref = &of_arg->prefs[of_arg->free_slot++]; + + pref->index = yang_dnode_get_uint32(dnode, "./index"); + pref->type = yang_dnode_get_enum(dnode, "./type"); + + /* Simplistic insertion sort */ + p = &of_arg->first; + while (true) { + if (*p == NULL) { + *p = pref; + break; + } + if ((*p)->index >= pref->index) { + pref->next = *p; + *p = pref; + break; + } + p = &(*p)->next; + } + + return YANG_ITER_CONTINUE; +} + +int dummy_create(struct nb_cb_create_args *args) +{ + return NB_OK; +} + +int dummy_modify(struct nb_cb_modify_args *args) +{ + return NB_OK; +} + +int dummy_destroy(struct nb_cb_destroy_args *args) +{ + 
return NB_OK; +} diff --git a/pathd/path_nb.h b/pathd/path_nb.h new file mode 100644 index 0000000000..3a0b3863ce --- /dev/null +++ b/pathd/path_nb.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _FRR_PATH_NB_H_ +#define _FRR_PATH_NB_H_ + +#include "pathd/pathd.h" + +extern const struct frr_yang_module_info frr_pathd_info; + +/* Mandatory callbacks. */ +int pathd_srte_segment_list_create(struct nb_cb_create_args *args); +int pathd_srte_segment_list_destroy(struct nb_cb_destroy_args *args); + +const void *pathd_srte_segment_list_get_next(struct nb_cb_get_next_args *args); +int pathd_srte_segment_list_get_keys(struct nb_cb_get_keys_args *args); +const void * +pathd_srte_segment_list_lookup_entry(struct nb_cb_lookup_entry_args *args); + +int pathd_srte_segment_list_segment_create(struct nb_cb_create_args *args); +int pathd_srte_segment_list_segment_destroy(struct nb_cb_destroy_args *args); +int pathd_srte_segment_list_protocol_origin_modify( + struct nb_cb_modify_args *args); +int pathd_srte_segment_list_originator_modify(struct nb_cb_modify_args *args); +int pathd_srte_segment_list_segment_sid_value_modify( + struct nb_cb_modify_args *args); +int pathd_srte_segment_list_segment_nai_destroy( + struct nb_cb_destroy_args *args); +void pathd_srte_segment_list_segment_nai_apply_finish( + struct nb_cb_apply_finish_args *args); +int pathd_srte_policy_create(struct nb_cb_create_args *args); +int pathd_srte_policy_destroy(struct nb_cb_destroy_args *args); +const void *pathd_srte_policy_get_next(struct nb_cb_get_next_args *args); +int pathd_srte_policy_get_keys(struct nb_cb_get_keys_args *args); +const void * +pathd_srte_policy_lookup_entry(struct nb_cb_lookup_entry_args *args); +int pathd_srte_policy_name_modify(struct nb_cb_modify_args *args); +int pathd_srte_policy_name_destroy(struct nb_cb_destroy_args *args); +int pathd_srte_policy_binding_sid_modify(struct nb_cb_modify_args *args); +int pathd_srte_policy_binding_sid_destroy(struct nb_cb_destroy_args *args); +struct yang_data * +pathd_srte_policy_is_operational_get_elem(struct nb_cb_get_elem_args *args); +int pathd_srte_policy_candidate_path_create(struct nb_cb_create_args *args); +int pathd_srte_policy_candidate_path_destroy(struct nb_cb_destroy_args *args); +int pathd_srte_policy_candidate_path_name_modify( + struct nb_cb_modify_args *args); +const void * +pathd_srte_policy_candidate_path_get_next(struct nb_cb_get_next_args *args); +int pathd_srte_policy_candidate_path_get_keys(struct nb_cb_get_keys_args *args); +const void *pathd_srte_policy_candidate_path_lookup_entry( + struct nb_cb_lookup_entry_args *args); +void pathd_srte_policy_candidate_path_bandwidth_apply_finish( + struct nb_cb_apply_finish_args *args); +int pathd_srte_policy_candidate_path_bandwidth_destroy( + struct 
nb_cb_destroy_args *args); +int pathd_srte_policy_candidate_path_exclude_any_modify( + struct nb_cb_modify_args *args); +int pathd_srte_policy_candidate_path_exclude_any_destroy( + struct nb_cb_destroy_args *args); +int pathd_srte_policy_candidate_path_include_any_modify( + struct nb_cb_modify_args *args); +int pathd_srte_policy_candidate_path_include_any_destroy( + struct nb_cb_destroy_args *args); +int pathd_srte_policy_candidate_path_include_all_modify( + struct nb_cb_modify_args *args); +int pathd_srte_policy_candidate_path_include_all_destroy( + struct nb_cb_destroy_args *args); +int pathd_srte_policy_candidate_path_metrics_destroy( + struct nb_cb_destroy_args *args); +void pathd_srte_policy_candidate_path_metrics_apply_finish( + struct nb_cb_apply_finish_args *args); +int pathd_srte_policy_candidate_path_objfun_destroy( + struct nb_cb_destroy_args *args); +void pathd_srte_policy_candidate_path_objfun_apply_finish( + struct nb_cb_apply_finish_args *args); +struct yang_data * +pathd_srte_policy_candidate_path_is_best_candidate_path_get_elem( + struct nb_cb_get_elem_args *args); +int pathd_srte_policy_candidate_path_protocol_origin_modify( + struct nb_cb_modify_args *args); +int pathd_srte_policy_candidate_path_originator_modify( + struct nb_cb_modify_args *args); +struct yang_data *pathd_srte_policy_candidate_path_discriminator_get_elem( + struct nb_cb_get_elem_args *args); +int pathd_srte_policy_candidate_path_type_modify( + struct nb_cb_modify_args *args); +int pathd_srte_policy_candidate_path_segment_list_name_modify( + struct nb_cb_modify_args *args); +int pathd_srte_policy_candidate_path_segment_list_name_destroy( + struct nb_cb_destroy_args *args); + +/* Optional 'apply_finish' callbacks. */ +void pathd_apply_finish(struct nb_cb_apply_finish_args *args); + +/* Optional 'cli_show' callbacks. */ +void cli_show_srte_segment_list(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +void cli_show_srte_segment_list_segment(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +void cli_show_srte_policy(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +void cli_show_srte_policy_name(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +void cli_show_srte_policy_binding_sid(struct vty *vty, struct lyd_node *dnode, + bool show_defaults); +void cli_show_srte_policy_candidate_path(struct vty *vty, + struct lyd_node *dnode, + bool show_defaults); + +/* Utility functions */ +typedef void (*of_pref_cp_t)(enum objfun_type type, void *arg); +void iter_objfun_prefs(const struct lyd_node *dnode, const char *path, + of_pref_cp_t fun, void *arg); + +#endif /* _FRR_PATH_NB_H_ */ diff --git a/pathd/path_nb_config.c b/pathd/path_nb_config.c new file mode 100644 index 0000000000..669db169ae --- /dev/null +++ b/pathd/path_nb_config.c @@ -0,0 +1,731 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include <lib_errors.h> + +#include "northbound.h" +#include "libfrr.h" + +#include "pathd/path_zebra.h" +#include "pathd/path_nb.h" + +/* + * XPath: /frr-pathd:pathd + */ +void pathd_apply_finish(struct nb_cb_apply_finish_args *args) +{ + srte_apply_changes(); +} + +/* + * XPath: /frr-pathd:pathd/srte/segment-list + */ +int pathd_srte_segment_list_create(struct nb_cb_create_args *args) +{ + struct srte_segment_list *segment_list; + const char *name; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + name = yang_dnode_get_string(args->dnode, "./name"); + segment_list = srte_segment_list_add(name); + nb_running_set_entry(args->dnode, segment_list); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_NEW); + + return NB_OK; +} + +int pathd_srte_segment_list_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_segment_list *segment_list; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment_list = nb_running_unset_entry(args->dnode); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_DELETED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/segment-list/protocol-origin + */ +int pathd_srte_segment_list_protocol_origin_modify( + struct nb_cb_modify_args *args) +{ + struct srte_segment_list *segment_list; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment_list = nb_running_get_entry(args->dnode, NULL, true); + segment_list->protocol_origin = yang_dnode_get_enum(args->dnode, NULL); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/segment-list/originator + */ +int pathd_srte_segment_list_originator_modify(struct nb_cb_modify_args *args) +{ + struct srte_segment_list *segment_list; + const char *originator; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment_list = nb_running_get_entry(args->dnode, NULL, true); + originator = yang_dnode_get_string(args->dnode, NULL); + strlcpy(segment_list->originator, originator, + sizeof(segment_list->originator)); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + return NB_OK; +} + + +/* + * XPath: /frr-pathd:pathd/srte/segment-list/segment + */ +int pathd_srte_segment_list_segment_create(struct nb_cb_create_args *args) +{ + struct srte_segment_list *segment_list; + struct srte_segment_entry *segment; + uint32_t index; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment_list = nb_running_get_entry(args->dnode, NULL, true); + index = yang_dnode_get_uint32(args->dnode, "./index"); + segment = srte_segment_entry_add(segment_list, index); + nb_running_set_entry(args->dnode, segment); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + return NB_OK; +} + +int pathd_srte_segment_list_segment_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_segment_entry *segment; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment = nb_running_unset_entry(args->dnode); + SET_FLAG(segment->segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + srte_segment_entry_del(segment); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/segment-list/segment/sid-value + */ +int pathd_srte_segment_list_segment_sid_value_modify( + struct nb_cb_modify_args *args) +{ + mpls_label_t sid_value; + struct srte_segment_entry *segment; + + if (args->event != NB_EV_APPLY) + 
return NB_OK; + + segment = nb_running_get_entry(args->dnode, NULL, true); + sid_value = yang_dnode_get_uint32(args->dnode, NULL); + segment->sid_value = sid_value; + SET_FLAG(segment->segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + return NB_OK; +} + +int pathd_srte_segment_list_segment_nai_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_segment_entry *segment; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment = nb_running_get_entry(args->dnode, NULL, true); + segment->nai_type = SRTE_SEGMENT_NAI_TYPE_NONE; + segment->nai_local_addr.ipa_type = IPADDR_NONE; + segment->nai_local_iface = 0; + segment->nai_remote_addr.ipa_type = IPADDR_NONE; + segment->nai_remote_iface = 0; + + return NB_OK; +} + +void pathd_srte_segment_list_segment_nai_apply_finish( + struct nb_cb_apply_finish_args *args) +{ + struct srte_segment_entry *segment; + enum srte_segment_nai_type type; + struct ipaddr local_addr, remote_addr; + uint32_t local_iface = 0, remote_iface = 0; + + segment = nb_running_get_entry(args->dnode, NULL, true); + type = yang_dnode_get_enum(args->dnode, "./type"); + + yang_dnode_get_ip(&local_addr, args->dnode, "./local-address"); + + switch (type) { + case SRTE_SEGMENT_NAI_TYPE_IPV4_NODE: + case SRTE_SEGMENT_NAI_TYPE_IPV6_NODE: + break; + case SRTE_SEGMENT_NAI_TYPE_IPV4_ADJACENCY: + case SRTE_SEGMENT_NAI_TYPE_IPV6_ADJACENCY: + yang_dnode_get_ip(&remote_addr, args->dnode, + "./remote-address"); + break; + case SRTE_SEGMENT_NAI_TYPE_IPV4_UNNUMBERED_ADJACENCY: + yang_dnode_get_ip(&remote_addr, args->dnode, + "./remote-address"); + local_iface = + yang_dnode_get_uint32(args->dnode, "./local-interface"); + remote_iface = yang_dnode_get_uint32(args->dnode, + "./remote-interface"); + break; + default: + break; + } + + srte_segment_entry_set_nai(segment, type, &local_addr, local_iface, + &remote_addr, remote_iface); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy + */ +int pathd_srte_policy_create(struct nb_cb_create_args *args) +{ + struct srte_policy *policy; + uint32_t color; + struct ipaddr endpoint; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + color = yang_dnode_get_uint32(args->dnode, "./color"); + yang_dnode_get_ip(&endpoint, args->dnode, "./endpoint"); + policy = srte_policy_add(color, &endpoint); + + nb_running_set_entry(args->dnode, policy); + SET_FLAG(policy->flags, F_POLICY_NEW); + + return NB_OK; +} + +int pathd_srte_policy_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_policy *policy; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + policy = nb_running_unset_entry(args->dnode); + SET_FLAG(policy->flags, F_POLICY_DELETED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/name + */ +int pathd_srte_policy_name_modify(struct nb_cb_modify_args *args) +{ + struct srte_policy *policy; + const char *name; + + if (args->event != NB_EV_APPLY && args->event != NB_EV_VALIDATE) + return NB_OK; + + policy = nb_running_get_entry(args->dnode, NULL, true); + + if (args->event == NB_EV_VALIDATE) { + /* the policy name is fixed after setting it once */ + if (strlen(policy->name) > 0) { + flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE, + "The SR Policy name is fixed!"); + return NB_ERR_RESOURCE; + } else + return NB_OK; + } + + name = yang_dnode_get_string(args->dnode, NULL); + strlcpy(policy->name, name, sizeof(policy->name)); + SET_FLAG(policy->flags, F_POLICY_MODIFIED); + + return NB_OK; +} + +int pathd_srte_policy_name_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_policy *policy; + + if (args->event != NB_EV_APPLY) + return 
NB_OK; + + policy = nb_running_get_entry(args->dnode, NULL, true); + policy->name[0] = '\0'; + SET_FLAG(policy->flags, F_POLICY_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/binding-sid + */ +int pathd_srte_policy_binding_sid_modify(struct nb_cb_modify_args *args) +{ + struct srte_policy *policy; + mpls_label_t binding_sid; + + policy = nb_running_get_entry(args->dnode, NULL, true); + binding_sid = yang_dnode_get_uint32(args->dnode, NULL); + + switch (args->event) { + case NB_EV_VALIDATE: + break; + case NB_EV_PREPARE: + if (path_zebra_request_label(binding_sid) < 0) + return NB_ERR_RESOURCE; + break; + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + srte_policy_update_binding_sid(policy, binding_sid); + SET_FLAG(policy->flags, F_POLICY_MODIFIED); + break; + } + + return NB_OK; +} + +int pathd_srte_policy_binding_sid_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_policy *policy; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + policy = nb_running_get_entry(args->dnode, NULL, true); + srte_policy_update_binding_sid(policy, MPLS_LABEL_NONE); + SET_FLAG(policy->flags, F_POLICY_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path + */ +int pathd_srte_policy_candidate_path_create(struct nb_cb_create_args *args) +{ + struct srte_policy *policy; + struct srte_candidate *candidate; + uint32_t preference; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + policy = nb_running_get_entry(args->dnode, NULL, true); + preference = yang_dnode_get_uint32(args->dnode, "./preference"); + candidate = srte_candidate_add(policy, preference); + nb_running_set_entry(args->dnode, candidate); + SET_FLAG(candidate->flags, F_CANDIDATE_NEW); + + return NB_OK; +} + +int pathd_srte_policy_candidate_path_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_candidate *candidate; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + candidate = nb_running_unset_entry(args->dnode); + SET_FLAG(candidate->flags, F_CANDIDATE_DELETED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/name + */ +int pathd_srte_policy_candidate_path_name_modify(struct nb_cb_modify_args *args) +{ + struct srte_candidate *candidate; + const char *name; + char xpath[XPATH_MAXLEN]; + char xpath_buf[XPATH_MAXLEN - 3]; + + if (args->event != NB_EV_APPLY && args->event != NB_EV_VALIDATE) + return NB_OK; + + /* the candidate name is fixed after setting it once, this is checked + * here */ + if (args->event == NB_EV_VALIDATE) { + /* first get the precise path to the candidate path */ + yang_dnode_get_path(args->dnode, xpath_buf, sizeof(xpath_buf)); + snprintf(xpath, sizeof(xpath), "%s%s", xpath_buf, "/.."); + + candidate = nb_running_get_entry_non_rec(NULL, xpath, false); + + /* then check if it exists and if the name was provided */ + if (candidate && strlen(candidate->name) > 0) { + flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE, + "The candidate name is fixed!"); + return NB_ERR_RESOURCE; + } else + return NB_OK; + } + + candidate = nb_running_get_entry(args->dnode, NULL, true); + + name = yang_dnode_get_string(args->dnode, NULL); + strlcpy(candidate->name, name, sizeof(candidate->name)); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + return NB_OK; +} + + +static int affinity_filter_modify(struct nb_cb_modify_args *args, + enum affinity_filter_type type) +{ + uint32_t filter; + struct srte_candidate *candidate; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + assert(args->context != NULL); + candidate = 
nb_running_get_entry(args->dnode, NULL, true); + filter = yang_dnode_get_uint32(args->dnode, NULL); + srte_candidate_set_affinity_filter(candidate, type, filter); + + return NB_OK; +} + +static int affinity_filter_destroy(struct nb_cb_destroy_args *args, + enum affinity_filter_type type) +{ + struct srte_candidate *candidate; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + assert(args->context != NULL); + candidate = nb_running_get_entry(args->dnode, NULL, true); + srte_candidate_unset_affinity_filter(candidate, type); + + return NB_OK; +} + +/* + * XPath: + * /frr-pathd:pathd/srte/policy/candidate-path/constraints/affinity/exclude-any + */ + +int pathd_srte_policy_candidate_path_exclude_any_modify( + struct nb_cb_modify_args *args) +{ + return affinity_filter_modify(args, AFFINITY_FILTER_EXCLUDE_ANY); +} + +int pathd_srte_policy_candidate_path_exclude_any_destroy( + struct nb_cb_destroy_args *args) +{ + return affinity_filter_destroy(args, AFFINITY_FILTER_EXCLUDE_ANY); +} + + +/* + * XPath: + * /frr-pathd:pathd/srte/policy/candidate-path/constraints/affinity/include-any + */ +int pathd_srte_policy_candidate_path_include_any_modify( + struct nb_cb_modify_args *args) +{ + return affinity_filter_modify(args, AFFINITY_FILTER_INCLUDE_ANY); +} + +int pathd_srte_policy_candidate_path_include_any_destroy( + struct nb_cb_destroy_args *args) +{ + return affinity_filter_destroy(args, AFFINITY_FILTER_INCLUDE_ANY); +} + + +/* + * XPath: + * /frr-pathd:pathd/srte/policy/candidate-path/constraints/affinity/include-all + */ +int pathd_srte_policy_candidate_path_include_all_modify( + struct nb_cb_modify_args *args) +{ + return affinity_filter_modify(args, AFFINITY_FILTER_INCLUDE_ALL); +} + +int pathd_srte_policy_candidate_path_include_all_destroy( + struct nb_cb_destroy_args *args) +{ + return affinity_filter_destroy(args, AFFINITY_FILTER_INCLUDE_ALL); +} + + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/constraints/metrics + */ +int pathd_srte_policy_candidate_path_metrics_destroy( + struct nb_cb_destroy_args *args) +{ + struct srte_candidate *candidate; + enum srte_candidate_metric_type type; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + assert(args->context != NULL); + candidate = nb_running_get_entry(args->dnode, NULL, true); + + type = yang_dnode_get_enum(args->dnode, "./type"); + srte_candidate_unset_metric(candidate, type); + + return NB_OK; +} + +void pathd_srte_policy_candidate_path_metrics_apply_finish( + struct nb_cb_apply_finish_args *args) +{ + struct srte_candidate *candidate; + enum srte_candidate_metric_type type; + float value; + bool required, is_bound = false, is_computed = false; + + assert(args->context != NULL); + + candidate = nb_running_get_entry(args->dnode, NULL, true); + + type = yang_dnode_get_enum(args->dnode, "./type"); + value = (float)yang_dnode_get_dec64(args->dnode, "./value"); + required = yang_dnode_get_bool(args->dnode, "./required"); + if (yang_dnode_exists(args->dnode, "./is-bound")) + is_bound = yang_dnode_get_bool(args->dnode, "./is-bound"); + if (yang_dnode_exists(args->dnode, "./is-computed")) + is_computed = yang_dnode_get_bool(args->dnode, "./is-computed"); + + srte_candidate_set_metric(candidate, type, value, required, is_bound, + is_computed); +} + +/* + * XPath: + * /frr-pathd:pathd/srte/policy/candidate-path/constraints/objective-function + */ +int pathd_srte_policy_candidate_path_objfun_destroy( + struct nb_cb_destroy_args *args) +{ + struct srte_candidate *candidate; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + 
assert(args->context != NULL); + + candidate = nb_running_get_entry(args->dnode, NULL, true); + srte_candidate_unset_objfun(candidate); + + return NB_OK; +} + +void pathd_srte_policy_candidate_path_objfun_apply_finish( + struct nb_cb_apply_finish_args *args) +{ + struct srte_candidate *candidate; + enum objfun_type type; + bool required; + + candidate = nb_running_get_entry(args->dnode, NULL, true); + required = yang_dnode_get_bool(args->dnode, "./required"); + type = yang_dnode_get_enum(args->dnode, "./type"); + srte_candidate_set_objfun(candidate, required, type); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/protocol-origin + */ +int pathd_srte_policy_candidate_path_protocol_origin_modify( + struct nb_cb_modify_args *args) +{ + struct srte_candidate *candidate; + enum srte_protocol_origin protocol_origin; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + candidate = nb_running_get_entry(args->dnode, NULL, true); + protocol_origin = yang_dnode_get_enum(args->dnode, NULL); + candidate->protocol_origin = protocol_origin; + candidate->lsp->protocol_origin = protocol_origin; + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/originator + */ +int pathd_srte_policy_candidate_path_originator_modify( + struct nb_cb_modify_args *args) +{ + struct srte_candidate *candidate; + const char *originator; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + candidate = nb_running_get_entry(args->dnode, NULL, true); + originator = yang_dnode_get_string(args->dnode, NULL); + strlcpy(candidate->originator, originator, + sizeof(candidate->originator)); + strlcpy(candidate->lsp->originator, originator, + sizeof(candidate->lsp->originator)); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/type + */ +int pathd_srte_policy_candidate_path_type_modify(struct nb_cb_modify_args *args) +{ + struct srte_candidate *candidate; + enum srte_candidate_type type; + char xpath[XPATH_MAXLEN]; + char xpath_buf[XPATH_MAXLEN - 3]; + + if (args->event != NB_EV_APPLY && args->event != NB_EV_VALIDATE) + return NB_OK; + + /* the candidate type is fixed after setting it once, this is checked + * here */ + if (args->event == NB_EV_VALIDATE) { + /* first get the precise path to the candidate path */ + yang_dnode_get_path(args->dnode, xpath_buf, sizeof(xpath_buf)); + snprintf(xpath, sizeof(xpath), "%s%s", xpath_buf, "/.."); + + candidate = nb_running_get_entry_non_rec(NULL, xpath, false); + + /* then check if it exists and if the type was provided */ + if (candidate + && candidate->type != SRTE_CANDIDATE_TYPE_UNDEFINED) { + flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE, + "The candidate type is fixed!"); + return NB_ERR_RESOURCE; + } else + return NB_OK; + } + + candidate = nb_running_get_entry(args->dnode, NULL, true); + + type = yang_dnode_get_enum(args->dnode, NULL); + candidate->type = type; + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/segment-list-name + */ +int pathd_srte_policy_candidate_path_segment_list_name_modify( + struct nb_cb_modify_args *args) +{ + struct srte_candidate *candidate; + const char *segment_list_name; + + candidate = nb_running_get_entry(args->dnode, NULL, true); + segment_list_name = yang_dnode_get_string(args->dnode, NULL); + + if (args->event != NB_EV_APPLY) + return NB_OK; + + candidate->segment_list = 
srte_segment_list_find(segment_list_name); + candidate->lsp->segment_list = candidate->segment_list; + assert(candidate->segment_list); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + return NB_OK; +} + +int pathd_srte_policy_candidate_path_segment_list_name_destroy( + struct nb_cb_destroy_args *args) +{ + struct srte_candidate *candidate; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + candidate = nb_running_get_entry(args->dnode, NULL, true); + candidate->segment_list = NULL; + candidate->lsp->segment_list = NULL; + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + return NB_OK; +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/constraints/bandwidth + */ +void pathd_srte_policy_candidate_path_bandwidth_apply_finish( + struct nb_cb_apply_finish_args *args) +{ + struct srte_candidate *candidate; + float value; + bool required; + + assert(args->context != NULL); + + candidate = nb_running_get_entry(args->dnode, NULL, true); + value = (float)yang_dnode_get_dec64(args->dnode, "./value"); + required = yang_dnode_get_bool(args->dnode, "./required"); + srte_candidate_set_bandwidth(candidate, value, required); +} + +int pathd_srte_policy_candidate_path_bandwidth_destroy( + struct nb_cb_destroy_args *args) +{ + struct srte_candidate *candidate; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + assert(args->context != NULL); + candidate = nb_running_get_entry(args->dnode, NULL, true); + srte_candidate_unset_bandwidth(candidate); + return NB_OK; +} diff --git a/pathd/path_nb_state.c b/pathd/path_nb_state.c new file mode 100644 index 0000000000..60f04f45c1 --- /dev/null +++ b/pathd/path_nb_state.c @@ -0,0 +1,189 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "log.h" +#include "prefix.h" +#include "table.h" +#include "command.h" +#include "northbound.h" +#include "libfrr.h" + +#include "pathd/pathd.h" +#include "pathd/path_nb.h" + +/* + * XPath: /frr-pathd:pathd/srte/segment-list + */ +const void *pathd_srte_segment_list_get_next(struct nb_cb_get_next_args *args) +{ + struct srte_segment_list *segment_list = + (struct srte_segment_list *)args->list_entry; + + if (args->list_entry == NULL) + segment_list = + RB_MIN(srte_segment_list_head, &srte_segment_lists); + else + segment_list = RB_NEXT(srte_segment_list_head, segment_list); + + return segment_list; +} + +int pathd_srte_segment_list_get_keys(struct nb_cb_get_keys_args *args) +{ + const struct srte_segment_list *segment_list = + (struct srte_segment_list *)args->list_entry; + + args->keys->num = 1; + snprintf(args->keys->key[0], sizeof(args->keys->key[0]), "%s", + segment_list->name); + + return NB_OK; +} + +const void * +pathd_srte_segment_list_lookup_entry(struct nb_cb_lookup_entry_args *args) +{ + return srte_segment_list_find(args->keys->key[0]); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy + */ +const void *pathd_srte_policy_get_next(struct nb_cb_get_next_args *args) +{ + struct srte_policy *policy = (struct srte_policy *)args->list_entry; + + if (args->list_entry == NULL) + policy = RB_MIN(srte_policy_head, &srte_policies); + else + policy = RB_NEXT(srte_policy_head, policy); + + return policy; +} + +int pathd_srte_policy_get_keys(struct nb_cb_get_keys_args *args) +{ + const struct srte_policy *policy = + (struct srte_policy *)args->list_entry; + + args->keys->num = 2; + snprintf(args->keys->key[0], sizeof(args->keys->key[0]), "%u", + policy->color); + ipaddr2str(&policy->endpoint, args->keys->key[1], + sizeof(args->keys->key[1])); + + return NB_OK; +} + +const void *pathd_srte_policy_lookup_entry(struct nb_cb_lookup_entry_args *args) +{ + uint32_t color; + struct ipaddr endpoint; + + color = yang_str2uint32(args->keys->key[0]); + yang_str2ip(args->keys->key[1], &endpoint); + + return srte_policy_find(color, &endpoint); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/is-operational + */ +struct yang_data * +pathd_srte_policy_is_operational_get_elem(struct nb_cb_get_elem_args *args) +{ + struct srte_policy *policy = (struct srte_policy *)args->list_entry; + bool is_operational = false; + + if (policy->status == SRTE_POLICY_STATUS_UP) + is_operational = true; + + return yang_data_new_bool(args->xpath, is_operational); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path + */ +const void * +pathd_srte_policy_candidate_path_get_next(struct nb_cb_get_next_args *args) +{ + struct srte_policy *policy = + (struct srte_policy *)args->parent_list_entry; + struct srte_candidate *candidate = + (struct srte_candidate *)args->list_entry; + + if (args->list_entry == NULL) + candidate = + RB_MIN(srte_candidate_head, &policy->candidate_paths); + else + candidate = RB_NEXT(srte_candidate_head, candidate); + + return candidate; +} + +int pathd_srte_policy_candidate_path_get_keys(struct nb_cb_get_keys_args *args) +{ + const struct srte_candidate *candidate = + (struct srte_candidate *)args->list_entry; + + args->keys->num = 1; + snprintf(args->keys->key[0], sizeof(args->keys->key[0]), "%u", + candidate->preference); + 
+ return NB_OK; +} + +const void *pathd_srte_policy_candidate_path_lookup_entry( + struct nb_cb_lookup_entry_args *args) +{ + struct srte_policy *policy = + (struct srte_policy *)args->parent_list_entry; + uint32_t preference; + + preference = yang_str2uint32(args->keys->key[0]); + + return srte_candidate_find(policy, preference); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate_path/is-best-candidate-path + */ +struct yang_data * +pathd_srte_policy_candidate_path_is_best_candidate_path_get_elem( + struct nb_cb_get_elem_args *args) +{ + struct srte_candidate *candidate = + (struct srte_candidate *)args->list_entry; + + return yang_data_new_bool( + args->xpath, CHECK_FLAG(candidate->flags, F_CANDIDATE_BEST)); +} + +/* + * XPath: /frr-pathd:pathd/srte/policy/candidate-path/discriminator + */ +struct yang_data *pathd_srte_policy_candidate_path_discriminator_get_elem( + struct nb_cb_get_elem_args *args) +{ + struct srte_candidate *candidate = + (struct srte_candidate *)args->list_entry; + + return yang_data_new_uint32(args->xpath, candidate->discriminator); +} diff --git a/pathd/path_pcep.c b/pathd/path_pcep.c new file mode 100644 index 0000000000..2f9ff4f0f0 --- /dev/null +++ b/pathd/path_pcep.c @@ -0,0 +1,339 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include <pcep_utils_counters.h> + +#include "log.h" +#include "command.h" +#include "libfrr.h" +#include "printfrr.h" +#include "version.h" +#include "northbound.h" +#include "frr_pthread.h" +#include "jhash.h" +#include "termtable.h" + +#include "pathd/pathd.h" +#include "pathd/path_errors.h" +#include "pathd/path_pcep_memory.h" +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_cli.h" +#include "pathd/path_pcep_controller.h" +#include "pathd/path_pcep_lib.h" +#include "pathd/path_pcep_config.h" + + +/* + * Globals. 
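+ *
+ * pcep_g points at the single module-wide instance of struct pcep_glob
+ * (see path_pcep.h): the debug flags, the main thread master, the
+ * controller pthread handle and the per-PCE CLI option tables used by
+ * the CLI commands and the event handlers below.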
+ */ +static struct pcep_glob pcep_glob_space = {.dbg = {0, "pathd module: pcep"}}; +struct pcep_glob *pcep_g = &pcep_glob_space; + +/* Main Thread Even Handler */ +static int pcep_main_event_handler(enum pcep_main_event_type type, int pcc_id, + void *payload); +static int pcep_main_event_start_sync(int pcc_id); +static int pcep_main_event_start_sync_cb(struct path *path, void *arg); +static int pcep_main_event_update_candidate(struct path *path); +static int pcep_main_event_remove_candidate_segments(const char *originator, + bool force); + +/* Hook Handlers called from the Main Thread */ +static int pathd_candidate_created_handler(struct srte_candidate *candidate); +static int pathd_candidate_updated_handler(struct srte_candidate *candidate); +static int pathd_candidate_removed_handler(struct srte_candidate *candidate); + +/* Path manipulation functions */ +static struct path_metric *pcep_copy_metrics(struct path_metric *metric); +static struct path_hop *pcep_copy_hops(struct path_hop *hop); + +/* Module Functions */ +static int pcep_module_finish(void); +static int pcep_module_late_init(struct thread_master *tm); +static int pcep_module_init(void); + +/* ------------ Path Helper Functions ------------ */ + +struct path *pcep_new_path(void) +{ + struct path *path; + path = XCALLOC(MTYPE_PCEP, sizeof(*path)); + path->binding_sid = MPLS_LABEL_NONE; + path->enforce_bandwidth = true; + return path; +} + +struct path_hop *pcep_new_hop(void) +{ + struct path_hop *hop; + hop = XCALLOC(MTYPE_PCEP, sizeof(*hop)); + return hop; +} + +struct path_metric *pcep_new_metric(void) +{ + struct path_metric *metric; + metric = XCALLOC(MTYPE_PCEP, sizeof(*metric)); + return metric; +} + +struct path_metric *pcep_copy_metrics(struct path_metric *metric) +{ + if (metric == NULL) + return NULL; + struct path_metric *new_metric = pcep_new_metric(); + *new_metric = *metric; + new_metric->next = pcep_copy_metrics(metric->next); + return new_metric; +} + +struct path_hop *pcep_copy_hops(struct path_hop *hop) +{ + if (hop == NULL) + return NULL; + struct path_hop *new_hop = pcep_new_hop(); + *new_hop = *hop; + new_hop->next = pcep_copy_hops(hop->next); + return new_hop; +} + +struct path *pcep_copy_path(struct path *path) +{ + struct path *new_path = pcep_new_path(); + + *new_path = *path; + new_path->first_metric = pcep_copy_metrics(path->first_metric); + new_path->first_hop = pcep_copy_hops(path->first_hop); + if (path->name != NULL) + new_path->name = XSTRDUP(MTYPE_PCEP, path->name); + if (path->originator != NULL) + new_path->originator = XSTRDUP(MTYPE_PCEP, path->originator); + return new_path; +} + +void pcep_free_path(struct path *path) +{ + struct path_hop *hop; + struct path_metric *metric; + char *tmp; + + metric = path->first_metric; + while (metric != NULL) { + struct path_metric *next = metric->next; + XFREE(MTYPE_PCEP, metric); + metric = next; + } + hop = path->first_hop; + while (hop != NULL) { + struct path_hop *next = hop->next; + XFREE(MTYPE_PCEP, hop); + hop = next; + } + if (path->originator != NULL) { + /* The path own the memory, it is const so it is clear it + shouldn't be modified. XFREE macro do not support type casting + so we need a temporary variable */ + tmp = (char *)path->originator; + XFREE(MTYPE_PCEP, tmp); + path->originator = NULL; + } + if (path->name != NULL) { + /* The path own the memory, it is const so it is clear it + shouldn't be modified. 
XFREE macro do not support type casting + so we need a temporary variable */ + tmp = (char *)path->name; + XFREE(MTYPE_PCEP, tmp); + path->name = NULL; + } + XFREE(MTYPE_PCEP, path); +} + + +/* ------------ Main Thread Even Handler ------------ */ + +int pcep_main_event_handler(enum pcep_main_event_type type, int pcc_id, + void *payload) +{ + int ret = 0; + + switch (type) { + case PCEP_MAIN_EVENT_START_SYNC: + ret = pcep_main_event_start_sync(pcc_id); + break; + case PCEP_MAIN_EVENT_UPDATE_CANDIDATE: + assert(payload != NULL); + ret = pcep_main_event_update_candidate((struct path *)payload); + break; + case PCEP_MAIN_EVENT_REMOVE_CANDIDATE_LSP: + ret = pcep_main_event_remove_candidate_segments( + (const char *)payload, true); + break; + default: + flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + "Unexpected event received in the main thread: %u", + type); + break; + } + + return ret; +} + +int pcep_main_event_start_sync(int pcc_id) +{ + path_pcep_config_list_path(pcep_main_event_start_sync_cb, &pcc_id); + pcep_ctrl_sync_done(pcep_g->fpt, pcc_id); + return 0; +} + +int pcep_main_event_start_sync_cb(struct path *path, void *arg) +{ + int *pcc_id = (int *)arg; + pcep_ctrl_sync_path(pcep_g->fpt, *pcc_id, path); + return 1; +} + +int pcep_main_event_update_candidate(struct path *path) +{ + struct path *resp = NULL; + int ret = 0; + + ret = path_pcep_config_update_path(path); + if (ret != PATH_NB_ERR && path->srp_id != 0) { + /* ODL and Cisco requires the first reported + * LSP to have a DOWN status, the later status changes + * will be comunicated through hook calls. + */ + enum pcep_lsp_operational_status real_status; + if ((resp = path_pcep_config_get_path(&path->nbkey))) { + resp->srp_id = path->srp_id; + real_status = resp->status; + resp->status = PCEP_LSP_OPERATIONAL_DOWN; + pcep_ctrl_send_report(pcep_g->fpt, path->pcc_id, resp); + /* If the update did not have any effect and the real + * status is not DOWN, we need to send a second report + * so the PCE is aware of the real status. 
This is due + * to the fact that NO notification will be received + * if the update did not apply any changes */ + if ((ret == PATH_NB_NO_CHANGE) + && (real_status != PCEP_LSP_OPERATIONAL_DOWN)) { + resp->status = real_status; + resp->srp_id = 0; + pcep_ctrl_send_report(pcep_g->fpt, path->pcc_id, + resp); + } + pcep_free_path(resp); + } + } + return ret; +} + +int pcep_main_event_remove_candidate_segments(const char *originator, + bool force) +{ + srte_candidate_unset_segment_list(originator, force); + /* Avoid compiler warnings about const char* */ + void *free_ptr = (void *)originator; + XFREE(MTYPE_PCEP, free_ptr); + + srte_apply_changes(); + + return 0; +} + +/* ------------ Hook Handlers Functions Called From Main Thread ------------ */ + +int pathd_candidate_created_handler(struct srte_candidate *candidate) +{ + struct path *path = candidate_to_path(candidate); + int ret = pcep_ctrl_pathd_event(pcep_g->fpt, PCEP_PATH_CREATED, path); + return ret; +} + +int pathd_candidate_updated_handler(struct srte_candidate *candidate) +{ + struct path *path = candidate_to_path(candidate); + int ret = pcep_ctrl_pathd_event(pcep_g->fpt, PCEP_PATH_UPDATED, path); + return ret; +} + +int pathd_candidate_removed_handler(struct srte_candidate *candidate) +{ + struct path *path = candidate_to_path(candidate); + int ret = pcep_ctrl_pathd_event(pcep_g->fpt, PCEP_PATH_REMOVED, path); + return ret; +} + + +/* ------------ Module Functions ------------ */ + +int pcep_module_late_init(struct thread_master *tm) +{ + assert(pcep_g->fpt == NULL); + assert(pcep_g->master == NULL); + + struct frr_pthread *fpt; + + if (pcep_ctrl_initialize(tm, &fpt, pcep_main_event_handler)) + return 1; + + if (pcep_lib_initialize(fpt)) + return 1; + + pcep_g->master = tm; + pcep_g->fpt = fpt; + + hook_register(pathd_candidate_created, pathd_candidate_created_handler); + hook_register(pathd_candidate_updated, pathd_candidate_updated_handler); + hook_register(pathd_candidate_removed, pathd_candidate_removed_handler); + + hook_register(frr_fini, pcep_module_finish); + + pcep_cli_init(); + + return 0; +} + +int pcep_module_finish(void) +{ + pcep_ctrl_finalize(&pcep_g->fpt); + pcep_lib_finalize(); + + for (int i = 0; i < MAX_PCC; i++) + if (pcep_g->pce_opts_cli[i] != NULL) + XFREE(MTYPE_PCEP, pcep_g->pce_opts_cli[i]); + + return 0; +} + +int pcep_module_init(void) +{ + pcep_g->num_pce_opts_cli = 0; + for (int i = 0; i < MAX_PCE; i++) + pcep_g->pce_opts_cli[i] = NULL; + pcep_g->num_config_group_opts = 0; + for (int i = 0; i < MAX_PCE; i++) + pcep_g->config_group_opts[i] = NULL; + + hook_register(frr_late_init, pcep_module_late_init); + return 0; +} + +FRR_MODULE_SETUP(.name = "frr_pathd_pcep", .version = FRR_VERSION, + .description = "FRR pathd PCEP module", + .init = pcep_module_init) diff --git a/pathd/path_pcep.h b/pathd/path_pcep.h new file mode 100644 index 0000000000..1896c265c1 --- /dev/null +++ b/pathd/path_pcep.h @@ -0,0 +1,326 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_H_ +#define _PATH_PCEP_H_ + +#include <stdbool.h> +#include <debug.h> +#include <netinet/tcp.h> +#include <pcep_utils_logging.h> +#include <pcep_pcc_api.h> +#include "mpls.h" +#include "pathd/pathd.h" +#include "pathd/path_pcep_memory.h" + +#define PCEP_DEFAULT_PORT 4189 +#define MAX_PCC 32 +#define MAX_PCE 32 +#define MAX_TAG_SIZE 50 +#define PCEP_DEBUG_MODE_BASIC 0x01 +#define PCEP_DEBUG_MODE_PATH 0x02 +#define PCEP_DEBUG_MODE_PCEP 0x04 +#define PCEP_DEBUG_MODE_PCEPLIB 0x08 +#define PCEP_DEBUG(fmt, ...) \ + do { \ + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_BASIC)) \ + DEBUGD(&pcep_g->dbg, "pcep: " fmt, ##__VA_ARGS__); \ + } while (0) +#define PCEP_DEBUG_PATH(fmt, ...) \ + do { \ + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PATH)) \ + DEBUGD(&pcep_g->dbg, "pcep: " fmt, ##__VA_ARGS__); \ + } while (0) +#define PCEP_DEBUG_PCEP(fmt, ...) \ + do { \ + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEP)) \ + DEBUGD(&pcep_g->dbg, "pcep: " fmt, ##__VA_ARGS__); \ + } while (0) +#define PCEP_DEBUG_PCEPLIB(priority, fmt, ...) \ + do { \ + switch (priority) { \ + case LOG_DEBUG: \ + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, \ + PCEP_DEBUG_MODE_PCEPLIB)) \ + DEBUGD(&pcep_g->dbg, "pcep: " fmt, \ + ##__VA_ARGS__); \ + break; \ + case LOG_INFO: \ + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, \ + PCEP_DEBUG_MODE_PCEPLIB)) \ + DEBUGI(&pcep_g->dbg, "pcep: " fmt, \ + ##__VA_ARGS__); \ + break; \ + case LOG_NOTICE: \ + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, \ + PCEP_DEBUG_MODE_PCEPLIB)) \ + DEBUGN(&pcep_g->dbg, "pcep: " fmt, \ + ##__VA_ARGS__); \ + break; \ + case LOG_WARNING: \ + case LOG_ERR: \ + default: \ + zlog(priority, "pcep: " fmt, ##__VA_ARGS__); \ + break; \ + } \ + } while (0) + +struct pcep_config_group_opts { + char name[64]; + char tcp_md5_auth[TCP_MD5SIG_MAXKEYLEN]; + struct ipaddr source_ip; + short source_port; + bool draft07; + bool pce_initiated; + int keep_alive_seconds; + int min_keep_alive_seconds; + int max_keep_alive_seconds; + int dead_timer_seconds; + int min_dead_timer_seconds; + int max_dead_timer_seconds; + int pcep_request_time_seconds; + int session_timeout_inteval_seconds; + int delegation_timeout_seconds; +}; + +struct pce_opts { + struct ipaddr addr; + short port; + char pce_name[64]; + struct pcep_config_group_opts config_opts; + uint8_t precedence; /* Multi-PCE precedence */ +}; + +struct pcc_opts { + struct ipaddr addr; + short port; + short msd; +}; + +/* Encapsulate the pce_opts with needed CLI information */ +struct pce_opts_cli { + struct pce_opts pce_opts; + char config_group_name[64]; + /* These are the values configured in the pcc-peer sub-commands. + * These need to be stored for later merging. Notice, it could + * be that not all of them are set. */ + struct pcep_config_group_opts pce_config_group_opts; + /* The pce_opts->config_opts will be a merge of the default values, + * optional config_group values (which overwrite default values), + * and any values configured in the pce sub-commands (which overwrite + * both default and config_group values). This flag indicates of the + * values need to be merged or not. 
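+ * The merge itself is done by pcep_cli_merge_pcep_pce_config_options()
+ * in path_pcep_cli.c; the pce sub-commands clear this flag so that the
+ * effective options are re-merged the next time they are needed.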
*/ + bool merged; +}; + +struct lsp_nb_key { + uint32_t color; + struct ipaddr endpoint; + uint32_t preference; +}; + +struct sid_mpls { + mpls_label_t label; + uint8_t traffic_class; + bool is_bottom; + uint8_t ttl; +}; + +struct pcep_caps { + bool is_stateful; + /* If we know the objective functions supported by the PCE. + * If we don't know, it doesn't mean the PCE doesn't support any */ + bool supported_ofs_are_known; + /* Defined if we know which objective funtions are supported by the PCE. + * One bit per objective function, the bit index being equal to + * enum pcep_objfun_type values: bit 0 is not used, bit 1 is + * PCEP_OBJFUN_MCP, up to bit 17 that is PCEP_OBJFUN_MSN */ + uint32_t supported_ofs; +}; + +union sid { + uint32_t value; + struct sid_mpls mpls; +}; + +struct nai { + /* NAI type */ + enum pcep_sr_subobj_nai type; + /* Local IP address*/ + struct ipaddr local_addr; + /* Local interface identifier if the NAI is an unnumbered adjacency */ + uint32_t local_iface; + /* Remote address if the NAI is an adjacency */ + struct ipaddr remote_addr; + /* Remote interface identifier if the NAI is an unnumbered adjacency */ + uint32_t remote_iface; +}; + +struct path_hop { + /* Pointer to the next hop in the path */ + struct path_hop *next; + /* Indicateif this ia a loose or strict hop */ + bool is_loose; + /* Indicate if there is an SID for the hop */ + bool has_sid; + /* Indicate if the hop as a MPLS label */ + bool is_mpls; + /* Indicate if the MPLS label has extra attributes (TTL, class..)*/ + bool has_attribs; + /* Hop's SID if available */ + union sid sid; + /* Indicate if there is a NAI for this hop */ + bool has_nai; + /* NAI if available */ + struct nai nai; +}; + +struct path_metric { + /* Pointer to the next metric */ + struct path_metric *next; + /* The metric type */ + enum pcep_metric_types type; + /* If the metric should be enforced */ + bool enforce; + /* If the metric value is bound (a maximum) */ + bool is_bound; + /* If the metric value is computed */ + bool is_computed; + /* The metric value */ + float value; +}; + +struct path { + /* Both the nbkey and the plspid are keys comming from the PCC, + but the PCE is only using the plspid. The missing key is looked up by + the PCC so we always have both */ + + /* The northbound key identifying this path */ + struct lsp_nb_key nbkey; + /* The generated unique PLSP identifier for this path. + See draft-ietf-pce-stateful-pce */ + uint32_t plsp_id; + + /* The transport address the path is comming from, PCE or PCC*/ + struct ipaddr sender; + /* The pcc protocol address, must be the same family as the endpoint */ + struct ipaddr pcc_addr; + + /* The identifier of the PCC the path is for/from. If 0 it is undefined, + meaning it hasn't be set yet or is for all the PCC */ + int pcc_id; + + /* The origin of the path creation */ + enum srte_protocol_origin create_origin; + /* The origin of the path modification */ + enum srte_protocol_origin update_origin; + /* The identifier of the entity that originated the path */ + const char *originator; + /* The type of the path, for PCE initiated or updated path it is always + SRTE_CANDIDATE_TYPE_DYNAMIC */ + enum srte_candidate_type type; + + /* The following data comes from either the PCC or the PCE if available + */ + + /* Path's binding SID */ + mpls_label_t binding_sid; + /* The name of the path */ + const char *name; + /* The request identifier from the PCE, when getting a path from the + PCE. 
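+ pcep_main_event_update_candidate() in path_pcep.c only sends the
+ initial operational report for an update when this value is non-zero,
+ and resets it to zero for the follow-up unsolicited report.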
See draft-ietf-pce-stateful-pce */ + uint32_t srp_id; + /* The request identifier from the PCC , when getting a path from the + PCE after a computation request. See rfc5440, section-7.4 */ + uint32_t req_id; + /* The operational status of the path */ + enum pcep_lsp_operational_status status; + /* If true, the receiver (PCC) must remove the path. + See draft-ietf-pce-pce-initiated-lsp */ + bool do_remove; + /* Indicate the given path was removed by the PCC. + See draft-ietf-pce-stateful-pce, section-7.3, flag R */ + bool was_removed; + /* Indicate the path is part of the synchronization process. + See draft-ietf-pce-stateful-pce, section-7.3, flag S */ + bool is_synching; + /* Indicate if the path bandwidth requirment is defined */ + bool has_bandwidth; + /* Indicate if the bandwidth requirment should be enforced */ + bool enforce_bandwidth; + /* Path required bandwidth if defined */ + float bandwidth; + /* Specify the list of hop defining the path */ + struct path_hop *first_hop; + /* Specify the list of metrics */ + struct path_metric *first_metric; + /* Indicate if the path has a PCC-defined objective function */ + bool has_pcc_objfun; + /* Indicate the PCC-defined objective function is required */ + bool enforce_pcc_objfun; + /* PCC-defined Objective Function */ + enum objfun_type pcc_objfun; + /* Indicate if the path has a PCE-defined objective function */ + bool has_pce_objfun; + /* PCE-defined Objective Function */ + enum objfun_type pce_objfun; + /* Indicate if some affinity filters are defined */ + bool has_affinity_filters; + /* Affinity attribute filters indexed by enum affinity_filter_type - 1 + */ + uint32_t affinity_filters[MAX_AFFINITY_FILTER_TYPE]; + + /* The following data need to be specialized for a given PCE */ + + /* Indicate the path is delegated to the PCE. + See draft-ietf-pce-stateful-pce, section-7.3, flag D */ + bool is_delegated; + /* Indicate if the PCE wants the path to get active. + See draft-ietf-pce-stateful-pce, section-7.3, flag A */ + bool go_active; + /* Indicate the given path was created by the PCE, + See draft-ietf-pce-pce-initiated-lsp, section-5.3.1, flag C */ + bool was_created; + + /* The following data is defined for comnputation replies */ + + /* Indicate that no path could be computed */ + bool no_path; +}; + +struct pcep_glob { + struct debug dbg; + struct thread_master *master; + struct frr_pthread *fpt; + uint8_t num_pce_opts_cli; + struct pce_opts_cli *pce_opts_cli[MAX_PCE]; + uint8_t num_config_group_opts; + struct pcep_config_group_opts *config_group_opts[MAX_PCE]; +}; + +extern struct pcep_glob *pcep_g; + +/* Path Helper Functions */ +struct path *pcep_new_path(void); +struct path_hop *pcep_new_hop(void); +struct path_metric *pcep_new_metric(void); +struct path *pcep_copy_path(struct path *path); +void pcep_free_path(struct path *path); + + +#endif // _PATH_PCEP_H_ diff --git a/pathd/path_pcep_cli.c b/pathd/path_pcep_cli.c new file mode 100644 index 0000000000..add3391f22 --- /dev/null +++ b/pathd/path_pcep_cli.c @@ -0,0 +1,2033 @@ +/* + * Copyright (C) 2020 Volta Networks, Inc + * Brady Johnson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include <pcep_utils_counters.h> +#include <pcep_session_logic.h> + +#include "log.h" +#include "command.h" +#include "libfrr.h" +#include "printfrr.h" +#include "version.h" +#include "northbound.h" +#include "frr_pthread.h" +#include "jhash.h" +#include "termtable.h" + +#include "pathd/pathd.h" +#include "pathd/path_errors.h" +#include "pathd/path_pcep_memory.h" +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_cli.h" +#include "pathd/path_pcep_controller.h" +#include "pathd/path_pcep_debug.h" +#include "pathd/path_pcep_lib.h" +#include "pathd/path_pcep_pcc.h" + +#ifndef VTYSH_EXTRACT_PL +#include "pathd/path_pcep_cli_clippy.c" +#endif + +#define DEFAULT_PCE_PRECEDENCE 255 +#define DEFAULT_PCC_MSD 4 +#define DEFAULT_SR_DRAFT07 false +#define DEFAULT_PCE_INITIATED false +#define DEFAULT_TIMER_KEEP_ALIVE 30 +#define DEFAULT_TIMER_KEEP_ALIVE_MIN 1 +#define DEFAULT_TIMER_KEEP_ALIVE_MAX 255 +#define DEFAULT_TIMER_DEADTIMER 120 +#define DEFAULT_TIMER_DEADTIMER_MIN 4 +#define DEFAULT_TIMER_DEADTIMER_MAX 255 +#define DEFAULT_TIMER_PCEP_REQUEST 30 +#define DEFAULT_TIMER_SESSION_TIMEOUT_INTERVAL 30 +#define DEFAULT_DELEGATION_TIMEOUT_INTERVAL 10 + +/* CLI Function declarations */ +static int pcep_cli_debug_config_write(struct vty *vty); +static int pcep_cli_debug_set_all(uint32_t flags, bool set); +static int pcep_cli_pcep_config_write(struct vty *vty); +static int pcep_cli_pcc_config_write(struct vty *vty); +static int pcep_cli_pce_config_write(struct vty *vty); +static int pcep_cli_pcep_pce_config_write(struct vty *vty); + +/* Internal Util Function declarations */ +static struct pce_opts_cli *pcep_cli_find_pce(const char *pce_name); +static bool pcep_cli_add_pce(struct pce_opts_cli *pce_opts_cli); +static struct pce_opts_cli *pcep_cli_create_pce_opts(); +static void pcep_cli_delete_pce(const char *pce_name); +static void +pcep_cli_merge_pcep_pce_config_options(struct pce_opts_cli *pce_opts_cli); +static struct pcep_config_group_opts * +pcep_cli_find_pcep_pce_config(const char *group_name); +static bool +pcep_cli_add_pcep_pce_config(struct pcep_config_group_opts *config_group_opts); +static struct pcep_config_group_opts * +pcep_cli_create_pcep_pce_config(const char *group_name); +static bool pcep_cli_is_pcep_pce_config_used(const char *group_name); +static void pcep_cli_delete_pcep_pce_config(const char *group_name); +static int pcep_cli_print_pce_config(struct pcep_config_group_opts *group_opts, + char *buf, size_t buf_len); +static void print_pcep_capabilities(char *buf, size_t buf_len, + pcep_configuration *config); +static void print_pcep_session(struct vty *vty, struct pce_opts *pce_opts, + struct pcep_pcc_info *pcc_info); +static bool pcep_cli_pcc_has_pce(const char *pce_name); +static void pcep_cli_add_pce_connection(struct pce_opts *pce_opts); +static void pcep_cli_remove_pce_connection(struct pce_opts *pce_opts); +static int path_pcep_cli_pcc_pcc_peer_delete(struct vty *vty, + const char *peer_name, + const char *precedence_str, + long precedence); + +/* + * Globals. 
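+ *
+ * The string constants below are the vtysh keyword tokens and the
+ * capability labels used by the show and config-write commands;
+ * pce_connections_g tracks which configured PCEs are currently attached
+ * to the PCC, and default_pcep_config_group_opts_g supplies the fallback
+ * values used by the pce-config merge.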
+ */ + +static const char PCEP_VTYSH_ARG_ADDRESS[] = "address"; +static const char PCEP_VTYSH_ARG_SOURCE_ADDRESS[] = "source-address"; +static const char PCEP_VTYSH_ARG_IP[] = "ip"; +static const char PCEP_VTYSH_ARG_IPV6[] = "ipv6"; +static const char PCEP_VTYSH_ARG_PORT[] = "port"; +static const char PCEP_VTYSH_ARG_PRECEDENCE[] = "precedence"; +static const char PCEP_VTYSH_ARG_MSD[] = "msd"; +static const char PCEP_VTYSH_ARG_KEEP_ALIVE[] = "keep-alive"; +static const char PCEP_VTYSH_ARG_TIMER[] = "timer"; +static const char PCEP_VTYSH_ARG_KEEP_ALIVE_MIN[] = "min-peer-keep-alive"; +static const char PCEP_VTYSH_ARG_KEEP_ALIVE_MAX[] = "max-peer-keep-alive"; +static const char PCEP_VTYSH_ARG_DEAD_TIMER[] = "dead-timer"; +static const char PCEP_VTYSH_ARG_DEAD_TIMER_MIN[] = "min-peer-dead-timer"; +static const char PCEP_VTYSH_ARG_DEAD_TIMER_MAX[] = "max-peer-dead-timer"; +static const char PCEP_VTYSH_ARG_PCEP_REQUEST[] = "pcep-request"; +static const char PCEP_VTYSH_ARG_SESSION_TIMEOUT[] = "session-timeout-interval"; +static const char PCEP_VTYSH_ARG_DELEGATION_TIMEOUT[] = "delegation-timeout"; +static const char PCEP_VTYSH_ARG_SR_DRAFT07[] = "sr-draft07"; +static const char PCEP_VTYSH_ARG_PCE_INIT[] = "pce-initiated"; +static const char PCEP_VTYSH_ARG_TCP_MD5[] = "tcp-md5-auth"; +static const char PCEP_VTYSH_ARG_BASIC[] = "basic"; +static const char PCEP_VTYSH_ARG_PATH[] = "path"; +static const char PCEP_VTYSH_ARG_MESSAGE[] = "message"; +static const char PCEP_VTYSH_ARG_PCEPLIB[] = "pceplib"; +static const char PCEP_CLI_CAP_STATEFUL[] = " [Stateful PCE]"; +static const char PCEP_CLI_CAP_INCL_DB_VER[] = " [Include DB version]"; +static const char PCEP_CLI_CAP_LSP_TRIGGERED[] = " [LSP Triggered Resync]"; +static const char PCEP_CLI_CAP_LSP_DELTA[] = " [LSP Delta Sync]"; +static const char PCEP_CLI_CAP_PCE_TRIGGERED[] = + " [PCE triggered Initial Sync]"; +static const char PCEP_CLI_CAP_SR_TE_PST[] = " [SR TE PST]"; +static const char PCEP_CLI_CAP_PCC_RESOLVE_NAI[] = + " [PCC can resolve NAI to SID]"; +static const char PCEP_CLI_CAP_PCC_INITIATED[] = " [PCC Initiated LSPs]"; +static const char PCEP_CLI_CAP_PCC_PCE_INITIATED[] = + " [PCC and PCE Initiated LSPs]"; + +struct pce_connections { + int num_connections; + struct pce_opts *connections[MAX_PCC]; +}; + +struct pce_connections pce_connections_g = {.num_connections = 0}; + +/* Default PCE group that all PCE-Groups and PCEs will inherit from */ +struct pcep_config_group_opts default_pcep_config_group_opts_g = { + .name = "default", + .tcp_md5_auth = "\0", + .draft07 = DEFAULT_SR_DRAFT07, + .pce_initiated = DEFAULT_PCE_INITIATED, + .keep_alive_seconds = DEFAULT_TIMER_KEEP_ALIVE, + .min_keep_alive_seconds = DEFAULT_TIMER_KEEP_ALIVE_MIN, + .max_keep_alive_seconds = DEFAULT_TIMER_KEEP_ALIVE_MAX, + .dead_timer_seconds = DEFAULT_TIMER_DEADTIMER, + .min_dead_timer_seconds = DEFAULT_TIMER_DEADTIMER_MIN, + .max_dead_timer_seconds = DEFAULT_TIMER_DEADTIMER_MAX, + .pcep_request_time_seconds = DEFAULT_TIMER_PCEP_REQUEST, + .session_timeout_inteval_seconds = + DEFAULT_TIMER_SESSION_TIMEOUT_INTERVAL, + .delegation_timeout_seconds = DEFAULT_DELEGATION_TIMEOUT_INTERVAL, + .source_port = DEFAULT_PCEP_TCP_PORT, + .source_ip.ipa_type = IPADDR_NONE, +}; + +/* Used by PCEP_PCE_CONFIG_NODE sub-commands to operate on the current pce group + */ +struct pcep_config_group_opts *current_pcep_config_group_opts_g = NULL; +/* Used by PCEP_PCE_NODE sub-commands to operate on the current pce opts */ +struct pce_opts_cli *current_pce_opts_g = NULL; +short pcc_msd_g = 
DEFAULT_PCC_MSD; +bool pcc_msd_configured_g = false; + +static struct cmd_node pcep_node = { + .name = "srte pcep", + .node = PCEP_NODE, + .parent_node = SR_TRAFFIC_ENG_NODE, + .config_write = pcep_cli_pcep_config_write, + .prompt = "%s(config-sr-te-pcep)# " +}; + +static struct cmd_node pcep_pcc_node = { + .name = "srte pcep pcc", + .node = PCEP_PCC_NODE, + .parent_node = PCEP_NODE, + .config_write = pcep_cli_pcc_config_write, + .prompt = "%s(config-sr-te-pcep-pcc)# " +}; + +static struct cmd_node pcep_pce_node = { + .name = "srte pcep pce", + .node = PCEP_PCE_NODE, + .parent_node = PCEP_NODE, + .config_write = pcep_cli_pce_config_write, + .prompt = "%s(config-sr-te-pcep-pce)# " +}; + +static struct cmd_node pcep_pce_config_node = { + .name = "srte pcep pce-config", + .node = PCEP_PCE_CONFIG_NODE, + .parent_node = PCEP_NODE, + .config_write = pcep_cli_pcep_pce_config_write, + .prompt = "%s(pce-sr-te-pcep-pce-config)# " +}; + +/* Common code used in VTYSH processing for int values */ +#define PCEP_VTYSH_INT_ARG_CHECK(arg_str, arg_val, arg_store, min_value, \ + max_value) \ + if (arg_str != NULL) { \ + if (arg_val <= min_value || arg_val >= max_value) { \ + vty_out(vty, \ + "%% Invalid value %ld in range [%d - %d]", \ + arg_val, min_value, max_value); \ + return CMD_WARNING; \ + } \ + arg_store = arg_val; \ + } + +#define MERGE_COMPARE_CONFIG_GROUP_VALUE(config_param, not_set_value) \ + pce_opts_cli->pce_opts.config_opts.config_param = \ + pce_opts_cli->pce_config_group_opts.config_param; \ + if (pce_opts_cli->pce_config_group_opts.config_param \ + == not_set_value) { \ + pce_opts_cli->pce_opts.config_opts.config_param = \ + ((pce_config != NULL \ + && pce_config->config_param != not_set_value) \ + ? pce_config->config_param \ + : default_pcep_config_group_opts_g \ + .config_param); \ + } + +/* + * Internal Util functions + */ + +/* Check if a pce_opts_cli already exists based on its name and return it, + * return NULL otherwise */ +static struct pce_opts_cli *pcep_cli_find_pce(const char *pce_name) +{ + for (int i = 0; i < MAX_PCE; i++) { + struct pce_opts_cli *pce_rhs_cli = pcep_g->pce_opts_cli[i]; + if (pce_rhs_cli != NULL) { + if (strcmp(pce_name, pce_rhs_cli->pce_opts.pce_name) + == 0) { + return pce_rhs_cli; + } + } + } + + return NULL; +} + +/* Add a new pce_opts_cli to pcep_g, return false if MAX_PCES, true otherwise */ +static bool pcep_cli_add_pce(struct pce_opts_cli *pce_opts_cli) +{ + for (int i = 0; i < MAX_PCE; i++) { + if (pcep_g->pce_opts_cli[i] == NULL) { + pcep_g->pce_opts_cli[i] = pce_opts_cli; + pcep_g->num_pce_opts_cli++; + return true; + } + } + + return false; +} + +/* Create a new pce opts_cli */ +static struct pce_opts_cli *pcep_cli_create_pce_opts(const char *name) +{ + struct pce_opts_cli *pce_opts_cli = + XCALLOC(MTYPE_PCEP, sizeof(struct pce_opts_cli)); + strlcpy(pce_opts_cli->pce_opts.pce_name, name, + sizeof(pce_opts_cli->pce_opts.pce_name)); + pce_opts_cli->pce_opts.port = PCEP_DEFAULT_PORT; + + return pce_opts_cli; +} + +static void pcep_cli_delete_pce(const char *pce_name) +{ + for (int i = 0; i < MAX_PCE; i++) { + if (pcep_g->pce_opts_cli[i] != NULL) { + if (strcmp(pcep_g->pce_opts_cli[i]->pce_opts.pce_name, + pce_name) + == 0) { + XFREE(MTYPE_PCEP, pcep_g->pce_opts_cli[i]); + pcep_g->pce_opts_cli[i] = NULL; + pcep_g->num_pce_opts_cli--; + return; + } + } + } +} + +static void +pcep_cli_merge_pcep_pce_config_options(struct pce_opts_cli *pce_opts_cli) +{ + if (pce_opts_cli->merged == true) { + return; + } + + struct pcep_config_group_opts *pce_config = + 
pcep_cli_find_pcep_pce_config(pce_opts_cli->config_group_name); + + /* Configuration priorities: + * 1) pce_opts->config_opts, if present, overwrite pce_config + * config_opts 2) pce_config config_opts, if present, overwrite + * default config_opts 3) If neither pce_opts->config_opts nor + * pce_config config_opts are set, then the default config_opts value + * will be used. + */ + + const char *tcp_md5_auth_str = + pce_opts_cli->pce_config_group_opts.tcp_md5_auth; + if (pce_opts_cli->pce_config_group_opts.tcp_md5_auth[0] == '\0') { + if (pce_config != NULL && pce_config->tcp_md5_auth[0] != '\0') { + tcp_md5_auth_str = pce_config->tcp_md5_auth; + } else { + tcp_md5_auth_str = + default_pcep_config_group_opts_g.tcp_md5_auth; + } + } + strncpy(pce_opts_cli->pce_opts.config_opts.tcp_md5_auth, + tcp_md5_auth_str, TCP_MD5SIG_MAXKEYLEN); + + struct ipaddr *source_ip = + &pce_opts_cli->pce_config_group_opts.source_ip; + if (pce_opts_cli->pce_config_group_opts.source_ip.ipa_type + == IPADDR_NONE) { + if (pce_config != NULL + && pce_config->source_ip.ipa_type != IPADDR_NONE) { + source_ip = &pce_config->source_ip; + } else { + source_ip = &default_pcep_config_group_opts_g.source_ip; + } + } + memcpy(&pce_opts_cli->pce_opts.config_opts.source_ip, source_ip, + sizeof(struct ipaddr)); + + MERGE_COMPARE_CONFIG_GROUP_VALUE(draft07, false); + MERGE_COMPARE_CONFIG_GROUP_VALUE(pce_initiated, false); + MERGE_COMPARE_CONFIG_GROUP_VALUE(keep_alive_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(min_keep_alive_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(max_keep_alive_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(dead_timer_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(min_dead_timer_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(max_dead_timer_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(pcep_request_time_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(session_timeout_inteval_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(delegation_timeout_seconds, 0); + MERGE_COMPARE_CONFIG_GROUP_VALUE(source_port, 0); + + pce_opts_cli->merged = true; +} + +/* Check if a pcep_config_group_opts already exists based on its name and return + * it, return NULL otherwise */ +static struct pcep_config_group_opts * +pcep_cli_find_pcep_pce_config(const char *group_name) +{ + for (int i = 0; i < MAX_PCE; i++) { + struct pcep_config_group_opts *pcep_pce_config_rhs = + pcep_g->config_group_opts[i]; + if (pcep_pce_config_rhs != NULL) { + if (strcmp(group_name, pcep_pce_config_rhs->name) + == 0) { + return pcep_pce_config_rhs; + } + } + } + + return NULL; +} + +/* Add a new pcep_config_group_opts to pcep_g, return false if MAX_PCE, + * true otherwise */ +static bool pcep_cli_add_pcep_pce_config( + struct pcep_config_group_opts *pcep_config_group_opts) +{ + for (int i = 0; i < MAX_PCE; i++) { + if (pcep_g->config_group_opts[i] == NULL) { + pcep_g->config_group_opts[i] = pcep_config_group_opts; + pcep_g->num_config_group_opts++; + return true; + } + } + + return false; +} + +/* Create a new pce group, inheriting its values from the default pce group */ +static struct pcep_config_group_opts * +pcep_cli_create_pcep_pce_config(const char *group_name) +{ + struct pcep_config_group_opts *pcep_config_group_opts = + XCALLOC(MTYPE_PCEP, sizeof(struct pcep_config_group_opts)); + strlcpy(pcep_config_group_opts->name, group_name, + sizeof(pcep_config_group_opts->name)); + + return pcep_config_group_opts; +} + +/* Iterate the pce_opts and return true if the pce-group-name is referenced, + * false otherwise. 
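+ * Used by path_pcep_cli_pcep_pce_config_delete() to refuse deleting a
+ * pce-config that a configured PCE still references.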
*/ +static bool pcep_cli_is_pcep_pce_config_used(const char *group_name) +{ + for (int i = 0; i < MAX_PCE; i++) { + if (pcep_g->pce_opts_cli[i] != NULL) { + if (strcmp(pcep_g->pce_opts_cli[i]->config_group_name, + group_name) + == 0) { + return true; + } + } + } + + return false; +} + +static void pcep_cli_delete_pcep_pce_config(const char *group_name) +{ + for (int i = 0; i < MAX_PCE; i++) { + if (pcep_g->config_group_opts[i] != NULL) { + if (strcmp(pcep_g->config_group_opts[i]->name, + group_name) + == 0) { + XFREE(MTYPE_PCEP, pcep_g->config_group_opts[i]); + pcep_g->config_group_opts[i] = NULL; + pcep_g->num_config_group_opts--; + return; + } + } + } +} + +static bool pcep_cli_pcc_has_pce(const char *pce_name) +{ + for (int i = 0; i < MAX_PCC; i++) { + struct pce_opts *pce_opts = pce_connections_g.connections[i]; + if (pce_opts == NULL) { + continue; + } + + if (strcmp(pce_opts->pce_name, pce_name) == 0) { + return true; + } + } + + return false; +} + +static void pcep_cli_add_pce_connection(struct pce_opts *pce_opts) +{ + for (int i = 0; i < MAX_PCC; i++) { + if (pce_connections_g.connections[i] == NULL) { + pce_connections_g.num_connections++; + pce_connections_g.connections[i] = pce_opts; + return; + } + } +} + +static void pcep_cli_remove_pce_connection(struct pce_opts *pce_opts) +{ + for (int i = 0; i < MAX_PCC; i++) { + if (pce_connections_g.connections[i] == pce_opts) { + pce_connections_g.num_connections--; + pce_connections_g.connections[i] = NULL; + return; + } + } +} + +/* + * VTY command implementations + */ + +static int path_pcep_cli_debug(struct vty *vty, const char *no_str, + const char *basic_str, const char *path_str, + const char *message_str, const char *pceplib_str) +{ + uint32_t mode = DEBUG_NODE2MODE(vty->node); + bool no = (no_str != NULL); + + DEBUG_MODE_SET(&pcep_g->dbg, mode, !no); + + if (basic_str != NULL) { + DEBUG_FLAGS_SET(&pcep_g->dbg, PCEP_DEBUG_MODE_BASIC, !no); + } + if (path_str != NULL) { + DEBUG_FLAGS_SET(&pcep_g->dbg, PCEP_DEBUG_MODE_PATH, !no); + } + if (message_str != NULL) { + DEBUG_FLAGS_SET(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEP, !no); + } + if (pceplib_str != NULL) { + DEBUG_FLAGS_SET(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEPLIB, !no); + } + + return CMD_SUCCESS; +} + +static int path_pcep_cli_show_srte_pcep_counters(struct vty *vty) +{ + int i, j, row; + time_t diff_time; + struct tm *tm_info; + char tm_buffer[26]; + struct counters_group *group; + struct counters_subgroup *subgroup; + struct counter *counter; + const char *group_name, *empty_string = ""; + struct ttable *tt; + char *table; + + group = pcep_ctrl_get_counters(pcep_g->fpt, 1); + + if (group == NULL) { + vty_out(vty, "No counters to display.\n\n"); + return CMD_SUCCESS; + } + + diff_time = time(NULL) - group->start_time; + tm_info = localtime(&group->start_time); + strftime(tm_buffer, sizeof(tm_buffer), "%Y-%m-%d %H:%M:%S", tm_info); + + vty_out(vty, "PCEP counters since %s (%luh %lum %lus):\n", tm_buffer, + diff_time / 3600, (diff_time / 60) % 60, diff_time % 60); + + /* Prepare table. 
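+ * Three columns (Group | Name | Value), with a separator row added
+ * after each counters subgroup.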
*/ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(tt, "Group|Name|Value"); + tt->style.cell.rpad = 2; + tt->style.corner = '+'; + ttable_restyle(tt); + ttable_rowseps(tt, 0, BOTTOM, true, '-'); + + for (row = 0, i = 0; i <= group->num_subgroups; i++) { + subgroup = group->subgroups[i]; + if (subgroup != NULL) { + group_name = subgroup->counters_subgroup_name; + for (j = 0; j <= subgroup->num_counters; j++) { + counter = subgroup->counters[j]; + if (counter != NULL) { + ttable_add_row(tt, "%s|%s|%u", + group_name, + counter->counter_name, + counter->counter_value); + row++; + group_name = empty_string; + } + } + ttable_rowseps(tt, row, BOTTOM, true, '-'); + } + } + + /* Dump the generated table. */ + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + + ttable_del(tt); + + pcep_lib_free_counters(group); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcep_pce_config(struct vty *vty, + const char *pcep_pce_config) +{ + struct pcep_config_group_opts *pce_config = + pcep_cli_find_pcep_pce_config(pcep_pce_config); + if (pce_config == NULL) { + pce_config = pcep_cli_create_pcep_pce_config(pcep_pce_config); + if (pcep_cli_add_pcep_pce_config(pce_config) == false) { + vty_out(vty, + "%% Cannot create pce-config, as the Maximum limit of %d pce-config has been reached.\n", + MAX_PCE); + XFREE(MTYPE_PCEP, pce_config); + return CMD_WARNING; + } + } else { + vty_out(vty, + "Notice: changes to this pce-config will not affect PCEs already configured with this group\n"); + } + + current_pcep_config_group_opts_g = pce_config; + vty->node = PCEP_PCE_CONFIG_NODE; + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcep_pce_config_delete(struct vty *vty, + const char *pcep_pce_config) +{ + struct pcep_config_group_opts *pce_config = + pcep_cli_find_pcep_pce_config(pcep_pce_config); + if (pce_config == NULL) { + vty_out(vty, + "%% Cannot delete pce-config, since it does not exist.\n"); + return CMD_WARNING; + } + + if (pcep_cli_is_pcep_pce_config_used(pce_config->name)) { + vty_out(vty, + "%% Cannot delete pce-config, since it is in use by a peer.\n"); + return CMD_WARNING; + } + + pcep_cli_delete_pcep_pce_config(pce_config->name); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_show_srte_pcep_pce_config(struct vty *vty, + const char *pcep_pce_config) +{ + char buf[1024] = ""; + + /* Only show 1 Peer config group */ + struct pcep_config_group_opts *group_opts; + if (pcep_pce_config != NULL) { + if (strcmp(pcep_pce_config, "default") == 0) { + group_opts = &default_pcep_config_group_opts_g; + } else { + group_opts = + pcep_cli_find_pcep_pce_config(pcep_pce_config); + } + if (group_opts == NULL) { + vty_out(vty, "%% pce-config [%s] does not exist.\n", + pcep_pce_config); + return CMD_WARNING; + } + + vty_out(vty, "pce-config: %s\n", group_opts->name); + pcep_cli_print_pce_config(group_opts, buf, sizeof(buf)); + vty_out(vty, "%s", buf); + return CMD_SUCCESS; + } + + /* Show all Peer config groups */ + for (int i = 0; i < MAX_PCE; i++) { + group_opts = pcep_g->config_group_opts[i]; + if (group_opts == NULL) { + continue; + } + + vty_out(vty, "pce-config: %s\n", group_opts->name); + pcep_cli_print_pce_config(group_opts, buf, sizeof(buf)); + vty_out(vty, "%s", buf); + buf[0] = 0; + } + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pce(struct vty *vty, const char *pce_peer_name) +{ + /* If it already exists, it will be updated in the sub-commands */ + struct pce_opts_cli *pce_opts_cli = pcep_cli_find_pce(pce_peer_name); + if 
(pce_opts_cli == NULL) { + pce_opts_cli = pcep_cli_create_pce_opts(pce_peer_name); + + if (!pcep_cli_add_pce(pce_opts_cli)) { + vty_out(vty, + "%% Cannot create PCE, as the Maximum limit of %d PCEs has been reached.\n", + MAX_PCE); + XFREE(MTYPE_PCEP, pce_opts_cli); + return CMD_WARNING; + } + } + + current_pce_opts_g = pce_opts_cli; + vty->node = PCEP_PCE_NODE; + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pce_delete(struct vty *vty, const char *pce_peer_name) +{ + struct pce_opts_cli *pce_opts_cli = pcep_cli_find_pce(pce_peer_name); + if (pce_opts_cli == NULL) { + vty_out(vty, "%% PCC peer does not exist.\n"); + return CMD_WARNING; + } + + /* To better work with frr-reload, go ahead and delete it if its in use + */ + if (pcep_cli_pcc_has_pce(pce_peer_name)) { + vty_out(vty, + "%% Notice: the pce is in use by a PCC, also disconnecting.\n"); + path_pcep_cli_pcc_pcc_peer_delete(vty, pce_peer_name, NULL, 0); + } + + pcep_cli_delete_pce(pce_peer_name); + + return CMD_SUCCESS; +} + +/* Internal Util func to show an individual PCE, + * only used by path_pcep_cli_show_srte_pcep_pce() */ +static void show_pce_peer(struct vty *vty, struct pce_opts_cli *pce_opts_cli) +{ + struct pce_opts *pce_opts = &pce_opts_cli->pce_opts; + vty_out(vty, "PCE: %s\n", pce_opts->pce_name); + + /* Remote PCE IP address */ + if (IS_IPADDR_V6(&pce_opts->addr)) { + vty_out(vty, " %s %s %pI6 %s %d\n", PCEP_VTYSH_ARG_ADDRESS, + PCEP_VTYSH_ARG_IPV6, &pce_opts->addr.ipaddr_v6, + PCEP_VTYSH_ARG_PORT, pce_opts->port); + } else { + vty_out(vty, " %s %s %pI4 %s %d\n", PCEP_VTYSH_ARG_ADDRESS, + PCEP_VTYSH_ARG_IP, &pce_opts->addr.ipaddr_v4, + PCEP_VTYSH_ARG_PORT, pce_opts->port); + } + + if (pce_opts_cli->config_group_name[0] != '\0') { + vty_out(vty, " pce-config: %s\n", + pce_opts_cli->config_group_name); + } + + char buf[1024] = ""; + pcep_cli_print_pce_config(&pce_opts->config_opts, buf, sizeof(buf)); + vty_out(vty, "%s", buf); +} + +static int path_pcep_cli_show_srte_pcep_pce(struct vty *vty, + const char *pce_peer) +{ + /* Only show 1 PCE */ + struct pce_opts_cli *pce_opts_cli; + if (pce_peer != NULL) { + pce_opts_cli = pcep_cli_find_pce(pce_peer); + if (pce_opts_cli == NULL) { + vty_out(vty, "%% PCE [%s] does not exist.\n", pce_peer); + return CMD_WARNING; + } + + pcep_cli_merge_pcep_pce_config_options(pce_opts_cli); + show_pce_peer(vty, pce_opts_cli); + + return CMD_SUCCESS; + } + + /* Show all PCEs */ + for (int i = 0; i < MAX_PCE; i++) { + pce_opts_cli = pcep_g->pce_opts_cli[i]; + if (pce_opts_cli == NULL) { + continue; + } + + pcep_cli_merge_pcep_pce_config_options(pce_opts_cli); + show_pce_peer(vty, pce_opts_cli); + } + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_sr_draft07(struct vty *vty) +{ + struct pcep_config_group_opts *pce_config = NULL; + + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + pce_config = ¤t_pce_opts_g->pce_config_group_opts; + current_pce_opts_g->merged = false; + } else if (vty->node == PCEP_PCE_CONFIG_NODE) { + pce_config = current_pcep_config_group_opts_g; + } else { + return CMD_ERR_NO_MATCH; + } + + pce_config->draft07 = true; + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_pce_initiated(struct vty *vty) +{ + struct pcep_config_group_opts *pce_config = NULL; + + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + pce_config = ¤t_pce_opts_g->pce_config_group_opts; + current_pce_opts_g->merged = false; + } else if (vty->node == 
PCEP_PCE_CONFIG_NODE) { + pce_config = current_pcep_config_group_opts_g; + } else { + return CMD_ERR_NO_MATCH; + } + + pce_config->pce_initiated = true; + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_tcp_md5_auth(struct vty *vty, + const char *tcp_md5_auth) +{ + struct pcep_config_group_opts *pce_config = NULL; + + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + pce_config = ¤t_pce_opts_g->pce_config_group_opts; + current_pce_opts_g->merged = false; + } else if (vty->node == PCEP_PCE_CONFIG_NODE) { + pce_config = current_pcep_config_group_opts_g; + } else { + return CMD_ERR_NO_MATCH; + } + + strncpy(pce_config->tcp_md5_auth, tcp_md5_auth, TCP_MD5SIG_MAXKEYLEN); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_address(struct vty *vty, const char *ip_str, + struct in_addr *ip, const char *ipv6_str, + struct in6_addr *ipv6, + const char *port_str, long port) +{ + struct pce_opts *pce_opts = NULL; + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + pce_opts = ¤t_pce_opts_g->pce_opts; + current_pce_opts_g->merged = false; + } else { + return CMD_ERR_NO_MATCH; + } + + if (ipv6_str != NULL) { + pce_opts->addr.ipa_type = IPADDR_V6; + memcpy(&pce_opts->addr.ipaddr_v6, ipv6, + sizeof(struct in6_addr)); + } else if (ip_str != NULL) { + pce_opts->addr.ipa_type = IPADDR_V4; + memcpy(&pce_opts->addr.ipaddr_v4, ip, sizeof(struct in_addr)); + } else { + return CMD_ERR_NO_MATCH; + } + + /* Handle the optional port */ + pce_opts->port = PCEP_DEFAULT_PORT; + PCEP_VTYSH_INT_ARG_CHECK(port_str, port, pce_opts->port, 0, 65535); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_source_address(struct vty *vty, + const char *ip_str, + struct in_addr *ip, + const char *ipv6_str, + struct in6_addr *ipv6, + const char *port_str, long port) +{ + struct pcep_config_group_opts *pce_config = NULL; + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + pce_config = ¤t_pce_opts_g->pce_config_group_opts; + current_pce_opts_g->merged = false; + } else if (vty->node == PCEP_PCE_CONFIG_NODE) { + pce_config = current_pcep_config_group_opts_g; + } else { + return CMD_ERR_NO_MATCH; + } + + /* Handle the optional source IP */ + if (ipv6_str != NULL) { + pce_config->source_ip.ipa_type = IPADDR_V6; + memcpy(&pce_config->source_ip.ipaddr_v6, ipv6, + sizeof(struct in6_addr)); + } else if (ip_str != NULL) { + pce_config->source_ip.ipa_type = IPADDR_V4; + memcpy(&pce_config->source_ip.ipaddr_v4, ip, + sizeof(struct in_addr)); + } + + /* Handle the optional port */ + PCEP_VTYSH_INT_ARG_CHECK(port_str, port, pce_config->source_port, 0, + 65535); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_pcep_pce_config_ref(struct vty *vty, + const char *config_group_name) +{ + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + current_pce_opts_g->merged = false; + } else { + return CMD_ERR_NO_MATCH; + } + + struct pcep_config_group_opts *pce_config = + pcep_cli_find_pcep_pce_config(config_group_name); + if (pce_config == NULL) { + vty_out(vty, "%% pce-config [%s] does not exist.\n", + config_group_name); + return CMD_WARNING; + } + + strlcpy(current_pce_opts_g->config_group_name, config_group_name, + sizeof(current_pce_opts_g->config_group_name)); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_peer_timers( + struct vty *vty, const char *keep_alive_str, 
long keep_alive, + const char *min_peer_keep_alive_str, long min_peer_keep_alive, + const char *max_peer_keep_alive_str, long max_peer_keep_alive, + const char *dead_timer_str, long dead_timer, + const char *min_peer_dead_timer_str, long min_peer_dead_timer, + const char *max_peer_dead_timer_str, long max_peer_dead_timer, + const char *pcep_request_str, long pcep_request, + const char *session_timeout_interval_str, long session_timeout_interval, + const char *delegation_timeout_str, long delegation_timeout) +{ + struct pcep_config_group_opts *pce_config = NULL; + if (vty->node == PCEP_PCE_NODE) { + /* TODO need to see if the pce is in use, and reset the + * connection */ + pce_config = ¤t_pce_opts_g->pce_config_group_opts; + current_pce_opts_g->merged = false; + } else if (vty->node == PCEP_PCE_CONFIG_NODE) { + pce_config = current_pcep_config_group_opts_g; + } else { + return CMD_ERR_NO_MATCH; + } + + if (min_peer_keep_alive && max_peer_keep_alive) + if (min_peer_keep_alive >= max_peer_keep_alive) { + return CMD_ERR_NO_MATCH; + } + + if (min_peer_dead_timer && max_peer_dead_timer) + if (min_peer_dead_timer >= max_peer_dead_timer) { + return CMD_ERR_NO_MATCH; + } + + /* Handle the arguments */ + PCEP_VTYSH_INT_ARG_CHECK(keep_alive_str, keep_alive, + pce_config->keep_alive_seconds, 0, 64); + PCEP_VTYSH_INT_ARG_CHECK(min_peer_keep_alive_str, min_peer_keep_alive, + pce_config->min_keep_alive_seconds, 0, 256); + PCEP_VTYSH_INT_ARG_CHECK(max_peer_keep_alive_str, max_peer_keep_alive, + pce_config->max_keep_alive_seconds, 0, 256); + PCEP_VTYSH_INT_ARG_CHECK(dead_timer_str, dead_timer, + pce_config->dead_timer_seconds, 3, 256); + PCEP_VTYSH_INT_ARG_CHECK(min_peer_dead_timer_str, min_peer_dead_timer, + pce_config->min_dead_timer_seconds, 3, 256); + PCEP_VTYSH_INT_ARG_CHECK(max_peer_dead_timer_str, max_peer_dead_timer, + pce_config->max_dead_timer_seconds, 3, 256); + PCEP_VTYSH_INT_ARG_CHECK(pcep_request_str, pcep_request, + pce_config->pcep_request_time_seconds, 0, 121); + PCEP_VTYSH_INT_ARG_CHECK( + session_timeout_interval_str, session_timeout_interval, + pce_config->session_timeout_inteval_seconds, 0, 121); + PCEP_VTYSH_INT_ARG_CHECK(delegation_timeout_str, delegation_timeout, + pce_config->delegation_timeout_seconds, 0, 61); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcc(struct vty *vty) +{ + VTY_PUSH_CONTEXT_NULL(PCEP_PCC_NODE); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcc_delete(struct vty *vty) +{ + /* Clear the pce_connections */ + memset(&pce_connections_g, 0, sizeof(pce_connections_g)); + pcc_msd_configured_g = false; + + pcep_ctrl_remove_pcc(pcep_g->fpt, NULL); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcc_pcc_msd(struct vty *vty, const char *msd_str, + long msd) +{ + pcc_msd_configured_g = true; + PCEP_VTYSH_INT_ARG_CHECK(msd_str, msd, pcc_msd_g, 0, 33); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcc_pcc_peer(struct vty *vty, const char *peer_name, + const char *precedence_str, + long precedence) +{ + /* Check if the pcc-peer exists */ + struct pce_opts_cli *pce_opts_cli = pcep_cli_find_pce(peer_name); + if (pce_opts_cli == NULL) { + vty_out(vty, "%% PCE [%s] does not exist.\n", peer_name); + return CMD_WARNING; + } + struct pce_opts *pce_opts = &pce_opts_cli->pce_opts; + + /* Check if the pcc-peer is duplicated */ + if (pcep_cli_pcc_has_pce(peer_name)) { + vty_out(vty, "%% The peer [%s] has already been configured.\n", + peer_name); + return CMD_WARNING; + } + + /* Get the optional precedence argument */ + pce_opts->precedence = 
DEFAULT_PCE_PRECEDENCE; + PCEP_VTYSH_INT_ARG_CHECK(precedence_str, precedence, + pce_opts->precedence, 0, 256); + + /* Finalize the pce_opts config values */ + pcep_cli_merge_pcep_pce_config_options(pce_opts_cli); + pcep_cli_add_pce_connection(&pce_opts_cli->pce_opts); + + /* Verify the PCE has the IP set */ + struct in6_addr zero_v6_addr; + memset(&zero_v6_addr, 0, sizeof(struct in6_addr)); + if (memcmp(&pce_opts->addr.ip, &zero_v6_addr, IPADDRSZ(&pce_opts->addr)) + == 0) { + vty_out(vty, + "%% The peer [%s] does not have an IP set and cannot be used until it does.\n", + peer_name); + return CMD_WARNING; + } + + /* Update the pcc_opts with the source ip, port, and msd */ + struct pcc_opts *pcc_opts_copy = + XMALLOC(MTYPE_PCEP, sizeof(struct pcc_opts)); + memcpy(&pcc_opts_copy->addr, + &pce_opts_cli->pce_opts.config_opts.source_ip, + sizeof(struct pcc_opts)); + pcc_opts_copy->msd = pcc_msd_g; + pcc_opts_copy->port = pce_opts_cli->pce_opts.config_opts.source_port; + if (pcep_ctrl_update_pcc_options(pcep_g->fpt, pcc_opts_copy)) { + return CMD_WARNING; + } + + /* Send a copy of the pce_opts, this one is only used for the CLI */ + struct pce_opts *pce_opts_copy = + XMALLOC(MTYPE_PCEP, sizeof(struct pce_opts)); + memcpy(pce_opts_copy, pce_opts, sizeof(struct pce_opts)); + if (pcep_ctrl_update_pce_options(pcep_g->fpt, pce_opts_copy)) { + return CMD_WARNING; + } + + return CMD_SUCCESS; +} + +static int path_pcep_cli_pcc_pcc_peer_delete(struct vty *vty, + const char *peer_name, + const char *precedence_str, + long precedence) +{ + /* Check if the pcc-peer is connected to the PCC */ + if (!pcep_cli_pcc_has_pce(peer_name)) { + vty_out(vty, + "%% WARN: The peer [%s] is not connected to the PCC.\n", + peer_name); + return CMD_WARNING; + } + + struct pce_opts_cli *pce_opts_cli = pcep_cli_find_pce(peer_name); + pcep_cli_remove_pce_connection(&pce_opts_cli->pce_opts); + + /* Send a copy of the pce_opts, this one is used for CLI only */ + struct pce_opts *pce_opts_copy = + XMALLOC(MTYPE_PCEP, sizeof(struct pce_opts)); + memcpy(pce_opts_copy, &pce_opts_cli->pce_opts, sizeof(struct pce_opts)); + pcep_ctrl_remove_pcc(pcep_g->fpt, pce_opts_copy); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_show_srte_pcep_pcc(struct vty *vty) +{ + vty_out(vty, "pcc msd %d\n", pcc_msd_g); + + return CMD_SUCCESS; +} + +/* Internal util function to print pcep capabilities to a buffer */ +static void print_pcep_capabilities(char *buf, size_t buf_len, + pcep_configuration *config) +{ + if (config->support_stateful_pce_lsp_update) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_STATEFUL); + } + if (config->support_include_db_version) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_INCL_DB_VER); + } + if (config->support_lsp_triggered_resync) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_LSP_TRIGGERED); + } + if (config->support_lsp_delta_sync) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_LSP_DELTA); + } + if (config->support_pce_triggered_initial_sync) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_PCE_TRIGGERED); + } + if (config->support_sr_te_pst) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_SR_TE_PST); + } + if (config->pcc_can_resolve_nai_to_sid) { + csnprintfrr(buf, buf_len, "%s", PCEP_CLI_CAP_PCC_RESOLVE_NAI); + } +} + +/* Internal util function to print a pcep session */ +static void print_pcep_session(struct vty *vty, struct pce_opts *pce_opts, + struct pcep_pcc_info *pcc_info) +{ + char buf[1024]; + buf[0] = '\0'; + + vty_out(vty, "\nPCE %s\n", pce_opts->pce_name); + + /* PCE IP */ + if 
(IS_IPADDR_V4(&pce_opts->addr)) { + vty_out(vty, " PCE IP %pI4 port %d\n", + &pce_opts->addr.ipaddr_v4, pce_opts->port); + } else if (IS_IPADDR_V6(&pce_opts->addr)) { + vty_out(vty, " PCE IPv6 %pI6 port %d\n", + &pce_opts->addr.ipaddr_v6, pce_opts->port); + } + + /* PCC IP */ + if (IS_IPADDR_V4(&pcc_info->pcc_addr)) { + vty_out(vty, " PCC IP %pI4 port %d\n", + &pcc_info->pcc_addr.ipaddr_v4, pcc_info->pcc_port); + } else if (IS_IPADDR_V6(&pcc_info->pcc_addr)) { + vty_out(vty, " PCC IPv6 %pI6 port %d\n", + &pcc_info->pcc_addr.ipaddr_v6, pcc_info->pcc_port); + } + vty_out(vty, " PCC MSD %d\n", pcc_info->msd); + + if (pcc_info->status == PCEP_PCC_OPERATING) { + vty_out(vty, " Session Status UP\n"); + } else { + vty_out(vty, " Session Status %s\n", + pcc_status_name(pcc_info->status)); + } + + if (pcc_info->is_best_multi_pce) { + vty_out(vty, " Precedence %d, best candidate\n", + ((pcc_info->precedence > 0) ? pcc_info->precedence + : DEFAULT_PCE_PRECEDENCE)); + } else { + vty_out(vty, " Precedence %d\n", + ((pcc_info->precedence > 0) ? pcc_info->precedence + : DEFAULT_PCE_PRECEDENCE)); + } + vty_out(vty, " Confidence %s\n", + ((pcc_info->previous_best) ? "low" + : "normal")); + + /* PCEPlib pcep session values, get a thread safe copy of the counters + */ + pcep_session *session = + pcep_ctrl_get_pcep_session(pcep_g->fpt, pcc_info->pcc_id); + + /* Config Options values */ + struct pcep_config_group_opts *config_opts = &pce_opts->config_opts; + if (session != NULL) { + vty_out(vty, " Timer: KeepAlive config %d, pce-negotiated %d\n", + config_opts->keep_alive_seconds, + session->pcc_config + .keep_alive_pce_negotiated_timer_seconds); + vty_out(vty, " Timer: DeadTimer config %d, pce-negotiated %d\n", + config_opts->dead_timer_seconds, + session->pcc_config.dead_timer_pce_negotiated_seconds); + } else { + vty_out(vty, " Timer: KeepAlive %d\n", + config_opts->keep_alive_seconds); + vty_out(vty, " Timer: DeadTimer %d\n", + config_opts->dead_timer_seconds); + } + vty_out(vty, " Timer: PcRequest %d\n", + config_opts->pcep_request_time_seconds); + vty_out(vty, " Timer: SessionTimeout Interval %d\n", + config_opts->session_timeout_inteval_seconds); + vty_out(vty, " Timer: Delegation Timeout %d\n", + config_opts->delegation_timeout_seconds); + if (strlen(config_opts->tcp_md5_auth) > 0) { + vty_out(vty, " TCP MD5 Auth Str: %s\n", + config_opts->tcp_md5_auth); + } else { + vty_out(vty, " No TCP MD5 Auth\n"); + } + + if (config_opts->draft07) { + vty_out(vty, " PCE SR Version draft07\n"); + } else { + vty_out(vty, " PCE SR Version draft16 and RFC8408\n"); + } + + vty_out(vty, " Next PcReq ID %d\n", pcc_info->next_reqid); + vty_out(vty, " Next PLSP ID %d\n", pcc_info->next_plspid); + + if (session != NULL) { + if (pcc_info->status == PCEP_PCC_SYNCHRONIZING + || pcc_info->status == PCEP_PCC_OPERATING) { + time_t current_time = time(NULL); + struct tm lt = {0}; + /* Just for the timezone */ + localtime_r(¤t_time, <); + gmtime_r(&session->time_connected, <); + vty_out(vty, + " Connected for %ld seconds, since %d-%02d-%02d %02d:%02d:%02d UTC\n", + (current_time - session->time_connected), + lt.tm_year + 1900, lt.tm_mon + 1, lt.tm_mday, + lt.tm_hour, lt.tm_min, lt.tm_sec); + } + + /* PCC capabilities */ + buf[0] = '\0'; + int index = 0; + if (config_opts->pce_initiated) { + index += csnprintfrr(buf, sizeof(buf), "%s", + PCEP_CLI_CAP_PCC_PCE_INITIATED); + } else { + index += csnprintfrr(buf, sizeof(buf), "%s", + PCEP_CLI_CAP_PCC_INITIATED); + } + print_pcep_capabilities(buf, sizeof(buf) - index, + 
&session->pcc_config); + vty_out(vty, " PCC Capabilities:%s\n", buf); + + /* PCE capabilities */ + buf[0] = '\0'; + print_pcep_capabilities(buf, sizeof(buf), &session->pce_config); + if (buf[0] != '\0') { + vty_out(vty, " PCE Capabilities:%s\n", buf); + } + XFREE(MTYPE_PCEP, session); + } else { + vty_out(vty, " Detailed session information not available\n"); + } + + /* Message Counters, get a thread safe copy of the counters */ + struct counters_group *group = + pcep_ctrl_get_counters(pcep_g->fpt, pcc_info->pcc_id); + + if (group != NULL) { + struct counters_subgroup *rx_msgs = + find_subgroup(group, COUNTER_SUBGROUP_ID_RX_MSG); + struct counters_subgroup *tx_msgs = + find_subgroup(group, COUNTER_SUBGROUP_ID_TX_MSG); + + if (rx_msgs != NULL && tx_msgs != NULL) { + vty_out(vty, " PCEP Message Statistics\n"); + vty_out(vty, " %27s %6s\n", "Sent", "Rcvd"); + for (int i = 0; i < rx_msgs->max_counters; i++) { + struct counter *rx_counter = + rx_msgs->counters[i]; + struct counter *tx_counter = + tx_msgs->counters[i]; + if (rx_counter != NULL && tx_counter != NULL) { + vty_out(vty, " %20s: %5d %5d\n", + tx_counter->counter_name, + tx_counter->counter_value, + rx_counter->counter_value); + } + } + vty_out(vty, " %20s: %5d %5d\n", "Total", + subgroup_counters_total(tx_msgs), + subgroup_counters_total(rx_msgs)); + } + pcep_lib_free_counters(group); + } else { + vty_out(vty, " Counters not available\n"); + } + + XFREE(MTYPE_PCEP, pcc_info); +} + +static int path_pcep_cli_show_srte_pcep_session(struct vty *vty, + const char *pcc_peer) +{ + struct pce_opts_cli *pce_opts_cli; + struct pcep_pcc_info *pcc_info; + + /* Only show 1 PCEP session */ + if (pcc_peer != NULL) { + pce_opts_cli = pcep_cli_find_pce(pcc_peer); + if (pce_opts_cli == NULL) { + vty_out(vty, "%% PCE [%s] does not exist.\n", pcc_peer); + return CMD_WARNING; + } + + if (!pcep_cli_pcc_has_pce(pcc_peer)) { + vty_out(vty, "%% PCC is not connected to PCE [%s].\n", + pcc_peer); + return CMD_WARNING; + } + + pcc_info = pcep_ctrl_get_pcc_info(pcep_g->fpt, pcc_peer); + if (pcc_info == NULL) { + vty_out(vty, + "%% Cannot retrieve PCEP session info for PCE [%s]\n", + pcc_peer); + return CMD_WARNING; + } + + print_pcep_session(vty, &pce_opts_cli->pce_opts, pcc_info); + + return CMD_SUCCESS; + } + + /* Show all PCEP sessions */ + struct pce_opts *pce_opts; + int num_pcep_sessions_conf = 0; + int num_pcep_sessions_conn = 0; + for (int i = 0; i < MAX_PCC; i++) { + pce_opts = pce_connections_g.connections[i]; + if (pce_opts == NULL) { + continue; + } + + pcc_info = + pcep_ctrl_get_pcc_info(pcep_g->fpt, pce_opts->pce_name); + if (pcc_info == NULL) { + vty_out(vty, + "%% Cannot retrieve PCEP session info for PCE [%s]\n", + pce_opts->pce_name); + continue; + } + + num_pcep_sessions_conn += + pcc_info->status == PCEP_PCC_OPERATING ? 
1 : 0; + num_pcep_sessions_conf++; + print_pcep_session(vty, pce_opts, pcc_info); + } + + vty_out(vty, "PCEP Sessions => Configured %d ; Connected %d\n", + num_pcep_sessions_conf, num_pcep_sessions_conn); + + return CMD_SUCCESS; +} + +static int path_pcep_cli_clear_srte_pcep_session(struct vty *vty, + const char *pcc_peer) +{ + struct pce_opts_cli *pce_opts_cli; + + /* Only clear 1 PCEP session */ + if (pcc_peer != NULL) { + pce_opts_cli = pcep_cli_find_pce(pcc_peer); + if (pce_opts_cli == NULL) { + vty_out(vty, "%% PCE [%s] does not exist.\n", pcc_peer); + return CMD_WARNING; + } + + if (!pcep_cli_pcc_has_pce(pcc_peer)) { + vty_out(vty, "%% PCC is not connected to PCE [%s].\n", + pcc_peer); + return CMD_WARNING; + } + + pcep_ctrl_reset_pcc_session(pcep_g->fpt, + pce_opts_cli->pce_opts.pce_name); + vty_out(vty, "PCEP session cleared for peer %s\n", pcc_peer); + + return CMD_SUCCESS; + } + + /* Clear all PCEP sessions */ + struct pce_opts *pce_opts; + int num_pcep_sessions = 0; + for (int i = 0; i < MAX_PCC; i++) { + pce_opts = pce_connections_g.connections[i]; + if (pce_opts == NULL) { + continue; + } + + num_pcep_sessions++; + pcep_ctrl_reset_pcc_session(pcep_g->fpt, pce_opts->pce_name); + vty_out(vty, "PCEP session cleared for peer %s\n", + pce_opts->pce_name); + } + + vty_out(vty, "Cleared [%d] PCEP sessions\n", num_pcep_sessions); + + return CMD_SUCCESS; +} + +/* + * Config Write functions + */ + +int pcep_cli_debug_config_write(struct vty *vty) +{ + char buff[128] = ""; + + if (DEBUG_MODE_CHECK(&pcep_g->dbg, DEBUG_MODE_CONF)) { + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_BASIC)) + csnprintfrr(buff, sizeof(buff), " %s", + PCEP_VTYSH_ARG_BASIC); + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PATH)) + csnprintfrr(buff, sizeof(buff), " %s", + PCEP_VTYSH_ARG_PATH); + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEP)) + csnprintfrr(buff, sizeof(buff), " %s", + PCEP_VTYSH_ARG_MESSAGE); + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEPLIB)) + csnprintfrr(buff, sizeof(buff), " %s", + PCEP_VTYSH_ARG_PCEPLIB); + vty_out(vty, "debug pathd pcep%s\n", buff); + buff[0] = 0; + return 1; + } + + return 0; +} + +int pcep_cli_debug_set_all(uint32_t flags, bool set) +{ + DEBUG_FLAGS_SET(&pcep_g->dbg, flags, set); + + /* If all modes have been turned off, don't preserve options. 
*/ + if (!DEBUG_MODE_CHECK(&pcep_g->dbg, DEBUG_MODE_ALL)) + DEBUG_CLEAR(&pcep_g->dbg); + + return 0; +} + +int pcep_cli_pcep_config_write(struct vty *vty) +{ + vty_out(vty, " pcep\n"); + return 1; +} + +int pcep_cli_pcc_config_write(struct vty *vty) +{ + struct pce_opts *pce_opts; + char buf[128] = ""; + int lines = 0; + + /* The MSD, nor any PCE peers have been configured on the PCC */ + if (!pcc_msd_configured_g && pce_connections_g.num_connections == 0) { + return lines; + } + + vty_out(vty, " pcc\n"); + lines++; + + /* Prepare the MSD, if present */ + if (pcc_msd_configured_g) { + vty_out(vty, " %s %d\n", PCEP_VTYSH_ARG_MSD, pcc_msd_g); + lines++; + } + + if (pce_connections_g.num_connections == 0) { + return lines; + } + + buf[0] = 0; + for (int i = 0; i < MAX_PCC; i++) { + pce_opts = pce_connections_g.connections[i]; + if (pce_opts == NULL) { + continue; + } + + /* Only show the PCEs configured in the pcc sub-command */ + if (!pcep_cli_pcc_has_pce(pce_opts->pce_name)) { + continue; + } + + csnprintfrr(buf, sizeof(buf), " peer %s", + pce_opts->pce_name); + if (pce_opts->precedence > 0 + && pce_opts->precedence != DEFAULT_PCE_PRECEDENCE) { + csnprintfrr(buf, sizeof(buf), " %s %d", + PCEP_VTYSH_ARG_PRECEDENCE, + pce_opts->precedence); + } + vty_out(vty, "%s\n", buf); + lines++; + buf[0] = 0; + } + + return lines; +} + +/* Internal function used by pcep_cli_pce_config_write() + * and pcep_cli_pcep_pce_config_write() */ +static int pcep_cli_print_pce_config(struct pcep_config_group_opts *group_opts, + char *buf, size_t buf_len) +{ + int lines = 0; + + if (group_opts->source_ip.ipa_type != IPADDR_NONE + || group_opts->source_port != 0) { + csnprintfrr(buf, buf_len, " "); + if (IS_IPADDR_V4(&group_opts->source_ip)) { + csnprintfrr(buf, buf_len, " %s %s %pI4", + PCEP_VTYSH_ARG_SOURCE_ADDRESS, + PCEP_VTYSH_ARG_IP, + &group_opts->source_ip.ipaddr_v4); + } else if (IS_IPADDR_V6(&group_opts->source_ip)) { + csnprintfrr(buf, buf_len, " %s %s %pI6", + PCEP_VTYSH_ARG_SOURCE_ADDRESS, + PCEP_VTYSH_ARG_IPV6, + &group_opts->source_ip.ipaddr_v6); + } + if (group_opts->source_port > 0) { + csnprintfrr(buf, buf_len, " %s %d", PCEP_VTYSH_ARG_PORT, + group_opts->source_port); + } + csnprintfrr(buf, buf_len, "\n"); + lines++; + } + /* Group the keep-alive together for devman */ + if ((group_opts->keep_alive_seconds > 0) + || (group_opts->min_keep_alive_seconds > 0) + || (group_opts->max_keep_alive_seconds > 0)) { + csnprintfrr(buf, buf_len, " %s", PCEP_VTYSH_ARG_TIMER); + + if (group_opts->keep_alive_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %d", + PCEP_VTYSH_ARG_KEEP_ALIVE, + group_opts->keep_alive_seconds); + } + if (group_opts->min_keep_alive_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %d", + PCEP_VTYSH_ARG_KEEP_ALIVE_MIN, + group_opts->min_keep_alive_seconds); + } + if (group_opts->max_keep_alive_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %d", + PCEP_VTYSH_ARG_KEEP_ALIVE_MAX, + group_opts->max_keep_alive_seconds); + } + csnprintfrr(buf, buf_len, "\n"); + lines++; + } + + /* Group the dead-timer together for devman */ + if ((group_opts->dead_timer_seconds > 0) + || (group_opts->min_dead_timer_seconds > 0) + || (group_opts->max_dead_timer_seconds > 0)) { + csnprintfrr(buf, buf_len, " %s", PCEP_VTYSH_ARG_TIMER); + + if (group_opts->dead_timer_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %d", + PCEP_VTYSH_ARG_DEAD_TIMER, + group_opts->dead_timer_seconds); + } + if (group_opts->min_dead_timer_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %d", + PCEP_VTYSH_ARG_DEAD_TIMER_MIN, + 
group_opts->min_dead_timer_seconds); + } + if (group_opts->max_dead_timer_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %d", + PCEP_VTYSH_ARG_DEAD_TIMER_MAX, + group_opts->max_dead_timer_seconds); + } + csnprintfrr(buf, buf_len, "\n"); + lines++; + } + + if (group_opts->pcep_request_time_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %s %d\n", + PCEP_VTYSH_ARG_TIMER, PCEP_VTYSH_ARG_PCEP_REQUEST, + group_opts->pcep_request_time_seconds); + lines++; + } + if (group_opts->delegation_timeout_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %s %d\n", + PCEP_VTYSH_ARG_TIMER, + PCEP_VTYSH_ARG_DELEGATION_TIMEOUT, + group_opts->delegation_timeout_seconds); + lines++; + } + if (group_opts->session_timeout_inteval_seconds > 0) { + csnprintfrr(buf, buf_len, " %s %s %d\n", + PCEP_VTYSH_ARG_TIMER, + PCEP_VTYSH_ARG_SESSION_TIMEOUT, + group_opts->session_timeout_inteval_seconds); + lines++; + } + if (group_opts->tcp_md5_auth[0] != '\0') { + csnprintfrr(buf, buf_len, " %s %s\n", PCEP_VTYSH_ARG_TCP_MD5, + group_opts->tcp_md5_auth); + lines++; + } + if (group_opts->draft07) { + csnprintfrr(buf, buf_len, " %s\n", + PCEP_VTYSH_ARG_SR_DRAFT07); + lines++; + } + if (group_opts->pce_initiated) { + csnprintfrr(buf, buf_len, " %s\n", PCEP_VTYSH_ARG_PCE_INIT); + lines++; + } + + return lines; +} + +int pcep_cli_pce_config_write(struct vty *vty) +{ + int lines = 0; + char buf[1024] = ""; + + for (int i = 0; i < MAX_PCE; i++) { + struct pce_opts_cli *pce_opts_cli = pcep_g->pce_opts_cli[i]; + if (pce_opts_cli == NULL) { + continue; + } + struct pce_opts *pce_opts = &pce_opts_cli->pce_opts; + + vty_out(vty, " pce %s\n", pce_opts->pce_name); + if (IS_IPADDR_V6(&pce_opts->addr)) { + vty_out(vty, " %s %s %pI6", PCEP_VTYSH_ARG_ADDRESS, + PCEP_VTYSH_ARG_IPV6, &pce_opts->addr.ipaddr_v6); + } else if (IS_IPADDR_V4(&pce_opts->addr)) { + vty_out(vty, " address %s %pI4", PCEP_VTYSH_ARG_IP, + &pce_opts->addr.ipaddr_v4); + } + if (pce_opts->port != PCEP_DEFAULT_PORT) { + vty_out(vty, " %s %d", PCEP_VTYSH_ARG_PORT, + pce_opts->port); + } + vty_out(vty, "%s\n", buf); + lines += 2; + + if (pce_opts_cli->config_group_name[0] != '\0') { + vty_out(vty, " config %s\n", + pce_opts_cli->config_group_name); + lines++; + } + + /* Only display the values configured on the PCE, not the values + * from its optional pce-config-group, nor the default values */ + lines += pcep_cli_print_pce_config( + &pce_opts_cli->pce_config_group_opts, buf, sizeof(buf)); + + vty_out(vty, "%s", buf); + buf[0] = '\0'; + } + + return lines; +} + +int pcep_cli_pcep_pce_config_write(struct vty *vty) +{ + int lines = 0; + char buf[1024] = ""; + + for (int i = 0; i < MAX_PCE; i++) { + struct pcep_config_group_opts *group_opts = + pcep_g->config_group_opts[i]; + if (group_opts == NULL) { + continue; + } + + vty_out(vty, " pce-config %s\n", group_opts->name); + lines += 1; + + lines += + pcep_cli_print_pce_config(group_opts, buf, sizeof(buf)); + vty_out(vty, "%s", buf); + buf[0] = 0; + } + + return lines; +} + +/* + * VTYSH command syntax definitions + * The param names are taken from the path_pcep_cli_clippy.c generated file. 
+ */ + +DEFPY(show_debugging_pathd_pcep, + show_debugging_pathd_pcep_cmd, + "show debugging pathd-pcep", + SHOW_STR + "State of each debugging option\n" + "pathd pcep module debugging\n") +{ + vty_out(vty, "Pathd pcep debugging status:\n"); + + if (DEBUG_MODE_CHECK(&pcep_g->dbg, DEBUG_MODE_CONF)) { + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_BASIC)) + vty_out(vty, " Pathd pcep %s debugging is on\n", + PCEP_VTYSH_ARG_BASIC); + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PATH)) + vty_out(vty, " Pathd pcep %s debugging is on\n", + PCEP_VTYSH_ARG_PATH); + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEP)) + vty_out(vty, " Pathd pcep %s debugging is on\n", + PCEP_VTYSH_ARG_MESSAGE); + if (DEBUG_FLAGS_CHECK(&pcep_g->dbg, PCEP_DEBUG_MODE_PCEPLIB)) + vty_out(vty, " Pathd pcep %s debugging is on\n", + PCEP_VTYSH_ARG_PCEPLIB); + } + + return CMD_SUCCESS; +} + +DEFPY(pcep_cli_debug, + pcep_cli_debug_cmd, + "[no] debug pathd pcep [basic]$basic_str [path]$path_str [message]$message_str [pceplib]$pceplib_str", + NO_STR DEBUG_STR + "pathd debugging\n" + "pcep module debugging\n" + "module basic debugging\n" + "path structures debugging\n" + "pcep message debugging\n" + "pceplib debugging\n") +{ + return path_pcep_cli_debug(vty, no, basic_str, path_str, message_str, + pceplib_str); +} + +DEFPY(pcep_cli_show_srte_pcep_counters, + pcep_cli_show_srte_pcep_counters_cmd, + "show sr-te pcep counters", + SHOW_STR + "SR-TE info\n" + "PCEP info\n" + "PCEP counters\n") +{ + return path_pcep_cli_show_srte_pcep_counters(vty); +} + +DEFPY_NOSH( + pcep_cli_pcep, + pcep_cli_pcep_cmd, + "pcep", + "PCEP configuration\n") +{ + vty->node = PCEP_NODE; + return CMD_SUCCESS; +} + +DEFPY_NOSH( + pcep_cli_pcep_pce_config, + pcep_cli_pcep_pce_config_cmd, + "[no] pce-config WORD$name", + NO_STR + "Shared configuration\n" + "Shared configuration name\n") +{ + if (no == NULL) + return path_pcep_cli_pcep_pce_config(vty, name); + return path_pcep_cli_pcep_pce_config_delete(vty, name); +} + +DEFPY(pcep_cli_show_srte_pcep_pce_config, + pcep_cli_show_srte_pcep_pce_config_cmd, + "show sr-te pcep pce-config [<default|WORD>$name]", + SHOW_STR + "SR-TE info\n" + "PCEP info\n" + "Show shared PCE configuration\n" + "Show default hard-coded values\n" + "Shared configuration name\n") +{ + return path_pcep_cli_show_srte_pcep_pce_config(vty, name); +} + +DEFPY_NOSH( + pcep_cli_pce, + pcep_cli_pce_cmd, + "[no] pce WORD$name", + NO_STR + "PCE configuration, address sub-config is mandatory\n" + "PCE name\n") +{ + if (no == NULL) + return path_pcep_cli_pce(vty, name); + return path_pcep_cli_pce_delete(vty, name); +} + +DEFPY(pcep_cli_show_srte_pcep_pce, + pcep_cli_show_srte_pcep_pce_cmd, + "show sr-te pcep pce [WORD$name]", + SHOW_STR + "SR-TE info\n" + "PCEP info\n" + "Show detailed pce values\n" + "pce name\n") +{ + return path_pcep_cli_show_srte_pcep_pce(vty, name); +} + +DEFPY(pcep_cli_peer_sr_draft07, + pcep_cli_peer_sr_draft07_cmd, + "sr-draft07", + "Configure PCC to send PCEP Open with SR draft07\n") +{ + return path_pcep_cli_peer_sr_draft07(vty); +} + +DEFPY(pcep_cli_peer_pce_initiated, + pcep_cli_peer_pce_initiated_cmd, + "pce-initiated", + "Configure PCC to accept PCE initiated LSPs\n") +{ + return path_pcep_cli_peer_pce_initiated(vty); +} + +DEFPY(pcep_cli_peer_tcp_md5_auth, + pcep_cli_peer_tcp_md5_auth_cmd, + "tcp-md5-auth WORD", + "Configure PCC TCP-MD5 RFC2385 Authentication\n" + "TCP-MD5 Authentication string\n") +{ + return path_pcep_cli_peer_tcp_md5_auth(vty, tcp_md5_auth); +} + +DEFPY(pcep_cli_peer_address, 
+ pcep_cli_peer_address_cmd, + "address <ip A.B.C.D | ipv6 X:X::X:X> [port (1024-65535)]", + "PCE IP Address configuration, mandatory configuration\n" + "PCE IPv4 address\n" + "Remote PCE server IPv4 address\n" + "PCE IPv6 address\n" + "Remote PCE server IPv6 address\n" + "Remote PCE server port\n" + "Remote PCE server port value\n") +{ + return path_pcep_cli_peer_address(vty, ip_str, &ip, ipv6_str, &ipv6, + port_str, port); +} + +DEFPY(pcep_cli_peer_source_address, + pcep_cli_peer_source_address_cmd, + "source-address [ip A.B.C.D | ipv6 X:X::X:X] [port (1024-65535)]", + "PCE source IP Address configuration\n" + "PCE source IPv4 address\n" + "PCE source IPv4 address value\n" + "PCE source IPv6 address\n" + "PCE source IPv6 address value\n" + "Source PCE server port\n" + "Source PCE server port value\n") +{ + return path_pcep_cli_peer_source_address(vty, ip_str, &ip, ipv6_str, + &ipv6, port_str, port); +} + +DEFPY(pcep_cli_peer_pcep_pce_config_ref, + pcep_cli_peer_pcep_pce_config_ref_cmd, + "config WORD$name", + "PCE shared configuration to use\n" + "Shared configuration name\n") +{ + return path_pcep_cli_peer_pcep_pce_config_ref(vty, name); +} + +DEFPY(pcep_cli_peer_timers, + pcep_cli_peer_timers_cmd, + "timer [keep-alive (1-63)] [min-peer-keep-alive (1-255)] [max-peer-keep-alive (1-255)] " + "[dead-timer (4-255)] [min-peer-dead-timer (4-255)] [max-peer-dead-timer (4-255)] " + "[pcep-request (1-120)] [session-timeout-interval (1-120)] [delegation-timeout (1-60)]", + "PCE PCEP Session Timers configuration\n" + "PCC Keep Alive Timer\n" + "PCC Keep Alive Timer value in seconds\n" + "Min Acceptable PCE Keep Alive Timer\n" + "Min Acceptable PCE Keep Alive Timer value in seconds\n" + "Max Acceptable PCE Keep Alive Timer\n" + "Max Acceptable PCE Keep Alive Timer value in seconds\n" + "PCC Dead Timer\n" + "PCC Dead Timer value in seconds\n" + "Min Acceptable PCE Dead Timer\n" + "Min Acceptable PCE Dead Timer value in seconds\n" + "Max Acceptable PCE Dead Timer\n" + "Max Acceptable PCE Dead Timer value in seconds\n" + "PCC PCEP Request Timer\n" + "PCC PCEP Request Timer value in seconds\n" + "PCC Session Timeout Interval\n" + "PCC Session Timeout Interval value in seconds\n" + "Multi-PCE delegation timeout\n" + "Multi-PCE delegation timeout value in seconds\n") +{ + return path_pcep_cli_peer_timers( + vty, keep_alive_str, keep_alive, min_peer_keep_alive_str, + min_peer_keep_alive, max_peer_keep_alive_str, + max_peer_keep_alive, dead_timer_str, dead_timer, + min_peer_dead_timer_str, min_peer_dead_timer, + max_peer_dead_timer_str, max_peer_dead_timer, pcep_request_str, + pcep_request, session_timeout_interval_str, + session_timeout_interval, delegation_timeout_str, + delegation_timeout); +} + +DEFPY_NOSH( + pcep_cli_pcc, + pcep_cli_pcc_cmd, + "[no] pcc", + NO_STR + "PCC configuration\n") +{ + if (no != NULL) { + return path_pcep_cli_pcc_delete(vty); + } else { + return path_pcep_cli_pcc(vty); + } +} + +DEFPY(pcep_cli_pcc_pcc_msd, + pcep_cli_pcc_pcc_msd_cmd, + "msd (1-32)", + "PCC maximum SID depth \n" + "PCC maximum SID depth value\n") +{ + return path_pcep_cli_pcc_pcc_msd(vty, msd_str, msd); +} + +DEFPY(pcep_cli_pcc_pcc_peer, + pcep_cli_pcc_pcc_peer_cmd, + "[no] peer WORD [precedence (1-255)]", + NO_STR + "PCC PCE peer\n" + "PCC PCE name\n" + "PCC Multi-PCE precedence\n" + "PCE precedence\n") +{ + if (no != NULL) { + return path_pcep_cli_pcc_pcc_peer_delete( + vty, peer, precedence_str, precedence); + } else { + return path_pcep_cli_pcc_pcc_peer(vty, peer, precedence_str, + precedence); + } +} + 
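For orientation, the DEFPY grammars in this file compose into a vtysh configuration roughly like the sketch below. This is illustrative only: the peer name PCE1, the pce-config name CONF1, and the addresses, port, timer, msd and precedence values are invented for the example, and the enclosing segment-routing / traffic-eng context is an assumption inferred from the SR_TRAFFIC_ENG_NODE reference in pcep_cli_init() rather than something shown in this hunk.

    segment-routing
     traffic-eng
      pcep
       pce-config CONF1
        source-address ip 192.0.2.10
        timer keep-alive 30 dead-timer 120
       pce PCE1
        address ip 192.0.2.1 port 4189
        config CONF1
        pce-initiated
       pcc
        msd 4
        peer PCE1 precedence 10

In this sketch each pce block must at least set an address (path_pcep_cli_pcc_pcc_peer() rejects a peer whose IP is still zero), per-peer options and the referenced pce-config are combined by pcep_cli_merge_pcep_pce_config_options() when the peer is activated under pcc, and the show/clear "sr-te pcep session" commands then operate on the peers listed there.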
+DEFPY(pcep_cli_show_srte_pcc, + pcep_cli_show_srte_pcc_cmd, + "show sr-te pcep pcc", + SHOW_STR + "SR-TE info\n" + "PCEP info\n" + "Show current PCC configuration\n") +{ + return path_pcep_cli_show_srte_pcep_pcc(vty); +} + +DEFPY(pcep_cli_show_srte_pcep_session, + pcep_cli_show_srte_pcep_session_cmd, + "show sr-te pcep session [WORD]$pce", + SHOW_STR + "SR-TE info\n" + "PCEP info\n" + "Show PCEP Session information\n" + "PCE name\n") +{ + return path_pcep_cli_show_srte_pcep_session(vty, pce); +} + +DEFPY(pcep_cli_clear_srte_pcep_session, + pcep_cli_clear_srte_pcep_session_cmd, + "clear sr-te pcep session [WORD]$pce", + CLEAR_STR + "SR-TE\n" + "PCEP\n" + "Reset PCEP connection\n" + "PCE name\n") +{ + return path_pcep_cli_clear_srte_pcep_session(vty, pce); +} + +void pcep_cli_init(void) +{ + hook_register(nb_client_debug_config_write, + pcep_cli_debug_config_write); + hook_register(nb_client_debug_set_all, pcep_cli_debug_set_all); + + memset(&pce_connections_g, 0, sizeof(pce_connections_g)); + + install_node(&pcep_node); + install_node(&pcep_pcc_node); + install_node(&pcep_pce_node); + install_node(&pcep_pce_config_node); + + install_default(PCEP_PCE_CONFIG_NODE); + install_default(PCEP_PCE_NODE); + install_default(PCEP_PCC_NODE); + install_default(PCEP_NODE); + + install_element(SR_TRAFFIC_ENG_NODE, &pcep_cli_pcep_cmd); + + /* PCEP configuration group related configuration commands */ + install_element(PCEP_NODE, &pcep_cli_pcep_pce_config_cmd); + install_element(PCEP_PCE_CONFIG_NODE, + &pcep_cli_peer_source_address_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &pcep_cli_peer_timers_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &pcep_cli_peer_sr_draft07_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &pcep_cli_peer_pce_initiated_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &pcep_cli_peer_tcp_md5_auth_cmd); + + /* PCE peer related configuration commands */ + install_element(PCEP_NODE, &pcep_cli_pce_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_address_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_source_address_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_pcep_pce_config_ref_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_timers_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_sr_draft07_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_pce_initiated_cmd); + install_element(PCEP_PCE_NODE, &pcep_cli_peer_tcp_md5_auth_cmd); + + /* PCC related configuration commands */ + install_element(ENABLE_NODE, &pcep_cli_show_srte_pcc_cmd); + install_element(PCEP_NODE, &pcep_cli_pcc_cmd); + install_element(PCEP_PCC_NODE, &pcep_cli_pcc_pcc_peer_cmd); + install_element(PCEP_PCC_NODE, &pcep_cli_pcc_pcc_msd_cmd); + + /* Top commands */ + install_element(CONFIG_NODE, &pcep_cli_debug_cmd); + install_element(ENABLE_NODE, &pcep_cli_debug_cmd); + install_element(ENABLE_NODE, &show_debugging_pathd_pcep_cmd); + install_element(ENABLE_NODE, &pcep_cli_show_srte_pcep_counters_cmd); + install_element(ENABLE_NODE, &pcep_cli_show_srte_pcep_pce_config_cmd); + install_element(ENABLE_NODE, &pcep_cli_show_srte_pcep_pce_cmd); + install_element(ENABLE_NODE, &pcep_cli_show_srte_pcep_session_cmd); + install_element(ENABLE_NODE, &pcep_cli_clear_srte_pcep_session_cmd); +} diff --git a/pathd/path_pcep_cli.h b/pathd/path_pcep_cli.h new file mode 100644 index 0000000000..0b101ab215 --- /dev/null +++ b/pathd/path_pcep_cli.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2020 Volta Networks, Inc + * Brady Johnson + * + * This program is free software; you can redistribute it and/or modify it + * under the 
terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_CLI_H_ +#define _PATH_PCEP_CLI_H_ + + +/* PCEP CLI Functions */ +void pcep_cli_init(void); + +#endif // _PATH_PCEP_CLI_H_ diff --git a/pathd/path_pcep_config.c b/pathd/path_pcep_config.c new file mode 100644 index 0000000000..989223ebc3 --- /dev/null +++ b/pathd/path_pcep_config.c @@ -0,0 +1,435 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <northbound.h> +#include <yang.h> +#include <printfrr.h> +#include <pcep-objects.h> +#include "pathd/pathd.h" +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_config.h" +#include "pathd/path_pcep_debug.h" +#include "thread.h" + +#define MAX_XPATH 256 +#define MAX_FLOAT_LEN 22 +#define INETADDR4_MAXLEN 16 +#define INETADDR6_MAXLEN 40 + + +static void copy_candidate_objfun_info(struct srte_candidate *candidate, + struct path *path); +static void copy_candidate_affinity_filters(struct srte_candidate *candidate, + struct path *path); +static struct path_hop * +path_pcep_config_list_path_hops(struct srte_segment_list *segment_list); +static struct srte_candidate *lookup_candidate(struct lsp_nb_key *key); +static char *candidate_name(struct srte_candidate *candidate); +static enum pcep_lsp_operational_status +status_int_to_ext(enum srte_policy_status status); +static enum pcep_sr_subobj_nai pcep_nai_type(enum srte_segment_nai_type type); +static enum srte_segment_nai_type srte_nai_type(enum pcep_sr_subobj_nai type); + +static int path_pcep_config_lookup_cb(struct thread *t) +{ + struct path *path = THREAD_ARG(t); + struct srte_candidate *candidate = lookup_candidate(&path->nbkey); + struct srte_lsp *lsp; + + if (candidate == NULL) + return 0; + + lsp = candidate->lsp; + + if (path->name == NULL) + path->name = candidate_name(candidate); + if (path->type == SRTE_CANDIDATE_TYPE_UNDEFINED) + path->type = candidate->type; + if (path->create_origin == SRTE_ORIGIN_UNDEFINED) + path->create_origin = candidate->protocol_origin; + if ((path->update_origin == SRTE_ORIGIN_UNDEFINED) + && (lsp->segment_list != NULL)) + path->update_origin = lsp->segment_list->protocol_origin; + + return 0; +} + +void path_pcep_config_lookup(struct path *path) 
+{ + /* + * Configuration access is strictly done via the main thread + */ + thread_execute(master, path_pcep_config_lookup_cb, path, 0); +} + +struct path *path_pcep_config_get_path(struct lsp_nb_key *key) +{ + struct srte_candidate *candidate = lookup_candidate(key); + if (candidate == NULL) + return NULL; + return candidate_to_path(candidate); +} + +void path_pcep_config_list_path(path_list_cb_t cb, void *arg) +{ + struct path *path; + struct srte_policy *policy; + struct srte_candidate *candidate; + + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + RB_FOREACH (candidate, srte_candidate_head, + &policy->candidate_paths) { + path = candidate_to_path(candidate); + if (!cb(path, arg)) + return; + } + } +} + +struct path *candidate_to_path(struct srte_candidate *candidate) +{ + char *name; + struct path *path; + struct path_hop *hop = NULL; + struct path_metric *metric = NULL; + struct srte_policy *policy; + struct srte_lsp *lsp; + enum pcep_lsp_operational_status status; + enum srte_protocol_origin update_origin = 0; + char *originator = NULL; + + policy = candidate->policy; + lsp = candidate->lsp; + + if (lsp->segment_list != NULL) { + hop = path_pcep_config_list_path_hops(lsp->segment_list); + update_origin = lsp->segment_list->protocol_origin; + originator = XSTRDUP(MTYPE_PCEP, lsp->segment_list->originator); + } + path = pcep_new_path(); + name = candidate_name(candidate); + if (CHECK_FLAG(candidate->flags, F_CANDIDATE_BEST)) { + status = status_int_to_ext(policy->status); + } else { + status = PCEP_LSP_OPERATIONAL_DOWN; + } + for (uint32_t i = 0; i < MAX_METRIC_TYPE; i++) { + struct path_metric *path_metric; + struct srte_metric *srte_metric = &lsp->metrics[i]; + if (CHECK_FLAG(srte_metric->flags, F_METRIC_IS_DEFINED)) { + path_metric = pcep_new_metric(); + path_metric->next = metric; + metric = path_metric; + metric->type = i + 1; + metric->value = srte_metric->value; + metric->enforce = CHECK_FLAG(srte_metric->flags, + F_METRIC_IS_REQUIRED); + metric->is_bound = CHECK_FLAG(srte_metric->flags, + F_METRIC_IS_BOUND); + metric->is_computed = CHECK_FLAG(srte_metric->flags, + F_METRIC_IS_COMPUTED); + } + } + *path = (struct path){ + .nbkey = (struct lsp_nb_key){.color = policy->color, + .endpoint = policy->endpoint, + .preference = + candidate->preference}, + .create_origin = lsp->protocol_origin, + .update_origin = update_origin, + .originator = originator, + .plsp_id = 0, + .name = name, + .type = candidate->type, + .srp_id = 0, + .req_id = 0, + .binding_sid = policy->binding_sid, + .status = status, + .do_remove = false, + .go_active = false, + .was_created = false, + .was_removed = false, + .is_synching = false, + .is_delegated = false, + .first_hop = hop, + .first_metric = metric}; + + path->has_bandwidth = CHECK_FLAG(lsp->flags, F_CANDIDATE_HAS_BANDWIDTH); + if (path->has_bandwidth) { + path->enforce_bandwidth = + CHECK_FLAG(lsp->flags, F_CANDIDATE_REQUIRED_BANDWIDTH); + path->bandwidth = lsp->bandwidth; + } else { + path->enforce_bandwidth = true; + path->bandwidth = 0; + } + + copy_candidate_objfun_info(candidate, path); + copy_candidate_affinity_filters(candidate, path); + + return path; +} + +void copy_candidate_objfun_info(struct srte_candidate *candidate, + struct path *path) +{ + struct srte_lsp *lsp = candidate->lsp; + + if (lsp != NULL) { + if (CHECK_FLAG(lsp->flags, F_CANDIDATE_HAS_OBJFUN)) { + path->has_pce_objfun = true; + path->pce_objfun = lsp->objfun; + } else { + path->has_pce_objfun = false; + path->pce_objfun = OBJFUN_UNDEFINED; + } + } + if 
(CHECK_FLAG(candidate->flags, F_CANDIDATE_HAS_OBJFUN)) { + path->has_pcc_objfun = true; + path->pcc_objfun = candidate->objfun; + path->enforce_pcc_objfun = CHECK_FLAG( + candidate->flags, F_CANDIDATE_REQUIRED_OBJFUN); + + } else { + path->has_pcc_objfun = false; + path->pcc_objfun = OBJFUN_UNDEFINED; + UNSET_FLAG(candidate->flags, F_CANDIDATE_REQUIRED_OBJFUN); + } +} + +void copy_candidate_affinity_filters(struct srte_candidate *candidate, + struct path *path) +{ + bool eany = CHECK_FLAG(candidate->flags, F_CANDIDATE_HAS_EXCLUDE_ANY); + bool iany = CHECK_FLAG(candidate->flags, F_CANDIDATE_HAS_INCLUDE_ANY); + bool iall = CHECK_FLAG(candidate->flags, F_CANDIDATE_HAS_INCLUDE_ALL); + path->has_affinity_filters = eany || iany || iall; + path->affinity_filters[AFFINITY_FILTER_EXCLUDE_ANY - 1] = + eany ? candidate->affinity_filters[AFFINITY_FILTER_EXCLUDE_ANY + - 1] + : 0; + path->affinity_filters[AFFINITY_FILTER_INCLUDE_ANY - 1] = + iany ? candidate->affinity_filters[AFFINITY_FILTER_INCLUDE_ANY + - 1] + : 0; + path->affinity_filters[AFFINITY_FILTER_INCLUDE_ALL - 1] = + iall ? candidate->affinity_filters[AFFINITY_FILTER_INCLUDE_ALL + - 1] + : 0; +} + +struct path_hop * +path_pcep_config_list_path_hops(struct srte_segment_list *segment_list) +{ + struct srte_segment_entry *segment; + struct path_hop *hop = NULL, *last_hop = NULL; + + RB_FOREACH_REVERSE (segment, srte_segment_entry_head, + &segment_list->segments) { + hop = pcep_new_hop(); + *hop = (struct path_hop){ + .next = last_hop, + .is_loose = false, + .has_sid = true, + .is_mpls = true, + .has_attribs = false, + .sid = {.mpls = {.label = segment->sid_value}}, + .has_nai = + segment->nai_type != SRTE_SEGMENT_NAI_TYPE_NONE, + .nai = {.type = pcep_nai_type(segment->nai_type)}}; + switch (segment->nai_type) { + case SRTE_SEGMENT_NAI_TYPE_IPV4_NODE: + case SRTE_SEGMENT_NAI_TYPE_IPV6_NODE: + memcpy(&hop->nai.local_addr, &segment->nai_local_addr, + sizeof(struct ipaddr)); + break; + case SRTE_SEGMENT_NAI_TYPE_IPV4_ADJACENCY: + case SRTE_SEGMENT_NAI_TYPE_IPV6_ADJACENCY: + memcpy(&hop->nai.local_addr, &segment->nai_local_addr, + sizeof(struct ipaddr)); + memcpy(&hop->nai.remote_addr, &segment->nai_remote_addr, + sizeof(struct ipaddr)); + break; + case SRTE_SEGMENT_NAI_TYPE_IPV4_UNNUMBERED_ADJACENCY: + memcpy(&hop->nai.local_addr, &segment->nai_local_addr, + sizeof(struct ipaddr)); + hop->nai.local_iface = segment->nai_local_iface; + memcpy(&hop->nai.remote_addr, &segment->nai_remote_addr, + sizeof(struct ipaddr)); + hop->nai.remote_iface = segment->nai_remote_iface; + break; + default: + break; + } + last_hop = hop; + } + return hop; +} + +int path_pcep_config_update_path(struct path *path) +{ + assert(path != NULL); + assert(path->nbkey.preference != 0); + assert(path->nbkey.endpoint.ipa_type == IPADDR_V4); + + struct path_hop *hop; + struct path_metric *metric; + int index; + char segment_list_name_buff[64 + 1 + 64 + 1 + 11 + 1]; + char *segment_list_name = NULL; + struct srte_candidate *candidate; + struct srte_segment_list *segment_list = NULL; + struct srte_segment_entry *segment; + + candidate = lookup_candidate(&path->nbkey); + + // if there is no candidate to update we are done + if (!candidate) + return 0; + + // first clean up old segment list if present + if (candidate->lsp->segment_list) { + SET_FLAG(candidate->lsp->segment_list->flags, + F_SEGMENT_LIST_DELETED); + candidate->lsp->segment_list = NULL; + } + + if (path->first_hop != NULL) { + snprintf(segment_list_name_buff, sizeof(segment_list_name_buff), + "%s-%u", path->name, 
path->plsp_id); + segment_list_name = segment_list_name_buff; + + segment_list = srte_segment_list_add(segment_list_name); + segment_list->protocol_origin = path->update_origin; + strlcpy(segment_list->originator, path->originator, + sizeof(segment_list->originator)); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_NEW); + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + for (hop = path->first_hop, index = 10; hop != NULL; + hop = hop->next, index += 10) { + assert(hop->has_sid); + assert(hop->is_mpls); + + segment = srte_segment_entry_add(segment_list, index); + + segment->sid_value = (mpls_label_t)hop->sid.mpls.label; + SET_FLAG(segment->segment_list->flags, + F_SEGMENT_LIST_MODIFIED); + + if (hop->has_nai) + srte_segment_entry_set_nai( + segment, srte_nai_type(hop->nai.type), + &hop->nai.local_addr, + hop->nai.local_iface, + &hop->nai.remote_addr, + hop->nai.remote_iface); + } + } + + candidate->lsp->segment_list = segment_list; + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + + for (metric = path->first_metric; metric != NULL; metric = metric->next) + srte_lsp_set_metric(candidate->lsp, metric->type, metric->value, + metric->enforce, metric->is_bound, + metric->is_computed); + + if (path->has_bandwidth) + srte_lsp_set_bandwidth(candidate->lsp, path->bandwidth, + path->enforce_bandwidth); + + if (path->has_pce_objfun) { + SET_FLAG(candidate->lsp->flags, F_CANDIDATE_HAS_OBJFUN); + candidate->lsp->objfun = path->pce_objfun; + } + + srte_apply_changes(); + + return 0; +} + +struct srte_candidate *lookup_candidate(struct lsp_nb_key *key) +{ + struct srte_policy *policy = NULL; + policy = srte_policy_find(key->color, &key->endpoint); + if (policy == NULL) + return NULL; + return srte_candidate_find(policy, key->preference); +} + +char *candidate_name(struct srte_candidate *candidate) +{ + return asprintfrr(MTYPE_PCEP, "%s-%s", candidate->policy->name, + candidate->name); +} + +enum pcep_lsp_operational_status +status_int_to_ext(enum srte_policy_status status) +{ + switch (status) { + case SRTE_POLICY_STATUS_UP: + return PCEP_LSP_OPERATIONAL_ACTIVE; + case SRTE_POLICY_STATUS_GOING_UP: + return PCEP_LSP_OPERATIONAL_GOING_UP; + case SRTE_POLICY_STATUS_GOING_DOWN: + return PCEP_LSP_OPERATIONAL_GOING_DOWN; + default: + return PCEP_LSP_OPERATIONAL_DOWN; + } +} + +enum pcep_sr_subobj_nai pcep_nai_type(enum srte_segment_nai_type type) +{ + switch (type) { + case SRTE_SEGMENT_NAI_TYPE_NONE: + return PCEP_SR_SUBOBJ_NAI_ABSENT; + case SRTE_SEGMENT_NAI_TYPE_IPV4_NODE: + return PCEP_SR_SUBOBJ_NAI_IPV4_NODE; + case SRTE_SEGMENT_NAI_TYPE_IPV6_NODE: + return PCEP_SR_SUBOBJ_NAI_IPV6_NODE; + case SRTE_SEGMENT_NAI_TYPE_IPV4_ADJACENCY: + return PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY; + case SRTE_SEGMENT_NAI_TYPE_IPV6_ADJACENCY: + return PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY; + case SRTE_SEGMENT_NAI_TYPE_IPV4_UNNUMBERED_ADJACENCY: + return PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY; + default: + return PCEP_SR_SUBOBJ_NAI_UNKNOWN; + } +} + +enum srte_segment_nai_type srte_nai_type(enum pcep_sr_subobj_nai type) +{ + switch (type) { + case PCEP_SR_SUBOBJ_NAI_ABSENT: + return SRTE_SEGMENT_NAI_TYPE_NONE; + case PCEP_SR_SUBOBJ_NAI_IPV4_NODE: + return SRTE_SEGMENT_NAI_TYPE_IPV4_NODE; + case PCEP_SR_SUBOBJ_NAI_IPV6_NODE: + return SRTE_SEGMENT_NAI_TYPE_IPV6_NODE; + case PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY: + return SRTE_SEGMENT_NAI_TYPE_IPV4_ADJACENCY; + case PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY: + return SRTE_SEGMENT_NAI_TYPE_IPV6_ADJACENCY; + case PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY: + return 
SRTE_SEGMENT_NAI_TYPE_IPV4_UNNUMBERED_ADJACENCY; + default: + return SRTE_SEGMENT_NAI_TYPE_NONE; + } +} diff --git a/pathd/path_pcep_config.h b/pathd/path_pcep_config.h new file mode 100644 index 0000000000..de29ab29c1 --- /dev/null +++ b/pathd/path_pcep_config.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_CONFIG_H_ +#define _PATH_PCEP_CONFIG_H_ + +#include <stdbool.h> +#include <debug.h> + +#include "pathd/path_pcep.h" + +#define PATH_NB_NO_CHANGE 0 +#define PATH_NB_OK 1 +#define PATH_NB_ERR -1 + +typedef int (*path_list_cb_t)(struct path *path, void *arg); + +/* Lookup the candidate path and fill up the missing path attributes like name + and type. Used for path generated from PCEP message received from the PCE + so they contains more information about the candidate path. If no matching + policy or candidate path is found, nothing is changed */ +void path_pcep_config_lookup(struct path *path); +struct path *path_pcep_config_get_path(struct lsp_nb_key *key); +void path_pcep_config_list_path(path_list_cb_t cb, void *arg); +int path_pcep_config_update_path(struct path *path); +struct path *candidate_to_path(struct srte_candidate *candidate); + + +#endif // _PATH_PCEP_CONFIG_H_ diff --git a/pathd/path_pcep_controller.c b/pathd/path_pcep_controller.c new file mode 100644 index 0000000000..f4871a4d8d --- /dev/null +++ b/pathd/path_pcep_controller.c @@ -0,0 +1,1078 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "log.h" +#include "command.h" +#include "libfrr.h" +#include "printfrr.h" +#include "version.h" +#include "northbound.h" +#include "frr_pthread.h" +#include "jhash.h" +#include "network.h" + +#include "pathd/pathd.h" +#include "pathd/path_errors.h" +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_controller.h" +#include "pathd/path_pcep_pcc.h" +#include "pathd/path_pcep_config.h" +#include "pathd/path_pcep_debug.h" + +#define MAX_RECONNECT_DELAY 120 + +#define min(a, b) \ + ({ \ + __typeof__(a) _a = (a); \ + __typeof__(b) _b = (b); \ + _a <= _b ? _a : _b; \ + }) + + +/* Event handling data structures */ +enum pcep_ctrl_event_type { + EV_UPDATE_PCC_OPTS = 1, + EV_UPDATE_PCE_OPTS, + EV_REMOVE_PCC, + EV_PATHD_EVENT, + EV_SYNC_PATH, + EV_SYNC_DONE, + EV_PCEPLIB_EVENT, + EV_RESET_PCC_SESSION +}; + +struct pcep_ctrl_event_data { + struct ctrl_state *ctrl_state; + enum pcep_ctrl_event_type type; + uint32_t sub_type; + int pcc_id; + void *payload; +}; + +struct pcep_main_event_data { + pcep_main_event_handler_t handler; + int pcc_id; + enum pcep_main_event_type type; + void *payload; +}; + +/* Synchronous call arguments */ + +struct get_counters_args { + struct ctrl_state *ctrl_state; + int pcc_id; + struct counters_group *counters; +}; + +struct send_report_args { + struct ctrl_state *ctrl_state; + int pcc_id; + struct path *path; +}; + +struct get_pcep_session_args { + struct ctrl_state *ctrl_state; + int pcc_id; + pcep_session *pcep_session; +}; + +/* Internal Functions Called From Main Thread */ +static int pcep_ctrl_halt_cb(struct frr_pthread *fpt, void **res); + +/* Internal Functions Called From Controller Thread */ +static int pcep_thread_finish_event_handler(struct thread *thread); +static int pcep_thread_get_counters_callback(struct thread *t); +static int pcep_thread_send_report_callback(struct thread *t); +static int pcep_thread_get_pcep_session_callback(struct thread *t); +static int pcep_thread_get_pcc_info_callback(struct thread *t); + +/* Controller Thread Timer Handler */ +static int schedule_thread_timer(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_timer_type timer_type, + enum pcep_ctrl_timeout_type timeout_type, + uint32_t delay, void *payload, + struct thread **thread); +static int schedule_thread_timer_with_cb( + struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_timer_type timer_type, + enum pcep_ctrl_timeout_type timeout_type, uint32_t delay, void *payload, + struct thread **thread, pcep_ctrl_thread_callback timer_cb); +static int pcep_thread_timer_handler(struct thread *thread); + +/* Controller Thread Socket read/write Handler */ +static int schedule_thread_socket(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_socket_type type, bool is_read, + void *payload, int fd, struct thread **thread, + pcep_ctrl_thread_callback cb); + +/* Controller Thread Event Handler */ +static int send_to_thread(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_event_type type, uint32_t sub_type, + void *payload); +static int send_to_thread_with_cb(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_event_type type, + uint32_t sub_type, void *payload, + pcep_ctrl_thread_callback event_cb); +static int pcep_thread_event_handler(struct thread *thread); +static int 
pcep_thread_event_update_pcc_options(struct ctrl_state *ctrl_state, + struct pcc_opts *opts); +static int pcep_thread_event_update_pce_options(struct ctrl_state *ctrl_state, + int pcc_id, + struct pce_opts *opts); +static int pcep_thread_event_remove_pcc_by_id(struct ctrl_state *ctrl_state, + int pcc_id); +static int pcep_thread_event_remove_pcc_all(struct ctrl_state *ctrl_state); +static int pcep_thread_event_remove_pcc(struct ctrl_state *ctrl_state, + struct pce_opts *pce_opts); +static int pcep_thread_event_sync_path(struct ctrl_state *ctrl_state, + int pcc_id, struct path *path); +static int pcep_thread_event_sync_done(struct ctrl_state *ctrl_state, + int pcc_id); +static int pcep_thread_event_pathd_event(struct ctrl_state *ctrl_state, + enum pcep_pathd_event_type type, + struct path *path); + +/* Main Thread Event Handler */ +static int send_to_main(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_main_event_type type, void *payload); +static int pcep_main_event_handler(struct thread *thread); + +/* Helper functions */ +static void set_ctrl_state(struct frr_pthread *fpt, + struct ctrl_state *ctrl_state); +static struct ctrl_state *get_ctrl_state(struct frr_pthread *fpt); +int get_next_id(struct ctrl_state *ctrl_state); +int set_pcc_state(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state); +void remove_pcc_state(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +static uint32_t backoff_delay(uint32_t max, uint32_t base, uint32_t attempt); +static const char *timer_type_name(enum pcep_ctrl_timer_type type); +static const char *timeout_type_name(enum pcep_ctrl_timeout_type type); + + +/* ------------ API Functions Called from Main Thread ------------ */ + +int pcep_ctrl_initialize(struct thread_master *main_thread, + struct frr_pthread **fpt, + pcep_main_event_handler_t event_handler) +{ + assert(fpt != NULL); + + int ret = 0; + struct ctrl_state *ctrl_state; + struct frr_pthread_attr attr = { + .start = frr_pthread_attr_default.start, + .stop = pcep_ctrl_halt_cb, + }; + + PCEP_DEBUG("Initializing pcep module controller"); + + /* Create and start the FRR pthread */ + *fpt = frr_pthread_new(&attr, "PCEP thread", "pcep_controller"); + if (*fpt == NULL) { + flog_err(EC_PATH_SYSTEM_CALL, + "failed to initialize PCEP thread"); + return 1; + } + ret = frr_pthread_run(*fpt, NULL); + if (ret < 0) { + flog_err(EC_PATH_SYSTEM_CALL, "failed to create PCEP thread"); + return ret; + } + frr_pthread_wait_running(*fpt); + + /* Initialize the thread state */ + ctrl_state = XCALLOC(MTYPE_PCEP, sizeof(*ctrl_state)); + ctrl_state->main = main_thread; + ctrl_state->self = (*fpt)->master; + ctrl_state->main_event_handler = event_handler; + ctrl_state->pcc_count = 0; + ctrl_state->pcc_last_id = 0; + ctrl_state->pcc_opts = + XCALLOC(MTYPE_PCEP, sizeof(*ctrl_state->pcc_opts)); + /* Default to no PCC address defined */ + ctrl_state->pcc_opts->addr.ipa_type = IPADDR_NONE; + ctrl_state->pcc_opts->port = PCEP_DEFAULT_PORT; + + /* Keep the state reference for events */ + set_ctrl_state(*fpt, ctrl_state); + + return ret; +} + +int pcep_ctrl_finalize(struct frr_pthread **fpt) +{ + assert(fpt != NULL); + + int ret = 0; + + PCEP_DEBUG("Finalizing pcep module controller"); + + if (*fpt != NULL) { + frr_pthread_stop(*fpt, NULL); + *fpt = NULL; + } + + return ret; +} + +int pcep_ctrl_update_pcc_options(struct frr_pthread *fpt, struct pcc_opts *opts) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, 0, EV_UPDATE_PCC_OPTS, 0, opts); +} + +int 
pcep_ctrl_update_pce_options(struct frr_pthread *fpt, struct pce_opts *opts) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, 0, EV_UPDATE_PCE_OPTS, 0, opts); +} + +int pcep_ctrl_remove_pcc(struct frr_pthread *fpt, struct pce_opts *pce_opts) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, 0, EV_REMOVE_PCC, 0, pce_opts); +} + +int pcep_ctrl_reset_pcc_session(struct frr_pthread *fpt, char *pce_name) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, 0, EV_RESET_PCC_SESSION, 0, pce_name); +} + +int pcep_ctrl_pathd_event(struct frr_pthread *fpt, + enum pcep_pathd_event_type type, struct path *path) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, 0, EV_PATHD_EVENT, type, path); +} + +int pcep_ctrl_sync_path(struct frr_pthread *fpt, int pcc_id, struct path *path) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, pcc_id, EV_SYNC_PATH, 0, path); +} + +int pcep_ctrl_sync_done(struct frr_pthread *fpt, int pcc_id) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + return send_to_thread(ctrl_state, pcc_id, EV_SYNC_DONE, 0, NULL); +} + +struct counters_group *pcep_ctrl_get_counters(struct frr_pthread *fpt, + int pcc_id) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + struct get_counters_args args = { + .ctrl_state = ctrl_state, .pcc_id = pcc_id, .counters = NULL}; + thread_execute(ctrl_state->self, pcep_thread_get_counters_callback, + &args, 0); + return args.counters; +} + +pcep_session *pcep_ctrl_get_pcep_session(struct frr_pthread *fpt, int pcc_id) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + struct get_pcep_session_args args = {.ctrl_state = ctrl_state, + .pcc_id = pcc_id, + .pcep_session = NULL}; + thread_execute(ctrl_state->self, pcep_thread_get_pcep_session_callback, + &args, 0); + return args.pcep_session; +} + +struct pcep_pcc_info *pcep_ctrl_get_pcc_info(struct frr_pthread *fpt, + const char *pce_name) +{ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + struct pcep_pcc_info *args = XCALLOC(MTYPE_PCEP, sizeof(*args)); + args->ctrl_state = ctrl_state; + strncpy(args->pce_name, pce_name, sizeof(args->pce_name)); + thread_execute(ctrl_state->self, pcep_thread_get_pcc_info_callback, + args, 0); + + return args; +} + +void pcep_ctrl_send_report(struct frr_pthread *fpt, int pcc_id, + struct path *path) +{ + /* Sends a report synchronously */ + struct ctrl_state *ctrl_state = get_ctrl_state(fpt); + struct send_report_args args = { + .ctrl_state = ctrl_state, .pcc_id = pcc_id, .path = path}; + thread_execute(ctrl_state->self, pcep_thread_send_report_callback, + &args, 0); +} + +/* ------------ Internal Functions Called from Main Thread ------------ */ + +int pcep_ctrl_halt_cb(struct frr_pthread *fpt, void **res) +{ + thread_add_event(fpt->master, pcep_thread_finish_event_handler, + (void *)fpt, 0, NULL); + pthread_join(fpt->thread, res); + + return 0; +} + + +/* ------------ API Functions Called From Controller Thread ------------ */ + +void pcep_thread_start_sync(struct ctrl_state *ctrl_state, int pcc_id) +{ + send_to_main(ctrl_state, pcc_id, PCEP_MAIN_EVENT_START_SYNC, NULL); +} + +void pcep_thread_update_path(struct ctrl_state *ctrl_state, int pcc_id, + struct path *path) +{ + send_to_main(ctrl_state, pcc_id, PCEP_MAIN_EVENT_UPDATE_CANDIDATE, + path); +} + +void pcep_thread_remove_candidate_path_segments(struct ctrl_state *ctrl_state, 
+ struct pcc_state *pcc_state) +{ + if (!pcc_state) + return; + /* Will be deleted when the event is handled */ + char *originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator); + PCEP_DEBUG("schedule candidate path segments removal for originator %s", + originator); + send_to_main(ctrl_state, pcep_pcc_get_pcc_id(pcc_state), + PCEP_MAIN_EVENT_REMOVE_CANDIDATE_LSP, originator); +} + +void pcep_thread_schedule_sync_best_pce(struct ctrl_state *ctrl_state, + int pcc_id, int delay, + struct thread **thread) +{ + + schedule_thread_timer(ctrl_state, pcc_id, TM_CALCULATE_BEST_PCE, + TO_UNDEFINED, delay, NULL, thread); +} + +void pcep_thread_cancel_timer(struct thread **thread) +{ + if (thread == NULL || *thread == NULL) { + return; + } + + struct pcep_ctrl_timer_data *data = THREAD_ARG(*thread); + PCEP_DEBUG("Timer %s / %s canceled", timer_type_name(data->timer_type), + timeout_type_name(data->timeout_type)); + if (data != NULL) { + XFREE(MTYPE_PCEP, data); + } + + if ((*thread)->master->owner == pthread_self()) { + thread_cancel(thread); + } else { + thread_cancel_async((*thread)->master, thread, NULL); + } +} + +void pcep_thread_schedule_reconnect(struct ctrl_state *ctrl_state, int pcc_id, + int retry_count, struct thread **thread) +{ + uint32_t delay = backoff_delay(MAX_RECONNECT_DELAY, 1, retry_count); + PCEP_DEBUG("Schedule RECONNECT_PCC for %us (retry %u)", delay, + retry_count); + schedule_thread_timer(ctrl_state, pcc_id, TM_RECONNECT_PCC, + TO_UNDEFINED, delay, NULL, thread); +} + +void pcep_thread_schedule_timeout(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_timeout_type timeout_type, + uint32_t delay, void *param, + struct thread **thread) +{ + assert(timeout_type > TO_UNDEFINED); + assert(timeout_type < TO_MAX); + PCEP_DEBUG("Schedule timeout %s for %us", + timeout_type_name(timeout_type), delay); + schedule_thread_timer(ctrl_state, pcc_id, TM_TIMEOUT, timeout_type, + delay, param, thread); +} + +void pcep_thread_schedule_pceplib_timer(struct ctrl_state *ctrl_state, + int delay, void *payload, + struct thread **thread, + pcep_ctrl_thread_callback timer_cb) +{ + PCEP_DEBUG("Schedule PCEPLIB_TIMER for %us", delay); + schedule_thread_timer_with_cb(ctrl_state, 0, TM_PCEPLIB_TIMER, + TO_UNDEFINED, delay, payload, thread, + timer_cb); +} + +void pcep_thread_schedule_session_timeout(struct ctrl_state *ctrl_state, + int pcc_id, int delay, + struct thread **thread) +{ + PCEP_DEBUG("Schedule session_timeout interval for %us", delay); + schedule_thread_timer(ctrl_state, pcc_id, TM_SESSION_TIMEOUT_PCC, + TO_UNDEFINED, delay, NULL, thread); +} + +int pcep_thread_pcc_count(struct ctrl_state *ctrl_state) +{ + if (ctrl_state == NULL) { + return 0; + } + + return ctrl_state->pcc_count; +} + +/* ------------ Internal Functions Called From Controller Thread ------------ */ + +int pcep_thread_finish_event_handler(struct thread *thread) +{ + int i; + struct frr_pthread *fpt = THREAD_ARG(thread); + struct ctrl_state *ctrl_state = fpt->data; + + assert(ctrl_state != NULL); + + for (i = 0; i < MAX_PCC; i++) { + if (ctrl_state->pcc[i]) { + pcep_pcc_finalize(ctrl_state, ctrl_state->pcc[i]); + ctrl_state->pcc[i] = NULL; + } + } + + XFREE(MTYPE_PCEP, ctrl_state->pcc_opts); + XFREE(MTYPE_PCEP, ctrl_state); + fpt->data = NULL; + + atomic_store_explicit(&fpt->running, false, memory_order_relaxed); + return 0; +} + +int pcep_thread_get_counters_callback(struct thread *t) +{ + struct get_counters_args *args = THREAD_ARG(t); + assert(args != NULL); + struct ctrl_state *ctrl_state = args->ctrl_state; + 
assert(ctrl_state != NULL); + struct pcc_state *pcc_state; + + pcc_state = pcep_pcc_get_pcc_by_id(ctrl_state->pcc, args->pcc_id); + if (pcc_state) { + args->counters = pcep_lib_copy_counters(pcc_state->sess); + } else { + args->counters = NULL; + } + + return 0; +} + +int pcep_thread_send_report_callback(struct thread *t) +{ + struct send_report_args *args = THREAD_ARG(t); + assert(args != NULL); + struct ctrl_state *ctrl_state = args->ctrl_state; + assert(ctrl_state != NULL); + struct pcc_state *pcc_state; + + if (args->pcc_id == 0) { + for (int i = 0; i < MAX_PCC; i++) { + if (ctrl_state->pcc[i]) { + pcep_pcc_send_report(ctrl_state, + ctrl_state->pcc[i], + args->path); + } + } + } else { + pcc_state = + pcep_pcc_get_pcc_by_id(ctrl_state->pcc, args->pcc_id); + pcep_pcc_send_report(ctrl_state, pcc_state, args->path); + } + + return 0; +} + +int pcep_thread_get_pcep_session_callback(struct thread *t) +{ + struct get_pcep_session_args *args = THREAD_ARG(t); + assert(args != NULL); + struct ctrl_state *ctrl_state = args->ctrl_state; + assert(ctrl_state != NULL); + struct pcc_state *pcc_state; + + pcc_state = pcep_pcc_get_pcc_by_id(ctrl_state->pcc, args->pcc_id); + if (pcc_state) { + args->pcep_session = + pcep_lib_copy_pcep_session(pcc_state->sess); + } + + return 0; +} + +int pcep_thread_get_pcc_info_callback(struct thread *t) +{ + struct pcep_pcc_info *args = THREAD_ARG(t); + assert(args != NULL); + struct ctrl_state *ctrl_state = args->ctrl_state; + assert(ctrl_state != NULL); + + pcep_pcc_copy_pcc_info(ctrl_state->pcc, args); + + return 0; +} + +/* ------------ Controller Thread Timer Handler ------------ */ + +int schedule_thread_timer_with_cb(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_timer_type timer_type, + enum pcep_ctrl_timeout_type timeout_type, + uint32_t delay, void *payload, + struct thread **thread, + pcep_ctrl_thread_callback timer_cb) +{ + assert(thread != NULL); + + struct pcep_ctrl_timer_data *data; + + data = XCALLOC(MTYPE_PCEP, sizeof(*data)); + data->ctrl_state = ctrl_state; + data->timer_type = timer_type; + data->timeout_type = timeout_type; + data->pcc_id = pcc_id; + data->payload = payload; + + thread_add_timer(ctrl_state->self, timer_cb, (void *)data, delay, + thread); + + return 0; +} + +int schedule_thread_timer(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_timer_type timer_type, + enum pcep_ctrl_timeout_type timeout_type, + uint32_t delay, void *payload, struct thread **thread) +{ + return schedule_thread_timer_with_cb(ctrl_state, pcc_id, timer_type, + timeout_type, delay, payload, + thread, pcep_thread_timer_handler); +} + +int pcep_thread_timer_handler(struct thread *thread) +{ + /* data unpacking */ + struct pcep_ctrl_timer_data *data = THREAD_ARG(thread); + assert(data != NULL); + struct ctrl_state *ctrl_state = data->ctrl_state; + assert(ctrl_state != NULL); + enum pcep_ctrl_timer_type timer_type = data->timer_type; + enum pcep_ctrl_timeout_type timeout_type = data->timeout_type; + int pcc_id = data->pcc_id; + void *param = data->payload; + XFREE(MTYPE_PCEP, data); + + int ret = 0; + struct pcc_state *pcc_state = NULL; + + switch (timer_type) { + case TM_RECONNECT_PCC: + pcc_state = pcep_pcc_get_pcc_by_id(ctrl_state->pcc, pcc_id); + if (!pcc_state) + return ret; + pcep_pcc_reconnect(ctrl_state, pcc_state); + break; + case TM_TIMEOUT: + pcc_state = pcep_pcc_get_pcc_by_id(ctrl_state->pcc, pcc_id); + if (!pcc_state) + return ret; + pcep_pcc_timeout_handler(ctrl_state, pcc_state, timeout_type, + param); + break; + case 
TM_CALCULATE_BEST_PCE: + /* Previous best disconnect so new best should be synced */ + ret = pcep_pcc_timer_update_best_pce(ctrl_state, pcc_id); + break; + case TM_SESSION_TIMEOUT_PCC: + pcc_state = pcep_pcc_get_pcc_by_id(ctrl_state->pcc, pcc_id); + pcep_thread_remove_candidate_path_segments(ctrl_state, + pcc_state); + break; + default: + flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + "Unknown controller timer triggered: %u", timer_type); + break; + } + + return ret; +} + +int pcep_thread_pcep_event(struct thread *thread) +{ + struct pcep_ctrl_event_data *data = THREAD_ARG(thread); + assert(data != NULL); + struct ctrl_state *ctrl_state = data->ctrl_state; + pcep_event *event = data->payload; + XFREE(MTYPE_PCEP, data); + int i; + + for (i = 0; i < MAX_PCC; i++) { + if (ctrl_state->pcc[i]) { + struct pcc_state *pcc_state = ctrl_state->pcc[i]; + if (pcc_state->sess != event->session) + continue; + pcep_pcc_pcep_event_handler(ctrl_state, pcc_state, + event); + break; + } + } + destroy_pcep_event(event); + + return 0; +} + +/* ------------ Controller Thread Socket Functions ------------ */ + +int schedule_thread_socket(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_socket_type type, bool is_read, + void *payload, int fd, struct thread **thread, + pcep_ctrl_thread_callback socket_cb) +{ + assert(thread != NULL); + + struct pcep_ctrl_socket_data *data; + + data = XCALLOC(MTYPE_PCEP, sizeof(*data)); + data->ctrl_state = ctrl_state; + data->type = type; + data->is_read = is_read; + data->fd = fd; + data->pcc_id = pcc_id; + data->payload = payload; + + if (is_read) { + thread_add_read(ctrl_state->self, socket_cb, (void *)data, fd, + thread); + } else { + thread_add_write(ctrl_state->self, socket_cb, (void *)data, fd, + thread); + } + + return 0; +} + +int pcep_thread_socket_write(void *fpt, void **thread, int fd, void *payload, + pcep_ctrl_thread_callback socket_cb) +{ + struct ctrl_state *ctrl_state = ((struct frr_pthread *)fpt)->data; + + return schedule_thread_socket(ctrl_state, 0, SOCK_PCEPLIB, false, + payload, fd, (struct thread **)thread, + socket_cb); +} + +int pcep_thread_socket_read(void *fpt, void **thread, int fd, void *payload, + pcep_ctrl_thread_callback socket_cb) +{ + struct ctrl_state *ctrl_state = ((struct frr_pthread *)fpt)->data; + + return schedule_thread_socket(ctrl_state, 0, SOCK_PCEPLIB, true, + payload, fd, (struct thread **)thread, + socket_cb); +} + +int pcep_thread_send_ctrl_event(void *fpt, void *payload, + pcep_ctrl_thread_callback cb) +{ + struct ctrl_state *ctrl_state = ((struct frr_pthread *)fpt)->data; + + return send_to_thread_with_cb(ctrl_state, 0, EV_PCEPLIB_EVENT, 0, + payload, cb); +} + +/* ------------ Controller Thread Event Handler ------------ */ + +int send_to_thread(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_event_type type, uint32_t sub_type, + void *payload) +{ + return send_to_thread_with_cb(ctrl_state, pcc_id, type, sub_type, + payload, pcep_thread_event_handler); +} + +int send_to_thread_with_cb(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_event_type type, uint32_t sub_type, + void *payload, pcep_ctrl_thread_callback event_cb) +{ + struct pcep_ctrl_event_data *data; + + data = XCALLOC(MTYPE_PCEP, sizeof(*data)); + data->ctrl_state = ctrl_state; + data->type = type; + data->sub_type = sub_type; + data->pcc_id = pcc_id; + data->payload = payload; + + thread_add_event(ctrl_state->self, event_cb, (void *)data, 0, NULL); + + return 0; +} + +int pcep_thread_event_handler(struct thread *thread) +{ + /* 
data unpacking */ + struct pcep_ctrl_event_data *data = THREAD_ARG(thread); + assert(data != NULL); + struct ctrl_state *ctrl_state = data->ctrl_state; + assert(ctrl_state != NULL); + enum pcep_ctrl_event_type type = data->type; + uint32_t sub_type = data->sub_type; + int pcc_id = data->pcc_id; + void *payload = data->payload; + XFREE(MTYPE_PCEP, data); + + int ret = 0; + + /* Possible sub-type values */ + enum pcep_pathd_event_type path_event_type = PCEP_PATH_UNDEFINED; + + /* Possible payload values */ + struct path *path = NULL; + struct pcc_opts *pcc_opts = NULL; + struct pce_opts *pce_opts = NULL; + struct pcc_state *pcc_state = NULL; + + switch (type) { + case EV_UPDATE_PCC_OPTS: + assert(payload != NULL); + pcc_opts = (struct pcc_opts *)payload; + ret = pcep_thread_event_update_pcc_options(ctrl_state, + pcc_opts); + break; + case EV_UPDATE_PCE_OPTS: + assert(payload != NULL); + pce_opts = (struct pce_opts *)payload; + ret = pcep_thread_event_update_pce_options(ctrl_state, pcc_id, + pce_opts); + break; + case EV_REMOVE_PCC: + pce_opts = (struct pce_opts *)payload; + ret = pcep_thread_event_remove_pcc(ctrl_state, pce_opts); + if (ret == 0) { + ret = pcep_pcc_multi_pce_remove_pcc(ctrl_state, + ctrl_state->pcc); + } + break; + case EV_PATHD_EVENT: + assert(payload != NULL); + path_event_type = (enum pcep_pathd_event_type)sub_type; + path = (struct path *)payload; + ret = pcep_thread_event_pathd_event(ctrl_state, path_event_type, + path); + break; + case EV_SYNC_PATH: + assert(payload != NULL); + path = (struct path *)payload; + pcep_pcc_multi_pce_sync_path(ctrl_state, pcc_id, + ctrl_state->pcc); + pcep_thread_event_sync_path(ctrl_state, pcc_id, path); + break; + case EV_SYNC_DONE: + ret = pcep_thread_event_sync_done(ctrl_state, pcc_id); + break; + case EV_RESET_PCC_SESSION: + pcc_state = pcep_pcc_get_pcc_by_name(ctrl_state->pcc, + (const char *)payload); + if (pcc_state) { + pcep_pcc_disable(ctrl_state, pcc_state); + ret = pcep_pcc_enable(ctrl_state, pcc_state); + } else { + flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + "Cannot reset state for PCE: %s", + (const char *)payload); + } + break; + default: + flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + "Unexpected event received in controller thread: %u", + type); + break; + } + + return ret; +} + +int pcep_thread_event_update_pcc_options(struct ctrl_state *ctrl_state, + struct pcc_opts *opts) +{ + assert(opts != NULL); + if (ctrl_state->pcc_opts != NULL) { + XFREE(MTYPE_PCEP, ctrl_state->pcc_opts); + } + ctrl_state->pcc_opts = opts; + return 0; +} + +int pcep_thread_event_update_pce_options(struct ctrl_state *ctrl_state, + int pcc_id, struct pce_opts *pce_opts) +{ + if (!pce_opts || !ctrl_state) { + return 0; + } + struct pcc_state *pcc_state; + struct pcc_opts *pcc_opts; + + int current_pcc_id = + pcep_pcc_get_pcc_id_by_ip_port(ctrl_state->pcc, pce_opts); + if (current_pcc_id) { + pcc_state = + pcep_pcc_get_pcc_by_id(ctrl_state->pcc, current_pcc_id); + } else { + pcc_state = pcep_pcc_initialize(ctrl_state, + get_next_id(ctrl_state)); + if (set_pcc_state(ctrl_state, pcc_state)) { + XFREE(MTYPE_PCEP, pcc_state); + return 0; + } + } + + /* Copy the pcc options to delegate it to the update function */ + pcc_opts = XCALLOC(MTYPE_PCEP, sizeof(*pcc_opts)); + memcpy(pcc_opts, ctrl_state->pcc_opts, sizeof(*pcc_opts)); + + if (pcep_pcc_update(ctrl_state, pcc_state, pcc_opts, pce_opts)) { + flog_err(EC_PATH_PCEP_PCC_CONF_UPDATE, + "failed to update PCC configuration"); + } + + + return 0; +} + +int 
pcep_thread_event_remove_pcc_by_id(struct ctrl_state *ctrl_state, + int pcc_id) +{ + if (pcc_id) { + struct pcc_state *pcc_state = + pcep_pcc_get_pcc_by_id(ctrl_state->pcc, pcc_id); + if (pcc_state) { + remove_pcc_state(ctrl_state, pcc_state); + pcep_pcc_finalize(ctrl_state, pcc_state); + } + } + return 0; +} + +int pcep_thread_event_remove_pcc_all(struct ctrl_state *ctrl_state) +{ + assert(ctrl_state != NULL); + + for (int i = 0; i < MAX_PCC; i++) { + pcep_thread_event_remove_pcc_by_id( + ctrl_state, + pcep_pcc_get_pcc_id_by_idx(ctrl_state->pcc, i)); + } + return 0; +} + +int pcep_thread_event_remove_pcc(struct ctrl_state *ctrl_state, + struct pce_opts *pce_opts) +{ + assert(ctrl_state != NULL); + + if (pce_opts) { + int pcc_id = pcep_pcc_get_pcc_id_by_ip_port(ctrl_state->pcc, + pce_opts); + if (pcc_id) { + pcep_thread_event_remove_pcc_by_id(ctrl_state, pcc_id); + } else { + return -1; + } + XFREE(MTYPE_PCEP, pce_opts); + } else { + pcep_thread_event_remove_pcc_all(ctrl_state); + } + + return 0; +} + +int pcep_thread_event_sync_path(struct ctrl_state *ctrl_state, int pcc_id, + struct path *path) +{ + struct pcc_state *pcc_state = + pcep_pcc_get_pcc_by_id(ctrl_state->pcc, pcc_id); + pcep_pcc_sync_path(ctrl_state, pcc_state, path); + pcep_free_path(path); + return 0; +} + +int pcep_thread_event_sync_done(struct ctrl_state *ctrl_state, int pcc_id) +{ + struct pcc_state *pcc_state = + pcep_pcc_get_pcc_by_id(ctrl_state->pcc, pcc_id); + pcep_pcc_sync_done(ctrl_state, pcc_state); + return 0; +} + +int pcep_thread_event_pathd_event(struct ctrl_state *ctrl_state, + enum pcep_pathd_event_type type, + struct path *path) +{ + int i; + + for (i = 0; i < MAX_PCC; i++) { + if (ctrl_state->pcc[i]) { + struct pcc_state *pcc_state = ctrl_state->pcc[i]; + pcep_pcc_pathd_event_handler(ctrl_state, pcc_state, + type, path); + } + } + + pcep_free_path(path); + + return 0; +} + + +/* ------------ Main Thread Event Handler ------------ */ + +int send_to_main(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_main_event_type type, void *payload) +{ + struct pcep_main_event_data *data; + + data = XCALLOC(MTYPE_PCEP, sizeof(*data)); + data->handler = ctrl_state->main_event_handler; + data->type = type; + data->pcc_id = pcc_id; + data->payload = payload; + + thread_add_event(ctrl_state->main, pcep_main_event_handler, + (void *)data, 0, NULL); + return 0; +} + +int pcep_main_event_handler(struct thread *thread) +{ + /* data unpacking */ + struct pcep_main_event_data *data = THREAD_ARG(thread); + assert(data != NULL); + pcep_main_event_handler_t handler = data->handler; + enum pcep_main_event_type type = data->type; + int pcc_id = data->pcc_id; + void *payload = data->payload; + XFREE(MTYPE_PCEP, data); + + return handler(type, pcc_id, payload); +} + + +/* ------------ Helper functions ------------ */ +void set_ctrl_state(struct frr_pthread *fpt, struct ctrl_state *ctrl_state) +{ + assert(fpt != NULL); + fpt->data = ctrl_state; +} + +struct ctrl_state *get_ctrl_state(struct frr_pthread *fpt) +{ + assert(fpt != NULL); + assert(fpt->data != NULL); + + struct ctrl_state *ctrl_state; + ctrl_state = (struct ctrl_state *)fpt->data; + assert(ctrl_state != NULL); + return ctrl_state; +} + +int get_next_id(struct ctrl_state *ctrl_state) +{ + return ++ctrl_state->pcc_last_id; +} + +int set_pcc_state(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state) +{ + assert(ctrl_state != NULL); + assert(pcep_pcc_get_pcc_id(pcc_state) != 0); + + int current_pcc_idx = pcep_pcc_get_free_pcc_idx(ctrl_state->pcc); + if 
(current_pcc_idx >= 0) { + ctrl_state->pcc[current_pcc_idx] = pcc_state; + ctrl_state->pcc_count++; + PCEP_DEBUG("added pce pcc_id (%d) idx (%d)", + pcep_pcc_get_pcc_id(pcc_state), current_pcc_idx); + return 0; + } else { + PCEP_DEBUG("Max number of PCCs reached"); + return 1; + } +} + +void remove_pcc_state(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + assert(ctrl_state != NULL); + assert(pcep_pcc_get_pcc_id(pcc_state) != 0); + + int idx = 0; + idx = pcep_pcc_get_pcc_idx_by_id(ctrl_state->pcc, + pcep_pcc_get_pcc_id(pcc_state)); + if (idx != -1) { + ctrl_state->pcc[idx] = NULL; + ctrl_state->pcc_count--; + PCEP_DEBUG("removed pce pcc_id (%d)", + pcep_pcc_get_pcc_id(pcc_state)); + } +} + +uint32_t backoff_delay(uint32_t max, uint32_t base, uint32_t retry_count) +{ + uint32_t a = min(max, base * (1 << retry_count)); + uint64_t r = frr_weak_random(), m = RAND_MAX; + uint32_t b = (a / 2) + (r * (a / 2)) / m; + return b; +} + +const char *timer_type_name(enum pcep_ctrl_timer_type type) +{ + switch (type) { + case TM_UNDEFINED: + return "UNDEFINED"; + case TM_RECONNECT_PCC: + return "RECONNECT_PCC"; + case TM_PCEPLIB_TIMER: + return "PCEPLIB_TIMER"; + case TM_TIMEOUT: + return "TIMEOUT"; + default: + return "UNKNOWN"; + } +} + +const char *timeout_type_name(enum pcep_ctrl_timeout_type type) +{ + switch (type) { + case TO_UNDEFINED: + return "UNDEFINED"; + case TO_COMPUTATION_REQUEST: + return "COMPUTATION_REQUEST"; + default: + return "UNKNOWN"; + } +} diff --git a/pathd/path_pcep_controller.h b/pathd/path_pcep_controller.h new file mode 100644 index 0000000000..f6eaa0ca2a --- /dev/null +++ b/pathd/path_pcep_controller.h @@ -0,0 +1,166 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_CONTROLLER_H_ +#define _PATH_PCEP_CONTROLLER_H_ + +#include "pathd/path_pcep.h" + + +enum pcep_main_event_type { + PCEP_MAIN_EVENT_UNDEFINED = 0, + PCEP_MAIN_EVENT_START_SYNC, + PCEP_MAIN_EVENT_UPDATE_CANDIDATE, + PCEP_MAIN_EVENT_REMOVE_CANDIDATE_LSP +}; + +typedef int (*pcep_main_event_handler_t)(enum pcep_main_event_type type, + int pcc_id, void *payload); + +enum pcep_pathd_event_type { + PCEP_PATH_UNDEFINED = 0, + PCEP_PATH_CREATED, + PCEP_PATH_UPDATED, + PCEP_PATH_REMOVED +}; + +struct ctrl_state { + struct thread_master *main; + struct thread_master *self; + pcep_main_event_handler_t main_event_handler; + struct pcc_opts *pcc_opts; + int pcc_count; + int pcc_last_id; + struct pcc_state *pcc[MAX_PCC]; +}; + +/* Timer handling data structures */ + +enum pcep_ctrl_timeout_type { TO_UNDEFINED, TO_COMPUTATION_REQUEST, TO_MAX }; + +enum pcep_ctrl_timer_type { + TM_UNDEFINED, + TM_RECONNECT_PCC, + TM_PCEPLIB_TIMER, + TM_TIMEOUT, + TM_CALCULATE_BEST_PCE, + TM_SESSION_TIMEOUT_PCC, + TM_MAX +}; + +struct pcep_ctrl_timer_data { + struct ctrl_state *ctrl_state; + enum pcep_ctrl_timer_type timer_type; + enum pcep_ctrl_timeout_type timeout_type; + int pcc_id; + void *payload; +}; + +/* Socket handling data structures */ + +enum pcep_ctrl_socket_type { SOCK_PCEPLIB = 1 }; + +struct pcep_ctrl_socket_data { + struct ctrl_state *ctrl_state; + enum pcep_ctrl_socket_type type; + bool is_read; + int fd; + int pcc_id; + void *payload; +}; + +typedef int (*pcep_ctrl_thread_callback)(struct thread *); + +/* PCC connection information, populated in a thread-safe + * manner with pcep_ctrl_get_pcc_info() */ +struct pcep_pcc_info { + struct ctrl_state *ctrl_state; /* will be NULL when returned */ + char pce_name[64]; + int pcc_id; + struct ipaddr pcc_addr; + uint16_t pcc_port; + int status; + short msd; + uint32_t next_reqid; + uint32_t next_plspid; + bool is_best_multi_pce; + bool previous_best; + uint8_t precedence; +}; + +/* Functions called from the main thread */ +int pcep_ctrl_initialize(struct thread_master *main_thread, + struct frr_pthread **fpt, + pcep_main_event_handler_t event_handler); +int pcep_ctrl_finalize(struct frr_pthread **fpt); +int pcep_ctrl_update_pcc_options(struct frr_pthread *fpt, + struct pcc_opts *opts); +int pcep_ctrl_update_pce_options(struct frr_pthread *fpt, + struct pce_opts *opts); +int pcep_ctrl_remove_pcc(struct frr_pthread *fpt, struct pce_opts *pce_opts); +int pcep_ctrl_reset_pcc_session(struct frr_pthread *fpt, char *pce_name); +int pcep_ctrl_pathd_event(struct frr_pthread *fpt, + enum pcep_pathd_event_type type, struct path *path); +int pcep_ctrl_sync_path(struct frr_pthread *fpt, int pcc_id, struct path *path); +int pcep_ctrl_sync_done(struct frr_pthread *fpt, int pcc_id); +struct counters_group *pcep_ctrl_get_counters(struct frr_pthread *fpt, + int pcc_id); +pcep_session *pcep_ctrl_get_pcep_session(struct frr_pthread *fpt, int pcc_id); +struct pcep_pcc_info *pcep_ctrl_get_pcc_info(struct frr_pthread *fpt, + const char *pce_name); + +/* Synchronously send a report, the caller is responsible to free the path, + * If `pcc_id` is `0` the report is sent by all PCCs */ +void pcep_ctrl_send_report(struct frr_pthread *fpt, int pcc_id, + struct path *path); + +/* Functions called from the controller thread */ +void 
pcep_thread_start_sync(struct ctrl_state *ctrl_state, int pcc_id); +void pcep_thread_update_path(struct ctrl_state *ctrl_state, int pcc_id, + struct path *path); +void pcep_thread_cancel_timer(struct thread **thread); +void pcep_thread_schedule_reconnect(struct ctrl_state *ctrl_state, int pcc_id, + int retry_count, struct thread **thread); +void pcep_thread_schedule_timeout(struct ctrl_state *ctrl_state, int pcc_id, + enum pcep_ctrl_timeout_type type, + uint32_t delay, void *param, + struct thread **thread); +void pcep_thread_schedule_session_timeout(struct ctrl_state *ctrl_state, + int pcc_id, int delay, + struct thread **thread); +void pcep_thread_remove_candidate_path_segments(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); + +void pcep_thread_schedule_sync_best_pce(struct ctrl_state *ctrl_state, + int pcc_id, int delay, + struct thread **thread); +void pcep_thread_schedule_pceplib_timer(struct ctrl_state *ctrl_state, + int delay, void *payload, + struct thread **thread, + pcep_ctrl_thread_callback cb); +int pcep_thread_socket_read(void *fpt, void **thread, int fd, void *payload, + pcep_ctrl_thread_callback cb); +int pcep_thread_socket_write(void *fpt, void **thread, int fd, void *payload, + pcep_ctrl_thread_callback cb); + +int pcep_thread_send_ctrl_event(void *fpt, void *payload, + pcep_ctrl_thread_callback cb); +int pcep_thread_pcep_event(struct thread *thread); +int pcep_thread_pcc_count(struct ctrl_state *ctrl_state); + +#endif // _PATH_PCEP_CONTROLLER_H_ diff --git a/pathd/path_pcep_debug.c b/pathd/path_pcep_debug.c new file mode 100644 index 0000000000..bcaadfe4d8 --- /dev/null +++ b/pathd/path_pcep_debug.c @@ -0,0 +1,1771 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <string.h> +#include <stdbool.h> +#include <time.h> +#include <libyang/libyang.h> + +#include "printfrr.h" +#include "ipaddr.h" + +#include "pathd/path_pcep_debug.h" + +static void _format_pcc_opts(int ps, struct pcc_opts *ops); +static void _format_pce_opts(int ps, struct pce_opts *ops); +static void _format_pcc_caps(int ps, struct pcep_caps *caps); +static void _format_pcc_state(int ps, struct pcc_state *state); +static void _format_ctrl_state(int ps, struct ctrl_state *state); +static void _format_path(int ps, struct path *path); +static void _format_path_hop(int ps, struct path_hop *hop); +static void _format_path_metric(int ps, struct path_metric *metric); +static void _format_pcep_event(int ps, pcep_event *event); +static void _format_pcep_message(int ps, struct pcep_message *msg); +static void _format_pcep_objects(int ps, double_linked_list *objs); +static void _format_pcep_object(int ps, struct pcep_object_header *obj); +static void _format_pcep_object_details(int ps, struct pcep_object_header *obj); +static void _format_pcep_object_error(int ps, struct pcep_object_error *obj); +static void _format_pcep_object_open(int ps, struct pcep_object_open *obj); +static void _format_pcep_object_rp(int ps, struct pcep_object_rp *obj); +static void _format_pcep_object_srp(int ps, struct pcep_object_srp *obj); +static void _format_pcep_object_lsp(int psps, struct pcep_object_lsp *obj); +static void _format_pcep_object_lspa(int psps, struct pcep_object_lspa *obj); +static void +_format_pcep_object_ipv4_endpoint(int ps, + struct pcep_object_endpoints_ipv4 *obj); +static void _format_pcep_object_metric(int ps, struct pcep_object_metric *obj); +static void _format_pcep_object_bandwidth(int ps, + struct pcep_object_bandwidth *obj); +static void _format_pcep_object_nopath(int ps, struct pcep_object_nopath *obj); +static void +_format_pcep_object_objfun(int ps, struct pcep_object_objective_function *obj); +static void _format_pcep_object_ro(int ps, struct pcep_object_ro *obj); +static void _format_pcep_object_ro_details(int ps, + struct pcep_object_ro_subobj *ro); +static void _format_pcep_object_ro_ipv4(int ps, + struct pcep_ro_subobj_ipv4 *obj); +static void _format_pcep_object_ro_sr(int ps, struct pcep_ro_subobj_sr *obj); +static void _format_pcep_object_tlvs(int ps, struct pcep_object_header *obj); +static void _format_pcep_object_tlv(int ps, + struct pcep_object_tlv_header *tlv_header); +static void +_format_pcep_object_tlv_details(int ps, + struct pcep_object_tlv_header *tlv_header); +static void _format_pcep_object_tlv_symbolic_path_name( + int ps, struct pcep_object_tlv_symbolic_path_name *tlv); +static void _format_pcep_object_tlv_stateful_pce_capability( + int ps, struct pcep_object_tlv_stateful_pce_capability *tlv); +static void _format_pcep_object_tlv_sr_pce_capability( + int ps, struct pcep_object_tlv_sr_pce_capability *tlv); +static void _format_pcep_object_tlv_path_setup_type( + int ps, struct pcep_object_tlv_path_setup_type *tlv); + +const char *pcc_status_name(enum pcc_status status) +{ + switch (status) { + case PCEP_PCC_INITIALIZED: + return "INITIALIZED"; + case PCEP_PCC_DISCONNECTED: + return "DISCONNECTED"; + case PCEP_PCC_CONNECTING: + return "CONNECTING"; + case PCEP_PCC_SYNCHRONIZING: + return "SYNCHRONIZING"; + case 
PCEP_PCC_OPERATING: + return "OPERATING"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_event_type_name(pcep_event_type event_type) +{ + switch (event_type) { + case MESSAGE_RECEIVED: + return "MESSAGE_RECEIVED"; + case PCE_CLOSED_SOCKET: + return "PCE_CLOSED_SOCKET"; + case PCE_SENT_PCEP_CLOSE: + return "PCE_SENT_PCEP_CLOSE"; + case PCE_DEAD_TIMER_EXPIRED: + return "PCE_DEAD_TIMER_EXPIRED"; + case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED: + return "PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED"; + case PCC_CONNECTED_TO_PCE: + return "PCC_CONNECTED_TO_PCE"; + case PCC_PCEP_SESSION_CLOSED: + return "PCC_PCEP_SESSION_CLOSED"; + case PCC_RCVD_INVALID_OPEN: + return "PCC_RCVD_INVALID_OPEN"; + case PCC_RCVD_MAX_INVALID_MSGS: + return "PCC_RCVD_MAX_INVALID_MSGS"; + case PCC_RCVD_MAX_UNKOWN_MSGS: + return "PCC_RCVD_MAX_UNKOWN_MSGS"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_error_type_name(enum pcep_error_type error_type) +{ + switch (error_type) { + + case PCEP_ERRT_SESSION_FAILURE: + return "SESSION_FAILURE"; + case PCEP_ERRT_CAPABILITY_NOT_SUPPORTED: + return "CAPABILITY_NOT_SUPPORTED"; + case PCEP_ERRT_UNKNOW_OBJECT: + return "UNKNOW_OBJECT"; + case PCEP_ERRT_NOT_SUPPORTED_OBJECT: + return "NOT_SUPPORTED_OBJECT"; + case PCEP_ERRT_POLICY_VIOLATION: + return "POLICY_VIOLATION"; + case PCEP_ERRT_MANDATORY_OBJECT_MISSING: + return "MANDATORY_OBJECT_MISSING"; + case PCEP_ERRT_SYNC_PC_REQ_MISSING: + return "SYNC_PC_REQ_MISSING"; + case PCEP_ERRT_UNKNOWN_REQ_REF: + return "UNKNOWN_REQ_REF"; + case PCEP_ERRT_ATTEMPT_TO_ESTABLISH_2ND_PCEP_SESSION: + return "ATTEMPT_TO_ESTABLISH_2ND_PCEP_SESSION"; + case PCEP_ERRT_RECEPTION_OF_INV_OBJECT: + return "RECEPTION_OF_INV_OBJECT"; + case PCEP_ERRT_UNRECOGNIZED_EXRS_SUBOBJ: + return "UNRECOGNIZED_EXRS_SUBOBJ"; + case PCEP_ERRT_DIFFSERV_AWARE_TE_ERROR: + return "DIFFSERV_AWARE_TE_ERROR"; + case PCEP_ERRT_BRPC_PROC_COMPLETION_ERROR: + return "BRPC_PROC_COMPLETION_ERROR"; + case PCEP_ERRT_UNASSIGNED14: + return "UNASSIGNED14"; + case PCEP_ERRT_GLOBAL_CONCURRENT_ERROR: + return "GLOBAL_CONCURRENT_ERROR"; + case PCEP_ERRT_P2PMP_CAP_ERROR: + return "P2PMP_CAP_ERROR"; + case PCEP_ERRT_P2P_ENDPOINTS_ERROR: + return "P2P_ENDPOINTS_ERROR"; + case PCEP_ERRT_P2P_FRAGMENTATION_ERROR: + return "P2P_FRAGMENTATION_ERROR"; + case PCEP_ERRT_INVALID_OPERATION: + return "INVALID_OPERATION"; + case PCEP_ERRT_LSP_STATE_SYNC_ERROR: + return "LSP_STATE_SYNC_ERROR"; + case PCEP_ERRT_INVALID_TE_PATH_SETUP_TYPE: + return "INVALID_TE_PATH_SETUP_TYPE"; + case PCEP_ERRT_UNASSIGNED22: + return "UNASSIGNED22"; + case PCEP_ERRT_BAD_PARAMETER_VALUE: + return "BAD_PARAMETER_VALUE"; + case PCEP_ERRT_LSP_INSTANTIATE_ERROR: + return "LSP_INSTANTIATE_ERROR"; + case PCEP_ERRT_START_TLS_FAILURE: + return "START_TLS_FAILURE"; + case PCEP_ERRT_ASSOCIATION_ERROR: + return "ASSOCIATION_ERROR"; + case PCEP_ERRT_WSON_RWA_ERROR: + return "WSON_RWA_ERROR"; + case PCEP_ERRT_H_PCE_ERROR: + return "H_PCE_ERROR"; + case PCEP_ERRT_PATH_COMP_FAILURE: + return "PATH_COMP_FAILURE"; + case PCEP_ERRT_UNASSIGNED30: + return "UNASSIGNED30"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_error_value_name(enum pcep_error_type error_type, + enum pcep_error_value error_value) +{ + switch (TUP(error_type, error_value)) { + + case TUP(PCEP_ERRT_CAPABILITY_NOT_SUPPORTED, PCEP_ERRV_UNASSIGNED): + case TUP(PCEP_ERRT_SYNC_PC_REQ_MISSING, PCEP_ERRV_UNASSIGNED): + case TUP(PCEP_ERRT_UNKNOWN_REQ_REF, PCEP_ERRV_UNASSIGNED): + case TUP(PCEP_ERRT_ATTEMPT_TO_ESTABLISH_2ND_PCEP_SESSION, + PCEP_ERRV_UNASSIGNED): + case 
TUP(PCEP_ERRT_UNRECOGNIZED_EXRS_SUBOBJ, PCEP_ERRV_UNASSIGNED): + return "UNASSIGNED"; + + case TUP(PCEP_ERRT_SESSION_FAILURE, PCEP_ERRV_RECVD_INVALID_OPEN_MSG): + return "RECVD_INVALID_OPEN_MSG"; + case TUP(PCEP_ERRT_SESSION_FAILURE, PCEP_ERRV_OPENWAIT_TIMED_OUT): + return "OPENWAIT_TIMED_OUT"; + case TUP(PCEP_ERRT_SESSION_FAILURE, + PCEP_ERRV_UNACCEPTABLE_OPEN_MSG_NO_NEG): + return "UNACCEPTABLE_OPEN_MSG_NO_NEG"; + case TUP(PCEP_ERRT_SESSION_FAILURE, + PCEP_ERRV_UNACCEPTABLE_OPEN_MSG_NEG): + return "UNACCEPTABLE_OPEN_MSG_NEG"; + case TUP(PCEP_ERRT_SESSION_FAILURE, + PCEP_ERRV_RECVD_SECOND_OPEN_MSG_UNACCEPTABLE): + return "RECVD_SECOND_OPEN_MSG_UNACCEPTABLE"; + case TUP(PCEP_ERRT_SESSION_FAILURE, PCEP_ERRV_RECVD_PCERR): + return "RECVD_PCERR"; + case TUP(PCEP_ERRT_SESSION_FAILURE, PCEP_ERRV_KEEPALIVEWAIT_TIMED_OUT): + return "KEEPALIVEWAIT_TIMED_OUT"; + case TUP(PCEP_ERRT_SESSION_FAILURE, + PCEP_ERRV_PCEP_VERSION_NOT_SUPPORTED): + return "PCEP_VERSION_NOT_SUPPORTED"; + + case TUP(PCEP_ERRT_UNKNOW_OBJECT, PCEP_ERRV_UNREC_OBJECT_CLASS): + return "UNREC_OBJECT_CLASS"; + case TUP(PCEP_ERRT_UNKNOW_OBJECT, PCEP_ERRV_UNREC_OBJECT_TYPE): + return "UNREC_OBJECT_TYPE"; + + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_NOT_SUPPORTED_OBJECT_CLASS): + return "NOT_SUPPORTED_OBJECT_CLASS"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_NOT_SUPPORTED_OBJECT_TYPE): + return "NOT_SUPPORTED_OBJECT_TYPE"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, PCEP_ERRV_UNSUPPORTED_PARAM): + return "UNSUPPORTED_PARAM"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_UNSUPPORTED_NW_PERF_CONSTRAINT): + return "UNSUPPORTED_NW_PERF_CONSTRAINT"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_NOT_SUPPORTED_BW_OBJECT_3_4): + return "NOT_SUPPORTED_BW_OBJECT_3_4"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_UNSUPPORTED_ENDPOINT_TYPE): + return "UNSUPPORTED_ENDPOINT_TYPE"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_UNSUPPORTED_ENDPOINT_TLV): + return "UNSUPPORTED_ENDPOINT_TLV"; + case TUP(PCEP_ERRT_NOT_SUPPORTED_OBJECT, + PCEP_ERRV_UNSUPPORTED_RP_FLAG_GRANULARITY): + return "UNSUPPORTED_RP_FLAG_GRANULARITY"; + + case TUP(PCEP_ERRT_POLICY_VIOLATION, + PCEP_ERRV_C_BIT_SET_IN_METRIC_OBJECT): + return "C_BIT_SET_IN_METRIC_OBJECT"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, + PCEP_ERRV_O_BIT_CLEARD_IN_RP_OBJECT): + return "O_BIT_CLEARD_IN_RP_OBJECT"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, + PCEP_ERRV_OBJECTIVE_FUNC_NOT_ALLOWED): + return "OBJECTIVE_FUNC_NOT_ALLOWED"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, PCEP_ERRV_RP_OF_BIT_SET): + return "RP_OF_BIT_SET"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, + PCEP_ERRV_GLOBAL_CONCURRENCY_NOT_ALLOWED): + return "GLOBAL_CONCURRENCY_NOT_ALLOWED"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, PCEP_ERRV_MONITORING_MSG_REJECTED): + return "MONITORING_MSG_REJECTED"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, + PCEP_ERRV_P2MP_PATH_COMP_NOT_ALLOWED): + return "P2MP_PATH_COMP_NOT_ALLOWED"; + case TUP(PCEP_ERRT_POLICY_VIOLATION, + PCEP_ERRV_UNALLOWED_NW_PERF_CONSTRAINT): + return "UNALLOWED_NW_PERF_CONSTRAINT"; + + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_RP_OBJECT_MISSING): + return "RP_OBJECT_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_RRO_OBJECT_MISSING_FOR_REOP): + return "RRO_OBJECT_MISSING_FOR_REOP"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_EP_OBJECT_MISSING): + return "EP_OBJECT_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_MONITOR_OBJECT_MISSING): + return "MONITOR_OBJECT_MISSING"; + 
case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_LSP_OBJECT_MISSING): + return "LSP_OBJECT_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_ERO_OBJECT_MISSING): + return "ERO_OBJECT_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_SRP_OBJECT_MISSING): + return "SRP_OBJECT_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_LSP_ID_TLV_MISSING): + return "LSP_ID_TLV_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_LSP_DB_TLV_MISSING): + return "LSP_DB_TLV_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_S2LS_OBJECT_MISSING): + return "S2LS_OBJECT_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_P2MP_LSP_ID_TLV_MISSING): + return "P2MP_LSP_ID_TLV_MISSING"; + case TUP(PCEP_ERRT_MANDATORY_OBJECT_MISSING, + PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING): + return "DISJOINTED_CONF_TLV_MISSING"; + + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_P_FLAG_NOT_CORRECT_IN_OBJECT): + return "P_FLAG_NOT_CORRECT_IN_OBJECT"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_BAD_LABEL_VALUE): + return "BAD_LABEL_VALUE"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_UNSUPPORTED_NUM_SR_ERO_SUBOBJECTS): + return "UNSUPPORTED_NUM_SR_ERO_SUBOBJECTS"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_BAD_LABEL_FORMAT): + return "BAD_LABEL_FORMAT"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_ERO_SR_ERO_MIX): + return "ERO_SR_ERO_MIX"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_SR_ERO_SID_NAI_ABSENT): + return "SR_ERO_SID_NAI_ABSENT"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_SR_RRO_SID_NAI_ABSENT): + return "SR_RRO_SID_NAI_ABSENT"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_SYMBOLIC_PATH_NAME_TLV_MISSING): + return "SYMBOLIC_PATH_NAME_TLV_MISSING"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_MSD_EXCEEDS_PCEP_SESSION_MAX): + return "MSD_EXCEEDS_PCEP_SESSION_MAX"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_RRO_SR_RRO_MIX): + return "RRO_SR_RRO_MIX"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_MALFORMED_OBJECT): + return "MALFORMED_OBJECT"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_MISSING_PCE_SR_CAP_TLV): + return "MISSING_PCE_SR_CAP_TLV"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_UNSUPPORTED_NAI): + return "UNSUPPORTED_NAI"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_UNKNOWN_SID): + return "UNKNOWN_SID"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_CANNOT_RESOLVE_NAI_TO_SID): + return "CANNOT_RESOLVE_NAI_TO_SID"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_COULD_NOT_FIND_SRGB): + return "COULD_NOT_FIND_SRGB"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_SID_EXCEEDS_SRGB): + return "SID_EXCEEDS_SRGB"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_COULD_NOT_FIND_SRLB): + return "COULD_NOT_FIND_SRLB"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_SID_EXCEEDS_SRLB): + return "SID_EXCEEDS_SRLB"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, PCEP_ERRV_INCONSISTENT_SID): + return "INCONSISTENT_SID"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_MSD_MUST_BE_NONZERO): + return "MSD_MUST_BE_NONZERO"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_MISMATCH_O_S2LS_LSP): + return "MISMATCH_O_S2LS_LSP"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_INCOMPATIBLE_H_PCE_OF): + return "INCOMPATIBLE_H_PCE_OF"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, 
+ PCEP_ERRV_BAD_BANDWIDTH_TYPE_3_4): + return "BAD_BANDWIDTH_TYPE_3_4"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_UNSUPPORTED_LSP_PROT_FLAGS): + return "UNSUPPORTED_LSP_PROT_FLAGS"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_UNSUPPORTED_2ND_LSP_PROT_FLAGS): + return "UNSUPPORTED_2ND_LSP_PROT_FLAGS"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_UNSUPPORTED_LINK_PROT_TYPE): + return "UNSUPPORTED_LINK_PROT_TYPE"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_LABEL_SET_TLV_NO_RP_R): + return "LABEL_SET_TLV_NO_RP_R"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_WRONG_LABEL_SET_TLV_O_L_SET): + return "WRONG_LABEL_SET_TLV_O_L_SET"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_WRONG_LABEL_SET_O_SET): + return "WRONG_LABEL_SET_O_SET"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_MISSING_GMPLS_CAP_TLV): + return "MISSING_GMPLS_CAP_TLV"; + case TUP(PCEP_ERRT_RECEPTION_OF_INV_OBJECT, + PCEP_ERRV_INCOMPATIBLE_OF_CODE): + return "INCOMPATIBLE_OF_CODE"; + + case TUP(PCEP_ERRT_DIFFSERV_AWARE_TE_ERROR, + PCEP_ERRV_UNSUPPORTED_CLASS_TYPE): + return "UNSUPPORTED_CLASS_TYPE"; + case TUP(PCEP_ERRT_DIFFSERV_AWARE_TE_ERROR, + PCEP_ERRV_INVALID_CLASS_TYPE): + return "INVALID_CLASS_TYPE"; + case TUP(PCEP_ERRT_DIFFSERV_AWARE_TE_ERROR, + PCEP_ERRV_CLASS_SETUP_TYPE_NOT_TE_CLASS): + return "CLASS_SETUP_TYPE_NOT_TE_CLASS"; + + case TUP(PCEP_ERRT_BRPC_PROC_COMPLETION_ERROR, + PCEP_ERRV_BRPC_PROC_NOT_SUPPORTED): + return "BRPC_PROC_NOT_SUPPORTED"; + + case TUP(PCEP_ERRT_GLOBAL_CONCURRENT_ERROR, + PCEP_ERRV_INSUFFICIENT_MEMORY): + return "INSUFFICIENT_MEMORY"; + case TUP(PCEP_ERRT_GLOBAL_CONCURRENT_ERROR, + PCEP_ERRV_GLOBAL_CONCURRENT_OPT_NOT_SUPPORTED): + return "GLOBAL_CONCURRENT_OPT_NOT_SUPPORTED"; + + case TUP(PCEP_ERRT_P2PMP_CAP_ERROR, PCEP_ERRV_PCE_INSUFFICIENT_MEMORY): + return "PCE_INSUFFICIENT_MEMORY"; + case TUP(PCEP_ERRT_P2PMP_CAP_ERROR, + PCEP_ERRV_PCE_NOT_CAPABLE_P2MP_COMP): + return "PCE_NOT_CAPABLE_P2MP_COMP"; + + case TUP(PCEP_ERRT_P2P_ENDPOINTS_ERROR, + PCEP_ERRV_NO_EP_WITH_LEAF_TYPE2): + return "NO_EP_WITH_LEAF_TYPE2"; + case TUP(PCEP_ERRT_P2P_ENDPOINTS_ERROR, + PCEP_ERRV_NO_EP_WITH_LEAF_TYPE3): + return "NO_EP_WITH_LEAF_TYPE3"; + case TUP(PCEP_ERRT_P2P_ENDPOINTS_ERROR, + PCEP_ERRV_NO_EP_WITH_LEAF_TYPE4): + return "NO_EP_WITH_LEAF_TYPE4"; + case TUP(PCEP_ERRT_P2P_ENDPOINTS_ERROR, PCEP_ERRV_INCONSITENT_EP): + return "INCONSITENT_EP"; + + case TUP(PCEP_ERRT_P2P_FRAGMENTATION_ERROR, + PCEP_ERRV_FRAG_REQUEST_FAILURE): + return "FRAG_REQUEST_FAILURE"; + case TUP(PCEP_ERRT_P2P_FRAGMENTATION_ERROR, + PCEP_ERRV_FRAG_REPORT_FAILURE): + return "FRAG_REPORT_FAILURE"; + case TUP(PCEP_ERRT_P2P_FRAGMENTATION_ERROR, + PCEP_ERRV_FRAG_UPDATE_FAILURE): + return "FRAG_UPDATE_FAILURE"; + case TUP(PCEP_ERRT_P2P_FRAGMENTATION_ERROR, + PCEP_ERRV_FRAG_INSTANTIATION_FAILURE): + return "FRAG_INSTANTIATION_FAILURE"; + + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_UPDATE_FOR_NON_DELEGATED_LSP): + return "LSP_UPDATE_FOR_NON_DELEGATED_LS"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_UPDATE_NON_ADVERTISED_PCE): + return "LSP_UPDATE_NON_ADVERTISED_PC"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_UPDATE_UNKNOWN_PLSP_ID): + return "LSP_UPDATE_UNKNOWN_PLSP_I"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_REPORT_NON_ADVERTISED_PCE): + return "LSP_REPORT_NON_ADVERTISED_PC"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_PCE_INIT_LSP_LIMIT_REACHED): + return "PCE_INIT_LSP_LIMIT_REACHE"; + case 
TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_PCE_INIT_LSP_DELEGATION_CANT_REVOKE): + return "PCE_INIT_LSP_DELEGATION_CANT_REVOK"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_INIT_NON_ZERO_PLSP_ID): + return "LSP_INIT_NON_ZERO_PLSP_I"; + case TUP(PCEP_ERRT_INVALID_OPERATION, PCEP_ERRV_LSP_NOT_PCE_INITIATED): + return "LSP_NOT_PCE_INITIATE"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_PCE_INIT_OP_FREQ_LIMIT_REACHED): + return "PCE_INIT_OP_FREQ_LIMIT_REACHE"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_REPORT_P2MP_NOT_ADVERTISED): + return "LSP_REPORT_P2MP_NOT_ADVERTISE"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_UPDATE_P2MP_NOT_ADVERTISED): + return "LSP_UPDATE_P2MP_NOT_ADVERTISE"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_INSTANTIATION_P2MP_NOT_ADVERTISED): + return "LSP_INSTANTIATION_P2MP_NOT_ADVERTISE"; + case TUP(PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_AUTO_BW_CAP_NOT_ADVERTISED): + return "AUTO_BW_CAP_NOT_ADVERTISE"; + + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_PCE_CANT_PROCESS_LSP_REPORT): + return "PCE_CANT_PROCESS_LSP_REPORT"; + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_LSP_DB_VERSION_MISMATCH): + return "LSP_DB_VERSION_MISMATCH"; + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_TRIGGER_ATTEMPT_BEFORE_PCE_TRIGGER): + return "TRIGGER_ATTEMPT_BEFORE_PCE_TRIGGER"; + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_TRIGGER_ATTEMPT_NO_PCE_TRIGGER_CAP): + return "TRIGGER_ATTEMPT_NO_PCE_TRIGGER_CAP"; + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_PCC_CANT_COMPLETE_STATE_SYNC): + return "PCC_CANT_COMPLETE_STATE_SYNC"; + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_INVALID_LSP_DB_VERSION_NUMBER): + return "INVALID_LSP_DB_VERSION_NUMBER"; + case TUP(PCEP_ERRT_LSP_STATE_SYNC_ERROR, + PCEP_ERRV_INVALID_SPEAKER_ENTITY_ID): + return "INVALID_SPEAKER_ENTITY_ID"; + + case TUP(PCEP_ERRT_INVALID_TE_PATH_SETUP_TYPE, + PCEP_ERRV_UNSUPPORTED_PATH_SETUP_TYPE): + return "UNSUPPORTED_PATH_SETUP_TYPE"; + case TUP(PCEP_ERRT_INVALID_TE_PATH_SETUP_TYPE, + PCEP_ERRV_MISMATCHED_PATH_SETUP_TYPE): + return "MISMATCHED_PATH_SETUP_TYPE"; + + case TUP(PCEP_ERRT_BAD_PARAMETER_VALUE, + PCEP_ERRV_SYMBOLIC_PATH_NAME_IN_USE): + return "SYMBOLIC_PATH_NAME_IN_USE"; + case TUP(PCEP_ERRT_BAD_PARAMETER_VALUE, + PCEP_ERRV_LSP_SPEAKER_ID_NOT_PCE_INITIATED): + return "LSP_SPEAKER_ID_NOT_PCE_INITIATED"; + + case TUP(PCEP_ERRT_LSP_INSTANTIATE_ERROR, + PCEP_ERRV_UNACCEPTABLE_INSTANTIATE_ERROR): + return "UNACCEPTABLE_INSTANTIATE_ERROR"; + case TUP(PCEP_ERRT_LSP_INSTANTIATE_ERROR, PCEP_ERRV_INTERNAL_ERROR): + return "INTERNAL_ERROR"; + case TUP(PCEP_ERRT_LSP_INSTANTIATE_ERROR, PCEP_ERRV_SIGNALLING_ERROR): + return "SIGNALLING_ERROR"; + + case TUP(PCEP_ERRT_START_TLS_FAILURE, + PCEP_ERRV_START_TLS_AFTER_PCEP_EXCHANGE): + return "START_TLS_AFTER_PCEP_EXCHANGE"; + case TUP(PCEP_ERRT_START_TLS_FAILURE, + PCEP_ERRV_MSG_NOT_START_TLS_OPEN_ERROR): + return "MSG_NOT_START_TLS_OPEN_ERROR"; + case TUP(PCEP_ERRT_START_TLS_FAILURE, + PCEP_ERRV_CONNECTION_WO_TLS_NOT_POSSIBLE): + return "CONNECTION_WO_TLS_NOT_POSSIBLE"; + case TUP(PCEP_ERRT_START_TLS_FAILURE, + PCEP_ERRV_CONNECTION_WO_TLS_IS_POSSIBLE): + return "CONNECTION_WO_TLS_IS_POSSIBLE"; + case TUP(PCEP_ERRT_START_TLS_FAILURE, + PCEP_ERRV_NO_START_TLS_BEFORE_START_TLS_WAIT_TIMER): + return "NO_START_TLS_BEFORE_START_TLS_WAIT_TIMER"; + + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_ASSOC_TYPE_NOT_SUPPORTED): + return "ASSOC_TYPE_NOT_SUPPORTED"; + case 
TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_TOO_MANY_LSPS_IN_ASSOC_GRP): + return "TOO_MANY_LSPS_IN_ASSOC_GRP"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, PCEP_ERRV_TOO_MANY_ASSOC_GROUPS): + return "TOO_MANY_ASSOC_GROUPS"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, PCEP_ERRV_ASSOCIATION_UNKNOWN): + return "ASSOCIATION_UNKNOWN"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_OP_CONF_ASSOC_INFO_MISMATCH): + return "OP_CONF_ASSOC_INFO_MISMATCH"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, PCEP_ERRV_ASSOC_INFO_MISMATCH): + return "ASSOC_INFO_MISMATCH"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_CANNOT_JOIN_ASSOC_GROUP): + return "CANNOT_JOIN_ASSOC_GROUP"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, PCEP_ERRV_ASSOC_ID_NOT_IN_RANGE): + return "ASSOC_ID_NOT_IN_RANGE"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_TUNNEL_EP_MISMATCH_PATH_PROT_ASSOC): + return "TUNNEL_EP_MISMATCH_PATH_PROT_ASSOC"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_ATTEMPTED_ADD_LSP_PATH_PROT_ASSOC): + return "ATTEMPTED_ADD_LSP_PATH_PROT_ASSOC"; + case TUP(PCEP_ERRT_ASSOCIATION_ERROR, + PCEP_ERRV_PROTECTION_TYPE_NOT_SUPPORTED): + return "PROTECTION_TYPE_NOT_SUPPORTED"; + + case TUP(PCEP_ERRT_WSON_RWA_ERROR, PCEP_ERRV_RWA_INSUFFICIENT_MEMORY): + return "RWA_INSUFFICIENT_MEMORY"; + case TUP(PCEP_ERRT_WSON_RWA_ERROR, PCEP_ERRV_RWA_COMP_NOT_SUPPORTED): + return "RWA_COMP_NOT_SUPPORTED"; + case TUP(PCEP_ERRT_WSON_RWA_ERROR, PCEP_ERRV_SYNTAX_ENC_ERROR): + return "SYNTAX_ENC_ERROR"; + + case TUP(PCEP_ERRT_H_PCE_ERROR, PCEP_ERRV_H_PCE_CAP_NOT_ADVERTISED): + return "H_PCE_CAP_NOT_ADVERTISED"; + case TUP(PCEP_ERRT_H_PCE_ERROR, + PCEP_ERRV_PARENT_PCE_CAP_CANT_BE_PROVIDED): + return "PARENT_PCE_CAP_CANT_BE_PROVIDED"; + + case TUP(PCEP_ERRT_PATH_COMP_FAILURE, + PCEP_ERRV_UNACCEPTABLE_REQUEST_MSG): + return "UNACCEPTABLE_REQUEST_MSG"; + case TUP(PCEP_ERRT_PATH_COMP_FAILURE, + PCEP_ERRV_GENERALIZED_BW_VAL_NOT_SUPPORTED): + return "GENERALIZED_BW_VAL_NOT_SUPPORTED"; + case TUP(PCEP_ERRT_PATH_COMP_FAILURE, + PCEP_ERRV_LABEL_SET_CONSTRAINT_COULD_NOT_BE_MET): + return "LABEL_SET_CONSTRAINT_COULD_NOT_BE_MET"; + case TUP(PCEP_ERRT_PATH_COMP_FAILURE, + PCEP_ERRV_LABEL_CONSTRAINT_COULD_NOT_BE_MET): + return "LABEL_CONSTRAINT_COULD_NOT_BE_MET"; + + default: + return "UNKNOWN"; + } +} + +const char *pcep_message_type_name(enum pcep_message_types pcep_message_type) +{ + switch (pcep_message_type) { + + case PCEP_TYPE_OPEN: + return "OPEN"; + case PCEP_TYPE_KEEPALIVE: + return "KEEPALIVE"; + case PCEP_TYPE_PCREQ: + return "PCREQ"; + case PCEP_TYPE_PCREP: + return "PCREP"; + case PCEP_TYPE_PCNOTF: + return "PCNOTF"; + case PCEP_TYPE_ERROR: + return "ERROR"; + case PCEP_TYPE_CLOSE: + return "CLOSE"; + case PCEP_TYPE_REPORT: + return "REPORT"; + case PCEP_TYPE_UPDATE: + return "UPDATE"; + case PCEP_TYPE_INITIATE: + return "INITIATE"; + case PCEP_TYPE_UNKOWN_MSG: + return "UNKOWN_MSG"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_object_class_name(enum pcep_object_classes obj_class) +{ + switch (obj_class) { + case PCEP_OBJ_CLASS_OPEN: + return "OPEN"; + case PCEP_OBJ_CLASS_RP: + return "RP"; + case PCEP_OBJ_CLASS_NOPATH: + return "NOPATH"; + case PCEP_OBJ_CLASS_ENDPOINTS: + return "ENDPOINTS"; + case PCEP_OBJ_CLASS_BANDWIDTH: + return "BANDWIDTH"; + case PCEP_OBJ_CLASS_METRIC: + return "METRIC"; + case PCEP_OBJ_CLASS_ERO: + return "ERO"; + case PCEP_OBJ_CLASS_RRO: + return "RRO"; + case PCEP_OBJ_CLASS_LSPA: + return "LSPA"; + case PCEP_OBJ_CLASS_IRO: + return "IRO"; + case PCEP_OBJ_CLASS_SVEC: + return "SVEC"; + case 
PCEP_OBJ_CLASS_NOTF: + return "NOTF"; + case PCEP_OBJ_CLASS_ERROR: + return "ERROR"; + case PCEP_OBJ_CLASS_CLOSE: + return "CLOSE"; + case PCEP_OBJ_CLASS_OF: + return "OF"; + case PCEP_OBJ_CLASS_LSP: + return "LSP"; + case PCEP_OBJ_CLASS_SRP: + return "SRP"; + case PCEP_OBJ_CLASS_VENDOR_INFO: + return "VENDOR_INFO"; + case PCEP_OBJ_CLASS_INTER_LAYER: + return "INTER_LAYER"; + case PCEP_OBJ_CLASS_SWITCH_LAYER: + return "SWITCH_LAYER"; + case PCEP_OBJ_CLASS_REQ_ADAP_CAP: + return "REQ_ADAP_CAP"; + case PCEP_OBJ_CLASS_SERVER_IND: + return "SERVER_IND"; + case PCEP_OBJ_CLASS_ASSOCIATION: + return "ASSOCIATION"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_object_type_name(enum pcep_object_classes obj_class, + enum pcep_object_types obj_type) +{ + switch (TUP(obj_class, obj_type)) { + case TUP(PCEP_OBJ_CLASS_OPEN, PCEP_OBJ_TYPE_OPEN): + return "OPEN"; + case TUP(PCEP_OBJ_CLASS_RP, PCEP_OBJ_TYPE_RP): + return "RP"; + case TUP(PCEP_OBJ_CLASS_NOPATH, PCEP_OBJ_TYPE_NOPATH): + return "NOPATH"; + case TUP(PCEP_OBJ_CLASS_ENDPOINTS, PCEP_OBJ_TYPE_ENDPOINT_IPV4): + return "ENDPOINT_IPV4"; + case TUP(PCEP_OBJ_CLASS_ENDPOINTS, PCEP_OBJ_TYPE_ENDPOINT_IPV6): + return "ENDPOINT_IPV6"; + case TUP(PCEP_OBJ_CLASS_BANDWIDTH, PCEP_OBJ_TYPE_BANDWIDTH_REQ): + return "BANDWIDTH_REQ"; + case TUP(PCEP_OBJ_CLASS_BANDWIDTH, PCEP_OBJ_TYPE_BANDWIDTH_TELSP): + return "BANDWIDTH_TELSP"; + case TUP(PCEP_OBJ_CLASS_BANDWIDTH, PCEP_OBJ_TYPE_BANDWIDTH_CISCO): + return "BANDWIDTH_CISCO"; + case TUP(PCEP_OBJ_CLASS_METRIC, PCEP_OBJ_TYPE_METRIC): + return "METRIC"; + case TUP(PCEP_OBJ_CLASS_ERO, PCEP_OBJ_TYPE_ERO): + return "ERO"; + case TUP(PCEP_OBJ_CLASS_RRO, PCEP_OBJ_TYPE_RRO): + return "RRO"; + case TUP(PCEP_OBJ_CLASS_LSPA, PCEP_OBJ_TYPE_LSPA): + return "LSPA"; + case TUP(PCEP_OBJ_CLASS_IRO, PCEP_OBJ_TYPE_IRO): + return "IRO"; + case TUP(PCEP_OBJ_CLASS_SVEC, PCEP_OBJ_TYPE_SVEC): + return "SVEC"; + case TUP(PCEP_OBJ_CLASS_NOTF, PCEP_OBJ_TYPE_NOTF): + return "NOTF"; + case TUP(PCEP_OBJ_CLASS_ERROR, PCEP_OBJ_TYPE_ERROR): + return "ERROR"; + case TUP(PCEP_OBJ_CLASS_CLOSE, PCEP_OBJ_TYPE_CLOSE): + return "CLOSE"; + case TUP(PCEP_OBJ_CLASS_INTER_LAYER, PCEP_OBJ_TYPE_INTER_LAYER): + return "INTER_LAYER"; + case TUP(PCEP_OBJ_CLASS_SWITCH_LAYER, PCEP_OBJ_TYPE_SWITCH_LAYER): + return "SWITCH_LAYER"; + case TUP(PCEP_OBJ_CLASS_REQ_ADAP_CAP, PCEP_OBJ_TYPE_REQ_ADAP_CAP): + return "REQ_ADAP_CAP"; + case TUP(PCEP_OBJ_CLASS_SERVER_IND, PCEP_OBJ_TYPE_SERVER_IND): + return "SERVER_IND"; + case TUP(PCEP_OBJ_CLASS_ASSOCIATION, PCEP_OBJ_TYPE_ASSOCIATION_IPV4): + return "ASSOCIATION_IPV4"; + case TUP(PCEP_OBJ_CLASS_ASSOCIATION, PCEP_OBJ_TYPE_ASSOCIATION_IPV6): + return "ASSOCIATION_IPV6"; + case TUP(PCEP_OBJ_CLASS_OF, PCEP_OBJ_TYPE_OF): + return "OF"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_lsp_status_name(enum pcep_lsp_operational_status status) +{ + switch (status) { + case PCEP_LSP_OPERATIONAL_DOWN: + return "DOWN"; + case PCEP_LSP_OPERATIONAL_UP: + return "UP"; + case PCEP_LSP_OPERATIONAL_ACTIVE: + return "ACTIVE"; + case PCEP_LSP_OPERATIONAL_GOING_DOWN: + return "GOING_DOWN"; + case PCEP_LSP_OPERATIONAL_GOING_UP: + return "GOING_UP"; + default: + return "UNKNOWN"; + } +} + + +const char *pcep_tlv_type_name(enum pcep_object_tlv_types tlv_type) +{ + switch (tlv_type) { + case PCEP_OBJ_TLV_TYPE_NO_PATH_VECTOR: + return "NO_PATH_VECTOR"; + case PCEP_OBJ_TLV_TYPE_STATEFUL_PCE_CAPABILITY: + return "STATEFUL_PCE_CAPABILITY"; + case PCEP_OBJ_TLV_TYPE_SYMBOLIC_PATH_NAME: + return "SYMBOLIC_PATH_NAME"; + case 
PCEP_OBJ_TLV_TYPE_IPV4_LSP_IDENTIFIERS: + return "IPV4_LSP_IDENTIFIERS"; + case PCEP_OBJ_TLV_TYPE_IPV6_LSP_IDENTIFIERS: + return "IPV6_LSP_IDENTIFIERS"; + case PCEP_OBJ_TLV_TYPE_LSP_ERROR_CODE: + return "LSP_ERROR_CODE"; + case PCEP_OBJ_TLV_TYPE_RSVP_ERROR_SPEC: + return "RSVP_ERROR_SPEC"; + case PCEP_OBJ_TLV_TYPE_LSP_DB_VERSION: + return "LSP_DB_VERSION"; + case PCEP_OBJ_TLV_TYPE_SPEAKER_ENTITY_ID: + return "SPEAKER_ENTITY_ID"; + case PCEP_OBJ_TLV_TYPE_SR_PCE_CAPABILITY: + return "SR_PCE_CAPABILITY"; + case PCEP_OBJ_TLV_TYPE_PATH_SETUP_TYPE: + return "PATH_SETUP_TYPE"; + case PCEP_OBJ_TLV_TYPE_PATH_SETUP_TYPE_CAPABILITY: + return "PATH_SETUP_TYPE_CAPABILITY"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_ro_type_name(enum pcep_ro_subobj_types ro_type) +{ + switch (ro_type) { + + case RO_SUBOBJ_TYPE_IPV4: + return "IPV4"; + case RO_SUBOBJ_TYPE_IPV6: + return "IPV6"; + case RO_SUBOBJ_TYPE_LABEL: + return "LABEL"; + case RO_SUBOBJ_TYPE_UNNUM: + return "UNNUM"; + case RO_SUBOBJ_TYPE_ASN: + return "ASN"; + case RO_SUBOBJ_TYPE_SR: + return "SR"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_nai_type_name(enum pcep_sr_subobj_nai nai_type) +{ + switch (nai_type) { + case PCEP_SR_SUBOBJ_NAI_ABSENT: + return "ABSENT"; + case PCEP_SR_SUBOBJ_NAI_IPV4_NODE: + return "IPV4_NODE"; + case PCEP_SR_SUBOBJ_NAI_IPV6_NODE: + return "IPV6_NODE"; + case PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY: + return "IPV4_ADJACENCY"; + case PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY: + return "IPV6_ADJACENCY"; + case PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY: + return "UNNUMBERED_IPV4_ADJACENCY"; + case PCEP_SR_SUBOBJ_NAI_LINK_LOCAL_IPV6_ADJACENCY: + return "LINK_LOCAL_IPV6_ADJACENCY"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_metric_type_name(enum pcep_metric_types type) +{ + switch (type) { + case PCEP_METRIC_IGP: + return "IGP"; + case PCEP_METRIC_TE: + return "TE"; + case PCEP_METRIC_HOP_COUNT: + return "HOP_COUNT"; + case PCEP_METRIC_AGGREGATE_BW: + return "AGGREGATE_BW"; + case PCEP_METRIC_MOST_LOADED_LINK: + return "MOST_LOADED_LINK"; + case PCEP_METRIC_CUMULATIVE_IGP: + return "CUMULATIVE_IGP"; + case PCEP_METRIC_CUMULATIVE_TE: + return "CUMULATIVE_TE"; + case PCEP_METRIC_P2MP_IGP: + return "P2MP_IGP"; + case PCEP_METRIC_P2MP_TE: + return "P2MP_TE"; + case PCEP_METRIC_P2MP_HOP_COUNT: + return "P2MP_HOP_COUNT"; + case PCEP_METRIC_SEGMENT_ID_DEPTH: + return "SEGMENT_ID_DEPTH"; + case PCEP_METRIC_PATH_DELAY: + return "PATH_DELAY"; + case PCEP_METRIC_PATH_DELAY_VARIATION: + return "PATH_DELAY_VARIATION"; + case PCEP_METRIC_PATH_LOSS: + return "PATH_LOSS"; + case PCEP_METRIC_P2MP_PATH_DELAY: + return "P2MP_PATH_DELAY"; + case PCEP_METRIC_P2MP_PATH_DELAY_VARIATION: + return "P2MP_PATH_DELAY_VARIATION"; + case PCEP_METRIC_P2MP_PATH_LOSS: + return "P2MP_PATH_LOSS"; + case PCEP_METRIC_NUM_PATH_ADAPTATIONS: + return "NUM_PATH_ADAPTATIONS"; + case PCEP_METRIC_NUM_PATH_LAYERS: + return "NUM_PATH_LAYERS"; + case PCEP_METRIC_DOMAIN_COUNT: + return "DOMAIN_COUNT"; + case PCEP_METRIC_BORDER_NODE_COUNT: + return "BORDER_NODE_COUNT"; + default: + return "UNKNOWN"; + } +} + +const char *pcep_nopath_tlv_err_code_name(enum pcep_nopath_tlv_err_codes type) +{ + switch (type) { + case PCEP_NOPATH_TLV_ERR_NO_TLV: + return "NO_TLV"; + case PCEP_NOPATH_TLV_ERR_PCE_UNAVAILABLE: + return "PCE_UNAVAILABLE"; + case PCEP_NOPATH_TLV_ERR_UNKNOWN_DST: + return "UNKNOWN_DST"; + case PCEP_NOPATH_TLV_ERR_UNKNOWN_SRC: + return "UNKNOWN_SRC"; + default: + return "UNKNOWN"; + } +} + +const char *format_objfun_set(uint32_t flags) 
+{ + int i, c; + PATHD_FORMAT_INIT(); + for (i = 1, c = 0; i <= MAX_OBJFUN_TYPE; i++) { + if (CHECK_FLAG(flags, i)) { + if (c > 0) + PATHD_FORMAT(", %s", objfun_type_name(i)); + else + PATHD_FORMAT("%s", objfun_type_name(i)); + c++; + } + } + return PATHD_FORMAT_FINI(); +} + + +const char *format_pcc_opts(struct pcc_opts *opts) +{ + PATHD_FORMAT_INIT(); + _format_pcc_opts(0, opts); + return PATHD_FORMAT_FINI(); +} + +const char *format_pcc_state(struct pcc_state *state) +{ + PATHD_FORMAT_INIT(); + _format_pcc_state(0, state); + return PATHD_FORMAT_FINI(); +} + +const char *format_ctrl_state(struct ctrl_state *state) +{ + PATHD_FORMAT_INIT(); + _format_ctrl_state(0, state); + return PATHD_FORMAT_FINI(); +} + +const char *format_path(struct path *path) +{ + PATHD_FORMAT_INIT(); + _format_path(0, path); + return PATHD_FORMAT_FINI(); +} + +const char *format_pcep_event(pcep_event *event) +{ + PATHD_FORMAT_INIT(); + _format_pcep_event(0, event); + return PATHD_FORMAT_FINI(); +} + +const char *format_pcep_message(struct pcep_message *msg) +{ + PATHD_FORMAT_INIT(); + _format_pcep_message(0, msg); + return PATHD_FORMAT_FINI(); +} + +const char *format_yang_dnode(struct lyd_node *dnode) +{ + char *buff; + int len; + + lyd_print_mem(&buff, dnode, LYD_JSON, LYP_FORMAT); + len = strlen(buff); + memcpy(_debug_buff, buff, len); + free(buff); + return _debug_buff; +} + +void _format_pcc_opts(int ps, struct pcc_opts *opts) +{ + if (opts == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int ps2 = ps + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + if (IS_IPADDR_V4(&opts->addr)) { + PATHD_FORMAT("%*saddr_v4: %pI4\n", ps2, "", + &opts->addr.ipaddr_v4); + } else { + PATHD_FORMAT("%*saddr_v4: undefined", ps2, ""); + } + if (IS_IPADDR_V6(&opts->addr)) { + PATHD_FORMAT("%*saddr_v6: %pI6\n", ps2, "", + &opts->addr.ipaddr_v6); + } else { + PATHD_FORMAT("%*saddr_v6: undefined", ps2, ""); + } + PATHD_FORMAT("%*sport: %i\n", ps2, "", opts->port); + PATHD_FORMAT("%*smsd: %i\n", ps2, "", opts->msd); + } +} + +void _format_pce_opts(int ps, struct pce_opts *opts) +{ + if (opts == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int ps2 = ps + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + if (IS_IPADDR_V6(&opts->addr)) { + PATHD_FORMAT("%*saddr: %pI6\n", ps2, "", + &opts->addr.ipaddr_v6); + } else { + PATHD_FORMAT("%*saddr: %pI4\n", ps2, "", + &opts->addr.ipaddr_v4); + } + PATHD_FORMAT("%*sport: %i\n", ps2, "", opts->port); + } +} + +void _format_pcc_caps(int ps, struct pcep_caps *caps) +{ + int ps2 = ps + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + PATHD_FORMAT("%*sis_stateful: %d\n", ps2, "", caps->is_stateful); +} + +void _format_pcc_state(int ps, struct pcc_state *state) +{ + if (state == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int ps2 = ps + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + PATHD_FORMAT("%*sstatus: %s\n", ps2, "", + pcc_status_name(state->status)); + PATHD_FORMAT("%*spcc_opts: ", ps2, ""); + _format_pcc_opts(ps2, state->pcc_opts); + PATHD_FORMAT("%*spce_opts: ", ps2, ""); + _format_pce_opts(ps2, state->pce_opts); + if (state->sess == NULL) { + PATHD_FORMAT("%*ssess: NULL\n", ps2, ""); + } else { + PATHD_FORMAT("%*ssess: <PCC SESSION %p>\n", ps2, "", + state->sess); + } + PATHD_FORMAT("%*scaps: ", ps2, ""); + _format_pcc_caps(ps2, &state->caps); + } +} + +void _format_ctrl_state(int ps, struct ctrl_state *state) +{ + if (state == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int i; + int ps2 = ps + DEBUG_IDENT_SIZE; + int ps3 = ps2 + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + if (state->main == NULL) { + 
PATHD_FORMAT("%*smain: NULL\n", ps2, ""); + } else { + PATHD_FORMAT("%*smain: <THREAD MASTER %p>\n", ps2, "", + state->main); + } + if (state->self == NULL) { + PATHD_FORMAT("%*sself: NULL\n", ps2, ""); + } else { + PATHD_FORMAT("%*sself: <THREAD MASTER %p>\n", ps2, "", + state->self); + } + PATHD_FORMAT("%*spcc_count: %d\n", ps2, "", state->pcc_count); + PATHD_FORMAT("%*spcc:\n", ps2, ""); + for (i = 0; i < MAX_PCC; i++) { + if (state->pcc[i]) { + PATHD_FORMAT("%*s- ", ps3 - 2, ""); + _format_pcc_state(ps3, state->pcc[i]); + } + } + } +} + +void _format_path(int ps, struct path *path) +{ + if (path == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int ps2 = ps + DEBUG_IDENT_SIZE; + int ps3 = ps2 + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + PATHD_FORMAT("%*snbkey: \n", ps2, ""); + PATHD_FORMAT("%*scolor: %u\n", ps3, "", path->nbkey.color); + switch (path->nbkey.endpoint.ipa_type) { + case IPADDR_V4: + PATHD_FORMAT("%*sendpoint: %pI4\n", ps3, "", + &path->nbkey.endpoint.ipaddr_v4); + break; + case IPADDR_V6: + PATHD_FORMAT("%*sendpoint: %pI6\n", ps3, "", + &path->nbkey.endpoint.ipaddr_v6); + break; + default: + PATHD_FORMAT("%*sendpoint: NONE\n", ps3, ""); + break; + } + PATHD_FORMAT("%*spreference: %u\n", ps3, "", + path->nbkey.preference); + + if (path->sender.ipa_type == IPADDR_V4) { + PATHD_FORMAT("%*ssender: %pI4\n", ps2, "", + &path->sender.ipaddr_v4); + } else if (path->sender.ipa_type == IPADDR_V6) { + PATHD_FORMAT("%*ssender: %pI6\n", ps2, "", + &path->sender.ipaddr_v6); + } else { + PATHD_FORMAT("%*ssender: UNDEFINED\n", ps2, ""); + } + if (path->pcc_addr.ipa_type == IPADDR_V4) { + PATHD_FORMAT("%*spcc_addr: %pI4\n", ps2, "", + &path->pcc_addr.ipaddr_v4); + } else if (path->pcc_addr.ipa_type == IPADDR_V6) { + PATHD_FORMAT("%*spcc_addr: %pI6\n", ps2, "", + &path->pcc_addr.ipaddr_v6); + } else { + PATHD_FORMAT("%*spcc_addr: UNDEFINED\n", ps2, ""); + } + PATHD_FORMAT("%*spcc_id: %u\n", ps2, "", path->pcc_id); + PATHD_FORMAT("%*screate_origin: %s (%u)\n", ps2, "", + srte_protocol_origin_name(path->create_origin), + path->create_origin); + PATHD_FORMAT("%*supdate_origin: %s (%u)\n", ps2, "", + srte_protocol_origin_name(path->update_origin), + path->update_origin); + if (path->originator != NULL) { + PATHD_FORMAT("%*soriginator: %s\n", ps2, "", + path->originator); + } else { + PATHD_FORMAT("%*soriginator: UNDEFINED\n", ps2, ""); + } + PATHD_FORMAT("%*stype: %s (%u)\n", ps2, "", + srte_candidate_type_name(path->type), path->type); + PATHD_FORMAT("%*splsp_id: %u\n", ps2, "", path->plsp_id); + if (path->name == NULL) { + PATHD_FORMAT("%*sname: NULL\n", ps2, ""); + } else { + PATHD_FORMAT("%*sname: %s\n", ps2, "", path->name); + } + PATHD_FORMAT("%*ssrp_id: %u\n", ps2, "", path->srp_id); + PATHD_FORMAT("%*sreq_id: %u\n", ps2, "", path->req_id); + PATHD_FORMAT("%*sstatus: %s (%u)\n", ps2, "", + pcep_lsp_status_name(path->status), path->status); + PATHD_FORMAT("%*sdo_remove: %u\n", ps2, "", path->do_remove); + PATHD_FORMAT("%*sgo_active: %u\n", ps2, "", path->go_active); + PATHD_FORMAT("%*swas_created: %u\n", ps2, "", + path->was_created); + PATHD_FORMAT("%*swas_removed: %u\n", ps2, "", + path->was_removed); + PATHD_FORMAT("%*sis_synching: %u\n", ps2, "", + path->is_synching); + PATHD_FORMAT("%*sis_delegated: %u\n", ps2, "", + path->is_delegated); + PATHD_FORMAT("%*shas_bandwidth: %u\n", ps2, "", + path->has_bandwidth); + if (path->has_bandwidth) { + PATHD_FORMAT("%*senforce_bandwidth: %u\n", ps2, "", + path->enforce_bandwidth); + PATHD_FORMAT("%*sbandwidth: %f\n", ps2, "", + 
path->bandwidth); + } + PATHD_FORMAT("%*shas_pcc_objfun: %u\n", ps2, "", + path->has_pcc_objfun); + if (path->has_pcc_objfun) { + PATHD_FORMAT("%*senforce_pcc_objfun: %d\n", ps2, "", + path->enforce_pcc_objfun); + PATHD_FORMAT("%*spcc_objfun: %s (%u)\n", ps2, "", + objfun_type_name(path->pcc_objfun), + path->pcc_objfun); + } + PATHD_FORMAT("%*shas_pce_objfun: %u\n", ps2, "", + path->has_pce_objfun); + if (path->has_pce_objfun) + PATHD_FORMAT("%*spce_objfun: %s (%u)\n", ps2, "", + objfun_type_name(path->pce_objfun), + path->pce_objfun); + PATHD_FORMAT("%*shas_affinity_filters: %u\n", ps2, "", + path->has_affinity_filters); + if (path->has_affinity_filters) { + PATHD_FORMAT("%*sexclude_any: 0x%08x\n", ps2, "", + path->affinity_filters + [AFFINITY_FILTER_EXCLUDE_ANY - 1]); + PATHD_FORMAT("%*sinclude_any: 0x%08x\n", ps2, "", + path->affinity_filters + [AFFINITY_FILTER_INCLUDE_ANY - 1]); + PATHD_FORMAT("%*sinclude_all: 0x%08x\n", ps2, "", + path->affinity_filters + [AFFINITY_FILTER_INCLUDE_ALL - 1]); + } + + if (path->first_hop == NULL) { + PATHD_FORMAT("%*shops: []\n", ps2, ""); + } else { + PATHD_FORMAT("%*shops: \n", ps2, ""); + for (struct path_hop *hop = path->first_hop; + hop != NULL; hop = hop->next) { + PATHD_FORMAT("%*s- ", ps3 - 2, ""); + _format_path_hop(ps3, hop); + } + } + if (path->first_metric == NULL) { + PATHD_FORMAT("%*smetrics: []\n", ps2, ""); + } else { + PATHD_FORMAT("%*smetrics: \n", ps2, ""); + for (struct path_metric *metric = path->first_metric; + NULL != metric; metric = metric->next) { + PATHD_FORMAT("%*s- ", ps3 - 2, ""); + _format_path_metric(ps3, metric); + } + } + } +} + +void _format_path_metric(int ps, struct path_metric *metric) +{ + PATHD_FORMAT("type: %s (%u)\n", pcep_metric_type_name(metric->type), + metric->type); + PATHD_FORMAT("%*senforce: %u\n", ps, "", metric->enforce); + PATHD_FORMAT("%*sis_bound: %u\n", ps, "", metric->is_bound); + PATHD_FORMAT("%*sis_computed: %u\n", ps, "", metric->is_computed); + PATHD_FORMAT("%*svalue: %f\n", ps, "", metric->value); +} + +void _format_path_hop(int ps, struct path_hop *hop) +{ + PATHD_FORMAT("is_loose: %u\n", hop->is_loose); + PATHD_FORMAT("%*shas_sid: %u\n", ps, "", hop->has_sid); + + if (hop->has_sid) { + PATHD_FORMAT("%*sis_mpls: %u\n", ps, "", hop->is_mpls); + if (hop->is_mpls) { + PATHD_FORMAT("%*shas_attribs: %u\n", ps, "", + hop->has_attribs); + PATHD_FORMAT("%*slabel: %u\n", ps, "", + hop->sid.mpls.label); + if (hop->has_attribs) { + PATHD_FORMAT("%*straffic_class: %u\n", ps, "", + hop->sid.mpls.traffic_class); + PATHD_FORMAT("%*sis_bottom: %u\n", ps, "", + hop->sid.mpls.is_bottom); + PATHD_FORMAT("%*sttl: %u\n", ps, "", + hop->sid.mpls.ttl); + } + } else { + PATHD_FORMAT("%*sSID: %u\n", ps, "", hop->sid.value); + } + } + + PATHD_FORMAT("%*shas_nai: %u\n", ps, "", hop->has_nai); + if (hop->has_nai) { + PATHD_FORMAT("%*snai_type: %s (%u)\n", ps, "", + pcep_nai_type_name(hop->nai.type), hop->nai.type); + switch (hop->nai.type) { + case PCEP_SR_SUBOBJ_NAI_IPV4_NODE: + PATHD_FORMAT("%*sNAI: %pI4\n", ps, "", + &hop->nai.local_addr.ipaddr_v4); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_NODE: + PATHD_FORMAT("%*sNAI: %pI6\n", ps, "", + &hop->nai.local_addr.ipaddr_v6); + break; + case PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY: + PATHD_FORMAT("%*sNAI: %pI4/%pI4\n", ps, "", + &hop->nai.local_addr.ipaddr_v4, + &hop->nai.remote_addr.ipaddr_v4); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY: + PATHD_FORMAT("%*sNAI: %pI6/%pI6\n", ps, "", + &hop->nai.local_addr.ipaddr_v6, + &hop->nai.remote_addr.ipaddr_v6); + break; + case 
PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY: + PATHD_FORMAT("%*sNAI: %pI4(%u)/%pI4(%u)\n", ps, "", + &hop->nai.local_addr.ipaddr_v6, + hop->nai.local_iface, + &hop->nai.remote_addr.ipaddr_v6, + hop->nai.remote_iface); + break; + default: + PATHD_FORMAT("%*sNAI: UNSUPPORTED\n", ps, ""); + break; + } + } +} + +void _format_pcep_event(int ps, pcep_event *event) +{ + if (event == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int ps2 = ps + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + PATHD_FORMAT("%*sevent_type: %s\n", ps2, "", + pcep_event_type_name(event->event_type)); + PATHD_FORMAT("%*sevent_time: %s", ps2, "", + ctime(&event->event_time)); + if (event->session == NULL) { + PATHD_FORMAT("%*ssession: NULL\n", ps2, ""); + } else { + PATHD_FORMAT("%*ssession: <PCC SESSION %p>\n", ps2, "", + event->session); + } + PATHD_FORMAT("%*smessage: ", ps2, ""); + _format_pcep_message(ps2, event->message); + } +} + +void _format_pcep_message(int ps, struct pcep_message *msg) +{ + if (msg == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + int ps2 = ps + DEBUG_IDENT_SIZE; + PATHD_FORMAT("\n"); + PATHD_FORMAT("%*spcep_version: %u\n", ps2, "", + msg->msg_header->pcep_version); + PATHD_FORMAT("%*stype: %s (%u)\n", ps2, "", + pcep_message_type_name(msg->msg_header->type), + msg->msg_header->type); + PATHD_FORMAT("%*sobjects: ", ps2, ""); + _format_pcep_objects(ps2, msg->obj_list); + } +} + +void _format_pcep_objects(int ps, double_linked_list *objs) +{ + if (objs == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + double_linked_list_node *node; + int ps2 = ps + DEBUG_IDENT_SIZE; + int i; + + if (objs->num_entries == 0) { + PATHD_FORMAT("[]\n"); + return; + } + + PATHD_FORMAT("\n"); + for (node = objs->head, i = 0; node != NULL; + node = node->next_node, i++) { + struct pcep_object_header *obj = + (struct pcep_object_header *)node->data; + PATHD_FORMAT("%*s- ", ps2 - 2, ""); + _format_pcep_object(ps2, obj); + } + } +} + +void _format_pcep_object(int ps, struct pcep_object_header *obj) +{ + if (obj == NULL) { + PATHD_FORMAT("NULL\n"); + } else { + PATHD_FORMAT("object_class: %s (%u)\n", + pcep_object_class_name(obj->object_class), + obj->object_class); + PATHD_FORMAT("%*sobject_type: %s (%u)\n", ps, "", + pcep_object_type_name(obj->object_class, + obj->object_type), + obj->object_type); + PATHD_FORMAT("%*sflag_p: %u\n", ps, "", obj->flag_p); + PATHD_FORMAT("%*sflag_i: %u\n", ps, "", obj->flag_i); + _format_pcep_object_details(ps, obj); + _format_pcep_object_tlvs(ps, obj); + } +} + +void _format_pcep_object_details(int ps, struct pcep_object_header *obj) +{ + switch (TUP(obj->object_class, obj->object_type)) { + case TUP(PCEP_OBJ_CLASS_ERROR, PCEP_OBJ_TYPE_ERROR): + _format_pcep_object_error(ps, (struct pcep_object_error *)obj); + break; + case TUP(PCEP_OBJ_CLASS_OPEN, PCEP_OBJ_TYPE_OPEN): + _format_pcep_object_open(ps, (struct pcep_object_open *)obj); + break; + case TUP(PCEP_OBJ_CLASS_RP, PCEP_OBJ_TYPE_RP): + _format_pcep_object_rp(ps, (struct pcep_object_rp *)obj); + break; + case TUP(PCEP_OBJ_CLASS_SRP, PCEP_OBJ_TYPE_SRP): + _format_pcep_object_srp(ps, (struct pcep_object_srp *)obj); + break; + case TUP(PCEP_OBJ_CLASS_LSP, PCEP_OBJ_TYPE_LSP): + _format_pcep_object_lsp(ps, (struct pcep_object_lsp *)obj); + break; + case TUP(PCEP_OBJ_CLASS_LSPA, PCEP_OBJ_TYPE_LSPA): + _format_pcep_object_lspa(ps, (struct pcep_object_lspa *)obj); + break; + case TUP(PCEP_OBJ_CLASS_ENDPOINTS, PCEP_OBJ_TYPE_ENDPOINT_IPV4): + _format_pcep_object_ipv4_endpoint( + ps, (struct pcep_object_endpoints_ipv4 *)obj); + break; + case 
TUP(PCEP_OBJ_CLASS_ERO, PCEP_OBJ_TYPE_ERO): + _format_pcep_object_ro(ps, (struct pcep_object_ro *)obj); + break; + case TUP(PCEP_OBJ_CLASS_METRIC, PCEP_OBJ_TYPE_METRIC): + _format_pcep_object_metric(ps, + (struct pcep_object_metric *)obj); + break; + case TUP(PCEP_OBJ_CLASS_BANDWIDTH, PCEP_OBJ_TYPE_BANDWIDTH_REQ): + case TUP(PCEP_OBJ_CLASS_BANDWIDTH, PCEP_OBJ_TYPE_BANDWIDTH_CISCO): + _format_pcep_object_bandwidth( + ps, (struct pcep_object_bandwidth *)obj); + break; + case TUP(PCEP_OBJ_CLASS_NOPATH, PCEP_OBJ_TYPE_NOPATH): + _format_pcep_object_nopath(ps, + (struct pcep_object_nopath *)obj); + break; + case TUP(PCEP_OBJ_CLASS_OF, PCEP_OBJ_TYPE_OF): + _format_pcep_object_objfun( + ps, (struct pcep_object_objective_function *)obj); + break; + default: + PATHD_FORMAT("%*s...\n", ps, ""); + break; + } +} + +void _format_pcep_object_error(int ps, struct pcep_object_error *obj) +{ + PATHD_FORMAT("%*serror_type: %s (%u)\n", ps, "", + pcep_error_type_name(obj->error_type), obj->error_type); + PATHD_FORMAT("%*serror_value: %s (%u)\n", ps, "", + pcep_error_value_name(obj->error_type, obj->error_value), + obj->error_value); +} + + +void _format_pcep_object_open(int ps, struct pcep_object_open *obj) +{ + PATHD_FORMAT("%*sopen_version: %u\n", ps, "", obj->open_version); + PATHD_FORMAT("%*sopen_keepalive: %u\n", ps, "", obj->open_keepalive); + PATHD_FORMAT("%*sopen_deadtimer: %u\n", ps, "", obj->open_deadtimer); + PATHD_FORMAT("%*sopen_sid: %u\n", ps, "", obj->open_sid); +} + +void _format_pcep_object_rp(int ps, struct pcep_object_rp *obj) +{ + PATHD_FORMAT("%*spriority: %u\n", ps, "", obj->priority); + PATHD_FORMAT("%*sflag_reoptimization: %u\n", ps, "", + obj->flag_reoptimization); + PATHD_FORMAT("%*sflag_bidirectional: %u\n", ps, "", + obj->flag_bidirectional); + PATHD_FORMAT("%*sflag_strict: %u\n", ps, "", obj->flag_strict); + PATHD_FORMAT("%*sflag_of: %u\n", ps, "", obj->flag_of); + PATHD_FORMAT("%*srequest_id: %u\n", ps, "", obj->request_id); +} + + +void _format_pcep_object_srp(int ps, struct pcep_object_srp *obj) +{ + PATHD_FORMAT("%*sflag_lsp_remove: %u\n", ps, "", obj->flag_lsp_remove); + PATHD_FORMAT("%*ssrp_id_number: %u\n", ps, "", obj->srp_id_number); +} + +void _format_pcep_object_lsp(int ps, struct pcep_object_lsp *obj) +{ + PATHD_FORMAT("%*splsp_id: %u\n", ps, "", obj->plsp_id); + PATHD_FORMAT("%*sstatus: %s\n", ps, "", + pcep_lsp_status_name(obj->operational_status)); + PATHD_FORMAT("%*sflag_d: %u\n", ps, "", obj->flag_d); + PATHD_FORMAT("%*sflag_s: %u\n", ps, "", obj->flag_s); + PATHD_FORMAT("%*sflag_r: %u\n", ps, "", obj->flag_r); + PATHD_FORMAT("%*sflag_a: %u\n", ps, "", obj->flag_a); + PATHD_FORMAT("%*sflag_c: %u\n", ps, "", obj->flag_c); +} + +void _format_pcep_object_lspa(int ps, struct pcep_object_lspa *obj) +{ + PATHD_FORMAT("%*slspa_exclude_any: 0x%08x\n", ps, "", + obj->lspa_exclude_any); + PATHD_FORMAT("%*slspa_include_any: 0x%08x\n", ps, "", + obj->lspa_include_any); + PATHD_FORMAT("%*slspa_include_all: 0x%08x\n", ps, "", + obj->lspa_include_all); + PATHD_FORMAT("%*ssetup_priority: %u\n", ps, "", obj->setup_priority); + PATHD_FORMAT("%*sholding_priority: %u\n", ps, "", + obj->holding_priority); + PATHD_FORMAT("%*sflag_local_protection: %u\n", ps, "", + obj->flag_local_protection); +} + +void _format_pcep_object_ipv4_endpoint(int ps, + struct pcep_object_endpoints_ipv4 *obj) +{ + PATHD_FORMAT("%*ssrc_ipv4: %pI4\n", ps, "", &obj->src_ipv4); + PATHD_FORMAT("%*sdst_ipv4: %pI4\n", ps, "", &obj->dst_ipv4); +} + +void _format_pcep_object_metric(int ps, struct pcep_object_metric 
*obj) +{ + PATHD_FORMAT("%*stype: %s (%u)\n", ps, "", + pcep_metric_type_name(obj->type), obj->type); + PATHD_FORMAT("%*sflag_b: %u\n", ps, "", obj->flag_b); + PATHD_FORMAT("%*sflag_c: %u\n", ps, "", obj->flag_c); + PATHD_FORMAT("%*svalue: %f\n", ps, "", obj->value); +} + +void _format_pcep_object_bandwidth(int ps, struct pcep_object_bandwidth *obj) +{ + PATHD_FORMAT("%*sbandwidth: %f\n", ps, "", obj->bandwidth); +} + +void _format_pcep_object_nopath(int ps, struct pcep_object_nopath *obj) +{ + PATHD_FORMAT("%*sni: %u\n", ps, "", obj->ni); + PATHD_FORMAT("%*sflag_c: %u\n", ps, "", obj->flag_c); + PATHD_FORMAT("%*serr_code: %s (%u)\n", ps, "", + pcep_nopath_tlv_err_code_name(obj->err_code), + obj->err_code); +} + +void _format_pcep_object_objfun(int ps, + struct pcep_object_objective_function *obj) +{ + PATHD_FORMAT("%*sof_code: %s (%u)\n", ps, "", + objfun_type_name(obj->of_code), obj->of_code); +} + +void _format_pcep_object_ro(int ps, struct pcep_object_ro *obj) +{ + double_linked_list *obj_list = obj->sub_objects; + double_linked_list_node *node; + struct pcep_object_ro_subobj *sub_obj; + + int ps2 = ps + DEBUG_IDENT_SIZE; + int i; + + if ((obj_list == NULL) || (obj_list->num_entries == 0)) { + PATHD_FORMAT("%*ssub_objects: []\n", ps, ""); + return; + } + + PATHD_FORMAT("%*ssub_objects:\n", ps, ""); + + for (node = obj_list->head, i = 0; node != NULL; + node = node->next_node, i++) { + sub_obj = (struct pcep_object_ro_subobj *)node->data; + PATHD_FORMAT("%*s- flag_subobj_loose_hop: %u\n", ps2 - 2, "", + sub_obj->flag_subobj_loose_hop); + PATHD_FORMAT("%*sro_subobj_type: %s (%u)\n", ps2, "", + pcep_ro_type_name(sub_obj->ro_subobj_type), + sub_obj->ro_subobj_type); + _format_pcep_object_ro_details(ps2, sub_obj); + } +} + +void _format_pcep_object_ro_details(int ps, struct pcep_object_ro_subobj *ro) +{ + switch (ro->ro_subobj_type) { + case RO_SUBOBJ_TYPE_IPV4: + _format_pcep_object_ro_ipv4(ps, + (struct pcep_ro_subobj_ipv4 *)ro); + break; + case RO_SUBOBJ_TYPE_SR: + _format_pcep_object_ro_sr(ps, (struct pcep_ro_subobj_sr *)ro); + break; + default: + PATHD_FORMAT("%*s...\n", ps, ""); + break; + } +} + +void _format_pcep_object_ro_ipv4(int ps, struct pcep_ro_subobj_ipv4 *obj) +{ + PATHD_FORMAT("%*sip_addr: %pI4\n", ps, "", &obj->ip_addr); + PATHD_FORMAT("%*sprefix_length: %u\n", ps, "", obj->prefix_length); + PATHD_FORMAT("%*sflag_local_protection: %u\n", ps, "", + obj->flag_local_protection); +} + +void _format_pcep_object_ro_sr(int ps, struct pcep_ro_subobj_sr *obj) +{ + PATHD_FORMAT("%*snai_type = %s (%u)\n", ps, "", + pcep_nai_type_name(obj->nai_type), obj->nai_type); + PATHD_FORMAT("%*sflag_f: %u\n", ps, "", obj->flag_f); + PATHD_FORMAT("%*sflag_s: %u\n", ps, "", obj->flag_s); + PATHD_FORMAT("%*sflag_c: %u\n", ps, "", obj->flag_c); + PATHD_FORMAT("%*sflag_m: %u\n", ps, "", obj->flag_m); + + if (!obj->flag_s) { + PATHD_FORMAT("%*sSID: %u\n", ps, "", obj->sid); + if (obj->flag_m) { + PATHD_FORMAT("%*slabel: %u\n", ps, "", + GET_SR_ERO_SID_LABEL(obj->sid)); + if (obj->flag_c) { + PATHD_FORMAT("%*sTC: %u\n", ps, "", + GET_SR_ERO_SID_TC(obj->sid)); + PATHD_FORMAT("%*sS: %u\n", ps, "", + GET_SR_ERO_SID_S(obj->sid)); + PATHD_FORMAT("%*sTTL: %u\n", ps, "", + GET_SR_ERO_SID_TTL(obj->sid)); + } + } + } + + if (!obj->flag_f) { + struct in_addr *laddr4, *raddr4; + struct in6_addr *laddr6, *raddr6; + uint32_t *liface, *riface; + assert(obj->nai_list != NULL); + double_linked_list_node *n = obj->nai_list->head; + assert(n != NULL); + assert(n->data != NULL); + switch (obj->nai_type) { + case 
PCEP_SR_SUBOBJ_NAI_IPV4_NODE: + laddr4 = (struct in_addr *)n->data; + PATHD_FORMAT("%*sNAI: %pI4\n", ps, "", laddr4); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_NODE: + laddr6 = (struct in6_addr *)n->data; + PATHD_FORMAT("%*sNAI: %pI6\n", ps, "", laddr6); + break; + case PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY: + assert(n->next_node != NULL); + assert(n->next_node->data != NULL); + laddr4 = (struct in_addr *)n->data; + raddr4 = (struct in_addr *)n->next_node->data; + PATHD_FORMAT("%*sNAI: %pI4/%pI4\n", ps, "", laddr4, + raddr4); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY: + assert(n->next_node != NULL); + assert(n->next_node->data != NULL); + laddr6 = (struct in6_addr *)n->data; + raddr6 = (struct in6_addr *)n->next_node->data; + PATHD_FORMAT("%*sNAI: %pI6/%pI6\n", ps, "", laddr6, + raddr6); + break; + case PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY: + laddr4 = (struct in_addr *)n->data; + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + liface = (uint32_t *)n->data; + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + raddr4 = (struct in_addr *)n->data; + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + riface = (uint32_t *)n->data; + PATHD_FORMAT("%*sNAI: %pI4(%u)/%pI4(%u)\n", ps, "", + laddr4, *liface, raddr4, *riface); + break; + default: + PATHD_FORMAT("%*sNAI: UNSUPPORTED\n", ps, ""); + break; + } + } +} + +void _format_pcep_object_tlvs(int ps, struct pcep_object_header *obj) +{ + double_linked_list *tlv_list = obj->tlv_list; + struct pcep_object_tlv_header *tlv; + double_linked_list_node *node; + int ps2 = ps + DEBUG_IDENT_SIZE; + int i = 0; + + if (tlv_list == NULL) + return; + if (tlv_list->num_entries == 0) { + PATHD_FORMAT("%*stlvs: []\n", ps, ""); + return; + } + + PATHD_FORMAT("%*stlvs:\n", ps, ""); + + for (node = tlv_list->head, i = 0; node != NULL; + node = node->next_node, i++) { + tlv = (struct pcep_object_tlv_header *)node->data; + PATHD_FORMAT("%*s- ", ps2 - 2, ""); + _format_pcep_object_tlv(ps2, tlv); + } +} + +void _format_pcep_object_tlv(int ps, struct pcep_object_tlv_header *tlv_header) +{ + PATHD_FORMAT("type: %s (%u)\n", pcep_tlv_type_name(tlv_header->type), + tlv_header->type); + _format_pcep_object_tlv_details(ps, tlv_header); +} + +void _format_pcep_object_tlv_details(int ps, + struct pcep_object_tlv_header *tlv_header) +{ + switch (tlv_header->type) { + case PCEP_OBJ_TLV_TYPE_SYMBOLIC_PATH_NAME: + _format_pcep_object_tlv_symbolic_path_name( + ps, (struct pcep_object_tlv_symbolic_path_name *) + tlv_header); + break; + case PCEP_OBJ_TLV_TYPE_STATEFUL_PCE_CAPABILITY: + _format_pcep_object_tlv_stateful_pce_capability( + ps, (struct pcep_object_tlv_stateful_pce_capability *) + tlv_header); + break; + case PCEP_OBJ_TLV_TYPE_SR_PCE_CAPABILITY: + _format_pcep_object_tlv_sr_pce_capability( + ps, + (struct pcep_object_tlv_sr_pce_capability *)tlv_header); + break; + case PCEP_OBJ_TLV_TYPE_PATH_SETUP_TYPE: + _format_pcep_object_tlv_path_setup_type( + ps, + (struct pcep_object_tlv_path_setup_type *)tlv_header); + break; + default: + PATHD_FORMAT("%*s...\n", ps, ""); + break; + } +} + +void _format_pcep_object_tlv_symbolic_path_name( + int ps, struct pcep_object_tlv_symbolic_path_name *tlv) +{ + PATHD_FORMAT("%*ssymbolic_path_name: %.*s\n", ps, "", + tlv->symbolic_path_name_length, tlv->symbolic_path_name); +} + +void _format_pcep_object_tlv_stateful_pce_capability( + int ps, struct pcep_object_tlv_stateful_pce_capability *tlv) +{ + PATHD_FORMAT("%*sflag_u_lsp_update_capability: %u\n", ps, "", + tlv->flag_u_lsp_update_capability); + 
PATHD_FORMAT("%*sflag_s_include_db_version: %u\n", ps, "", + tlv->flag_s_include_db_version); + PATHD_FORMAT("%*sflag_i_lsp_instantiation_capability: %u\n", ps, "", + tlv->flag_i_lsp_instantiation_capability); + PATHD_FORMAT("%*sflag_t_triggered_resync: %u\n", ps, "", + tlv->flag_t_triggered_resync); + PATHD_FORMAT("%*sflag_d_delta_lsp_sync: %u\n", ps, "", + tlv->flag_d_delta_lsp_sync); + PATHD_FORMAT("%*sflag_f_triggered_initial_sync: %u\n", ps, "", + tlv->flag_f_triggered_initial_sync); +} + +void _format_pcep_object_tlv_sr_pce_capability( + int ps, struct pcep_object_tlv_sr_pce_capability *tlv) +{ + + PATHD_FORMAT("%*sflag_n: %u\n", ps, "", tlv->flag_n); + PATHD_FORMAT("%*sflag_x: %u\n", ps, "", tlv->flag_x); + PATHD_FORMAT("%*smax_sid_depth: %u\n", ps, "", tlv->max_sid_depth); +} + +void _format_pcep_object_tlv_path_setup_type( + int ps, struct pcep_object_tlv_path_setup_type *tlv) +{ + PATHD_FORMAT("%*spath_setup_type: %u\n", ps, "", tlv->path_setup_type); +} diff --git a/pathd/path_pcep_debug.h b/pathd/path_pcep_debug.h new file mode 100644 index 0000000000..68b29ab657 --- /dev/null +++ b/pathd/path_pcep_debug.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_DEBUG_H_ +#define _PATH_PCEP_DEBUG_H_ + +#include "pathd/path_debug.h" +#include <pcep_pcc_api.h> +#include <pcep-objects.h> +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_controller.h" +#include "pathd/path_pcep_pcc.h" +#include "pathd/path_pcep_lib.h" + +const char *pcc_status_name(enum pcc_status status); + +const char *pcep_error_type_name(enum pcep_error_type error_type); +const char *pcep_error_value_name(enum pcep_error_type error_type, + enum pcep_error_value error_value); +const char *pcep_event_type_name(pcep_event_type event_type); +const char *pcep_message_type_name(enum pcep_message_types pcep_message_type); +const char *pcep_object_class_name(enum pcep_object_classes obj_class); +const char *pcep_object_type_name(enum pcep_object_classes obj_class, + enum pcep_object_types obj_type); +const char *pcep_lsp_status_name(enum pcep_lsp_operational_status status); +const char *pcep_tlv_type_name(enum pcep_object_tlv_types tlv_type); +const char *pcep_ro_type_name(enum pcep_ro_subobj_types ro_type); +const char *pcep_nai_type_name(enum pcep_sr_subobj_nai nai_type); +const char *pcep_metric_type_name(enum pcep_metric_types type); +const char *pcep_nopath_tlv_err_code_name(enum pcep_nopath_tlv_err_codes code); + +const char *format_objfun_set(uint32_t flags); +const char *format_pcc_opts(struct pcc_opts *ops); +const char *format_pcc_state(struct pcc_state *state); +const char *format_ctrl_state(struct ctrl_state *state); +const char *format_path(struct path *path); +const char *format_pcep_event(pcep_event *event); +const char 
*format_pcep_message(struct pcep_message *msg); +const char *format_yang_dnode(struct lyd_node *dnode); + +#endif // _PATH_PCEP_DEBUG_H_ diff --git a/pathd/path_pcep_lib.c b/pathd/path_pcep_lib.c new file mode 100644 index 0000000000..bb6bfb1336 --- /dev/null +++ b/pathd/path_pcep_lib.c @@ -0,0 +1,1146 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <debug.h> +#include <pcep_utils_counters.h> +#include <pcep_timers.h> +#include "pathd/path_errors.h" +#include "pathd/path_memory.h" +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_lib.h" +#include "pathd/path_pcep_debug.h" +#include "pathd/path_pcep_memory.h" + +#define CLASS_TYPE(CLASS, TYPE) (((CLASS) << 16) | (TYPE)) +#define DEFAULT_LSAP_SETUP_PRIO 4 +#define DEFAULT_LSAP_HOLDING_PRIO 4 +#define DEFAULT_LSAP_LOCAL_PRETECTION false + +/* pceplib logging callback */ +static int pceplib_logging_cb(int level, const char *fmt, va_list args); + +/* Socket callbacks */ +static int pcep_lib_pceplib_socket_read_cb(void *fpt, void **thread, int fd, + void *payload); +static int pcep_lib_pceplib_socket_write_cb(void *fpt, void **thread, int fd, + void *payload); +static int pcep_lib_socket_read_ready(struct thread *thread); +static int pcep_lib_socket_write_ready(struct thread *thread); + +/* pceplib pcep_event callbacks */ +static void pcep_lib_pceplib_event_cb(void *fpt, pcep_event *event); + +/* pceplib pthread creation callback */ +static int pcep_lib_pthread_create_cb(pthread_t *pthread_id, + const pthread_attr_t *attr, + void *(*start_routine)(void *), + void *data, const char *thread_name); +void *pcep_lib_pthread_start_passthrough(void *data); +int pcep_lib_pthread_stop_cb(struct frr_pthread *, void **); + +/* Internal functions */ +static double_linked_list *pcep_lib_format_path(struct pcep_caps *caps, + struct path *path); +static void pcep_lib_format_constraints(struct path *path, + double_linked_list *objs); +static void pcep_lib_parse_open(struct pcep_caps *caps, + struct pcep_object_open *open); +static void +pcep_lib_parse_open_pce_capability(struct pcep_caps *caps, + struct pcep_object_tlv_header *tlv_header); +static void +pcep_lib_parse_open_objfun_list(struct pcep_caps *caps, + struct pcep_object_tlv_header *tlv_header); +static void pcep_lib_parse_rp(struct path *path, struct pcep_object_rp *rp); +static void pcep_lib_parse_srp(struct path *path, struct pcep_object_srp *srp); +static void pcep_lib_parse_lsp(struct path *path, struct pcep_object_lsp *lsp); +static void pcep_lib_parse_lspa(struct path *path, + struct pcep_object_lspa *lspa); +static void pcep_lib_parse_metric(struct path *path, + struct pcep_object_metric *obj); +static void pcep_lib_parse_ero(struct path *path, struct pcep_object_ro *ero); +static struct path_hop *pcep_lib_parse_ero_sr(struct path_hop *next, 
+ struct pcep_ro_subobj_sr *sr); +static struct counters_group *copy_counter_group(struct counters_group *from); +static struct counters_subgroup * +copy_counter_subgroup(struct counters_subgroup *from); +static struct counter *copy_counter(struct counter *from); +static void free_counter_group(struct counters_group *group); +static void free_counter_subgroup(struct counters_subgroup *subgroup); +static void free_counter(struct counter *counter); + +struct pcep_lib_pthread_passthrough_data { + void *(*start_routine)(void *data); + void *data; +}; + +/* ------------ API Functions ------------ */ + +int pcep_lib_initialize(struct frr_pthread *fpt) +{ + PCEP_DEBUG("Initializing pceplib"); + + /* Register pceplib logging callback */ + register_logger(pceplib_logging_cb); + + /* Its ok that this object goes out of scope, as it + * wont be stored, and its values will be copied */ + struct pceplib_infra_config infra = { + /* Memory infrastructure */ + .pceplib_infra_mt = MTYPE_PCEPLIB_INFRA, + .pceplib_messages_mt = MTYPE_PCEPLIB_MESSAGES, + .malloc_func = (pceplib_malloc_func)qmalloc, + .calloc_func = (pceplib_calloc_func)qcalloc, + .realloc_func = (pceplib_realloc_func)qrealloc, + .strdup_func = (pceplib_strdup_func)qstrdup, + .free_func = (pceplib_free_func)qfree, + /* Timers infrastructure */ + .external_infra_data = fpt, + .socket_read_func = pcep_lib_pceplib_socket_read_cb, + .socket_write_func = pcep_lib_pceplib_socket_write_cb, + /* PCEP events */ + .pcep_event_func = pcep_lib_pceplib_event_cb, + /* PCEPlib pthread creation callback */ + .pthread_create_func = pcep_lib_pthread_create_cb}; + if (!initialize_pcc_infra(&infra)) { + flog_err(EC_PATH_PCEP_PCC_INIT, "failed to initialize pceplib"); + return 1; + } + + return 0; +} + +void pcep_lib_finalize(void) +{ + PCEP_DEBUG("Finalizing pceplib"); + if (!destroy_pcc()) { + flog_err(EC_PATH_PCEP_PCC_FINI, "failed to finalize pceplib"); + } +} + + +pcep_session * +pcep_lib_connect(struct ipaddr *src_addr, int src_port, struct ipaddr *dst_addr, + int dst_port, short msd, + const struct pcep_config_group_opts *pcep_options) +{ + pcep_configuration *config; + pcep_session *sess; + + config = create_default_pcep_configuration(); + config->dst_pcep_port = dst_port; + config->src_pcep_port = src_port; + if (IS_IPADDR_V6(src_addr)) { + config->is_src_ipv6 = true; + memcpy(&config->src_ip.src_ipv6, &src_addr->ipaddr_v6, + sizeof(struct in6_addr)); + } else { + config->is_src_ipv6 = false; + config->src_ip.src_ipv4 = src_addr->ipaddr_v4; + } + + config->support_stateful_pce_lsp_update = true; + config->support_pce_lsp_instantiation = false; + config->support_include_db_version = false; + config->support_lsp_triggered_resync = false; + config->support_lsp_delta_sync = false; + config->support_pce_triggered_initial_sync = false; + config->support_sr_te_pst = true; + config->pcc_can_resolve_nai_to_sid = false; + + config->max_sid_depth = msd; + config->pcep_msg_versioning->draft_ietf_pce_segment_routing_07 = + pcep_options->draft07; + config->keep_alive_seconds = pcep_options->keep_alive_seconds; + config->min_keep_alive_seconds = pcep_options->min_keep_alive_seconds; + config->max_keep_alive_seconds = pcep_options->max_keep_alive_seconds; + config->dead_timer_seconds = pcep_options->dead_timer_seconds; + config->min_dead_timer_seconds = pcep_options->min_dead_timer_seconds; + config->max_dead_timer_seconds = pcep_options->max_dead_timer_seconds; + config->request_time_seconds = pcep_options->pcep_request_time_seconds; + /* TODO when available in the 
pceplib, set it here + pcep_options->state_timeout_inteval_seconds;*/ + + if (pcep_options->tcp_md5_auth != NULL + && pcep_options->tcp_md5_auth[0] != '\0') { + config->is_tcp_auth_md5 = true; + strncpy(config->tcp_authentication_str, + pcep_options->tcp_md5_auth, TCP_MD5SIG_MAXKEYLEN); + } else { + config->is_tcp_auth_md5 = false; + } + + if (IS_IPADDR_V6(dst_addr)) { + sess = connect_pce_ipv6(config, &dst_addr->ipaddr_v6); + } else { + sess = connect_pce(config, &dst_addr->ipaddr_v4); + } + destroy_pcep_configuration(config); + return sess; +} + +void pcep_lib_disconnect(pcep_session *sess) +{ + assert(sess != NULL); + disconnect_pce(sess); +} + +/* Callback passed to pceplib to write to a socket. + * When the socket is ready to be written to, + * pcep_lib_socket_write_ready() will be called */ + +int pcep_lib_pceplib_socket_write_cb(void *fpt, void **thread, int fd, + void *payload) +{ + return pcep_thread_socket_write(fpt, thread, fd, payload, + pcep_lib_socket_write_ready); +} + +/* Callback passed to pceplib to read from a socket. + * When the socket is ready to be read from, + * pcep_lib_socket_read_ready() will be called */ + +int pcep_lib_pceplib_socket_read_cb(void *fpt, void **thread, int fd, + void *payload) +{ + return pcep_thread_socket_read(fpt, thread, fd, payload, + pcep_lib_socket_read_ready); +} + +/* Callbacks called by path_pcep_controller when a socket is ready to read/write + */ + +int pcep_lib_socket_write_ready(struct thread *thread) +{ + struct pcep_ctrl_socket_data *data = THREAD_ARG(thread); + assert(data != NULL); + + int retval = pceplib_external_socket_write(data->fd, data->payload); + XFREE(MTYPE_PCEP, data); + + return retval; +} + +int pcep_lib_socket_read_ready(struct thread *thread) +{ + struct pcep_ctrl_socket_data *data = THREAD_ARG(thread); + assert(data != NULL); + + int retval = pceplib_external_socket_read(data->fd, data->payload); + XFREE(MTYPE_PCEP, data); + + return retval; +} + +/* Callback passed to pceplib when a pcep_event is ready */ +void pcep_lib_pceplib_event_cb(void *fpt, pcep_event *event) +{ + pcep_thread_send_ctrl_event(fpt, event, pcep_thread_pcep_event); +} + +/* Wrapper function around the actual pceplib thread start function */ +void *pcep_lib_pthread_start_passthrough(void *data) +{ + struct frr_pthread *fpt = data; + struct pcep_lib_pthread_passthrough_data *passthrough_data = fpt->data; + void *start_routine_data = passthrough_data->data; + void *(*start_routine)(void *) = passthrough_data->start_routine; + XFREE(MTYPE_PCEP, passthrough_data); + + if (start_routine != NULL) { + return start_routine(start_routine_data); + } + + return NULL; +} + +int pcep_lib_pthread_create_cb(pthread_t *thread_id, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *data, + const char *thread_name) +{ + /* Since FRR calls the start_routine with a struct frr_pthread, + * we have to store the real data and callback in a passthrough + * and pass the actual data the start_routine needs */ + struct pcep_lib_pthread_passthrough_data *passthrough_data = XMALLOC( + MTYPE_PCEP, sizeof(struct pcep_lib_pthread_passthrough_data)); + passthrough_data->data = data; + passthrough_data->start_routine = start_routine; + + struct frr_pthread_attr fpt_attr = { + .start = pcep_lib_pthread_start_passthrough, + .stop = pcep_lib_pthread_stop_cb}; + struct frr_pthread *fpt = + frr_pthread_new(&fpt_attr, thread_name, "pcep_lib"); + if (fpt == NULL) { + return 1; + } + + fpt->data = passthrough_data; + int retval = frr_pthread_run(fpt, attr); + if 
(retval) { + return retval; + } + + *thread_id = fpt->thread; + + return 0; +} + +int pcep_lib_pthread_stop_cb(struct frr_pthread *fpt, void **res) +{ + pcep_lib_finalize(); + frr_pthread_destroy(fpt); + + return 0; +} + +struct pcep_message *pcep_lib_format_report(struct pcep_caps *caps, + struct path *path) +{ + double_linked_list *objs = pcep_lib_format_path(caps, path); + return pcep_msg_create_report(objs); +} + +static struct pcep_object_rp *create_rp(uint32_t reqid) +{ + double_linked_list *rp_tlvs; + struct pcep_object_tlv_path_setup_type *setup_type_tlv; + struct pcep_object_rp *rp; + + rp_tlvs = dll_initialize(); + setup_type_tlv = pcep_tlv_create_path_setup_type(SR_TE_PST); + dll_append(rp_tlvs, setup_type_tlv); + + rp = pcep_obj_create_rp(0, false, false, false, true, reqid, rp_tlvs); + + return rp; +} + +struct pcep_message *pcep_lib_format_request(struct pcep_caps *caps, + struct path *path) +{ + struct ipaddr *src = &path->pcc_addr; + struct ipaddr *dst = &path->nbkey.endpoint; + double_linked_list *objs; + struct pcep_object_rp *rp; + struct pcep_object_endpoints_ipv4 *endpoints_ipv4; + struct pcep_object_endpoints_ipv6 *endpoints_ipv6; + struct pcep_object_objective_function *of = NULL; + enum objfun_type objfun = OBJFUN_UNDEFINED; + + assert(src->ipa_type == dst->ipa_type); + + objs = dll_initialize(); + rp = create_rp(path->req_id); + rp->header.flag_p = true; + + pcep_lib_format_constraints(path, objs); + + /* Objective Function */ + if (path->has_pcc_objfun) { + objfun = path->pcc_objfun; + } + + if (objfun != OBJFUN_UNDEFINED) { + of = pcep_obj_create_objective_function(objfun, NULL); + assert(of != NULL); + of->header.flag_p = path->enforce_pcc_objfun; + dll_append(objs, of); + } + + if (IS_IPADDR_V6(src)) { + endpoints_ipv6 = pcep_obj_create_endpoint_ipv6(&src->ipaddr_v6, + &dst->ipaddr_v6); + endpoints_ipv6->header.flag_p = true; + return pcep_msg_create_request_ipv6(rp, endpoints_ipv6, objs); + } else { + endpoints_ipv4 = pcep_obj_create_endpoint_ipv4(&src->ipaddr_v4, + &dst->ipaddr_v4); + endpoints_ipv4->header.flag_p = true; + return pcep_msg_create_request(rp, endpoints_ipv4, objs); + } +} + +struct pcep_message *pcep_lib_format_error(int error_type, int error_value) +{ + return pcep_msg_create_error(error_type, error_value); +} + +struct pcep_message *pcep_lib_format_request_cancelled(uint32_t reqid) +{ + struct pcep_object_notify *notify; + double_linked_list *objs; + struct pcep_object_rp *rp; + + notify = pcep_obj_create_notify( + PCEP_NOTIFY_TYPE_PENDING_REQUEST_CANCELLED, + PCEP_NOTIFY_VALUE_PCC_CANCELLED_REQUEST); + objs = dll_initialize(); + rp = create_rp(reqid); + dll_append(objs, rp); + + return pcep_msg_create_notify(notify, objs); +} + +struct path *pcep_lib_parse_path(struct pcep_message *msg) +{ + struct path *path; + double_linked_list *objs = msg->obj_list; + double_linked_list_node *node; + + struct pcep_object_header *obj; + struct pcep_object_rp *rp = NULL; + struct pcep_object_srp *srp = NULL; + struct pcep_object_lsp *lsp = NULL; + struct pcep_object_lspa *lspa = NULL; + struct pcep_object_ro *ero = NULL; + struct pcep_object_metric *metric = NULL; + struct pcep_object_bandwidth *bandwidth = NULL; + struct pcep_object_objective_function *of = NULL; + + path = pcep_new_path(); + + for (node = objs->head; node != NULL; node = node->next_node) { + obj = (struct pcep_object_header *)node->data; + switch (CLASS_TYPE(obj->object_class, obj->object_type)) { + case CLASS_TYPE(PCEP_OBJ_CLASS_RP, PCEP_OBJ_TYPE_RP): + assert(rp == NULL); + rp = 
(struct pcep_object_rp *)obj; + pcep_lib_parse_rp(path, rp); + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_SRP, PCEP_OBJ_TYPE_SRP): + assert(srp == NULL); + srp = (struct pcep_object_srp *)obj; + pcep_lib_parse_srp(path, srp); + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_LSP, PCEP_OBJ_TYPE_LSP): + /* Only support single LSP per message */ + assert(lsp == NULL); + lsp = (struct pcep_object_lsp *)obj; + pcep_lib_parse_lsp(path, lsp); + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_LSPA, PCEP_OBJ_TYPE_LSPA): + assert(lspa == NULL); + lspa = (struct pcep_object_lspa *)obj; + pcep_lib_parse_lspa(path, lspa); + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_ERO, PCEP_OBJ_TYPE_ERO): + /* Only support single ERO per message */ + assert(ero == NULL); + ero = (struct pcep_object_ro *)obj; + pcep_lib_parse_ero(path, ero); + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_METRIC, PCEP_OBJ_TYPE_METRIC): + metric = (struct pcep_object_metric *)obj; + pcep_lib_parse_metric(path, metric); + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_BANDWIDTH, + PCEP_OBJ_TYPE_BANDWIDTH_REQ): + case CLASS_TYPE(PCEP_OBJ_CLASS_BANDWIDTH, + PCEP_OBJ_TYPE_BANDWIDTH_CISCO): + bandwidth = (struct pcep_object_bandwidth *)obj; + path->has_bandwidth = true; + path->bandwidth = bandwidth->bandwidth; + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_NOPATH, PCEP_OBJ_TYPE_NOPATH): + path->no_path = true; + break; + case CLASS_TYPE(PCEP_OBJ_CLASS_OF, PCEP_OBJ_TYPE_OF): + of = (struct pcep_object_objective_function *)obj; + path->has_pce_objfun = true; + path->pce_objfun = of->of_code; + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_OBJECT, + "Unexpected PCEP object %s (%u) / %s (%u)", + pcep_object_class_name(obj->object_class), + obj->object_class, + pcep_object_type_name(obj->object_class, + obj->object_type), + obj->object_type); + break; + } + } + + return path; +} + +void pcep_lib_parse_capabilities(struct pcep_message *msg, + struct pcep_caps *caps) +{ + double_linked_list *objs = msg->obj_list; + double_linked_list_node *node; + + struct pcep_object_header *obj; + struct pcep_object_open *open = NULL; + + for (node = objs->head; node != NULL; node = node->next_node) { + obj = (struct pcep_object_header *)node->data; + switch (CLASS_TYPE(obj->object_class, obj->object_type)) { + case CLASS_TYPE(PCEP_OBJ_CLASS_OPEN, PCEP_OBJ_TYPE_OPEN): + assert(open == NULL); + open = (struct pcep_object_open *)obj; + pcep_lib_parse_open(caps, open); + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_OBJECT, + "Unexpected PCEP object %s (%u) / %s (%u)", + pcep_object_class_name(obj->object_class), + obj->object_class, + pcep_object_type_name(obj->object_class, + obj->object_type), + obj->object_type); + break; + } + } +} + +struct counters_group *pcep_lib_copy_counters(pcep_session *sess) +{ + if (!sess || !sess->pcep_session_counters) { + return NULL; + } + + return copy_counter_group(sess->pcep_session_counters); +} + +void pcep_lib_free_counters(struct counters_group *counters) +{ + free_counter_group(counters); +} + +pcep_session *pcep_lib_copy_pcep_session(pcep_session *sess) +{ + if (!sess) { + return NULL; + } + + pcep_session *copy; + copy = XCALLOC(MTYPE_PCEP, sizeof(*copy)); + memcpy(copy, sess, sizeof(*copy)); + /* These fields should not be accessed */ + copy->num_unknown_messages_time_queue = NULL; + copy->socket_comm_session = NULL; + copy->pcep_session_counters = NULL; + + return copy; +} + +/* ------------ pceplib logging callback ------------ */ + +int pceplib_logging_cb(int priority, const char *fmt, va_list args) +{ + char buffer[1024]; + 
vsnprintf(buffer, sizeof(buffer), fmt, args); + PCEP_DEBUG_PCEPLIB(priority, "pceplib: %s", buffer); + return 0; +} + +/* ------------ Internal Functions ------------ */ + +double_linked_list *pcep_lib_format_path(struct pcep_caps *caps, + struct path *path) +{ + struct in_addr addr_null; + double_linked_list *objs, *srp_tlvs, *lsp_tlvs, *ero_objs; + struct pcep_object_tlv_header *tlv; + struct pcep_object_ro_subobj *ero_obj; + struct pcep_object_srp *srp; + struct pcep_object_lsp *lsp; + struct pcep_object_ro *ero; + uint32_t encoded_binding_sid; + char binding_sid_lsp_tlv_data[6]; + + memset(&addr_null, 0, sizeof(addr_null)); + + objs = dll_initialize(); + + if (path->plsp_id != 0) { + /* SRP object */ + srp_tlvs = dll_initialize(); + tlv = (struct pcep_object_tlv_header *) + pcep_tlv_create_path_setup_type(SR_TE_PST); + assert(tlv != NULL); + dll_append(srp_tlvs, tlv); + srp = pcep_obj_create_srp(path->do_remove, path->srp_id, + srp_tlvs); + assert(srp != NULL); + srp->header.flag_p = true; + dll_append(objs, srp); + } + + /* LSP object */ + lsp_tlvs = dll_initialize(); + + if (path->plsp_id == 0 || IS_IPADDR_NONE(&path->nbkey.endpoint) + || IS_IPADDR_NONE(&path->pcc_addr)) { + tlv = (struct pcep_object_tlv_header *) + pcep_tlv_create_ipv4_lsp_identifiers( + &addr_null, &addr_null, 0, 0, &addr_null); + } else { + assert(path->pcc_addr.ipa_type + == path->nbkey.endpoint.ipa_type); + if (IS_IPADDR_V4(&path->pcc_addr)) { + tlv = (struct pcep_object_tlv_header *) + pcep_tlv_create_ipv4_lsp_identifiers( + &path->pcc_addr.ipaddr_v4, + &path->nbkey.endpoint.ipaddr_v4, 0, 0, + &path->pcc_addr.ipaddr_v4); + } else { + tlv = (struct pcep_object_tlv_header *) + pcep_tlv_create_ipv6_lsp_identifiers( + &path->pcc_addr.ipaddr_v6, + &path->nbkey.endpoint.ipaddr_v6, 0, 0, + &path->pcc_addr.ipaddr_v6); + } + } + assert(tlv != NULL); + dll_append(lsp_tlvs, tlv); + if (path->name != NULL) { + tlv = (struct pcep_object_tlv_header *) + /*FIXME: Remove the typecasty when pceplib is changed + to take a const char* */ + pcep_tlv_create_symbolic_path_name((char *)path->name, + strlen(path->name)); + assert(tlv != NULL); + dll_append(lsp_tlvs, tlv); + } + if ((path->plsp_id != 0) && (path->binding_sid != MPLS_LABEL_NONE)) { + memset(binding_sid_lsp_tlv_data, 0, 2); + encoded_binding_sid = htonl(path->binding_sid << 12); + memcpy(binding_sid_lsp_tlv_data + 2, &encoded_binding_sid, 4); + tlv = (struct pcep_object_tlv_header *) + pcep_tlv_create_tlv_arbitrary( + binding_sid_lsp_tlv_data, + sizeof(binding_sid_lsp_tlv_data), 65505); + assert(tlv != NULL); + dll_append(lsp_tlvs, tlv); + } + lsp = pcep_obj_create_lsp( + path->plsp_id, path->status, path->was_created /* C Flag */, + path->go_active /* A Flag */, path->was_removed /* R Flag */, + path->is_synching /* S Flag */, path->is_delegated /* D Flag */, + lsp_tlvs); + assert(lsp != NULL); + lsp->header.flag_p = true; + dll_append(objs, lsp); + /* ERO object */ + ero_objs = dll_initialize(); + for (struct path_hop *hop = path->first_hop; hop != NULL; + hop = hop->next) { + uint32_t sid; + + /* Only supporting MPLS hops with both sid and nai */ + assert(hop->is_mpls); + assert(hop->has_sid); + + if (hop->has_attribs) { + sid = ENCODE_SR_ERO_SID(hop->sid.mpls.label, + hop->sid.mpls.traffic_class, + hop->sid.mpls.is_bottom, + hop->sid.mpls.ttl); + } else { + sid = ENCODE_SR_ERO_SID(hop->sid.mpls.label, 0, 0, 0); + } + + ero_obj = NULL; + if (hop->has_nai) { + assert(hop->nai.type != PCEP_SR_SUBOBJ_NAI_ABSENT); + assert(hop->nai.type + != 
PCEP_SR_SUBOBJ_NAI_LINK_LOCAL_IPV6_ADJACENCY); + assert(hop->nai.type != PCEP_SR_SUBOBJ_NAI_UNKNOWN); + switch (hop->nai.type) { + case PCEP_SR_SUBOBJ_NAI_IPV4_NODE: + ero_obj = (struct pcep_object_ro_subobj *) + pcep_obj_create_ro_subobj_sr_ipv4_node( + hop->is_loose, !hop->has_sid, + hop->has_attribs, /* C Flag */ + hop->is_mpls, /* M Flag */ + sid, + &hop->nai.local_addr.ipaddr_v4); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_NODE: + ero_obj = (struct pcep_object_ro_subobj *) + pcep_obj_create_ro_subobj_sr_ipv6_node( + hop->is_loose, !hop->has_sid, + hop->has_attribs, /* C Flag */ + hop->is_mpls, /* M Flag */ + sid, + &hop->nai.local_addr.ipaddr_v6); + break; + case PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY: + ero_obj = (struct pcep_object_ro_subobj *) + pcep_obj_create_ro_subobj_sr_ipv4_adj( + hop->is_loose, !hop->has_sid, + hop->has_attribs, /* C Flag */ + hop->is_mpls, /* M Flag */ + sid, + &hop->nai.local_addr.ipaddr_v4, + &hop->nai.remote_addr + .ipaddr_v4); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY: + ero_obj = (struct pcep_object_ro_subobj *) + pcep_obj_create_ro_subobj_sr_ipv6_adj( + hop->is_loose, !hop->has_sid, + hop->has_attribs, /* C Flag */ + hop->is_mpls, /* M Flag */ + sid, + &hop->nai.local_addr.ipaddr_v6, + &hop->nai.remote_addr + .ipaddr_v6); + break; + case PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY: + ero_obj = (struct pcep_object_ro_subobj *) + pcep_obj_create_ro_subobj_sr_unnumbered_ipv4_adj( + hop->is_loose, !hop->has_sid, + hop->has_attribs, /* C Flag */ + hop->is_mpls, /* M Flag */ + sid, + hop->nai.local_addr.ipaddr_v4 + .s_addr, + hop->nai.local_iface, + hop->nai.remote_addr.ipaddr_v4 + .s_addr, + hop->nai.remote_iface); + break; + default: + break; + } + } + if (ero_obj == NULL) { + ero_obj = (struct pcep_object_ro_subobj *) + pcep_obj_create_ro_subobj_sr_nonai( + hop->is_loose, sid, + hop->has_attribs, /* C Flag */ + hop->is_mpls); /* M Flag */ + } + dll_append(ero_objs, ero_obj); + } + ero = pcep_obj_create_ero(ero_objs); + assert(ero != NULL); + ero->header.flag_p = true; + dll_append(objs, ero); + + if (path->plsp_id == 0) { + return objs; + } + + pcep_lib_format_constraints(path, objs); + + return objs; +} + +void pcep_lib_format_constraints(struct path *path, double_linked_list *objs) +{ + struct pcep_object_metric *metric; + struct pcep_object_bandwidth *bandwidth; + struct pcep_object_lspa *lspa; + + /* LSPA object */ + if (path->has_affinity_filters) { + lspa = pcep_obj_create_lspa( + path->affinity_filters[AFFINITY_FILTER_EXCLUDE_ANY - 1], + path->affinity_filters[AFFINITY_FILTER_INCLUDE_ANY - 1], + path->affinity_filters[AFFINITY_FILTER_INCLUDE_ALL - 1], + DEFAULT_LSAP_SETUP_PRIO, DEFAULT_LSAP_HOLDING_PRIO, + DEFAULT_LSAP_LOCAL_PRETECTION); + assert(lspa != NULL); + lspa->header.flag_p = true; + dll_append(objs, lspa); + } + + /* Bandwidth Objects */ + if (path->has_bandwidth) { + /* Requested Bandwidth */ + bandwidth = pcep_obj_create_bandwidth(path->bandwidth); + assert(bandwidth != NULL); + bandwidth->header.flag_p = path->enforce_bandwidth; + dll_append(objs, bandwidth); + } + + /* Metric Objects */ + for (struct path_metric *m = path->first_metric; m != NULL; + m = m->next) { + metric = pcep_obj_create_metric(m->type, m->is_bound, + m->is_computed, m->value); + assert(metric != NULL); + metric->header.flag_p = m->enforce; + dll_append(objs, metric); + } +} + +void pcep_lib_parse_open(struct pcep_caps *caps, struct pcep_object_open *open) +{ + double_linked_list *tlvs = open->header.tlv_list; + double_linked_list_node *node; + struct 
pcep_object_tlv_header *tlv_header; + + caps->is_stateful = false; + caps->supported_ofs_are_known = false; + caps->supported_ofs = 0; + + for (node = tlvs->head; node != NULL; node = node->next_node) { + tlv_header = (struct pcep_object_tlv_header *)node->data; + switch (tlv_header->type) { + case PCEP_OBJ_TLV_TYPE_STATEFUL_PCE_CAPABILITY: + pcep_lib_parse_open_pce_capability(caps, tlv_header); + break; + case PCEP_OBJ_TLV_TYPE_SR_PCE_CAPABILITY: + break; + case PCEP_OBJ_TLV_TYPE_OBJECTIVE_FUNCTION_LIST: + pcep_lib_parse_open_objfun_list(caps, tlv_header); + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_TLV, + "Unexpected OPEN's TLV %s (%u)", + pcep_tlv_type_name(tlv_header->type), + tlv_header->type); + break; + } + } +} + +void pcep_lib_parse_open_pce_capability( + struct pcep_caps *caps, struct pcep_object_tlv_header *tlv_header) +{ + struct pcep_object_tlv_stateful_pce_capability *tlv; + tlv = (struct pcep_object_tlv_stateful_pce_capability *)tlv_header; + caps->is_stateful = tlv->flag_u_lsp_update_capability; +} + +void pcep_lib_parse_open_objfun_list(struct pcep_caps *caps, + struct pcep_object_tlv_header *tlv_header) +{ + double_linked_list_node *node; + struct pcep_object_tlv_of_list *tlv; + tlv = (struct pcep_object_tlv_of_list *)tlv_header; + uint16_t of_code; + caps->supported_ofs_are_known = true; + for (node = tlv->of_list->head; node != NULL; node = node->next_node) { + of_code = *(uint16_t *)node->data; + if (of_code >= 32) { + zlog_warn( + "Ignoring unexpected objective function with code %u", + of_code); + continue; + } + SET_FLAG(caps->supported_ofs, of_code); + } +} + +void pcep_lib_parse_rp(struct path *path, struct pcep_object_rp *rp) +{ + double_linked_list *tlvs = rp->header.tlv_list; + double_linked_list_node *node; + struct pcep_object_tlv_header *tlv; + + /* We ignore the other flags and priority for now */ + path->req_id = rp->request_id; + path->has_pce_objfun = false; + path->pce_objfun = OBJFUN_UNDEFINED; + + for (node = tlvs->head; node != NULL; node = node->next_node) { + tlv = (struct pcep_object_tlv_header *)node->data; + switch (tlv->type) { + case PCEP_OBJ_TLV_TYPE_PATH_SETUP_TYPE: + // TODO: enforce the path setup type is SR_TE_PST + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_TLV, + "Unexpected RP's TLV %s (%u)", + pcep_tlv_type_name(tlv->type), tlv->type); + break; + } + } +} + +void pcep_lib_parse_srp(struct path *path, struct pcep_object_srp *srp) +{ + double_linked_list *tlvs = srp->header.tlv_list; + double_linked_list_node *node; + struct pcep_object_tlv_header *tlv; + + path->do_remove = srp->flag_lsp_remove; + path->srp_id = srp->srp_id_number; + + for (node = tlvs->head; node != NULL; node = node->next_node) { + tlv = (struct pcep_object_tlv_header *)node->data; + switch (tlv->type) { + case PCEP_OBJ_TLV_TYPE_PATH_SETUP_TYPE: + // TODO: enforce the path setup type is SR_TE_PST + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_TLV, + "Unexpected SRP's TLV %s (%u)", + pcep_tlv_type_name(tlv->type), tlv->type); + break; + } + } +} + +void pcep_lib_parse_lsp(struct path *path, struct pcep_object_lsp *lsp) +{ + double_linked_list *tlvs = lsp->header.tlv_list; + double_linked_list_node *node; + struct pcep_object_tlv_header *tlv; + + path->plsp_id = lsp->plsp_id; + path->status = lsp->operational_status; + path->go_active = lsp->flag_a; + path->was_created = lsp->flag_c; + path->was_removed = lsp->flag_r; + path->is_synching = lsp->flag_s; + path->is_delegated = lsp->flag_d; + + if (tlvs == NULL) + return; + + 
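+	/* No LSP TLV is interpreted here yet; the loop below only warns
+	 * about unexpected TLVs so they are visible in the logs. */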
for (node = tlvs->head; node != NULL; node = node->next_node) { + tlv = (struct pcep_object_tlv_header *)node->data; + switch (tlv->type) { + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_TLV, + "Unexpected LSP TLV %s (%u)", + pcep_tlv_type_name(tlv->type), tlv->type); + break; + } + } +} + +void pcep_lib_parse_lspa(struct path *path, struct pcep_object_lspa *lspa) +{ + path->has_affinity_filters = true; + path->affinity_filters[AFFINITY_FILTER_EXCLUDE_ANY - 1] = + lspa->lspa_exclude_any; + path->affinity_filters[AFFINITY_FILTER_INCLUDE_ANY - 1] = + lspa->lspa_include_any; + path->affinity_filters[AFFINITY_FILTER_INCLUDE_ALL - 1] = + lspa->lspa_include_all; +} + +void pcep_lib_parse_metric(struct path *path, struct pcep_object_metric *obj) +{ + struct path_metric *metric; + + metric = pcep_new_metric(); + metric->type = obj->type; + metric->is_bound = obj->flag_b; + metric->is_computed = obj->flag_c; + metric->value = obj->value; + metric->next = path->first_metric; + path->first_metric = metric; +} + +void pcep_lib_parse_ero(struct path *path, struct pcep_object_ro *ero) +{ + struct path_hop *hop = NULL; + double_linked_list *objs = ero->sub_objects; + double_linked_list_node *node; + struct pcep_object_ro_subobj *obj; + + for (node = objs->tail; node != NULL; node = node->prev_node) { + obj = (struct pcep_object_ro_subobj *)node->data; + switch (obj->ro_subobj_type) { + case RO_SUBOBJ_TYPE_SR: + hop = pcep_lib_parse_ero_sr( + hop, (struct pcep_ro_subobj_sr *)obj); + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_ERO_SUBOBJ, + "Unexpected ERO sub-object %s (%u)", + pcep_ro_type_name(obj->ro_subobj_type), + obj->ro_subobj_type); + break; + } + } + + path->first_hop = hop; +} + +struct path_hop *pcep_lib_parse_ero_sr(struct path_hop *next, + struct pcep_ro_subobj_sr *sr) +{ + struct path_hop *hop = NULL; + union sid sid; + + /* Only support IPv4 node with SID */ + assert(!sr->flag_s); + + if (sr->flag_m) { + sid.mpls = (struct sid_mpls){ + .label = GET_SR_ERO_SID_LABEL(sr->sid), + .traffic_class = GET_SR_ERO_SID_TC(sr->sid), + .is_bottom = GET_SR_ERO_SID_S(sr->sid), + .ttl = GET_SR_ERO_SID_TTL(sr->sid)}; + } else { + sid.value = sr->sid; + } + + hop = pcep_new_hop(); + *hop = (struct path_hop){.next = next, + .is_loose = + sr->ro_subobj.flag_subobj_loose_hop, + .has_sid = !sr->flag_s, + .is_mpls = sr->flag_m, + .has_attribs = sr->flag_c, + .sid = sid, + .has_nai = !sr->flag_f, + .nai = {.type = sr->nai_type}}; + + if (!sr->flag_f) { + assert(sr->nai_list != NULL); + double_linked_list_node *n = sr->nai_list->head; + assert(n != NULL); + assert(n->data != NULL); + switch (sr->nai_type) { + case PCEP_SR_SUBOBJ_NAI_IPV4_NODE: + hop->nai.local_addr.ipa_type = IPADDR_V4; + memcpy(&hop->nai.local_addr.ipaddr_v4, n->data, + sizeof(struct in_addr)); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_NODE: + hop->nai.local_addr.ipa_type = IPADDR_V6; + memcpy(&hop->nai.local_addr.ipaddr_v6, n->data, + sizeof(struct in6_addr)); + break; + case PCEP_SR_SUBOBJ_NAI_IPV4_ADJACENCY: + hop->nai.local_addr.ipa_type = IPADDR_V4; + memcpy(&hop->nai.local_addr.ipaddr_v4, n->data, + sizeof(struct in_addr)); + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + hop->nai.remote_addr.ipa_type = IPADDR_V4; + memcpy(&hop->nai.remote_addr.ipaddr_v4, n->data, + sizeof(struct in_addr)); + break; + case PCEP_SR_SUBOBJ_NAI_IPV6_ADJACENCY: + hop->nai.local_addr.ipa_type = IPADDR_V6; + memcpy(&hop->nai.local_addr.ipaddr_v6, n->data, + sizeof(struct in6_addr)); + n = n->next_node; + assert(n != NULL); + 
assert(n->data != NULL); + hop->nai.remote_addr.ipa_type = IPADDR_V6; + memcpy(&hop->nai.remote_addr.ipaddr_v6, n->data, + sizeof(struct in6_addr)); + break; + case PCEP_SR_SUBOBJ_NAI_UNNUMBERED_IPV4_ADJACENCY: + hop->nai.local_addr.ipa_type = IPADDR_V4; + memcpy(&hop->nai.local_addr.ipaddr_v4, n->data, + sizeof(struct in_addr)); + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + hop->nai.local_iface = *(uint32_t *)n->data; + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + hop->nai.remote_addr.ipa_type = IPADDR_V4; + memcpy(&hop->nai.remote_addr.ipaddr_v4, n->data, + sizeof(struct in_addr)); + n = n->next_node; + assert(n != NULL); + assert(n->data != NULL); + hop->nai.remote_iface = *(uint32_t *)n->data; + break; + default: + hop->has_nai = false; + flog_warn(EC_PATH_PCEP_UNEXPECTED_SR_NAI, + "Unexpected SR segment NAI type %s (%u)", + pcep_nai_type_name(sr->nai_type), + sr->nai_type); + break; + } + } + + return hop; +} + +struct counters_group *copy_counter_group(struct counters_group *from) +{ + int size, i; + struct counters_group *result; + if (from == NULL) + return NULL; + assert(from->max_subgroups >= from->num_subgroups); + result = XCALLOC(MTYPE_PCEP, sizeof(*result)); + memcpy(result, from, sizeof(*result)); + size = sizeof(struct counters_subgroup *) * (from->max_subgroups + 1); + result->subgroups = XCALLOC(MTYPE_PCEP, size); + for (i = 0; i <= from->max_subgroups; i++) + result->subgroups[i] = + copy_counter_subgroup(from->subgroups[i]); + return result; +} + +struct counters_subgroup *copy_counter_subgroup(struct counters_subgroup *from) +{ + int size, i; + struct counters_subgroup *result; + if (from == NULL) + return NULL; + assert(from->max_counters >= from->num_counters); + result = XCALLOC(MTYPE_PCEP, sizeof(*result)); + memcpy(result, from, sizeof(*result)); + size = sizeof(struct counter *) * (from->max_counters + 1); + result->counters = XCALLOC(MTYPE_PCEP, size); + for (i = 0; i <= from->max_counters; i++) + result->counters[i] = copy_counter(from->counters[i]); + return result; +} + +struct counter *copy_counter(struct counter *from) +{ + struct counter *result; + if (from == NULL) + return NULL; + result = XCALLOC(MTYPE_PCEP, sizeof(*result)); + memcpy(result, from, sizeof(*result)); + return result; +} + +void free_counter_group(struct counters_group *group) +{ + int i; + if (group == NULL) + return; + for (i = 0; i <= group->max_subgroups; i++) + free_counter_subgroup(group->subgroups[i]); + XFREE(MTYPE_PCEP, group->subgroups); + XFREE(MTYPE_PCEP, group); +} + +void free_counter_subgroup(struct counters_subgroup *subgroup) +{ + int i; + if (subgroup == NULL) + return; + for (i = 0; i <= subgroup->max_counters; i++) + free_counter(subgroup->counters[i]); + XFREE(MTYPE_PCEP, subgroup->counters); + XFREE(MTYPE_PCEP, subgroup); +} + +void free_counter(struct counter *counter) +{ + if (counter == NULL) + return; + XFREE(MTYPE_PCEP, counter); +} diff --git a/pathd/path_pcep_lib.h b/pathd/path_pcep_lib.h new file mode 100644 index 0000000000..3bea28432d --- /dev/null +++ b/pathd/path_pcep_lib.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_LIB_H_ +#define _PATH_PCEP_LIB_H_ + +#include <stdbool.h> +#include <pcep_pcc_api.h> +#include "frr_pthread.h" +#include "pathd/path_pcep.h" + +int pcep_lib_initialize(struct frr_pthread *fpt); +void pcep_lib_finalize(void); +pcep_session * +pcep_lib_connect(struct ipaddr *src_addr, int src_port, struct ipaddr *dst_addr, + int dst_port, short msd, + const struct pcep_config_group_opts *pcep_options); +void pcep_lib_disconnect(pcep_session *sess); +struct pcep_message *pcep_lib_format_report(struct pcep_caps *caps, + struct path *path); +struct pcep_message *pcep_lib_format_request(struct pcep_caps *caps, + struct path *path); +struct pcep_message *pcep_lib_format_request_cancelled(uint32_t reqid); + +struct pcep_message *pcep_lib_format_error(int error_type, int error_value); +struct path *pcep_lib_parse_path(struct pcep_message *msg); +void pcep_lib_parse_capabilities(struct pcep_message *msg, + struct pcep_caps *caps); +struct counters_group *pcep_lib_copy_counters(pcep_session *sess); +void pcep_lib_free_counters(struct counters_group *counters); +pcep_session *pcep_lib_copy_pcep_session(pcep_session *sess); + +#endif // _PATH_PCEP_LIB_H_ diff --git a/pathd/path_pcep_memory.c b/pathd/path_pcep_memory.c new file mode 100644 index 0000000000..8f608090a6 --- /dev/null +++ b/pathd/path_pcep_memory.c @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include <memory.h> + +#include "pathd/path_pcep_memory.h" + +DEFINE_MTYPE(PATHD, PCEP, "PCEP module") +DEFINE_MTYPE(PATHD, PCEPLIB_INFRA, "PCEPlib Infrastructure") +DEFINE_MTYPE(PATHD, PCEPLIB_MESSAGES, "PCEPlib PCEP Messages") diff --git a/pathd/path_pcep_memory.h b/pathd/path_pcep_memory.h new file mode 100644 index 0000000000..05c5e2d82b --- /dev/null +++ b/pathd/path_pcep_memory.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _FRR_PATH_PCEP_MEMORY_H_ +#define _FRR_PATH_PCEP_MEMORY_H_ + +#include "pathd/path_memory.h" + +DECLARE_MTYPE(PCEP) +DECLARE_MTYPE(PCEPLIB_INFRA) +DECLARE_MTYPE(PCEPLIB_MESSAGES) + +#endif /* _FRR_PATH_PCEP_MEMORY_H_ */ diff --git a/pathd/path_pcep_pcc.c b/pathd/path_pcep_pcc.c new file mode 100644 index 0000000000..c1f60edd22 --- /dev/null +++ b/pathd/path_pcep_pcc.c @@ -0,0 +1,1818 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* TODOS AND KNOWN ISSUES: + - Delete mapping from NB keys to PLSPID when an LSP is deleted either + by the PCE or by NB. + - Revert the hacks to work around ODL requiring a report with + operational status DOWN when an LSP is activated. + - Enforce only the PCE a policy has been delegated to can update it. + - If the router-id is used because the PCC IP is not specified + (either IPv4 or IPv6), the connection to the PCE is not reset + when the router-id changes. 
+*/ + +#include <zebra.h> + +#include "log.h" +#include "command.h" +#include "libfrr.h" +#include "printfrr.h" +#include "version.h" +#include "northbound.h" +#include "frr_pthread.h" +#include "jhash.h" + +#include "pathd/pathd.h" +#include "pathd/path_zebra.h" +#include "pathd/path_errors.h" +#include "pathd/path_pcep_memory.h" +#include "pathd/path_pcep.h" +#include "pathd/path_pcep_controller.h" +#include "pathd/path_pcep_lib.h" +#include "pathd/path_pcep_config.h" +#include "pathd/path_pcep_debug.h" + + +/* The number of time we will skip connecting if we are missing the PCC + * address for an inet family different from the selected transport one*/ +#define OTHER_FAMILY_MAX_RETRIES 4 +#define MAX_ERROR_MSG_SIZE 256 +#define MAX_COMPREQ_TRIES 3 + + +/* PCEP Event Handler */ +static void handle_pcep_open(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg); +static void handle_pcep_message(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg); +static void handle_pcep_lsp_update(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg); +static void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg); +static void handle_pcep_comp_reply(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg); + +/* Internal Functions */ +static const char *ipaddr_type_name(struct ipaddr *addr); +static bool filter_path(struct pcc_state *pcc_state, struct path *path); +static void select_pcc_addresses(struct pcc_state *pcc_state); +static void select_transport_address(struct pcc_state *pcc_state); +static void update_tag(struct pcc_state *pcc_state); +static void update_originator(struct pcc_state *pcc_state); +static void schedule_reconnect(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +static void schedule_session_timeout(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +static void cancel_session_timeout(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +static void send_pcep_message(struct pcc_state *pcc_state, + struct pcep_message *msg); +static void send_pcep_error(struct pcc_state *pcc_state, + enum pcep_error_type error_type, + enum pcep_error_value error_value); +static void send_report(struct pcc_state *pcc_state, struct path *path); +static void send_comp_request(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct req_entry *req); +static void cancel_comp_requests(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +static void cancel_comp_request(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct req_entry *req); +static void specialize_outgoing_path(struct pcc_state *pcc_state, + struct path *path); +static void specialize_incoming_path(struct pcc_state *pcc_state, + struct path *path); +static bool validate_incoming_path(struct pcc_state *pcc_state, + struct path *path, char *errbuff, + size_t buffsize); +static void set_pcc_address(struct pcc_state *pcc_state, + struct lsp_nb_key *nbkey, struct ipaddr *addr); +static int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs); +static int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs); +static int get_previous_best_pce(struct pcc_state **pcc); +static int get_best_pce(struct pcc_state **pcc); +static int get_pce_count_connected(struct pcc_state **pcc); +static bool update_best_pce(struct pcc_state **pcc, int best); + +/* Data 
Structure Helper Functions */ +static void lookup_plspid(struct pcc_state *pcc_state, struct path *path); +static void lookup_nbkey(struct pcc_state *pcc_state, struct path *path); +static void free_req_entry(struct req_entry *req); +static struct req_entry *push_new_req(struct pcc_state *pcc_state, + struct path *path); +static void repush_req(struct pcc_state *pcc_state, struct req_entry *req); +static struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid); +static bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path); +static void remove_reqid_mapping(struct pcc_state *pcc_state, + struct path *path); +static uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path); +static bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path); + +/* Data Structure Callbacks */ +static int plspid_map_cmp(const struct plspid_map_data *a, + const struct plspid_map_data *b); +static uint32_t plspid_map_hash(const struct plspid_map_data *e); +static int nbkey_map_cmp(const struct nbkey_map_data *a, + const struct nbkey_map_data *b); +static uint32_t nbkey_map_hash(const struct nbkey_map_data *e); +static int req_map_cmp(const struct req_map_data *a, + const struct req_map_data *b); +static uint32_t req_map_hash(const struct req_map_data *e); + +/* Data Structure Declarations */ +DECLARE_HASH(plspid_map, struct plspid_map_data, mi, plspid_map_cmp, + plspid_map_hash) +DECLARE_HASH(nbkey_map, struct nbkey_map_data, mi, nbkey_map_cmp, + nbkey_map_hash) +DECLARE_HASH(req_map, struct req_map_data, mi, req_map_cmp, req_map_hash) + +static inline int req_entry_compare(const struct req_entry *a, + const struct req_entry *b) +{ + return a->path->req_id - b->path->req_id; +} +RB_GENERATE(req_entry_head, req_entry, entry, req_entry_compare) + + +/* ------------ API Functions ------------ */ + +struct pcc_state *pcep_pcc_initialize(struct ctrl_state *ctrl_state, int index) +{ + struct pcc_state *pcc_state = XCALLOC(MTYPE_PCEP, sizeof(*pcc_state)); + + pcc_state->id = index; + pcc_state->status = PCEP_PCC_DISCONNECTED; + pcc_state->next_reqid = 1; + pcc_state->next_plspid = 1; + + RB_INIT(req_entry_head, &pcc_state->requests); + + update_tag(pcc_state); + update_originator(pcc_state); + + PCEP_DEBUG("%s PCC initialized", pcc_state->tag); + + return pcc_state; +} + +void pcep_pcc_finalize(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + PCEP_DEBUG("%s PCC finalizing...", pcc_state->tag); + + pcep_pcc_disable(ctrl_state, pcc_state); + + if (pcc_state->pcc_opts != NULL) { + XFREE(MTYPE_PCEP, pcc_state->pcc_opts); + pcc_state->pcc_opts = NULL; + } + if (pcc_state->pce_opts != NULL) { + XFREE(MTYPE_PCEP, pcc_state->pce_opts); + pcc_state->pce_opts = NULL; + } + if (pcc_state->originator != NULL) { + XFREE(MTYPE_PCEP, pcc_state->originator); + pcc_state->originator = NULL; + } + + if (pcc_state->t_reconnect != NULL) { + thread_cancel(&pcc_state->t_reconnect); + pcc_state->t_reconnect = NULL; + } + + if (pcc_state->t_update_best != NULL) { + thread_cancel(&pcc_state->t_update_best); + pcc_state->t_update_best = NULL; + } + + if (pcc_state->t_session_timeout != NULL) { + thread_cancel(&pcc_state->t_session_timeout); + pcc_state->t_session_timeout = NULL; + } + + XFREE(MTYPE_PCEP, pcc_state); +} + +int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs) +{ + int retval; + + if (lhs == NULL) { + return 1; + } + + if (rhs == NULL) { + return -1; + } + + retval = lhs->port - rhs->port; + if (retval != 0) { + return retval; + } + + retval = 
lhs->msd - rhs->msd; + if (retval != 0) { + return retval; + } + + if (IS_IPADDR_V4(&lhs->addr)) { + retval = memcmp(&lhs->addr.ipaddr_v4, &rhs->addr.ipaddr_v4, + sizeof(lhs->addr.ipaddr_v4)); + if (retval != 0) { + return retval; + } + } else if (IS_IPADDR_V6(&lhs->addr)) { + retval = memcmp(&lhs->addr.ipaddr_v6, &rhs->addr.ipaddr_v6, + sizeof(lhs->addr.ipaddr_v6)); + if (retval != 0) { + return retval; + } + } + + return 0; +} + +int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs) +{ + if (lhs == NULL) { + return 1; + } + + if (rhs == NULL) { + return -1; + } + + int retval = lhs->port - rhs->port; + if (retval != 0) { + return retval; + } + + retval = strcmp(lhs->pce_name, rhs->pce_name); + if (retval != 0) { + return retval; + } + + retval = lhs->precedence - rhs->precedence; + if (retval != 0) { + return retval; + } + + retval = memcmp(&lhs->addr, &rhs->addr, sizeof(lhs->addr)); + if (retval != 0) { + return retval; + } + + return 0; +} + +int pcep_pcc_update(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state, + struct pcc_opts *pcc_opts, struct pce_opts *pce_opts) +{ + int ret = 0; + + // If the options did not change, then there is nothing to do + if ((compare_pce_opts(pce_opts, pcc_state->pce_opts) == 0) + && (compare_pcc_opts(pcc_opts, pcc_state->pcc_opts) == 0)) { + return ret; + } + + if ((ret = pcep_pcc_disable(ctrl_state, pcc_state))) { + XFREE(MTYPE_PCEP, pcc_opts); + XFREE(MTYPE_PCEP, pce_opts); + return ret; + } + + if (pcc_state->pcc_opts != NULL) { + XFREE(MTYPE_PCEP, pcc_state->pcc_opts); + } + if (pcc_state->pce_opts != NULL) { + XFREE(MTYPE_PCEP, pcc_state->pce_opts); + } + + pcc_state->pcc_opts = pcc_opts; + pcc_state->pce_opts = pce_opts; + + if (IS_IPADDR_V4(&pcc_opts->addr)) { + pcc_state->pcc_addr_v4 = pcc_opts->addr.ipaddr_v4; + SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4); + } else { + UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4); + } + + if (IS_IPADDR_V6(&pcc_opts->addr)) { + memcpy(&pcc_state->pcc_addr_v6, &pcc_opts->addr.ipaddr_v6, + sizeof(struct in6_addr)); + SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6); + } else { + UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6); + } + + update_tag(pcc_state); + update_originator(pcc_state); + + return pcep_pcc_enable(ctrl_state, pcc_state); +} + +void pcep_pcc_reconnect(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + if (pcc_state->status == PCEP_PCC_DISCONNECTED) + pcep_pcc_enable(ctrl_state, pcc_state); +} + +int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state) +{ + char pcc_buff[40]; + char pce_buff[40]; + + assert(pcc_state->status == PCEP_PCC_DISCONNECTED); + assert(pcc_state->sess == NULL); + + if (pcc_state->t_reconnect != NULL) { + thread_cancel(&pcc_state->t_reconnect); + pcc_state->t_reconnect = NULL; + } + + select_transport_address(pcc_state); + + /* Even though we are connecting using IPv6. 
we want to have an IPv4 + * address so we can handle candidate path with IPv4 endpoints */ + if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) { + if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) { + flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + "skipping connection to PCE %s:%d due to " + "missing PCC IPv4 address", + ipaddr2str(&pcc_state->pce_opts->addr, + pce_buff, sizeof(pce_buff)), + pcc_state->pce_opts->port); + schedule_reconnect(ctrl_state, pcc_state); + return 0; + } else { + flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + "missing IPv4 PCC address, IPv4 candidate " + "paths will be ignored"); + } + } + + /* Even though we are connecting using IPv4. we want to have an IPv6 + * address so we can handle candidate path with IPv6 endpoints */ + if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) { + if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) { + flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + "skipping connection to PCE %s:%d due to " + "missing PCC IPv6 address", + ipaddr2str(&pcc_state->pce_opts->addr, + pce_buff, sizeof(pce_buff)), + pcc_state->pce_opts->port); + schedule_reconnect(ctrl_state, pcc_state); + return 0; + } else { + flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + "missing IPv6 PCC address, IPv6 candidate " + "paths will be ignored"); + } + } + + /* Even if the maximum retries to try to have all the familly addresses + * have been spent, we still need the one for the transport familly */ + if (pcc_state->pcc_addr_tr.ipa_type == IPADDR_NONE) { + flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS, + "skipping connection to PCE %s:%d due to missing " + "PCC address", + ipaddr2str(&pcc_state->pce_opts->addr, pce_buff, + sizeof(pce_buff)), + pcc_state->pce_opts->port); + schedule_reconnect(ctrl_state, pcc_state); + return 0; + } + + PCEP_DEBUG("%s PCC connecting", pcc_state->tag); + pcc_state->sess = pcep_lib_connect( + &pcc_state->pcc_addr_tr, pcc_state->pcc_opts->port, + &pcc_state->pce_opts->addr, pcc_state->pce_opts->port, + pcc_state->pcc_opts->msd, &pcc_state->pce_opts->config_opts); + + if (pcc_state->sess == NULL) { + flog_warn(EC_PATH_PCEP_LIB_CONNECT, + "failed to connect to PCE %s:%d from %s:%d", + ipaddr2str(&pcc_state->pce_opts->addr, pce_buff, + sizeof(pce_buff)), + pcc_state->pce_opts->port, + ipaddr2str(&pcc_state->pcc_addr_tr, pcc_buff, + sizeof(pcc_buff)), + pcc_state->pcc_opts->port); + schedule_reconnect(ctrl_state, pcc_state); + return 0; + } + + // In case some best pce alternative were waiting to activate + if (pcc_state->t_update_best != NULL) { + thread_cancel(&pcc_state->t_update_best); + pcc_state->t_update_best = NULL; + } + + pcc_state->status = PCEP_PCC_CONNECTING; + + return 0; +} + +int pcep_pcc_disable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state) +{ + switch (pcc_state->status) { + case PCEP_PCC_DISCONNECTED: + return 0; + case PCEP_PCC_CONNECTING: + case PCEP_PCC_SYNCHRONIZING: + case PCEP_PCC_OPERATING: + PCEP_DEBUG("%s Disconnecting PCC...", pcc_state->tag); + cancel_comp_requests(ctrl_state, pcc_state); + pcep_lib_disconnect(pcc_state->sess); + /* No need to remove if any PCEs is connected */ + if (get_pce_count_connected(ctrl_state->pcc) == 0) { + pcep_thread_remove_candidate_path_segments(ctrl_state, + pcc_state); + } + pcc_state->sess = NULL; + pcc_state->status = PCEP_PCC_DISCONNECTED; + return 0; + default: + return 1; + } +} + +void pcep_pcc_sync_path(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct path *path) +{ + if (pcc_state->status == PCEP_PCC_SYNCHRONIZING) { 
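+		/* Paths reported while the session is still synchronizing
+		 * must carry the LSP S flag. */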
+ path->is_synching = true; + } else if (pcc_state->status == PCEP_PCC_OPERATING) + path->is_synching = false; + else + return; + + path->go_active = true; + + /* Accumulate the dynamic paths without any LSP so computation + * requests can be performed after synchronization */ + if ((path->type == SRTE_CANDIDATE_TYPE_DYNAMIC) + && (path->first_hop == NULL) + && !has_pending_req_for(pcc_state, path)) { + PCEP_DEBUG("%s Scheduling computation request for path %s", + pcc_state->tag, path->name); + push_new_req(pcc_state, path); + return; + } + + /* Synchronize the path if the PCE supports LSP updates and the + * endpoint address familly is supported */ + if (pcc_state->caps.is_stateful) { + if (filter_path(pcc_state, path)) { + PCEP_DEBUG("%s Synchronizing path %s", pcc_state->tag, + path->name); + send_report(pcc_state, path); + } else { + PCEP_DEBUG( + "%s Skipping %s candidate path %s " + "synchronization", + pcc_state->tag, + ipaddr_type_name(&path->nbkey.endpoint), + path->name); + } + } +} + +void pcep_pcc_sync_done(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + struct req_entry *req; + + if (pcc_state->status != PCEP_PCC_SYNCHRONIZING + && pcc_state->status != PCEP_PCC_OPERATING) + return; + + if (pcc_state->caps.is_stateful + && pcc_state->status == PCEP_PCC_SYNCHRONIZING) { + struct path *path = pcep_new_path(); + *path = (struct path){.name = NULL, + .srp_id = 0, + .plsp_id = 0, + .status = PCEP_LSP_OPERATIONAL_DOWN, + .do_remove = false, + .go_active = false, + .was_created = false, + .was_removed = false, + .is_synching = false, + .is_delegated = false, + .first_hop = NULL, + .first_metric = NULL}; + send_report(pcc_state, path); + pcep_free_path(path); + } + + pcc_state->synchronized = true; + pcc_state->status = PCEP_PCC_OPERATING; + + PCEP_DEBUG("%s Synchronization done", pcc_state->tag); + + /* Start the computation request accumulated during synchronization */ + RB_FOREACH (req, req_entry_head, &pcc_state->requests) { + send_comp_request(ctrl_state, pcc_state, req); + } +} + +void pcep_pcc_send_report(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct path *path) +{ + if (pcc_state->status != PCEP_PCC_OPERATING) + return; + + if (pcc_state->caps.is_stateful) { + PCEP_DEBUG("%s Send report for candidate path %s", + pcc_state->tag, path->name); + send_report(pcc_state, path); + } +} + +/* ------------ Timeout handler ------------ */ + +void pcep_pcc_timeout_handler(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + enum pcep_ctrl_timer_type type, void *param) +{ + struct req_entry *req; + + switch (type) { + case TO_COMPUTATION_REQUEST: + assert(param != NULL); + req = (struct req_entry *)param; + pop_req(pcc_state, req->path->req_id); + flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT, + "Computation request %d timeout", req->path->req_id); + cancel_comp_request(ctrl_state, pcc_state, req); + if (req->retry_count++ < MAX_COMPREQ_TRIES) { + repush_req(pcc_state, req); + send_comp_request(ctrl_state, pcc_state, req); + return; + } + if (pcc_state->caps.is_stateful) { + struct path *path; + PCEP_DEBUG( + "%s Delegating undefined dynamic path %s to PCE %s", + pcc_state->tag, req->path->name, + pcc_state->originator); + path = pcep_copy_path(req->path); + path->is_delegated = true; + send_report(pcc_state, path); + free_req_entry(req); + } + break; + default: + break; + } +} + + +/* ------------ Pathd event handler ------------ */ + +void pcep_pcc_pathd_event_handler(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + 
enum pcep_pathd_event_type type, + struct path *path) +{ + struct req_entry *req; + + if (pcc_state->status != PCEP_PCC_OPERATING) + return; + + /* Skipping candidate path with endpoint that do not match the + * configured or deduced PCC IP version */ + if (!filter_path(pcc_state, path)) { + PCEP_DEBUG("%s Skipping %s candidate path %s event", + pcc_state->tag, + ipaddr_type_name(&path->nbkey.endpoint), path->name); + return; + } + + switch (type) { + case PCEP_PATH_CREATED: + if (has_pending_req_for(pcc_state, path)) { + PCEP_DEBUG( + "%s Candidate path %s created, computation request already sent", + pcc_state->tag, path->name); + return; + } + PCEP_DEBUG("%s Candidate path %s created", pcc_state->tag, + path->name); + if ((path->first_hop == NULL) + && (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)) { + req = push_new_req(pcc_state, path); + send_comp_request(ctrl_state, pcc_state, req); + } else if (pcc_state->caps.is_stateful) + send_report(pcc_state, path); + return; + case PCEP_PATH_UPDATED: + PCEP_DEBUG("%s Candidate path %s updated", pcc_state->tag, + path->name); + if (pcc_state->caps.is_stateful) + send_report(pcc_state, path); + return; + case PCEP_PATH_REMOVED: + PCEP_DEBUG("%s Candidate path %s removed", pcc_state->tag, + path->name); + path->was_removed = true; + if (pcc_state->caps.is_stateful) + send_report(pcc_state, path); + return; + default: + flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR, + "Unexpected pathd event received by pcc %s: %u", + pcc_state->tag, type); + return; + } +} + + +/* ------------ PCEP event handler ------------ */ + +void pcep_pcc_pcep_event_handler(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, pcep_event *event) +{ + PCEP_DEBUG("%s Received PCEP event: %s", pcc_state->tag, + pcep_event_type_name(event->event_type)); + switch (event->event_type) { + case PCC_CONNECTED_TO_PCE: + assert(PCEP_PCC_CONNECTING == pcc_state->status); + PCEP_DEBUG("%s Connection established", pcc_state->tag); + pcc_state->status = PCEP_PCC_SYNCHRONIZING; + pcc_state->retry_count = 0; + pcc_state->synchronized = false; + PCEP_DEBUG("%s Starting PCE synchronization", pcc_state->tag); + cancel_session_timeout(ctrl_state, pcc_state); + pcep_pcc_calculate_best_pce(ctrl_state->pcc); + pcep_thread_start_sync(ctrl_state, pcc_state->id); + break; + case PCC_SENT_INVALID_OPEN: + PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state->tag); + PCEP_DEBUG( + "%s Reconciling values: keep alive (%d) dead timer (%d) seconds ", + pcc_state->tag, + pcc_state->sess->pcc_config + .keep_alive_pce_negotiated_timer_seconds, + pcc_state->sess->pcc_config + .dead_timer_pce_negotiated_seconds); + pcc_state->pce_opts->config_opts.keep_alive_seconds = + pcc_state->sess->pcc_config + .keep_alive_pce_negotiated_timer_seconds; + pcc_state->pce_opts->config_opts.dead_timer_seconds = + pcc_state->sess->pcc_config + .dead_timer_pce_negotiated_seconds; + break; + + case PCC_RCVD_INVALID_OPEN: + PCEP_DEBUG("%s Received invalid OPEN message", pcc_state->tag); + PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state->tag, + format_pcep_message(event->message)); + break; + case PCE_DEAD_TIMER_EXPIRED: + case PCE_CLOSED_SOCKET: + case PCE_SENT_PCEP_CLOSE: + case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED: + case PCC_PCEP_SESSION_CLOSED: + case PCC_RCVD_MAX_INVALID_MSGS: + case PCC_RCVD_MAX_UNKOWN_MSGS: + pcep_pcc_disable(ctrl_state, pcc_state); + schedule_reconnect(ctrl_state, pcc_state); + schedule_session_timeout(ctrl_state, pcc_state); + break; + case MESSAGE_RECEIVED: + PCEP_DEBUG_PCEP("%s Received PCEP 
message: %s", pcc_state->tag, + format_pcep_message(event->message)); + if (pcc_state->status == PCEP_PCC_CONNECTING) { + if (event->message->msg_header->type == PCEP_TYPE_OPEN) + handle_pcep_open(ctrl_state, pcc_state, + event->message); + break; + } + assert(pcc_state->status == PCEP_PCC_SYNCHRONIZING + || pcc_state->status == PCEP_PCC_OPERATING); + handle_pcep_message(ctrl_state, pcc_state, event->message); + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT, + "Unexpected event from pceplib: %s", + format_pcep_event(event)); + break; + } +} + + +/*------------------ Multi-PCE --------------------- */ + +/* Internal util function, returns true if sync is necessary, false otherwise */ +bool update_best_pce(struct pcc_state **pcc, int best) +{ + PCEP_DEBUG(" recalculating pce precedence "); + if (best) { + struct pcc_state *best_pcc_state = + pcep_pcc_get_pcc_by_id(pcc, best); + if (best_pcc_state->previous_best != best_pcc_state->is_best) { + PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)", + best_pcc_state->tag, best_pcc_state->id, + best_pcc_state->previous_best); + return true; + } else { + PCEP_DEBUG( + " %s No Resynch best (%i) previous best (%i)", + best_pcc_state->tag, best_pcc_state->id, + best_pcc_state->previous_best); + } + } else { + PCEP_DEBUG(" No best pce available, all pce seem disconnected"); + } + + return false; +} + +int get_best_pce(struct pcc_state **pcc) +{ + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] && pcc[i]->pce_opts) { + if (pcc[i]->is_best == true) { + return pcc[i]->id; + } + } + } + return 0; +} + +int get_pce_count_connected(struct pcc_state **pcc) +{ + int count = 0; + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] && pcc[i]->pce_opts + && pcc[i]->status != PCEP_PCC_DISCONNECTED) { + count++; + } + } + return count; +} + +int get_previous_best_pce(struct pcc_state **pcc) +{ + int previous_best_pce = -1; + + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] && pcc[i]->pce_opts && pcc[i]->previous_best == true + && pcc[i]->status != PCEP_PCC_DISCONNECTED) { + previous_best_pce = i; + break; + } + } + return previous_best_pce != -1 ? pcc[previous_best_pce]->id : 0; +} + +/* Called by path_pcep_controller EV_REMOVE_PCC + * Event handler when a PCC is removed. */ +int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state *ctrl_state, + struct pcc_state **pcc) +{ + int new_best_pcc_id = -1; + new_best_pcc_id = pcep_pcc_calculate_best_pce(pcc); + if (new_best_pcc_id) { + if (update_best_pce(ctrl_state->pcc, new_best_pcc_id) == true) { + pcep_thread_start_sync(ctrl_state, new_best_pcc_id); + } + } + + return 0; +} + +/* Called by path_pcep_controller EV_SYNC_PATH + * Event handler when a path is sync'd. */ +int pcep_pcc_multi_pce_sync_path(struct ctrl_state *ctrl_state, int pcc_id, + struct pcc_state **pcc) +{ + int previous_best_pcc_id = -1; + + if (pcc_id == get_best_pce(pcc)) { + previous_best_pcc_id = get_previous_best_pce(pcc); + if (previous_best_pcc_id != 0) { + /* while adding new pce, path has to resync to the + * previous best. 
pcep_thread_start_sync() will be + * called by the calling function */ + if (update_best_pce(ctrl_state->pcc, + previous_best_pcc_id) + == true) { + cancel_comp_requests( + ctrl_state, + pcep_pcc_get_pcc_by_id( + pcc, previous_best_pcc_id)); + pcep_thread_start_sync(ctrl_state, + previous_best_pcc_id); + } + } + } + + return 0; +} + +/* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE + * timer expires */ +int pcep_pcc_timer_update_best_pce(struct ctrl_state *ctrl_state, int pcc_id) +{ + int ret = 0; + /* resync whatever was the new best */ + int prev_best = get_best_pce(ctrl_state->pcc); + int best_id = pcep_pcc_calculate_best_pce(ctrl_state->pcc); + if (best_id && prev_best != best_id) { // Avoid Multiple call + struct pcc_state *pcc_state = + pcep_pcc_get_pcc_by_id(ctrl_state->pcc, best_id); + if (update_best_pce(ctrl_state->pcc, pcc_state->id) == true) { + pcep_thread_start_sync(ctrl_state, pcc_state->id); + } + } + + return ret; +} + +/* Called by path_pcep_controller::pcep_thread_event_update_pce_options() + * Returns the best PCE id */ +int pcep_pcc_calculate_best_pce(struct pcc_state **pcc) +{ + int best_precedence = 255; // DEFAULT_PCE_PRECEDENCE; + int best_pce = -1; + int one_connected_pce = -1; + int previous_best_pce = -1; + int step_0_best = -1; + int step_0_previous = -1; + int pcc_count = 0; + + // Get state + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] && pcc[i]->pce_opts) { + zlog_debug( + "multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ", + i, pcc[i]->is_best, pcc[i]->previous_best); + pcc_count++; + + if (pcc[i]->is_best == true) { + step_0_best = i; + } + if (pcc[i]->previous_best == true) { + step_0_previous = i; + } + } + } + + if (!pcc_count) { + return 0; + } + + // Calculate best + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] && pcc[i]->pce_opts + && pcc[i]->status != PCEP_PCC_DISCONNECTED) { + one_connected_pce = i; // In case none better + if (pcc[i]->pce_opts->precedence <= best_precedence) { + if (best_pce != -1 + && pcc[best_pce]->pce_opts->precedence + == pcc[i]->pce_opts + ->precedence) { + if (ipaddr_cmp( + &pcc[i]->pce_opts->addr, + &pcc[best_pce] + ->pce_opts->addr) + > 0) + // collide of precedences so + // compare ip + best_pce = i; + } else { + if (!pcc[i]->previous_best) { + best_precedence = + pcc[i]->pce_opts + ->precedence; + best_pce = i; + } + } + } + } + } + + zlog_debug( + "multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ", + step_0_best, step_0_previous, one_connected_pce, best_pce); + + // Changed of state so ... 
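+	// The selection changed: clear the old flags, remember the previous
+	// best while it is disconnected, and promote the new best, falling
+	// back to any connected (or at least configured) PCE if none stood out.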
+ if (step_0_best != best_pce) { + // Calculate previous + previous_best_pce = step_0_best; + // Clean state + if (step_0_best != -1) { + pcc[step_0_best]->is_best = false; + } + if (step_0_previous != -1) { + pcc[step_0_previous]->previous_best = false; + } + + // Set previous + if (previous_best_pce != -1 + && pcc[previous_best_pce]->status + == PCEP_PCC_DISCONNECTED) { + pcc[previous_best_pce]->previous_best = true; + zlog_debug("multi-pce: previous best pce (%i) ", + previous_best_pce + 1); + } + + + // Set best + if (best_pce != -1) { + pcc[best_pce]->is_best = true; + zlog_debug("multi-pce: best pce (%i) ", best_pce + 1); + } else { + if (one_connected_pce != -1) { + best_pce = one_connected_pce; + pcc[one_connected_pce]->is_best = true; + zlog_debug( + "multi-pce: one connected best pce (default) (%i) ", + one_connected_pce + 1); + } else { + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] && pcc[i]->pce_opts) { + best_pce = i; + pcc[i]->is_best = true; + zlog_debug( + "(disconnected) best pce (default) (%i) ", + i + 1); + break; + } + } + } + } + } + + return ((best_pce == -1) ? 0 : pcc[best_pce]->id); +} + +int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state **pcc, + struct pce_opts *pce_opts) +{ + if (pcc == NULL) { + return 0; + } + + for (int idx = 0; idx < MAX_PCC; idx++) { + if (pcc[idx]) { + if ((ipaddr_cmp((const struct ipaddr *)&pcc[idx] + ->pce_opts->addr, + (const struct ipaddr *)&pce_opts->addr) + == 0) + && pcc[idx]->pce_opts->port == pce_opts->port) { + zlog_debug("found pcc_id (%d) idx (%d)", + pcc[idx]->id, idx); + return pcc[idx]->id; + } + } + } + return 0; +} + +int pcep_pcc_get_pcc_id_by_idx(struct pcc_state **pcc, int idx) +{ + if (pcc == NULL || idx < 0) { + return 0; + } + + return pcc[idx] ? pcc[idx]->id : 0; +} + +struct pcc_state *pcep_pcc_get_pcc_by_id(struct pcc_state **pcc, int id) +{ + if (pcc == NULL || id < 0) { + return NULL; + } + + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i]) { + if (pcc[i]->id == id) { + zlog_debug("found id (%d) pcc_idx (%d)", + pcc[i]->id, i); + return pcc[i]; + } + } + } + + return NULL; +} + +struct pcc_state *pcep_pcc_get_pcc_by_name(struct pcc_state **pcc, + const char *pce_name) +{ + if (pcc == NULL || pce_name == NULL) { + return NULL; + } + + for (int i = 0; i < MAX_PCC; i++) { + if (pcc[i] == NULL) { + continue; + } + + if (strcmp(pcc[i]->pce_opts->pce_name, pce_name) == 0) { + return pcc[i]; + } + } + + return NULL; +} + +int pcep_pcc_get_pcc_idx_by_id(struct pcc_state **pcc, int id) +{ + if (pcc == NULL) { + return -1; + } + + for (int idx = 0; idx < MAX_PCC; idx++) { + if (pcc[idx]) { + if (pcc[idx]->id == id) { + zlog_debug("found pcc_id (%d) array_idx (%d)", + pcc[idx]->id, idx); + return idx; + } + } + } + + return -1; +} + +int pcep_pcc_get_free_pcc_idx(struct pcc_state **pcc) +{ + assert(pcc != NULL); + + for (int idx = 0; idx < MAX_PCC; idx++) { + if (pcc[idx] == NULL) { + zlog_debug("new pcc_idx (%d)", idx); + return idx; + } + } + + return -1; +} + +int pcep_pcc_get_pcc_id(struct pcc_state *pcc) +{ + return ((pcc == NULL) ? 
0 : pcc->id); +} + +void pcep_pcc_copy_pcc_info(struct pcc_state **pcc, + struct pcep_pcc_info *pcc_info) +{ + struct pcc_state *pcc_state = + pcep_pcc_get_pcc_by_name(pcc, pcc_info->pce_name); + if (!pcc_state) { + return; + } + + pcc_info->ctrl_state = NULL; + pcc_info->msd = pcc_state->pcc_opts->msd; + pcc_info->pcc_port = pcc_state->pcc_opts->port; + pcc_info->next_plspid = pcc_state->next_plspid; + pcc_info->next_reqid = pcc_state->next_reqid; + pcc_info->status = pcc_state->status; + pcc_info->pcc_id = pcc_state->id; + pcc_info->is_best_multi_pce = pcc_state->is_best; + pcc_info->previous_best = pcc_state->previous_best; + pcc_info->precedence = + pcc_state->pce_opts ? pcc_state->pce_opts->precedence : 0; + memcpy(&pcc_info->pcc_addr, &pcc_state->pcc_addr_tr, + sizeof(struct ipaddr)); +} + + +/*------------------ PCEP Message handlers --------------------- */ + +void handle_pcep_open(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct pcep_message *msg) +{ + assert(msg->msg_header->type == PCEP_TYPE_OPEN); + pcep_lib_parse_capabilities(msg, &pcc_state->caps); + PCEP_DEBUG("PCE capabilities: %s, %s%s", + pcc_state->caps.is_stateful ? "stateful" : "stateless", + pcc_state->caps.supported_ofs_are_known + ? (pcc_state->caps.supported_ofs == 0 + ? "no objective functions supported" + : "supported objective functions are ") + : "supported objective functions are unknown", + format_objfun_set(pcc_state->caps.supported_ofs)); +} + +void handle_pcep_message(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct pcep_message *msg) +{ + if (pcc_state->status != PCEP_PCC_OPERATING) + return; + + switch (msg->msg_header->type) { + case PCEP_TYPE_INITIATE: + handle_pcep_lsp_initiate(ctrl_state, pcc_state, msg); + break; + case PCEP_TYPE_UPDATE: + handle_pcep_lsp_update(ctrl_state, pcc_state, msg); + break; + case PCEP_TYPE_PCREP: + handle_pcep_comp_reply(ctrl_state, pcc_state, msg); + break; + default: + flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE, + "Unexpected pcep message from pceplib: %s", + format_pcep_message(msg)); + break; + } +} + +void handle_pcep_lsp_update(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg) +{ + char err[MAX_ERROR_MSG_SIZE] = ""; + struct path *path; + path = pcep_lib_parse_path(msg); + lookup_nbkey(pcc_state, path); + /* TODO: Investigate if this is safe to do in the controller thread */ + path_pcep_config_lookup(path); + specialize_incoming_path(pcc_state, path); + PCEP_DEBUG("%s Received LSP update", pcc_state->tag); + PCEP_DEBUG_PATH("%s", format_path(path)); + + if (validate_incoming_path(pcc_state, path, err, sizeof(err))) + pcep_thread_update_path(ctrl_state, pcc_state->id, path); + else { + /* FIXME: Monitor the amount of errors from the PCE and + * possibly disconnect and blacklist */ + flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE, + "Unsupported PCEP protocol feature: %s", err); + pcep_free_path(path); + } +} + +void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg) +{ + PCEP_DEBUG("%s Received LSP initiate, not supported yet", + pcc_state->tag); + + /* TODO when we support both PCC and PCE initiated sessions, + * we should first check the session type before + * rejecting this message. 
*/ + send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION, + PCEP_ERRV_LSP_NOT_PCE_INITIATED); +} + +void handle_pcep_comp_reply(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + struct pcep_message *msg) +{ + char err[MAX_ERROR_MSG_SIZE] = ""; + struct req_entry *req; + struct path *path; + + path = pcep_lib_parse_path(msg); + req = pop_req(pcc_state, path->req_id); + if (req == NULL) { + /* TODO: check the rate of bad computation reply and close + * the connection if more that a given rate. + */ + PCEP_DEBUG( + "%s Received computation reply for unknown request " + "%d", + pcc_state->tag, path->req_id); + PCEP_DEBUG_PATH("%s", format_path(path)); + send_pcep_error(pcc_state, PCEP_ERRT_UNKNOWN_REQ_REF, + PCEP_ERRV_UNASSIGNED); + return; + } + + /* Cancel the computation request timeout */ + pcep_thread_cancel_timer(&req->t_retry); + + /* Transfer relevent metadata from the request to the response */ + path->nbkey = req->path->nbkey; + path->plsp_id = req->path->plsp_id; + path->type = req->path->type; + path->name = XSTRDUP(MTYPE_PCEP, req->path->name); + specialize_incoming_path(pcc_state, path); + + PCEP_DEBUG("%s Received computation reply %d (no-path: %s)", + pcc_state->tag, path->req_id, + path->no_path ? "true" : "false"); + PCEP_DEBUG_PATH("%s", format_path(path)); + + if (path->no_path) { + PCEP_DEBUG("%s Computation for path %s did not find any result", + pcc_state->tag, path->name); + } else if (validate_incoming_path(pcc_state, path, err, sizeof(err))) { + /* Updating a dynamic path will automatically delegate it */ + pcep_thread_update_path(ctrl_state, pcc_state->id, path); + free_req_entry(req); + return; + } else { + /* FIXME: Monitor the amount of errors from the PCE and + * possibly disconnect and blacklist */ + flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE, + "Unsupported PCEP protocol feature: %s", err); + } + + pcep_free_path(path); + + /* Delegate the path regardless of the outcome */ + /* TODO: For now we are using the path from the request, when + * pathd API is thread safe, we could get a new path */ + if (pcc_state->caps.is_stateful) { + PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s", + pcc_state->tag, path->name, pcc_state->originator); + path = pcep_copy_path(req->path); + path->is_delegated = true; + send_report(pcc_state, path); + pcep_free_path(path); + } + + free_req_entry(req); +} + + +/* ------------ Internal Functions ------------ */ + +const char *ipaddr_type_name(struct ipaddr *addr) +{ + if (IS_IPADDR_V4(addr)) + return "IPv4"; + if (IS_IPADDR_V6(addr)) + return "IPv6"; + return "undefined"; +} + +bool filter_path(struct pcc_state *pcc_state, struct path *path) +{ + return (IS_IPADDR_V4(&path->nbkey.endpoint) + && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) + || (IS_IPADDR_V6(&path->nbkey.endpoint) + && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)); +} + +void select_pcc_addresses(struct pcc_state *pcc_state) +{ + /* If no IPv4 address was specified, try to get one from zebra */ + if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) { + if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) { + SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4); + } + } + + /* If no IPv6 address was specified, try to get one from zebra */ + if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) { + if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) { + SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6); + } + } +} + +void select_transport_address(struct pcc_state *pcc_state) +{ + struct ipaddr *taddr = &pcc_state->pcc_addr_tr; + 
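+	/* Refresh the PCC addresses first; they may be deduced from the
+	 * router-id learned from zebra. */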
+ select_pcc_addresses(pcc_state); + + taddr->ipa_type = IPADDR_NONE; + + /* Select a transport source address in function of the configured PCE + * address */ + if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) { + if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) { + taddr->ipa_type = IPADDR_V4; + taddr->ipaddr_v4 = pcc_state->pcc_addr_v4; + } + } else { + if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) { + taddr->ipa_type = IPADDR_V6; + taddr->ipaddr_v6 = pcc_state->pcc_addr_v6; + } + } +} + +void update_tag(struct pcc_state *pcc_state) +{ + if (pcc_state->pce_opts != NULL) { + assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr)); + if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) { + snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), + "%pI6:%i (%u)", + &pcc_state->pce_opts->addr.ipaddr_v6, + pcc_state->pce_opts->port, pcc_state->id); + } else { + snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), + "%pI4:%i (%u)", + &pcc_state->pce_opts->addr.ipaddr_v4, + pcc_state->pce_opts->port, pcc_state->id); + } + } else { + snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)", + pcc_state->id); + } +} + +void update_originator(struct pcc_state *pcc_state) +{ + char *originator; + if (pcc_state->originator != NULL) { + XFREE(MTYPE_PCEP, pcc_state->originator); + pcc_state->originator = NULL; + } + if (pcc_state->pce_opts == NULL) + return; + originator = XCALLOC(MTYPE_PCEP, 52); + assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr)); + if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) { + snprintfrr(originator, 52, "%pI6:%i", + &pcc_state->pce_opts->addr.ipaddr_v6, + pcc_state->pce_opts->port); + } else { + snprintfrr(originator, 52, "%pI4:%i", + &pcc_state->pce_opts->addr.ipaddr_v4, + pcc_state->pce_opts->port); + } + pcc_state->originator = originator; +} + +void schedule_reconnect(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + pcc_state->retry_count++; + pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id, + pcc_state->retry_count, + &pcc_state->t_reconnect); + if (pcc_state->retry_count == 1) { + pcep_thread_schedule_sync_best_pce( + ctrl_state, pcc_state->id, + pcc_state->pce_opts->config_opts + .delegation_timeout_seconds, + &pcc_state->t_update_best); + } +} + +void schedule_session_timeout(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + /* No need to schedule timeout if multiple PCEs are connected */ + if (get_pce_count_connected(ctrl_state->pcc)) { + PCEP_DEBUG_PCEP( + "schedule_session_timeout not setting timer for multi-pce mode"); + + return; + } + + pcep_thread_schedule_session_timeout( + ctrl_state, pcep_pcc_get_pcc_id(pcc_state), + pcc_state->pce_opts->config_opts + .session_timeout_inteval_seconds, + &pcc_state->t_session_timeout); +} + +void cancel_session_timeout(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + /* No need to schedule timeout if multiple PCEs are connected */ + if (pcc_state->t_session_timeout == NULL) { + PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL"); + return; + } + + PCEP_DEBUG_PCEP("Cancel session_timeout timer"); + pcep_thread_cancel_timer(&pcc_state->t_session_timeout); + pcc_state->t_session_timeout = NULL; +} + +void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg) +{ + if (pcc_state->sess != NULL) { + PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag, + format_pcep_message(msg)); + send_message(pcc_state->sess, msg, true); + } +} + +void send_pcep_error(struct pcc_state *pcc_state, + enum pcep_error_type error_type, + enum 
pcep_error_value error_value) +{ + struct pcep_message *msg; + PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)", + pcc_state->tag, pcep_error_type_name(error_type), error_type, + pcep_error_value_name(error_type, error_value), error_value); + msg = pcep_lib_format_error(error_type, error_value); + send_pcep_message(pcc_state, msg); +} + +void send_report(struct pcc_state *pcc_state, struct path *path) +{ + struct pcep_message *report; + + path->req_id = 0; + specialize_outgoing_path(pcc_state, path); + PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name, + format_path(path)); + report = pcep_lib_format_report(&pcc_state->caps, path); + send_pcep_message(pcc_state, report); +} + +/* Updates the path for the PCE, updating the delegation and creation flags */ +void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path) +{ + bool is_delegated = false; + bool was_created = false; + + lookup_plspid(pcc_state, path); + + set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr); + path->sender = pcc_state->pcc_addr_tr; + + /* TODO: When the pathd API have a way to mark a path as + * delegated, use it instead of considering all dynamic path + * delegated. We need to disable the originator check for now, + * because path could be delegated without having any originator yet */ + // if ((path->originator == NULL) + // || (strcmp(path->originator, pcc_state->originator) == 0)) { + // is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC) + // && (path->first_hop != NULL); + // /* it seems the PCE consider updating an LSP a creation ?!? + // at least Cisco does... */ + // was_created = path->update_origin == SRTE_ORIGIN_PCEP; + // } + is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC); + was_created = path->update_origin == SRTE_ORIGIN_PCEP; + + path->pcc_id = pcc_state->id; + path->go_active = is_delegated && pcc_state->is_best; + path->is_delegated = is_delegated && pcc_state->is_best; + path->was_created = was_created; +} + +/* Updates the path for the PCC */ +void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path) +{ + set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr); + path->sender = pcc_state->pce_opts->addr; + path->pcc_id = pcc_state->id; + path->update_origin = SRTE_ORIGIN_PCEP; + path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator); +} + +/* Ensure the path can be handled by the PCC and if not, sends an error */ +bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path, + char *errbuff, size_t buffsize) +{ + struct path_hop *hop; + enum pcep_error_type err_type = 0; + enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED; + + for (hop = path->first_hop; hop != NULL; hop = hop->next) { + /* Hops without SID are not supported */ + if (!hop->has_sid) { + snprintfrr(errbuff, buffsize, "SR segment without SID"); + err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT; + err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING; + break; + } + /* Hops with non-MPLS SID are not supported */ + if (!hop->is_mpls) { + snprintfrr(errbuff, buffsize, + "SR segment with non-MPLS SID"); + err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT; + err_value = PCEP_ERRV_UNSUPPORTED_NAI; + break; + } + } + + if (err_type != 0) { + send_pcep_error(pcc_state, err_type, err_value); + return false; + } + + return true; +} + +void send_comp_request(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct req_entry *req) +{ + assert(req != NULL); + + if (req->t_retry) + return; + + assert(req->path != NULL); + 
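/* Sanity checks: the request must already carry a valid request id and
+ * be registered in both the pending-request tree and the request-id
+ * mapping before it is sent. */
+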
assert(req->path->req_id > 0); + assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req); + assert(lookup_reqid(pcc_state, req->path) == req->path->req_id); + + int timeout; + char buff[40]; + struct pcep_message *msg; + + if (!pcc_state->is_best) { + return; + } + /* TODO: Add a timer to retry the computation request ? */ + + specialize_outgoing_path(pcc_state, req->path); + + PCEP_DEBUG( + "%s Sending computation request %d for path %s to %s (retry %d)", + pcc_state->tag, req->path->req_id, req->path->name, + ipaddr2str(&req->path->nbkey.endpoint, buff, sizeof(buff)), + req->retry_count); + PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag, + req->path->name, format_path(req->path)); + + msg = pcep_lib_format_request(&pcc_state->caps, req->path); + send_pcep_message(pcc_state, msg); + req->was_sent = true; + + /* TODO: Enable this back when the pcep config changes are merged back + */ + // timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds; + timeout = 30; + pcep_thread_schedule_timeout(ctrl_state, pcc_state->id, + TO_COMPUTATION_REQUEST, timeout, + (void *)req, &req->t_retry); +} + +void cancel_comp_requests(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state) +{ + struct req_entry *req, *safe_req; + + RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) { + cancel_comp_request(ctrl_state, pcc_state, req); + RB_REMOVE(req_entry_head, &pcc_state->requests, req); + remove_reqid_mapping(pcc_state, req->path); + free_req_entry(req); + } +} + +void cancel_comp_request(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct req_entry *req) +{ + char buff[40]; + struct pcep_message *msg; + + if (req->was_sent) { + /* TODO: Send a computation request cancelation + * notification to the PCE */ + pcep_thread_cancel_timer(&req->t_retry); + } + + PCEP_DEBUG( + "%s Canceling computation request %d for path %s to %s (retry %d)", + pcc_state->tag, req->path->req_id, req->path->name, + ipaddr2str(&req->path->nbkey.endpoint, buff, sizeof(buff)), + req->retry_count); + PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s", + pcc_state->tag, req->path->name, + format_path(req->path)); + + msg = pcep_lib_format_request_cancelled(req->path->req_id); + send_pcep_message(pcc_state, msg); +} + +void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey, + struct ipaddr *addr) +{ + select_pcc_addresses(pcc_state); + if (IS_IPADDR_V6(&nbkey->endpoint)) { + assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)); + addr->ipa_type = IPADDR_V6; + addr->ipaddr_v6 = pcc_state->pcc_addr_v6; + } else if (IS_IPADDR_V4(&nbkey->endpoint)) { + assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)); + addr->ipa_type = IPADDR_V4; + addr->ipaddr_v4 = pcc_state->pcc_addr_v4; + } else { + addr->ipa_type = IPADDR_NONE; + } +} + + +/* ------------ Data Structure Helper Functions ------------ */ + +void lookup_plspid(struct pcc_state *pcc_state, struct path *path) +{ + struct plspid_map_data key, *plspid_mapping; + struct nbkey_map_data *nbkey_mapping; + + if (path->nbkey.color != 0) { + key.nbkey = path->nbkey; + plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key); + if (plspid_mapping == NULL) { + plspid_mapping = + XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping)); + plspid_mapping->nbkey = key.nbkey; + plspid_mapping->plspid = pcc_state->next_plspid; + plspid_map_add(&pcc_state->plspid_map, plspid_mapping); + nbkey_mapping = + XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping)); + nbkey_mapping->nbkey = key.nbkey; 
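+ /* The same mapping is also stored keyed by PLSP-ID, so that
+ * lookup_nbkey() can later resolve the NB key from the PLSP-ID. */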
+ nbkey_mapping->plspid = pcc_state->next_plspid; + nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping); + pcc_state->next_plspid++; + // FIXME: Send some error to the PCE isntead of crashing + assert(pcc_state->next_plspid <= 1048576); + } + path->plsp_id = plspid_mapping->plspid; + } +} + +void lookup_nbkey(struct pcc_state *pcc_state, struct path *path) +{ + struct nbkey_map_data key, *mapping; + // TODO: Should give an error to the PCE instead of crashing + assert(path->plsp_id != 0); + key.plspid = path->plsp_id; + mapping = nbkey_map_find(&pcc_state->nbkey_map, &key); + assert(mapping != NULL); + path->nbkey = mapping->nbkey; +} + +void free_req_entry(struct req_entry *req) +{ + pcep_free_path(req->path); + XFREE(MTYPE_PCEP, req); +} + +struct req_entry *push_new_req(struct pcc_state *pcc_state, struct path *path) +{ + struct req_entry *req; + + req = XCALLOC(MTYPE_PCEP, sizeof(*req)); + req->retry_count = 0; + req->path = pcep_copy_path(path); + repush_req(pcc_state, req); + + return req; +} + +void repush_req(struct pcc_state *pcc_state, struct req_entry *req) +{ + uint32_t reqid = pcc_state->next_reqid; + void *res; + + req->was_sent = false; + req->path->req_id = reqid; + res = RB_INSERT(req_entry_head, &pcc_state->requests, req); + assert(res == NULL); + assert(add_reqid_mapping(pcc_state, req->path) == true); + + pcc_state->next_reqid += 1; + /* Wrapping is allowed, but 0 is not a valid id */ + if (pcc_state->next_reqid == 0) + pcc_state->next_reqid = 1; +} + +struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid) +{ + struct path path = {.req_id = reqid}; + struct req_entry key = {.path = &path}; + struct req_entry *req; + + req = RB_FIND(req_entry_head, &pcc_state->requests, &key); + if (req == NULL) + return NULL; + RB_REMOVE(req_entry_head, &pcc_state->requests, req); + remove_reqid_mapping(pcc_state, req->path); + + return req; +} + +bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path) +{ + struct req_map_data *mapping; + mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping)); + mapping->nbkey = path->nbkey; + mapping->reqid = path->req_id; + if (req_map_add(&pcc_state->req_map, mapping) != NULL) { + XFREE(MTYPE_PCEP, mapping); + return false; + } + return true; +} + +void remove_reqid_mapping(struct pcc_state *pcc_state, struct path *path) +{ + struct req_map_data key, *mapping; + key.nbkey = path->nbkey; + mapping = req_map_find(&pcc_state->req_map, &key); + if (mapping != NULL) { + req_map_del(&pcc_state->req_map, mapping); + XFREE(MTYPE_PCEP, mapping); + } +} + +uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path) +{ + struct req_map_data key, *mapping; + key.nbkey = path->nbkey; + mapping = req_map_find(&pcc_state->req_map, &key); + if (mapping != NULL) + return mapping->reqid; + return 0; +} + +bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path) +{ + return lookup_reqid(pcc_state, path) != 0; +} + + +/* ------------ Data Structure Callbacks ------------ */ + +#define CMP_RETURN(A, B) \ + if (A != B) \ + return (A < B) ? 
-1 : 1 + +static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey) +{ + uint32_t hash; + hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a); + switch (nbkey->endpoint.ipa_type) { + case IPADDR_V4: + return jhash(&nbkey->endpoint.ipaddr_v4, + sizeof(nbkey->endpoint.ipaddr_v4), hash); + case IPADDR_V6: + return jhash(&nbkey->endpoint.ipaddr_v6, + sizeof(nbkey->endpoint.ipaddr_v6), hash); + default: + return hash; + } +} + +static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b) +{ + CMP_RETURN(a->color, b->color); + int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint); + if (cmp != 0) + return cmp; + CMP_RETURN(a->preference, b->preference); + return 0; +} + +int plspid_map_cmp(const struct plspid_map_data *a, + const struct plspid_map_data *b) +{ + return cmp_nbkey(&a->nbkey, &b->nbkey); +} + +uint32_t plspid_map_hash(const struct plspid_map_data *e) +{ + return hash_nbkey(&e->nbkey); +} + +int nbkey_map_cmp(const struct nbkey_map_data *a, + const struct nbkey_map_data *b) +{ + CMP_RETURN(a->plspid, b->plspid); + return 0; +} + +uint32_t nbkey_map_hash(const struct nbkey_map_data *e) +{ + return e->plspid; +} + +int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b) +{ + return cmp_nbkey(&a->nbkey, &b->nbkey); +} + +uint32_t req_map_hash(const struct req_map_data *e) +{ + return hash_nbkey(&e->nbkey); +} diff --git a/pathd/path_pcep_pcc.h b/pathd/path_pcep_pcc.h new file mode 100644 index 0000000000..a466d92d50 --- /dev/null +++ b/pathd/path_pcep_pcc.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _PATH_PCEP_PCC_H_ +#define _PATH_PCEP_PCC_H_ + +#include "typesafe.h" +#include "pathd/path_pcep.h" + +enum pcc_status { + PCEP_PCC_INITIALIZED = 0, + PCEP_PCC_DISCONNECTED, + PCEP_PCC_CONNECTING, + PCEP_PCC_SYNCHRONIZING, + PCEP_PCC_OPERATING +}; + +PREDECL_HASH(plspid_map) +PREDECL_HASH(nbkey_map) +PREDECL_HASH(req_map) + +struct plspid_map_data { + struct plspid_map_item mi; + struct lsp_nb_key nbkey; + uint32_t plspid; +}; + +struct nbkey_map_data { + struct nbkey_map_item mi; + struct lsp_nb_key nbkey; + uint32_t plspid; +}; + +struct req_map_data { + struct req_map_item mi; + struct lsp_nb_key nbkey; + uint32_t reqid; +}; + +struct req_entry { + RB_ENTRY(req_entry) entry; + struct thread *t_retry; + int retry_count; + bool was_sent; + struct path *path; +}; +RB_HEAD(req_entry_head, req_entry); +RB_PROTOTYPE(req_entry_head, req_entry, entry, req_entry_compare); + +struct pcc_state { + int id; + char tag[MAX_TAG_SIZE]; + enum pcc_status status; + uint16_t flags; +#define F_PCC_STATE_HAS_IPV4 0x0002 +#define F_PCC_STATE_HAS_IPV6 0x0004 + struct pcc_opts *pcc_opts; + struct pce_opts *pce_opts; + struct in_addr pcc_addr_v4; + struct in6_addr pcc_addr_v6; + /* PCC transport source address */ + struct ipaddr pcc_addr_tr; + char *originator; + pcep_session *sess; + uint32_t retry_count; + bool synchronized; + struct thread *t_reconnect; + struct thread *t_update_best; + struct thread *t_session_timeout; + uint32_t next_reqid; + uint32_t next_plspid; + struct plspid_map_head plspid_map; + struct nbkey_map_head nbkey_map; + struct req_map_head req_map; + struct req_entry_head requests; + struct pcep_caps caps; + bool is_best; + bool previous_best; +}; + +struct pcc_state *pcep_pcc_initialize(struct ctrl_state *ctrl_state, + int pcc_id); +void pcep_pcc_finalize(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state); +int pcep_pcc_disable(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +int pcep_pcc_update(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state, + struct pcc_opts *pcc_opts, struct pce_opts *pce_opts); +void pcep_pcc_reconnect(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +void pcep_pcc_pcep_event_handler(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + pcep_event *event); +void pcep_pcc_pathd_event_handler(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + enum pcep_pathd_event_type type, + struct path *path); +void pcep_pcc_timeout_handler(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, + enum pcep_ctrl_timer_type type, void *param); +void pcep_pcc_sync_path(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct path *path); +void pcep_pcc_sync_done(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state); +void pcep_pcc_send_report(struct ctrl_state *ctrl_state, + struct pcc_state *pcc_state, struct path *path); +int pcep_pcc_multi_pce_sync_path(struct ctrl_state *ctrl_state, int pcc_id, + struct pcc_state **pcc_state_list); +int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state *ctrl_state, + struct pcc_state **pcc_state_list); +int pcep_pcc_timer_update_best_pce(struct ctrl_state *ctrl_state, int pcc_id); +int pcep_pcc_calculate_best_pce(struct 
pcc_state **pcc); +int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state **pcc, + struct pce_opts *pce_opts); +int pcep_pcc_get_pcc_id_by_idx(struct pcc_state **pcc, int idx); +struct pcc_state *pcep_pcc_get_pcc_by_id(struct pcc_state **pcc, int id); +struct pcc_state *pcep_pcc_get_pcc_by_name(struct pcc_state **pcc, + const char *pce_name); +int pcep_pcc_get_pcc_idx_by_id(struct pcc_state **pcc, int id); +int pcep_pcc_get_free_pcc_idx(struct pcc_state **pcc); +int pcep_pcc_get_pcc_id(struct pcc_state *pcc); +void pcep_pcc_copy_pcc_info(struct pcc_state **pcc, + struct pcep_pcc_info *pcc_info); + +#endif // _PATH_PCEP_PCC_H_ diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c new file mode 100644 index 0000000000..276bc9289c --- /dev/null +++ b/pathd/path_zebra.c @@ -0,0 +1,294 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "thread.h" +#include "log.h" +#include "lib_errors.h" +#include "if.h" +#include "prefix.h" +#include "zclient.h" +#include "network.h" +#include "stream.h" +#include "linklist.h" +#include "nexthop.h" +#include "vrf.h" +#include "typesafe.h" + +#include "pathd/pathd.h" +#include "pathd/path_zebra.h" + +static struct zclient *zclient; +static struct zclient *zclient_sync; + +/* Global Variables */ +bool g_has_router_id_v4 = false; +bool g_has_router_id_v6 = false; +struct in_addr g_router_id_v4; +struct in6_addr g_router_id_v6; +pthread_mutex_t g_router_id_v4_mtx = PTHREAD_MUTEX_INITIALIZER; +pthread_mutex_t g_router_id_v6_mtx = PTHREAD_MUTEX_INITIALIZER; + +/** + * Gives the IPv4 router ID received from Zebra. + * + * @param router_id The in_addr strucure where to store the router id + * @return true if the router ID was available, false otherwise + */ +bool get_ipv4_router_id(struct in_addr *router_id) +{ + bool retval = false; + assert(router_id != NULL); + pthread_mutex_lock(&g_router_id_v4_mtx); + if (g_has_router_id_v4) { + memcpy(router_id, &g_router_id_v4, sizeof(*router_id)); + retval = true; + } + pthread_mutex_unlock(&g_router_id_v4_mtx); + return retval; +} + +/** + * Gives the IPv6 router ID received from Zebra. 
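+ * The value is copied under g_router_id_v6_mtx, so it is safe to call
+ * from any pthread. A minimal (hypothetical) caller could look like:
+ *
+ *     struct in6_addr rid;
+ *     if (get_ipv6_router_id(&rid))
+ *             use_ipv6_router_id(&rid);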
+ * + * @param router_id The in6_addr strucure where to store the router id + * @return true if the router ID was available, false otherwise + */ +bool get_ipv6_router_id(struct in6_addr *router_id) +{ + bool retval = false; + assert(router_id != NULL); + pthread_mutex_lock(&g_router_id_v6_mtx); + if (g_has_router_id_v6) { + memcpy(router_id, &g_router_id_v6, sizeof(*router_id)); + retval = true; + } + pthread_mutex_unlock(&g_router_id_v6_mtx); + return retval; +} + +static void path_zebra_connected(struct zclient *zclient) +{ + struct srte_policy *policy; + + zclient_send_reg_requests(zclient, VRF_DEFAULT); + zclient_send_router_id_update(zclient, ZEBRA_ROUTER_ID_ADD, AFI_IP6, + VRF_DEFAULT); + + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + struct srte_candidate *candidate; + struct srte_segment_list *segment_list; + + candidate = policy->best_candidate; + if (!candidate) + continue; + + segment_list = candidate->lsp->segment_list; + if (!segment_list) + continue; + + path_zebra_add_sr_policy(policy, segment_list); + } +} + +static int path_zebra_sr_policy_notify_status(ZAPI_CALLBACK_ARGS) +{ + struct zapi_sr_policy zapi_sr_policy; + struct srte_policy *policy; + struct srte_candidate *best_candidate_path; + + if (zapi_sr_policy_notify_status_decode(zclient->ibuf, &zapi_sr_policy)) + return -1; + + policy = srte_policy_find(zapi_sr_policy.color, + &zapi_sr_policy.endpoint); + if (!policy) + return -1; + + best_candidate_path = policy->best_candidate; + if (!best_candidate_path) + return -1; + + srte_candidate_status_update(best_candidate_path, + zapi_sr_policy.status); + + return 0; +} + +/* Router-id update message from zebra. */ +static int path_zebra_router_id_update(ZAPI_CALLBACK_ARGS) +{ + struct prefix pref; + const char *family; + char buf[PREFIX2STR_BUFFER]; + zebra_router_id_update_read(zclient->ibuf, &pref); + if (pref.family == AF_INET) { + pthread_mutex_lock(&g_router_id_v4_mtx); + memcpy(&g_router_id_v4, &pref.u.prefix4, + sizeof(g_router_id_v4)); + g_has_router_id_v4 = true; + inet_ntop(AF_INET, &g_router_id_v4, buf, sizeof(buf)); + pthread_mutex_unlock(&g_router_id_v4_mtx); + family = "IPv4"; + } else if (pref.family == AF_INET6) { + pthread_mutex_lock(&g_router_id_v6_mtx); + memcpy(&g_router_id_v6, &pref.u.prefix6, + sizeof(g_router_id_v6)); + g_has_router_id_v6 = true; + inet_ntop(AF_INET6, &g_router_id_v6, buf, sizeof(buf)); + pthread_mutex_unlock(&g_router_id_v6_mtx); + family = "IPv6"; + } else { + zlog_warn("Unexpected router ID address family for vrf %u: %u", + vrf_id, pref.family); + return 0; + } + zlog_info("%s Router Id updated for VRF %u: %s", family, vrf_id, buf); + return 0; +} + +/** + * Adds a segment routing policy to Zebra. + * + * @param policy The policy to add + * @param segment_list The segment list for the policy + */ +void path_zebra_add_sr_policy(struct srte_policy *policy, + struct srte_segment_list *segment_list) +{ + struct zapi_sr_policy zp = {}; + struct srte_segment_entry *segment; + + zp.color = policy->color; + zp.endpoint = policy->endpoint; + strlcpy(zp.name, policy->name, sizeof(zp.name)); + zp.segment_list.type = ZEBRA_LSP_SRTE; + zp.segment_list.local_label = policy->binding_sid; + zp.segment_list.label_num = 0; + RB_FOREACH (segment, srte_segment_entry_head, &segment_list->segments) + zp.segment_list.labels[zp.segment_list.label_num++] = + segment->sid_value; + policy->status = SRTE_POLICY_STATUS_GOING_UP; + + (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_SET, &zp); +} + +/** + * Deletes a segment policy from Zebra. 
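+ * The policy status is set to SRTE_POLICY_STATUS_DOWN before the delete
+ * request is sent to zebra.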
+ *
+ * @param policy The policy to remove
+ */
+void path_zebra_delete_sr_policy(struct srte_policy *policy)
+{
+ struct zapi_sr_policy zp = {};
+
+ zp.color = policy->color;
+ zp.endpoint = policy->endpoint;
+ strlcpy(zp.name, policy->name, sizeof(zp.name));
+ zp.segment_list.type = ZEBRA_LSP_SRTE;
+ zp.segment_list.local_label = policy->binding_sid;
+ zp.segment_list.label_num = 0;
+ policy->status = SRTE_POLICY_STATUS_DOWN;
+
+ (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_DELETE, &zp);
+}
+
+/**
+ * Allocates a label from Zebra's label manager.
+ *
+ * @param label the label to be allocated
+ * @return 0 if the label has been allocated, -1 otherwise
+ */
+int path_zebra_request_label(mpls_label_t label)
+{
+ int ret;
+ uint32_t start, end;
+
+ ret = lm_get_label_chunk(zclient_sync, 0, label, 1, &start, &end);
+ if (ret < 0) {
+ zlog_warn("%s: error getting label range!", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Releases a previously allocated label from Zebra's label manager.
+ *
+ * @param label The label to release
+ */
+void path_zebra_release_label(mpls_label_t label)
+{
+ int ret;
+
+ ret = lm_release_label_chunk(zclient_sync, label, label);
+ if (ret < 0)
+ zlog_warn("%s: error releasing label range!", __func__);
+}
+
+static void path_zebra_label_manager_connect(void)
+{
+ /* Connect to label manager. */
+ while (zclient_socket_connect(zclient_sync) < 0) {
+ zlog_warn("%s: error connecting synchronous zclient!",
+ __func__);
+ sleep(1);
+ }
+ set_nonblocking(zclient_sync->sock);
+
+ /* Send hello to notify zebra this is a synchronous client */
+ while (zclient_send_hello(zclient_sync) < 0) {
+ zlog_warn("%s: Error sending hello for synchronous zclient!",
+ __func__);
+ sleep(1);
+ }
+
+ while (lm_label_manager_connect(zclient_sync, 0) != 0) {
+ zlog_warn("%s: error connecting to label manager!", __func__);
+ sleep(1);
+ }
+}
+
+/**
+ * Initializes the Zebra connections (asynchronous and synchronous).
+ *
+ * @param master The master thread
+ */
+void path_zebra_init(struct thread_master *master)
+{
+ struct zclient_options options = zclient_options_default;
+ options.synchronous = true;
+
+ /* Initialize asynchronous zclient. */
+ zclient = zclient_new(master, &zclient_options_default);
+ zclient_init(zclient, ZEBRA_ROUTE_SRTE, 0, &pathd_privs);
+ zclient->zebra_connected = path_zebra_connected;
+ zclient->sr_policy_notify_status = path_zebra_sr_policy_notify_status;
+ zclient->router_id_update = path_zebra_router_id_update;
+
+ /* Initialize special zclient for synchronous message exchanges. */
+ zclient_sync = zclient_new(master, &options);
+ zclient_sync->sock = -1;
+ zclient_sync->redist_default = ZEBRA_ROUTE_SRTE;
+ zclient_sync->instance = 1;
+ zclient_sync->privs = &pathd_privs;
+
+ /* Connect to the LM. */
+ path_zebra_label_manager_connect();
+}
diff --git a/pathd/path_zebra.h b/pathd/path_zebra.h
new file mode 100644
index 0000000000..42a7123dd4
--- /dev/null
+++ b/pathd/path_zebra.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2020 NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _FRR_PATH_MPLS_H_ +#define _FRR_PATH_MPLS_H_ + +#include <zebra.h> +#include "pathd/pathd.h" + +bool get_ipv4_router_id(struct in_addr *router_id); +bool get_ipv6_router_id(struct in6_addr *router_id); +void path_zebra_add_sr_policy(struct srte_policy *policy, + struct srte_segment_list *segment_list); +void path_zebra_delete_sr_policy(struct srte_policy *policy); +int path_zebra_request_label(mpls_label_t label); +void path_zebra_release_label(mpls_label_t label); +void path_zebra_init(struct thread_master *master); + +#endif /* _FRR_PATH_MPLS_H_ */ diff --git a/pathd/pathd.c b/pathd/pathd.c new file mode 100644 index 0000000000..e2c7c95728 --- /dev/null +++ b/pathd/pathd.c @@ -0,0 +1,1128 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> + +#include "memory.h" +#include "log.h" +#include "lib_errors.h" +#include "network.h" + +#include "pathd/pathd.h" +#include "pathd/path_memory.h" +#include "pathd/path_zebra.h" +#include "pathd/path_debug.h" + +#define HOOK_DELAY 3 + +DEFINE_MTYPE_STATIC(PATHD, PATH_SEGMENT_LIST, "Segment List") +DEFINE_MTYPE_STATIC(PATHD, PATH_SR_POLICY, "SR Policy") +DEFINE_MTYPE_STATIC(PATHD, PATH_SR_CANDIDATE, "SR Policy candidate path") + +DEFINE_HOOK(pathd_candidate_created, (struct srte_candidate * candidate), + (candidate)) +DEFINE_HOOK(pathd_candidate_updated, (struct srte_candidate * candidate), + (candidate)) +DEFINE_HOOK(pathd_candidate_removed, (struct srte_candidate * candidate), + (candidate)) + +static void trigger_pathd_candidate_created(struct srte_candidate *candidate); +static int trigger_pathd_candidate_created_timer(struct thread *thread); +static void trigger_pathd_candidate_updated(struct srte_candidate *candidate); +static int trigger_pathd_candidate_updated_timer(struct thread *thread); +static void trigger_pathd_candidate_removed(struct srte_candidate *candidate); +static const char * +srte_candidate_metric_name(enum srte_candidate_metric_type type); + +static void srte_set_metric(struct srte_metric *metric, float value, + bool required, bool is_bound, bool is_computed); +static void srte_unset_metric(struct srte_metric *metric); + + +/* Generate rb-tree of Segment List Segment instances. 
*/ +static inline int srte_segment_entry_compare(const struct srte_segment_entry *a, + const struct srte_segment_entry *b) +{ + return a->index - b->index; +} +RB_GENERATE(srte_segment_entry_head, srte_segment_entry, entry, + srte_segment_entry_compare) + +/* Generate rb-tree of Segment List instances. */ +static inline int srte_segment_list_compare(const struct srte_segment_list *a, + const struct srte_segment_list *b) +{ + return strcmp(a->name, b->name); +} +RB_GENERATE(srte_segment_list_head, srte_segment_list, entry, + srte_segment_list_compare) + +struct srte_segment_list_head srte_segment_lists = + RB_INITIALIZER(&srte_segment_lists); + +/* Generate rb-tree of Candidate Path instances. */ +static inline int srte_candidate_compare(const struct srte_candidate *a, + const struct srte_candidate *b) +{ + return a->preference - b->preference; +} +RB_GENERATE(srte_candidate_head, srte_candidate, entry, srte_candidate_compare) + +/* Generate rb-tree of SR Policy instances. */ +static inline int srte_policy_compare(const struct srte_policy *a, + const struct srte_policy *b) +{ + return sr_policy_compare(&a->endpoint, &b->endpoint, a->color, + b->color); +} +RB_GENERATE(srte_policy_head, srte_policy, entry, srte_policy_compare) + +struct srte_policy_head srte_policies = RB_INITIALIZER(&srte_policies); + +/** + * Adds a segment list to pathd. + * + * @param name The name of the segment list to add + * @return The added segment list + */ +struct srte_segment_list *srte_segment_list_add(const char *name) +{ + struct srte_segment_list *segment_list; + + segment_list = XCALLOC(MTYPE_PATH_SEGMENT_LIST, sizeof(*segment_list)); + strlcpy(segment_list->name, name, sizeof(segment_list->name)); + RB_INIT(srte_segment_entry_head, &segment_list->segments); + RB_INSERT(srte_segment_list_head, &srte_segment_lists, segment_list); + + return segment_list; +} + +/** + * Deletes a segment list from pathd. + * + * The given segment list structure will be freed and should not be used anymore + * after calling this function. + * + * @param segment_list the segment list to remove from pathd. + */ +void srte_segment_list_del(struct srte_segment_list *segment_list) +{ + struct srte_segment_entry *segment, *safe_seg; + RB_FOREACH_SAFE (segment, srte_segment_entry_head, + &segment_list->segments, safe_seg) { + srte_segment_entry_del(segment); + } + RB_REMOVE(srte_segment_list_head, &srte_segment_lists, segment_list); + XFREE(MTYPE_PATH_SEGMENT_LIST, segment_list); +} + +/** + * Search for a segment list by name. + * + * @param name The name of the segment list to look for + * @return The segment list if found, NULL otherwise + */ +struct srte_segment_list *srte_segment_list_find(const char *name) +{ + struct srte_segment_list search; + + strlcpy(search.name, name, sizeof(search.name)); + return RB_FIND(srte_segment_list_head, &srte_segment_lists, &search); +} + +/** + * Adds a segment to a segment list. + * + * @param segment_list The segment list the segment should be added to + * @param index The index of the added segment in the segment list + * @return The added segment + */ +struct srte_segment_entry * +srte_segment_entry_add(struct srte_segment_list *segment_list, uint32_t index) +{ + struct srte_segment_entry *segment; + + segment = XCALLOC(MTYPE_PATH_SEGMENT_LIST, sizeof(*segment)); + segment->segment_list = segment_list; + segment->index = index; + RB_INSERT(srte_segment_entry_head, &segment_list->segments, segment); + + return segment; +} + +/** + * Deletes a segment from a segment list. 
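+ * The entry is removed from its parent segment list and freed.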
+ *
+ * @param segment The segment to be removed
+ */
+void srte_segment_entry_del(struct srte_segment_entry *segment)
+{
+ RB_REMOVE(srte_segment_entry_head, &segment->segment_list->segments,
+ segment);
+ XFREE(MTYPE_PATH_SEGMENT_LIST, segment);
+}
+
+/**
+ * Set the node or adjacency identifier of a segment.
+ *
+ * @param segment The segment for which the NAI should be set
+ * @param type The type of the NAI
+ * @param local_ip The address of the node or the local address of the adjacency
+ * @param local_iface The local interface index of the unnumbered adjacency
+ * @param remote_ip The remote address of the adjacency
+ * @param remote_iface The remote interface index of the unnumbered adjacency
+ */
+void srte_segment_entry_set_nai(struct srte_segment_entry *segment,
+ enum srte_segment_nai_type type,
+ struct ipaddr *local_ip, uint32_t local_iface,
+ struct ipaddr *remote_ip, uint32_t remote_iface)
+{
+ segment->nai_type = type;
+ memcpy(&segment->nai_local_addr, local_ip, sizeof(struct ipaddr));
+
+ switch (type) {
+ case SRTE_SEGMENT_NAI_TYPE_IPV4_NODE:
+ case SRTE_SEGMENT_NAI_TYPE_IPV6_NODE:
+ break;
+ case SRTE_SEGMENT_NAI_TYPE_IPV4_ADJACENCY:
+ case SRTE_SEGMENT_NAI_TYPE_IPV6_ADJACENCY:
+ memcpy(&segment->nai_remote_addr, remote_ip,
+ sizeof(struct ipaddr));
+ break;
+ case SRTE_SEGMENT_NAI_TYPE_IPV4_UNNUMBERED_ADJACENCY:
+ memcpy(&segment->nai_remote_addr, remote_ip,
+ sizeof(struct ipaddr));
+ segment->nai_local_iface = local_iface;
+ segment->nai_remote_iface = remote_iface;
+ break;
+ default:
+ segment->nai_local_addr.ipa_type = IPADDR_NONE;
+ segment->nai_local_iface = 0;
+ segment->nai_remote_addr.ipa_type = IPADDR_NONE;
+ segment->nai_remote_iface = 0;
+ }
+}
+
+/**
+ * Add a policy to pathd.
+ *
+ * WARNING: The color 0 is a special case as it is the no-color.
+ *
+ * @param color The color of the policy.
+ * @param endpoint The IP address of the policy endpoint
+ * @return The created policy
+ */
+struct srte_policy *srte_policy_add(uint32_t color, struct ipaddr *endpoint)
+{
+ struct srte_policy *policy;
+
+ policy = XCALLOC(MTYPE_PATH_SR_POLICY, sizeof(*policy));
+ policy->color = color;
+ policy->endpoint = *endpoint;
+ policy->binding_sid = MPLS_LABEL_NONE;
+ RB_INIT(srte_candidate_head, &policy->candidate_paths);
+ RB_INSERT(srte_policy_head, &srte_policies, policy);
+
+ return policy;
+}
+
+/**
+ * Delete a policy from pathd.
+ *
+ * The given policy structure will be freed and should never be used again
+ * after calling this function.
+ *
+ * @param policy The policy to be removed
+ */
+void srte_policy_del(struct srte_policy *policy)
+{
+ struct srte_candidate *candidate;
+
+ path_zebra_delete_sr_policy(policy);
+
+ path_zebra_release_label(policy->binding_sid);
+
+ while (!RB_EMPTY(srte_candidate_head, &policy->candidate_paths)) {
+ candidate =
+ RB_ROOT(srte_candidate_head, &policy->candidate_paths);
+ trigger_pathd_candidate_removed(candidate);
+ srte_candidate_del(candidate);
+ }
+
+ RB_REMOVE(srte_policy_head, &srte_policies, policy);
+ XFREE(MTYPE_PATH_SR_POLICY, policy);
+}
+
+/**
+ * Search for a policy by color and endpoint.
+ *
+ * WARNING: The color 0 is a special case as it is the no-color.
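+ * The lookup is done in the global srte_policies RB-tree, which is keyed
+ * by endpoint and color.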
+ * + * @param color The color of the policy to look for + * @param endpoint The endpoint of the policy to look for + * @return The policy if found, NULL otherwise + */ +struct srte_policy *srte_policy_find(uint32_t color, struct ipaddr *endpoint) +{ + struct srte_policy search; + + search.color = color; + search.endpoint = *endpoint; + return RB_FIND(srte_policy_head, &srte_policies, &search); +} + +/** + * Update a policy binding SID. + * + * @param policy The policy for which the SID should be updated + * @param binding_sid The new binding SID for the given policy + */ +void srte_policy_update_binding_sid(struct srte_policy *policy, + uint32_t binding_sid) +{ + if (policy->binding_sid != MPLS_LABEL_NONE) + path_zebra_release_label(policy->binding_sid); + + policy->binding_sid = binding_sid; + + /* Reinstall the Binding-SID if necessary. */ + if (policy->best_candidate) + path_zebra_add_sr_policy( + policy, policy->best_candidate->lsp->segment_list); +} + +/** + * Gives the policy best candidate path. + * + * @param policy The policy we want the best candidate path from + * @return The best candidate path + */ +static struct srte_candidate * +srte_policy_best_candidate(const struct srte_policy *policy) +{ + struct srte_candidate *candidate; + + RB_FOREACH_REVERSE (candidate, srte_candidate_head, + &policy->candidate_paths) { + /* search for highest preference with existing segment list */ + if (!CHECK_FLAG(candidate->flags, F_CANDIDATE_DELETED) + && candidate->lsp->segment_list) + return candidate; + } + + return NULL; +} + +/** + * Apply changes defined by setting the policies, candidate paths + * and segment lists modification flags NEW, MODIFIED and DELETED. + * + * This allows the northbound code to delay all the side effects of adding + * modifying and deleting them to the end. + * + * Example of marking an object as modified: + * `SET_FLAG(obj->flags, F_XXX_MODIFIED)` + */ +void srte_apply_changes(void) +{ + struct srte_policy *policy, *safe_pol; + struct srte_segment_list *segment_list, *safe_sl; + + RB_FOREACH_SAFE (policy, srte_policy_head, &srte_policies, safe_pol) { + if (CHECK_FLAG(policy->flags, F_POLICY_DELETED)) { + srte_policy_del(policy); + continue; + } + srte_policy_apply_changes(policy); + UNSET_FLAG(policy->flags, F_POLICY_NEW); + UNSET_FLAG(policy->flags, F_POLICY_MODIFIED); + } + + RB_FOREACH_SAFE (segment_list, srte_segment_list_head, + &srte_segment_lists, safe_sl) { + if (CHECK_FLAG(segment_list->flags, F_SEGMENT_LIST_DELETED)) { + srte_segment_list_del(segment_list); + continue; + } + UNSET_FLAG(segment_list->flags, F_SEGMENT_LIST_NEW); + UNSET_FLAG(segment_list->flags, F_SEGMENT_LIST_MODIFIED); + } +} + +/** + * Apply changes defined by setting the given policy and its candidate paths + * modification flags NEW, MODIFIED and DELETED. + * + * In moste cases `void srte_apply_changes(void)` should be used instead, + * this function will not handle the changes of segment lists used by the + * policy. + * + * @param policy The policy changes has to be applied to. + */ +void srte_policy_apply_changes(struct srte_policy *policy) +{ + struct srte_candidate *candidate, *safe; + struct srte_candidate *old_best_candidate; + struct srte_candidate *new_best_candidate; + char endpoint[46]; + + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + + /* Get old and new best candidate path. 
*/ + old_best_candidate = policy->best_candidate; + new_best_candidate = srte_policy_best_candidate(policy); + + if (new_best_candidate != old_best_candidate) { + /* TODO: add debug guard. */ + zlog_debug( + "SR-TE(%s, %u): best candidate changed from %s to %s", + endpoint, policy->color, + old_best_candidate ? old_best_candidate->name : "none", + new_best_candidate ? new_best_candidate->name : "none"); + + if (old_best_candidate) { + policy->best_candidate = NULL; + UNSET_FLAG(old_best_candidate->flags, F_CANDIDATE_BEST); + SET_FLAG(old_best_candidate->flags, + F_CANDIDATE_MODIFIED); + + /* + * Rely on replace semantics if there's a new best + * candidate. + */ + if (!new_best_candidate) + path_zebra_delete_sr_policy(policy); + } + if (new_best_candidate) { + policy->best_candidate = new_best_candidate; + SET_FLAG(new_best_candidate->flags, F_CANDIDATE_BEST); + SET_FLAG(new_best_candidate->flags, + F_CANDIDATE_MODIFIED); + + path_zebra_add_sr_policy( + policy, new_best_candidate->lsp->segment_list); + } + } else if (new_best_candidate) { + /* The best candidate path did not change, but some of its + * attributes or its segment list may have changed. + */ + + bool candidate_changed = CHECK_FLAG(new_best_candidate->flags, + F_CANDIDATE_MODIFIED); + bool segment_list_changed = + new_best_candidate->lsp->segment_list + && CHECK_FLAG( + new_best_candidate->lsp->segment_list->flags, + F_SEGMENT_LIST_MODIFIED); + + if (candidate_changed || segment_list_changed) { + /* TODO: add debug guard. */ + zlog_debug("SR-TE(%s, %u): best candidate %s changed", + endpoint, policy->color, + new_best_candidate->name); + + path_zebra_add_sr_policy( + policy, new_best_candidate->lsp->segment_list); + } + } + + RB_FOREACH_SAFE (candidate, srte_candidate_head, + &policy->candidate_paths, safe) { + if (CHECK_FLAG(candidate->flags, F_CANDIDATE_DELETED)) { + trigger_pathd_candidate_removed(candidate); + srte_candidate_del(candidate); + continue; + } else if (CHECK_FLAG(candidate->flags, F_CANDIDATE_NEW)) { + trigger_pathd_candidate_created(candidate); + } else if (CHECK_FLAG(candidate->flags, F_CANDIDATE_MODIFIED)) { + trigger_pathd_candidate_updated(candidate); + } else if (candidate->lsp->segment_list + && CHECK_FLAG(candidate->lsp->segment_list->flags, + F_SEGMENT_LIST_MODIFIED)) { + trigger_pathd_candidate_updated(candidate); + } + + UNSET_FLAG(candidate->flags, F_CANDIDATE_NEW); + UNSET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + } +} + +/** + * Adds a candidate path to a policy. + * + * @param policy The policy the candidate path should be added to + * @param preference The preference of the candidate path to be added + * @return The added candidate path + */ +struct srte_candidate *srte_candidate_add(struct srte_policy *policy, + uint32_t preference) +{ + struct srte_candidate *candidate; + struct srte_lsp *lsp; + + candidate = XCALLOC(MTYPE_PATH_SR_CANDIDATE, sizeof(*candidate)); + lsp = XCALLOC(MTYPE_PATH_SR_CANDIDATE, sizeof(*lsp)); + + candidate->preference = preference; + candidate->policy = policy; + candidate->type = SRTE_CANDIDATE_TYPE_UNDEFINED; + candidate->discriminator = frr_weak_random(); + + lsp->candidate = candidate; + candidate->lsp = lsp; + + RB_INSERT(srte_candidate_head, &policy->candidate_paths, candidate); + + return candidate; +} + +/** + * Deletes a candidate. + * + * The corresponding LSP will be removed alongside the candidate path. + * The given candidate will be freed and shouldn't be used anymore after the + * calling this function. 
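+ * The candidate-removed hook is not called here; callers such as
+ * srte_policy_del() and srte_policy_apply_changes() trigger it before
+ * deleting the candidate.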
+ * + * @param candidate The candidate path to delete + */ +void srte_candidate_del(struct srte_candidate *candidate) +{ + struct srte_policy *srte_policy = candidate->policy; + + RB_REMOVE(srte_candidate_head, &srte_policy->candidate_paths, + candidate); + + XFREE(MTYPE_PATH_SR_CANDIDATE, candidate->lsp); + XFREE(MTYPE_PATH_SR_CANDIDATE, candidate); +} + +/** + * Sets the bandwidth constraint of given candidate path. + * + * The corresponding LSP will be changed too. + * + * @param candidate The candidate path of which the bandwidth should be changed + * @param bandwidth The Bandwidth constraint to set to the candidate path + * @param required If the constraint is required (true) or only desired (false) + */ +void srte_candidate_set_bandwidth(struct srte_candidate *candidate, + float bandwidth, bool required) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug( + "SR-TE(%s, %u): candidate %s %sconfig bandwidth set to %f B/s", + endpoint, policy->color, candidate->name, + required ? "required " : "", bandwidth); + SET_FLAG(candidate->flags, F_CANDIDATE_HAS_BANDWIDTH); + COND_FLAG(candidate->flags, F_CANDIDATE_REQUIRED_BANDWIDTH, required); + candidate->bandwidth = bandwidth; + + srte_lsp_set_bandwidth(candidate->lsp, bandwidth, required); +} + +/** + * Sets the bandwidth constraint of the given LSP. + * + * The changes will not be shown as part of the running configuration. + * + * @param lsp The lsp of which the bandwidth should be changed + * @param bandwidth The Bandwidth constraint to set to the candidate path + * @param required If the constraint is required (true) or only desired (false) + */ +void srte_lsp_set_bandwidth(struct srte_lsp *lsp, float bandwidth, + bool required) +{ + struct srte_candidate *candidate = lsp->candidate; + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug("SR-TE(%s, %u): candidate %s %slsp bandwidth set to %f B/s", + endpoint, policy->color, candidate->name, + required ? "required" : "", bandwidth); + SET_FLAG(lsp->flags, F_CANDIDATE_HAS_BANDWIDTH); + COND_FLAG(lsp->flags, F_CANDIDATE_REQUIRED_BANDWIDTH, required); + lsp->bandwidth = bandwidth; +} + +/** + * Remove a candidate path bandwidth constraint. + * + * The corresponding LSP will be changed too. + * + * @param candidate The candidate path of which the bandwidth should be removed + */ +void srte_candidate_unset_bandwidth(struct srte_candidate *candidate) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug("SR-TE(%s, %u): candidate %s config bandwidth unset", + endpoint, policy->color, candidate->name); + UNSET_FLAG(candidate->flags, F_CANDIDATE_HAS_BANDWIDTH); + UNSET_FLAG(candidate->flags, F_CANDIDATE_REQUIRED_BANDWIDTH); + candidate->bandwidth = 0; + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + srte_lsp_unset_bandwidth(candidate->lsp); +} + +/** + * Remove an LSP bandwidth constraint. + * + * The changes will not be shown as part of the running configuration. 
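+ * Only the operational LSP flags are cleared; the candidate path keeps
+ * its own configured bandwidth flags.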
+ * + * @param lsp The lsp of which the bandwidth should be changed + */ +void srte_lsp_unset_bandwidth(struct srte_lsp *lsp) +{ + struct srte_candidate *candidate = lsp->candidate; + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug("SR-TE(%s, %u): candidate %s lsp bandwidth unset", endpoint, + policy->color, candidate->name); + UNSET_FLAG(lsp->flags, F_CANDIDATE_HAS_BANDWIDTH); + UNSET_FLAG(lsp->flags, F_CANDIDATE_REQUIRED_BANDWIDTH); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + lsp->bandwidth = 0; +} + +/** + * Sets a candidate path metric constraint. + * + * The corresponding LSP will be changed too. + * + * @param candidate The candidate path of which the metric should be changed + * @param type The metric type + * @param value The metric value + * @param required If the constraint is required (true) or only desired (false) + * @param is_bound If the metric is an indicative value or a strict upper bound + * @param is_computed If the metric was computed or configured + */ +void srte_candidate_set_metric(struct srte_candidate *candidate, + enum srte_candidate_metric_type type, + float value, bool required, bool is_bound, + bool is_computed) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug( + "SR-TE(%s, %u): candidate %s %sconfig metric %s (%u) set to %f " + "(is-bound: %s; is_computed: %s)", + endpoint, policy->color, candidate->name, + required ? "required " : "", srte_candidate_metric_name(type), + type, value, is_bound ? "true" : "false", + is_computed ? "true" : "false"); + assert((type > 0) && (type <= MAX_METRIC_TYPE)); + srte_set_metric(&candidate->metrics[type - 1], value, required, + is_bound, is_computed); + srte_lsp_set_metric(candidate->lsp, type, value, required, is_bound, + is_computed); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); +} + +/** + * Sets an LSP metric constraint. + * + * The changes will not be shown as part of the running configuration. + * + * @param lsp The LSP of which the metric should be changed + * @param type The metric type + * @param value The metric value + * @param required If the constraint is required (true) or only desired (false) + * @param is_bound If the metric is an indicative value or a strict upper bound + * @param is_computed If the metric was computed or configured + */ +void srte_lsp_set_metric(struct srte_lsp *lsp, + enum srte_candidate_metric_type type, float value, + bool required, bool is_bound, bool is_computed) +{ + struct srte_candidate *candidate = lsp->candidate; + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug( + "SR-TE(%s, %u): candidate %s %slsp metric %s (%u) set to %f " + "(is-bound: %s; is_computed: %s)", + endpoint, policy->color, candidate->name, + required ? "required " : "", srte_candidate_metric_name(type), + type, value, is_bound ? "true" : "false", + is_computed ? 
"true" : "false"); + assert((type > 0) && (type <= MAX_METRIC_TYPE)); + srte_set_metric(&lsp->metrics[type - 1], value, required, is_bound, + is_computed); +} + +void srte_set_metric(struct srte_metric *metric, float value, bool required, + bool is_bound, bool is_computed) +{ + SET_FLAG(metric->flags, F_METRIC_IS_DEFINED); + COND_FLAG(metric->flags, F_METRIC_IS_REQUIRED, required); + COND_FLAG(metric->flags, F_METRIC_IS_BOUND, is_bound); + COND_FLAG(metric->flags, F_METRIC_IS_COMPUTED, is_computed); + metric->value = value; +} + +/** + * Removes a candidate path metric constraint. + * + * The corresponding LSP will be changed too. + * + * @param candidate The candidate path from which the metric should be removed + * @param type The metric type + */ +void srte_candidate_unset_metric(struct srte_candidate *candidate, + enum srte_candidate_metric_type type) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug("SR-TE(%s, %u): candidate %s config metric %s (%u) unset", + endpoint, policy->color, candidate->name, + srte_candidate_metric_name(type), type); + assert((type > 0) && (type <= MAX_METRIC_TYPE)); + srte_unset_metric(&candidate->metrics[type - 1]); + srte_lsp_unset_metric(candidate->lsp, type); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); +} + +/** + * Removes a candidate path metric constraint. + * + * The changes will not be shown as part of the running configuration. + * + * @param lsp The LSP from which the metric should be removed + * @param type The metric type + */ +void srte_lsp_unset_metric(struct srte_lsp *lsp, + enum srte_candidate_metric_type type) +{ + struct srte_candidate *candidate = lsp->candidate; + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + zlog_debug("SR-TE(%s, %u): candidate %s lsp metric %s (%u) unset", + endpoint, policy->color, candidate->name, + srte_candidate_metric_name(type), type); + assert((type > 0) && (type <= MAX_METRIC_TYPE)); + srte_unset_metric(&lsp->metrics[type - 1]); +} + +void srte_unset_metric(struct srte_metric *metric) +{ + UNSET_FLAG(metric->flags, F_METRIC_IS_DEFINED); + UNSET_FLAG(metric->flags, F_METRIC_IS_BOUND); + UNSET_FLAG(metric->flags, F_METRIC_IS_COMPUTED); + metric->value = 0; +} + +/** + * Sets a candidate path objective function. + * + * @param candidate The candidate path of which the OF should be changed + * @param required If the constraint is required (true) or only desired (false) + * @param type The objective function type + */ +void srte_candidate_set_objfun(struct srte_candidate *candidate, bool required, + enum objfun_type type) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + + candidate->objfun = type; + SET_FLAG(candidate->flags, F_CANDIDATE_HAS_OBJFUN); + COND_FLAG(candidate->flags, F_CANDIDATE_REQUIRED_OBJFUN, required); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + zlog_debug("SR-TE(%s, %u): candidate %s %sobjective function set to %s", + endpoint, policy->color, candidate->name, + required ? "required " : "", objfun_type_name(type)); +} + +/** + * Removed the objective function constraint from a candidate path. 
+ * + * @param candidate The candidate path from which the OF should be removed + */ +void srte_candidate_unset_objfun(struct srte_candidate *candidate) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + + UNSET_FLAG(candidate->flags, F_CANDIDATE_HAS_OBJFUN); + UNSET_FLAG(candidate->flags, F_CANDIDATE_REQUIRED_OBJFUN); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + candidate->objfun = OBJFUN_UNDEFINED; + zlog_debug( + "SR-TE(%s, %u): candidate %s objective functions preferences unset", + endpoint, policy->color, candidate->name); +} + +static uint32_t filter_type_to_flag(enum affinity_filter_type type) +{ + switch (type) { + case AFFINITY_FILTER_EXCLUDE_ANY: + return F_CANDIDATE_HAS_EXCLUDE_ANY; + case AFFINITY_FILTER_INCLUDE_ANY: + return F_CANDIDATE_HAS_INCLUDE_ANY; + case AFFINITY_FILTER_INCLUDE_ALL: + return F_CANDIDATE_HAS_INCLUDE_ALL; + default: + return 0; + } +} + +static const char *filter_type_name(enum affinity_filter_type type) +{ + switch (type) { + case AFFINITY_FILTER_EXCLUDE_ANY: + return "exclude-any"; + case AFFINITY_FILTER_INCLUDE_ANY: + return "include-any"; + case AFFINITY_FILTER_INCLUDE_ALL: + return "include-all"; + default: + return "unknown"; + } +} + +/** + * Sets a candidate path affinity filter constraint. + * + * @param candidate The candidate path of which the constraint should be changed + * @param type The affinity constraint type to set + * @param filter The bitmask filter of the constraint + */ +void srte_candidate_set_affinity_filter(struct srte_candidate *candidate, + enum affinity_filter_type type, + uint32_t filter) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + + assert(type > AFFINITY_FILTER_UNDEFINED); + assert(type <= MAX_AFFINITY_FILTER_TYPE); + SET_FLAG(candidate->flags, filter_type_to_flag(type)); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + candidate->affinity_filters[type - 1] = filter; + zlog_debug( + "SR-TE(%s, %u): candidate %s affinity filter %s set to 0x%08x", + endpoint, policy->color, candidate->name, + filter_type_name(type), filter); +} + +/** + * Removes a candidate path affinity filter constraint. + * + * @param candidate The candidate path from which the constraint should be + * removed + * @param type The affinity constraint type to remove + */ +void srte_candidate_unset_affinity_filter(struct srte_candidate *candidate, + enum affinity_filter_type type) +{ + struct srte_policy *policy = candidate->policy; + char endpoint[46]; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + + assert(type > AFFINITY_FILTER_UNDEFINED); + assert(type <= MAX_AFFINITY_FILTER_TYPE); + UNSET_FLAG(candidate->flags, filter_type_to_flag(type)); + SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); + candidate->affinity_filters[type - 1] = 0; + zlog_debug("SR-TE(%s, %u): candidate %s affinity filter %s unset", + endpoint, policy->color, candidate->name, + filter_type_name(type)); +} + +/** + * Searches for a candidate path of the given policy. 
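+ * Candidate paths are keyed by preference in the policy's RB-tree.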
+ *
+ * @param policy The policy to search for the candidate path
+ * @param preference The preference of the candidate path you are looking for
+ * @return The candidate path if found, NULL otherwise
+ */
+struct srte_candidate *srte_candidate_find(struct srte_policy *policy,
+ uint32_t preference)
+{
+ struct srte_candidate search;
+
+ search.preference = preference;
+ return RB_FIND(srte_candidate_head, &policy->candidate_paths, &search);
+}
+
+/**
+ * Searches for an entry of a given segment list.
+ *
+ * @param segment_list The segment list to search for the entry
+ * @param index The index of the entry you are looking for
+ * @return The segment list entry if found, NULL otherwise.
+ */
+struct srte_segment_entry *
+srte_segment_entry_find(struct srte_segment_list *segment_list, uint32_t index)
+{
+ struct srte_segment_entry search;
+
+ search.index = index;
+ return RB_FIND(srte_segment_entry_head, &segment_list->segments,
+ &search);
+}
+
+/**
+ * Updates a candidate status.
+ *
+ * @param candidate The candidate of which the status should be updated
+ * @param status The new candidate path status
+ */
+void srte_candidate_status_update(struct srte_candidate *candidate, int status)
+{
+ struct srte_policy *policy = candidate->policy;
+ char endpoint[46];
+ ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint));
+ zlog_debug("SR-TE(%s, %u): zebra updated status to %d", endpoint,
+ policy->color, status);
+ switch (status) {
+ case ZEBRA_SR_POLICY_DOWN:
+ switch (policy->status) {
+ /* If the policy is GOING_UP and zebra failed
+ to install it, we wait for zebra to retry */
+ /* TODO: Add some timeout after which we would
+ get it back to DOWN and remove the
+ policy */
+ case SRTE_POLICY_STATUS_GOING_UP:
+ case SRTE_POLICY_STATUS_DOWN:
+ return;
+ default:
+ zlog_debug("SR-TE(%s, %u): policy is DOWN", endpoint,
+ policy->color);
+ policy->status = SRTE_POLICY_STATUS_DOWN;
+ break;
+ }
+ break;
+ case ZEBRA_SR_POLICY_UP:
+ switch (policy->status) {
+ case SRTE_POLICY_STATUS_UP:
+ return;
+ default:
+ zlog_debug("SR-TE(%s, %u): policy is UP", endpoint,
+ policy->color);
+ policy->status = SRTE_POLICY_STATUS_UP;
+ break;
+ }
+ break;
+ }
+
+ trigger_pathd_candidate_updated(candidate);
+}
+
+/**
+ * Flags the segment lists from the given originator for removal.
+ *
+ * The function srte_apply_changes must be called afterward for
+ * the segment list to be removed.
+ *
+ * @param originator The originator tag of the segment list to be marked
+ * @param force If the unset should be forced regardless of the originator
+ */
+void srte_candidate_unset_segment_list(const char *originator, bool force)
+{
+ if (originator == NULL) {
+ zlog_warn(
+ "Cannot unset segment list because originator is NULL");
+ return;
+ }
+
+ zlog_debug("Unset segment lists for originator %s", originator);
+
+ /* Iterate the policies, then iterate each policy's candidate path
+ * to check the candidate path's segment list originator */
+ struct srte_policy *policy;
+ RB_FOREACH (policy, srte_policy_head, &srte_policies) {
+ zlog_debug("Unset segment lists checking policy %s",
+ policy->name);
+ struct srte_candidate *candidate;
+ RB_FOREACH (candidate, srte_candidate_head,
+ &policy->candidate_paths) {
+ zlog_debug("Unset segment lists checking candidate %s",
+ candidate->name);
+ if (candidate->lsp == NULL) {
+ continue;
+ }
+
+ /* The candidate->lsp->segment_list is operational data,
+ * configured by the PCE. We don't want to modify the
+ * candidate->segment_list,
+ * which is configuration data.
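+ * Only this operational copy (never a locally created segment list)
+ * may be flagged for deletion here.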
*/ + struct srte_segment_list *segment_list = + candidate->lsp->segment_list; + if (segment_list == NULL) { + continue; + } + + if (segment_list->protocol_origin + == SRTE_ORIGIN_LOCAL) { + zlog_warn( + "Cannot unset segment list %s because it " + "was created locally", + segment_list->name); + continue; + } + + /* In the case of last pce,we force the unset + * because we don't have pce by prefix (TODO) is all + * 'global' */ + if (strncmp(segment_list->originator, originator, + sizeof(segment_list->originator)) + == 0 + || force) { + zlog_debug("Unset segment list %s", + segment_list->name); + SET_FLAG(segment_list->flags, + F_SEGMENT_LIST_DELETED); + SET_FLAG(candidate->flags, + F_CANDIDATE_MODIFIED); + candidate->lsp->segment_list = NULL; + } + } + } +} + +/** + * Gives a string representation of given protocol origin enum. + * + * @param origin The enum you want a string representation of + * @return The string representation of given enum + */ +const char *srte_origin2str(enum srte_protocol_origin origin) +{ + switch (origin) { + case SRTE_ORIGIN_PCEP: + return "PCEP"; + case SRTE_ORIGIN_BGP: + return "BGP"; + case SRTE_ORIGIN_LOCAL: + return "Local"; + default: + return "Unknown"; + } +} + +void trigger_pathd_candidate_created(struct srte_candidate *candidate) +{ + /* The hook is called asynchronously to let the PCEP module + time to send a response to the PCE before receiving any updates from + pathd. In addition, a minimum amount of time need to pass before + the hook is called to prevent the hook to be called multiple times + from changing the candidate by hand with the console */ + if (candidate->hook_timer != NULL) + return; + thread_add_timer(master, trigger_pathd_candidate_created_timer, + (void *)candidate, HOOK_DELAY, &candidate->hook_timer); +} + +int trigger_pathd_candidate_created_timer(struct thread *thread) +{ + struct srte_candidate *candidate = THREAD_ARG(thread); + candidate->hook_timer = NULL; + return hook_call(pathd_candidate_created, candidate); +} + +void trigger_pathd_candidate_updated(struct srte_candidate *candidate) +{ + /* The hook is called asynchronously to let the PCEP module + time to send a response to the PCE before receiving any updates from + pathd. 
In addition, a minimum amount of time need to pass before + the hook is called to prevent the hook to be called multiple times + from changing the candidate by hand with the console */ + if (candidate->hook_timer != NULL) + return; + thread_add_timer(master, trigger_pathd_candidate_updated_timer, + (void *)candidate, HOOK_DELAY, &candidate->hook_timer); +} + +int trigger_pathd_candidate_updated_timer(struct thread *thread) +{ + struct srte_candidate *candidate = THREAD_ARG(thread); + candidate->hook_timer = NULL; + return hook_call(pathd_candidate_updated, candidate); +} + +void trigger_pathd_candidate_removed(struct srte_candidate *candidate) +{ + /* The hook needs to be call synchronously, otherwise the candidate + path will be already deleted when the handler is called */ + if (candidate->hook_timer != NULL) { + thread_cancel(&candidate->hook_timer); + candidate->hook_timer = NULL; + } + hook_call(pathd_candidate_removed, candidate); +} + +const char *srte_candidate_metric_name(enum srte_candidate_metric_type type) +{ + switch (type) { + case SRTE_CANDIDATE_METRIC_TYPE_IGP: + return "IGP"; + case SRTE_CANDIDATE_METRIC_TYPE_TE: + return "TE"; + case SRTE_CANDIDATE_METRIC_TYPE_HC: + return "HC"; + case SRTE_CANDIDATE_METRIC_TYPE_ABC: + return "ABC"; + case SRTE_CANDIDATE_METRIC_TYPE_LMLL: + return "LMLL"; + case SRTE_CANDIDATE_METRIC_TYPE_CIGP: + return "CIGP"; + case SRTE_CANDIDATE_METRIC_TYPE_CTE: + return "CTE"; + case SRTE_CANDIDATE_METRIC_TYPE_PIGP: + return "PIGP"; + case SRTE_CANDIDATE_METRIC_TYPE_PTE: + return "PTE"; + case SRTE_CANDIDATE_METRIC_TYPE_PHC: + return "PHC"; + case SRTE_CANDIDATE_METRIC_TYPE_MSD: + return "MSD"; + case SRTE_CANDIDATE_METRIC_TYPE_PD: + return "PD"; + case SRTE_CANDIDATE_METRIC_TYPE_PDV: + return "PDV"; + case SRTE_CANDIDATE_METRIC_TYPE_PL: + return "PL"; + case SRTE_CANDIDATE_METRIC_TYPE_PPD: + return "PPD"; + case SRTE_CANDIDATE_METRIC_TYPE_PPDV: + return "PPDV"; + case SRTE_CANDIDATE_METRIC_TYPE_PPL: + return "PPL"; + case SRTE_CANDIDATE_METRIC_TYPE_NAP: + return "NAP"; + case SRTE_CANDIDATE_METRIC_TYPE_NLP: + return "NLP"; + case SRTE_CANDIDATE_METRIC_TYPE_DC: + return "DC"; + case SRTE_CANDIDATE_METRIC_TYPE_BNC: + return "BNC"; + default: + return "UNKNOWN"; + } +} diff --git a/pathd/pathd.conf.sample b/pathd/pathd.conf.sample new file mode 100644 index 0000000000..9fe7d2d6e3 --- /dev/null +++ b/pathd/pathd.conf.sample @@ -0,0 +1,41 @@ +! Default pathd configuration sample +! +password frr +log stdout + +segment-routing + traffic-eng + segment-list test1 + index 10 mpls label 123 + index 20 mpls label 456 + ! + segment-list test2 + index 10 mpls label 321 + index 20 mpls label 654 + ! + policy color 1 endpoint 1.1.1.1 + name one + binding-sid 100 + candidate-path preference 100 name test1 explicit segment-list test1 + candidate-path preference 200 name test2 explicit segment-list test2 + ! + policy color 2 endpoint 2.2.2.2 + name two + binding-sid 101 + candidate-path preference 100 name def explicit segment-list test2 + candidate-path preference 200 name dyn dynamic + bandwidth 12345 + metric bound abc 16 required + metric te 10 + ! + ! + pcep + pcc-peer PCE1 + address ip 127.0.0.1 + sr-draft07 + ! + pcc + peer PCE1 + ! + ! +! diff --git a/pathd/pathd.h b/pathd/pathd.h new file mode 100644 index 0000000000..4879239db8 --- /dev/null +++ b/pathd/pathd.h @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2020 NetDEF, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _FRR_PATHD_H_ +#define _FRR_PATHD_H_ + +#include "lib/mpls.h" +#include "lib/ipaddr.h" +#include "lib/srte.h" +#include "lib/hook.h" + +enum srte_protocol_origin { + SRTE_ORIGIN_UNDEFINED = 0, + SRTE_ORIGIN_PCEP = 1, + SRTE_ORIGIN_BGP = 2, + SRTE_ORIGIN_LOCAL = 3, +}; + +enum srte_policy_status { + SRTE_POLICY_STATUS_UNKNOWN = 0, + SRTE_POLICY_STATUS_DOWN = 1, + SRTE_POLICY_STATUS_UP = 2, + SRTE_POLICY_STATUS_GOING_DOWN = 3, + SRTE_POLICY_STATUS_GOING_UP = 4 +}; + +enum srte_candidate_type { + SRTE_CANDIDATE_TYPE_UNDEFINED = 0, + SRTE_CANDIDATE_TYPE_EXPLICIT = 1, + SRTE_CANDIDATE_TYPE_DYNAMIC = 2, +}; + +enum srte_candidate_metric_type { + /* IGP metric */ + SRTE_CANDIDATE_METRIC_TYPE_IGP = 1, + /* TE metric */ + SRTE_CANDIDATE_METRIC_TYPE_TE = 2, + /* Hop Counts */ + SRTE_CANDIDATE_METRIC_TYPE_HC = 3, + /* Aggregate bandwidth consumption */ + SRTE_CANDIDATE_METRIC_TYPE_ABC = 4, + /* Load of the most loaded link */ + SRTE_CANDIDATE_METRIC_TYPE_LMLL = 5, + /* Cumulative IGP cost */ + SRTE_CANDIDATE_METRIC_TYPE_CIGP = 6, + /* Cumulative TE cost */ + SRTE_CANDIDATE_METRIC_TYPE_CTE = 7, + /* P2MP IGP metric */ + SRTE_CANDIDATE_METRIC_TYPE_PIGP = 8, + /* P2MP TE metric */ + SRTE_CANDIDATE_METRIC_TYPE_PTE = 9, + /* P2MP hop count metric */ + SRTE_CANDIDATE_METRIC_TYPE_PHC = 10, + /* Segment-ID (SID) Depth */ + SRTE_CANDIDATE_METRIC_TYPE_MSD = 11, + /* Path Delay metric */ + SRTE_CANDIDATE_METRIC_TYPE_PD = 12, + /* Path Delay Variation metric */ + SRTE_CANDIDATE_METRIC_TYPE_PDV = 13, + /* Path Loss metric */ + SRTE_CANDIDATE_METRIC_TYPE_PL = 14, + /* P2MP Path Delay metric */ + SRTE_CANDIDATE_METRIC_TYPE_PPD = 15, + /* P2MP Path Delay variation metric */ + SRTE_CANDIDATE_METRIC_TYPE_PPDV = 16, + /* P2MP Path Loss metric */ + SRTE_CANDIDATE_METRIC_TYPE_PPL = 17, + /* Number of adaptations on a path */ + SRTE_CANDIDATE_METRIC_TYPE_NAP = 18, + /* Number of layers on a path */ + SRTE_CANDIDATE_METRIC_TYPE_NLP = 19, + /* Domain Count metric */ + SRTE_CANDIDATE_METRIC_TYPE_DC = 20, + /* Border Node Count metric */ + SRTE_CANDIDATE_METRIC_TYPE_BNC = 21, +}; +#define MAX_METRIC_TYPE 21 + +enum srte_segment_nai_type { + SRTE_SEGMENT_NAI_TYPE_NONE = 0, + SRTE_SEGMENT_NAI_TYPE_IPV4_NODE = 1, + SRTE_SEGMENT_NAI_TYPE_IPV6_NODE = 2, + SRTE_SEGMENT_NAI_TYPE_IPV4_ADJACENCY = 3, + SRTE_SEGMENT_NAI_TYPE_IPV6_ADJACENCY = 4, + SRTE_SEGMENT_NAI_TYPE_IPV4_UNNUMBERED_ADJACENCY = 5 +}; + +enum objfun_type { + OBJFUN_UNDEFINED = 0, + /* Minimum Cost Path [RFC5541] */ + OBJFUN_MCP = 1, + /* Minimum Load Path [RFC5541] */ + OBJFUN_MLP = 2, + /* Maximum residual Bandwidth Path [RFC5541] */ + OBJFUN_MBP = 3, + /* Minimize aggregate Bandwidth Consumption [RFC5541] */ + OBJFUN_MBC = 4, + /* Minimize the Load of the most loaded Link [RFC5541] */ + OBJFUN_MLL = 5, + /* Minimize 
the Cumulative Cost of a set of paths [RFC5541] */ + OBJFUN_MCC = 6, + /* Shortest Path Tree [RFC8306] */ + OBJFUN_SPT = 7, + /* Minimum Cost Tree [RFC8306] */ + OBJFUN_MCT = 8, + /* Minimum Packet Loss Path [RFC8233] */ + OBJFUN_MPLP = 9, + /* Maximum Under-Utilized Path [RFC8233] */ + OBJFUN_MUP = 10, + /* Maximum Reserved Under-Utilized Path [RFC8233] */ + OBJFUN_MRUP = 11, + /* Minimize the number of Transit Domains [RFC8685] */ + OBJFUN_MTD = 12, + /* Minimize the number of Border Nodes [RFC8685] */ + OBJFUN_MBN = 13, + /* Minimize the number of Common Transit Domains [RFC8685] */ + OBJFUN_MCTD = 14, + /* Minimize the number of Shared Links [RFC8800] */ + OBJFUN_MSL = 15, + /* Minimize the number of Shared SRLGs [RFC8800] */ + OBJFUN_MSS = 16, + /* Minimize the number of Shared Nodes [RFC8800] */ + OBJFUN_MSN = 17, +}; +#define MAX_OBJFUN_TYPE 17 + +enum affinity_filter_type { + AFFINITY_FILTER_UNDEFINED = 0, + AFFINITY_FILTER_EXCLUDE_ANY = 1, + AFFINITY_FILTER_INCLUDE_ANY = 2, + AFFINITY_FILTER_INCLUDE_ALL = 3, +}; +#define MAX_AFFINITY_FILTER_TYPE 3 + +struct srte_segment_list; + +struct srte_segment_entry { + RB_ENTRY(srte_segment_entry) entry; + + /* The segment list the entry belong to */ + struct srte_segment_list *segment_list; + + /* Index of the Label. */ + uint32_t index; + + /* Label Value. */ + mpls_label_t sid_value; + + /* NAI Type */ + enum srte_segment_nai_type nai_type; + /* NAI local address when nai type is not NONE */ + struct ipaddr nai_local_addr; + /* NAI local interface when nai type is not IPv4 unnumbered adjacency */ + uint32_t nai_local_iface; + /* NAI local interface when nai type is IPv4 or IPv6 adjacency */ + struct ipaddr nai_remote_addr; + /* NAI remote interface when nai type is not IPv4 unnumbered adjacency + */ + uint32_t nai_remote_iface; +}; +RB_HEAD(srte_segment_entry_head, srte_segment_entry); +RB_PROTOTYPE(srte_segment_entry_head, srte_segment_entry, entry, + srte_segment_entry_compare) + +struct srte_segment_list { + RB_ENTRY(srte_segment_list) entry; + + /* Name of the Segment List. */ + char name[64]; + + /* The Protocol-Origin. */ + enum srte_protocol_origin protocol_origin; + + /* The Originator */ + char originator[64]; + + /* Nexthops. */ + struct srte_segment_entry_head segments; + + /* Status flags. */ + uint16_t flags; +#define F_SEGMENT_LIST_NEW 0x0002 +#define F_SEGMENT_LIST_MODIFIED 0x0004 +#define F_SEGMENT_LIST_DELETED 0x0008 +}; +RB_HEAD(srte_segment_list_head, srte_segment_list); +RB_PROTOTYPE(srte_segment_list_head, srte_segment_list, entry, + srte_segment_list_compare) + +struct srte_metric { + uint16_t flags; +#define F_METRIC_IS_DEFINED 0x0001 +#define F_METRIC_IS_REQUIRED 0x0002 +#define F_METRIC_IS_BOUND 0x0004 +#define F_METRIC_IS_COMPUTED 0x0008 + float value; +}; + +/* Runtime information about the candidate path */ +struct srte_lsp { + /* Backpointer to the Candidate Path. */ + struct srte_candidate *candidate; + + /* The associated Segment List. */ + struct srte_segment_list *segment_list; + + /* The Protocol-Origin. */ + enum srte_protocol_origin protocol_origin; + + /* The Originator */ + char originator[64]; + + /* The Discriminator */ + uint32_t discriminator; + + /* Flags. 
*/ + uint32_t flags; + + /* Metrics LSP Values */ + struct srte_metric metrics[MAX_METRIC_TYPE]; + + /* Bandwidth Configured Value */ + float bandwidth; + + /* The objective function in used */ + enum objfun_type objfun; +}; + +/* Configured candidate path */ +struct srte_candidate { + RB_ENTRY(srte_candidate) entry; + + /* Backpointer to SR Policy */ + struct srte_policy *policy; + + /* The LSP associated with this candidate path. */ + struct srte_lsp *lsp; + + /* Administrative preference. */ + uint32_t preference; + + /* Symbolic Name. */ + char name[64]; + + /* The associated Segment List. */ + struct srte_segment_list *segment_list; + + /* The Protocol-Origin. */ + enum srte_protocol_origin protocol_origin; + + /* The Originator */ + char originator[64]; + + /* The Discriminator */ + uint32_t discriminator; + + /* The Type (explicit or dynamic) */ + enum srte_candidate_type type; + + /* Flags. */ + uint32_t flags; +#define F_CANDIDATE_BEST 0x0001 +#define F_CANDIDATE_NEW 0x0002 +#define F_CANDIDATE_MODIFIED 0x0004 +#define F_CANDIDATE_DELETED 0x0008 +#define F_CANDIDATE_HAS_BANDWIDTH 0x0100 +#define F_CANDIDATE_REQUIRED_BANDWIDTH 0x0200 +#define F_CANDIDATE_HAS_OBJFUN 0x0400 +#define F_CANDIDATE_REQUIRED_OBJFUN 0x0800 +#define F_CANDIDATE_HAS_EXCLUDE_ANY 0x1000 +#define F_CANDIDATE_HAS_INCLUDE_ANY 0x2000 +#define F_CANDIDATE_HAS_INCLUDE_ALL 0x4000 + + /* Metrics Configured Values */ + struct srte_metric metrics[MAX_METRIC_TYPE]; + + /* Bandwidth Configured Value */ + float bandwidth; + + /* Configured objective functions */ + enum objfun_type objfun; + + /* Path constraints attribute filters */ + uint32_t affinity_filters[MAX_AFFINITY_FILTER_TYPE]; + + /* Hooks delaying timer */ + struct thread *hook_timer; +}; + +RB_HEAD(srte_candidate_head, srte_candidate); +RB_PROTOTYPE(srte_candidate_head, srte_candidate, entry, srte_candidate_compare) + +struct srte_policy { + RB_ENTRY(srte_policy) entry; + + /* Color */ + uint32_t color; + + /* Endpoint */ + struct ipaddr endpoint; + + /* Name */ + char name[64]; + + /* Binding SID */ + mpls_label_t binding_sid; + + /* Operational Status of the policy */ + enum srte_policy_status status; + + /* Best candidate path. */ + struct srte_candidate *best_candidate; + + /* Candidate Paths */ + struct srte_candidate_head candidate_paths; + /* Status flags. 
*/ + uint16_t flags; +#define F_POLICY_NEW 0x0002 +#define F_POLICY_MODIFIED 0x0004 +#define F_POLICY_DELETED 0x0008 +}; +RB_HEAD(srte_policy_head, srte_policy); +RB_PROTOTYPE(srte_policy_head, srte_policy, entry, srte_policy_compare) + +DECLARE_HOOK(pathd_candidate_created, (struct srte_candidate * candidate), + (candidate)) +DECLARE_HOOK(pathd_candidate_updated, (struct srte_candidate * candidate), + (candidate)) +DECLARE_HOOK(pathd_candidate_removed, (struct srte_candidate * candidate), + (candidate)) + +extern struct srte_segment_list_head srte_segment_lists; +extern struct srte_policy_head srte_policies; +extern struct zebra_privs_t pathd_privs; + +/* master thread, defined in path_main.c */ +extern struct thread_master *master; + +/* pathd.c */ +struct srte_segment_list *srte_segment_list_add(const char *name); +void srte_segment_list_del(struct srte_segment_list *segment_list); +struct srte_segment_list *srte_segment_list_find(const char *name); +struct srte_segment_entry * +srte_segment_entry_add(struct srte_segment_list *segment_list, uint32_t index); +void srte_segment_entry_del(struct srte_segment_entry *segment); +void srte_segment_entry_set_nai(struct srte_segment_entry *segment, + enum srte_segment_nai_type type, + struct ipaddr *local_ip, uint32_t local_iface, + struct ipaddr *remote_ip, + uint32_t remote_iface); +struct srte_policy *srte_policy_add(uint32_t color, struct ipaddr *endpoint); +void srte_policy_del(struct srte_policy *policy); +struct srte_policy *srte_policy_find(uint32_t color, struct ipaddr *endpoint); +void srte_policy_update_binding_sid(struct srte_policy *policy, + uint32_t binding_sid); +void srte_apply_changes(void); +void srte_policy_apply_changes(struct srte_policy *policy); +struct srte_candidate *srte_candidate_add(struct srte_policy *policy, + uint32_t preference); +void srte_candidate_del(struct srte_candidate *candidate); +void srte_candidate_set_bandwidth(struct srte_candidate *candidate, + float bandwidth, bool required); +void srte_candidate_unset_bandwidth(struct srte_candidate *candidate); +void srte_candidate_set_metric(struct srte_candidate *candidate, + enum srte_candidate_metric_type type, + float value, bool required, bool is_cound, + bool is_computed); +void srte_candidate_unset_metric(struct srte_candidate *candidate, + enum srte_candidate_metric_type type); +void srte_candidate_set_objfun(struct srte_candidate *candidate, bool required, + enum objfun_type type); +void srte_candidate_unset_objfun(struct srte_candidate *candidate); +void srte_candidate_set_affinity_filter(struct srte_candidate *candidate, + enum affinity_filter_type type, + uint32_t filter); +void srte_candidate_unset_affinity_filter(struct srte_candidate *candidate, + enum affinity_filter_type type); +void srte_lsp_set_bandwidth(struct srte_lsp *lsp, float bandwidth, + bool required); +void srte_lsp_unset_bandwidth(struct srte_lsp *lsp); +void srte_lsp_set_metric(struct srte_lsp *lsp, + enum srte_candidate_metric_type type, float value, + bool required, bool is_cound, bool is_computed); +void srte_lsp_unset_metric(struct srte_lsp *lsp, + enum srte_candidate_metric_type type); +struct srte_candidate *srte_candidate_find(struct srte_policy *policy, + uint32_t preference); +struct srte_segment_entry * +srte_segment_entry_find(struct srte_segment_list *segment_list, uint32_t index); +void srte_candidate_status_update(struct srte_candidate *candidate, int status); +void srte_candidate_unset_segment_list(const char *originator, bool force); +const char *srte_origin2str(enum 
srte_protocol_origin origin); + +/* path_cli.c */ +void path_cli_init(void); + +#endif /* _FRR_PATHD_H_ */ diff --git a/pathd/subdir.am b/pathd/subdir.am new file mode 100644 index 0000000000..520a8c696a --- /dev/null +++ b/pathd/subdir.am @@ -0,0 +1,74 @@ +# +# pathd +# + +if PATHD +noinst_LIBRARIES += pathd/libpath.a +sbin_PROGRAMS += pathd/pathd +dist_examples_DATA += pathd/pathd.conf.sample +vtysh_scan += $(top_srcdir)/pathd/path_cli.c +vtysh_daemons += pathd +# TODO add man page +#man8 += $(MANBUILD)/pathd.8 + +if HAVE_PATHD_PCEP +vtysh_scan += $(top_srcdir)/pathd/path_pcep_cli.c +module_LTLIBRARIES += pathd/pathd_pcep.la +endif + +endif + +pathd_libpath_a_SOURCES = \ + pathd/path_cli.c \ + pathd/path_debug.c \ + pathd/path_errors.c \ + pathd/path_main.c \ + pathd/path_memory.c \ + pathd/path_nb.c \ + pathd/path_nb_config.c \ + pathd/path_nb_state.c \ + pathd/path_zebra.c \ + pathd/pathd.c \ + # end + +clippy_scan += \ + pathd/path_cli.c \ + pathd/path_pcep_cli.c \ + # end + +noinst_HEADERS += \ + pathd/path_debug.h \ + pathd/path_errors.h \ + pathd/path_memory.h \ + pathd/path_nb.h \ + pathd/path_pcep.h \ + pathd/path_pcep_cli.h \ + pathd/path_pcep_controller.h \ + pathd/path_pcep_debug.h \ + pathd/path_pcep_lib.h \ + pathd/path_pcep_memory.h \ + pathd/path_pcep_config.h \ + pathd/path_pcep_pcc.h \ + pathd/path_zebra.h \ + pathd/pathd.h \ + # end + +pathd_pathd_SOURCES = pathd/path_main.c +nodist_pathd_pathd_SOURCES = \ + yang/frr-pathd.yang.c \ + # end +pathd_pathd_LDADD = pathd/libpath.a lib/libfrr.la -lm $(LIBCAP) + +pathd_pathd_pcep_la_SOURCES = \ + pathd/path_pcep.c \ + pathd/path_pcep_cli.c \ + pathd/path_pcep_controller.c \ + pathd/path_pcep_debug.c \ + pathd/path_pcep_lib.c \ + pathd/path_pcep_memory.c \ + pathd/path_pcep_config.c \ + pathd/path_pcep_pcc.c \ + # end +pathd_pathd_pcep_la_CFLAGS = $(WERROR) +pathd_pathd_pcep_la_LDFLAGS = -avoid-version -module -shared -export-dynamic +pathd_pathd_pcep_la_LIBADD = @PATHD_PCEP_LIBS@ diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c index 01caff5b52..5b851988f6 100644 --- a/pbrd/pbr_map.c +++ b/pbrd/pbr_map.c @@ -304,7 +304,7 @@ static void pbrms_vrf_update(struct pbr_map_sequence *pbrms, if (pbrms->vrf_lookup && (strncmp(vrf_name, pbrms->vrf_name, sizeof(pbrms->vrf_name)) == 0)) { - DEBUGD(&pbr_dbg_map, "\tSeq %u uses vrf %s (%u), updating map", + DEBUGD(&pbr_dbg_map, " Seq %u uses vrf %s (%u), updating map", pbrms->seqno, vrf_name, pbr_vrf_id(pbr_vrf)); pbr_map_check(pbrms, false); @@ -666,7 +666,7 @@ void pbr_map_schedule_policy_from_nhg(const char *nh_group, bool installed) RB_FOREACH (pbrm, pbr_map_entry_head, &pbr_maps) { DEBUGD(&pbr_dbg_map, "%s: Looking at %s", __func__, pbrm->name); for (ALL_LIST_ELEMENTS_RO(pbrm->seqnumbers, node, pbrms)) { - DEBUGD(&pbr_dbg_map, "\tNH Grp name: %s", + DEBUGD(&pbr_dbg_map, " NH Grp name: %s", pbrms->nhgrp_name ? 
pbrms->nhgrp_name : pbrms->internal_nhg_name); @@ -707,7 +707,7 @@ void pbr_map_policy_install(const char *name) if (pbrm->valid && pbrms->nhs_installed && pbrm->incoming->count) { - DEBUGD(&pbr_dbg_map, "\tInstalling %s %u", pbrm->name, + DEBUGD(&pbr_dbg_map, " Installing %s %u", pbrm->name, pbrms->seqno); for (ALL_LIST_ELEMENTS_RO(pbrm->incoming, inode, pmi)) if (pbr_map_interface_is_valid(pmi)) @@ -861,7 +861,7 @@ void pbr_map_check(struct pbr_map_sequence *pbrms, bool changed) DEBUGD(&pbr_dbg_map, "%s: Installing %s(%u) reason: %" PRIu64, __func__, pbrm->name, pbrms->seqno, pbrms->reason); DEBUGD(&pbr_dbg_map, - "\tSending PBR_MAP_POLICY_INSTALL event"); + " Sending PBR_MAP_POLICY_INSTALL event"); } else { install = false; DEBUGD(&pbr_dbg_map, "%s: Removing %s(%u) reason: %" PRIu64, diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c index ce7780ed49..ba9ad97ab8 100644 --- a/pbrd/pbr_nht.c +++ b/pbrd/pbr_nht.c @@ -213,7 +213,7 @@ void pbr_nhgroup_add_cb(const char *name) nhgc = nhgc_find(name); if (!nhgc) { - DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s\n", + DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s", __func__, name); return; } @@ -320,13 +320,6 @@ void pbr_nhgroup_delete_cb(const char *name) pbr_map_check_nh_group_change(name); } -#if 0 -static struct pbr_nexthop_cache *pbr_nht_lookup_nexthop(struct nexthop *nexthop) -{ - return NULL; -} -#endif - static void pbr_nht_find_nhg_from_table_update(struct pbr_nexthop_group_cache *pnhgc, uint32_t table_id, bool installed) @@ -627,7 +620,7 @@ struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name) nhgc = nhgc_find(name); if (!nhgc) { - DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s\n", + DEBUGD(&pbr_dbg_nht, "%s: Could not find nhgc with name: %s", __func__, name); return NULL; } @@ -693,7 +686,7 @@ bool pbr_nht_nexthop_group_valid(const char *name) pnhgc = hash_get(pbr_nhg_hash, &lookup, NULL); if (!pnhgc) return false; - DEBUGD(&pbr_dbg_nht, "%s: \t%d %d", __func__, pnhgc->valid, + DEBUGD(&pbr_dbg_nht, "%s: %d %d", __func__, pnhgc->valid, pnhgc->installed); if (pnhgc->valid && pnhgc->installed) return true; @@ -719,21 +712,46 @@ pbr_nht_individual_nexthop_gw_update(struct pbr_nexthop_cache *pnhc, { bool is_valid = pnhc->valid; - if (!pnhi->nhr) /* It doesn't care about non-nexthop updates */ + /* + * If we have an interface down event, let's note that + * it is happening and find all the nexthops that depend + * on that interface. As that if we have an interface + * flapping fast enough it means that zebra might turn + * those nexthop tracking events into a no-update + * So let's search and do the right thing on the + * interface event. 
+ */ + if (!pnhi->nhr && pnhi->ifp) { + switch (pnhc->nexthop.type) { + case NEXTHOP_TYPE_BLACKHOLE: + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV6: + goto done; + case NEXTHOP_TYPE_IFINDEX: + case NEXTHOP_TYPE_IPV4_IFINDEX: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (pnhc->nexthop.ifindex == pnhi->ifp->ifindex) + is_valid = if_is_up(pnhi->ifp); + goto done; + } + goto done; + } - switch (pnhi->nhr->prefix.family) { - case AF_INET: - if (pnhc->nexthop.gate.ipv4.s_addr - != pnhi->nhr->prefix.u.prefix4.s_addr) - goto done; /* Unrelated change */ - break; - case AF_INET6: - if (memcmp(&pnhc->nexthop.gate.ipv6, - &pnhi->nhr->prefix.u.prefix6, 16) - != 0) - goto done; /* Unrelated change */ - break; + if (pnhi->nhr) { + switch (pnhi->nhr->prefix.family) { + case AF_INET: + if (pnhc->nexthop.gate.ipv4.s_addr + != pnhi->nhr->prefix.u.prefix4.s_addr) + goto done; /* Unrelated change */ + break; + case AF_INET6: + if (memcmp(&pnhc->nexthop.gate.ipv6, + &pnhi->nhr->prefix.u.prefix6, 16) + != 0) + goto done; /* Unrelated change */ + break; + } } pnhi->nhr_matched = true; @@ -833,7 +851,7 @@ static void pbr_nht_individual_nexthop_update_lookup(struct hash_bucket *b, pbr_nht_individual_nexthop_update(pnhc, pnhi); - DEBUGD(&pbr_dbg_nht, "\tFound %pFX: old: %d new: %d", + DEBUGD(&pbr_dbg_nht, " Found %pFX: old: %d new: %d", &pnhi->nhr->prefix, old_valid, pnhc->valid); if (pnhc->valid) @@ -983,7 +1001,6 @@ static void pbr_nht_nexthop_vrf_handle(struct hash_bucket *b, void *data) struct pbr_vrf *pbr_vrf = data; struct pbr_nht_individual pnhi = {}; - zlog_debug("pnhgc iterating"); hash_iterate(pnhgc->nhh, pbr_nht_clear_looked_at, NULL); memset(&pnhi, 0, sizeof(pnhi)); pnhi.pbr_vrf = pbr_vrf; @@ -1085,7 +1102,7 @@ pbr_nht_individual_nexthop_interface_update_lookup(struct hash_bucket *b, pbr_nht_individual_nexthop_update(pnhc, pnhi); - DEBUGD(&pbr_dbg_nht, "\tFound %s: old: %d new: %d", pnhi->ifp->name, + DEBUGD(&pbr_dbg_nht, " Found %s: old: %d new: %d", pnhi->ifp->name, old_valid, pnhc->valid); if (pnhc->valid) @@ -1097,6 +1114,7 @@ static void pbr_nht_nexthop_interface_update_lookup(struct hash_bucket *b, { struct pbr_nexthop_group_cache *pnhgc = b->data; struct pbr_nht_individual pnhi = {}; + struct nexthop_group nhg = {}; bool old_valid; old_valid = pnhgc->valid; @@ -1111,6 +1129,15 @@ static void pbr_nht_nexthop_interface_update_lookup(struct hash_bucket *b, */ pnhgc->valid = pnhi.valid; + pbr_nexthop_group_cache_to_nexthop_group(&nhg, pnhgc); + + if (pnhgc->valid) + pbr_nht_install_nexthop_group(pnhgc, nhg); + else + pbr_nht_uninstall_nexthop_group(pnhgc, nhg, 0); + + nexthops_free(nhg.nexthop); + if (old_valid != pnhgc->valid) pbr_map_check_nh_group_change(pnhgc->name); } diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c index 26163dcc56..216834fe0c 100644 --- a/pbrd/pbr_vty.c +++ b/pbrd/pbr_vty.c @@ -137,6 +137,11 @@ DEFPY(pbr_map_match_src, pbr_map_match_src_cmd, { struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); + if (pbrms->dst && pbrms->family && prefix->family != pbrms->family) { + vty_out(vty, "Cannot mismatch families within match src/dst\n"); + return CMD_WARNING_CONFIG_FAILED; + } + pbrms->family = prefix->family; if (!no) { @@ -165,6 +170,11 @@ DEFPY(pbr_map_match_dst, pbr_map_match_dst_cmd, { struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence); + if (pbrms->src && pbrms->family && prefix->family != pbrms->family) { + vty_out(vty, "Cannot mismatch families within match src/dst\n"); + return CMD_WARNING_CONFIG_FAILED; + } + pbrms->family = prefix->family; if 
(!no) { diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c index 222a10e751..467bbc8f72 100644 --- a/pbrd/pbr_zebra.c +++ b/pbrd/pbr_zebra.c @@ -266,7 +266,7 @@ static void route_add_helper(struct zapi_route *api, struct nexthop_group nhg, api->prefix.family = install_afi; - DEBUGD(&pbr_dbg_zebra, "\tEncoding %pFX", &api->prefix); + DEBUGD(&pbr_dbg_zebra, " Encoding %pFX", &api->prefix); i = 0; for (ALL_NEXTHOPS(nhg, nhop)) { @@ -409,12 +409,12 @@ static int pbr_zebra_nexthop_update(ZAPI_CALLBACK_ARGS) DEBUGD(&pbr_dbg_zebra, "%s: Received Nexthop update: %pFX", __func__, &nhr.prefix); - DEBUGD(&pbr_dbg_zebra, "%s: (\tNexthops(%u)", __func__, + DEBUGD(&pbr_dbg_zebra, "%s: (Nexthops(%u)", __func__, nhr.nexthop_num); for (i = 0; i < nhr.nexthop_num; i++) { DEBUGD(&pbr_dbg_zebra, - "%s: \tType: %d: vrf: %d, ifindex: %d gate: %pI4", + "%s: Type: %d: vrf: %d, ifindex: %d gate: %pI4", __func__, nhr.nexthops[i].type, nhr.nexthops[i].vrf_id, nhr.nexthops[i].ifindex, &nhr.nexthops[i].gate.ipv4); @@ -585,7 +585,7 @@ bool pbr_send_pbr_map(struct pbr_map_sequence *pbrms, */ stream_putl(s, 1); - DEBUGD(&pbr_dbg_zebra, "%s: \t%s %s seq %u %d %s %u", __func__, + DEBUGD(&pbr_dbg_zebra, "%s: %s %s seq %u %d %s %u", __func__, install ? "Installing" : "Deleting", pbrm->name, pbrms->seqno, install, pmi->ifp->name, pmi->delete); diff --git a/pimd/pim_bfd.c b/pimd/pim_bfd.c index 1d653cdc3f..5e1b9a69e1 100644 --- a/pimd/pim_bfd.c +++ b/pimd/pim_bfd.c @@ -217,7 +217,7 @@ static int pim_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS) { struct interface *ifp = NULL; struct pim_interface *pim_ifp = NULL; - struct prefix p; + struct prefix p, src_p; int status; char msg[100]; int old_status; @@ -227,8 +227,8 @@ static int pim_bfd_interface_dest_update(ZAPI_CALLBACK_ARGS) struct listnode *neigh_nextnode = NULL; struct pim_neighbor *neigh = NULL; - ifp = bfd_get_peer_info(zclient->ibuf, &p, NULL, &status, - NULL, vrf_id); + ifp = bfd_get_peer_info(zclient->ibuf, &p, &src_p, &status, NULL, + vrf_id); if ((ifp == NULL) || (p.family != AF_INET)) return 0; diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index 1acfece895..e873af5759 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -63,7 +63,7 @@ void pim_bsm_write_config(struct vty *vty, struct interface *ifp) } } -static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node) +void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node) { if (bsgrp_node->bsrp_list) list_delete(&bsgrp_node->bsrp_list); @@ -72,7 +72,7 @@ static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node) XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node); } -static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp) +void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp) { struct route_node *rn; @@ -222,7 +222,7 @@ static int pim_on_bs_timer(struct thread *t) return 0; } -static void pim_bs_timer_stop(struct bsm_scope *scope) +void pim_bs_timer_stop(struct bsm_scope *scope) { if (PIM_DEBUG_BSM) zlog_debug("%s : BS timer being stopped of sz: %d", __func__, diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h index 0758c94f19..2829c1e05a 100644 --- a/pimd/pim_bsm.h +++ b/pimd/pim_bsm.h @@ -195,4 +195,7 @@ int pim_bsm_process(struct interface *ifp, bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp); struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope, struct prefix *grp); +void pim_bs_timer_stop(struct bsm_scope *scope); +void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node); +void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp); #endif 
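/*
 * Illustrative sketch only, assuming the bsm_scope/bsgrp_node layout shown in
 * this diff; it is not part of the commit itself.  The pim_bsm.h hunk above
 * exports pim_bs_timer_stop(), pim_free_bsgrp_data() and pim_free_bsgrp_node()
 * (previously static in pim_bsm.c) so other files can tear down
 * bootstrap-router state.  The intended call pattern looks roughly like this:
 */
static void flush_bsrp_table(struct bsm_scope *scope)
{
	struct route_node *rn;
	struct bsgrp_node *bsgrp;

	/* Stop the pending bootstrap timer for this scope zone. */
	pim_bs_timer_stop(scope);

	/* Walk the per-group table, unlinking and freeing each entry. */
	for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
		bsgrp = rn->info;
		if (!bsgrp)
			continue;
		pim_free_bsgrp_node(scope->bsrp_table, &bsgrp->group);
		pim_free_bsgrp_data(bsgrp);
	}
}
/*
 * The 'clear ip pim bsr-data' handler added to pim_cmd.c below follows this
 * pattern, with additional RP and upstream bookkeeping on top of it.
 */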
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 8e08dc724b..714d6e8e1d 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -4049,6 +4049,152 @@ DEFUN (clear_ip_pim_oil, return CMD_SUCCESS; } +static void clear_pim_bsr_db(struct pim_instance *pim) +{ + struct route_node *rn; + struct route_node *rpnode; + struct bsgrp_node *bsgrp; + struct prefix nht_p; + struct prefix g_all; + struct rp_info *rp_all; + struct pim_upstream *up; + struct rp_info *rp_info; + bool is_bsr_tracking = true; + + /* Remove next hop tracking for the bsr */ + nht_p.family = AF_INET; + nht_p.prefixlen = IPV4_MAX_BITLEN; + nht_p.u.prefix4 = pim->global_scope.current_bsr; + if (PIM_DEBUG_BSM) { + zlog_debug("%s: Deregister BSR addr %pFX with Zebra NHT", + __func__, &nht_p); + } + pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL, is_bsr_tracking); + + /* Reset scope zone data */ + pim->global_scope.accept_nofwd_bsm = false; + pim->global_scope.state = ACCEPT_ANY; + pim->global_scope.current_bsr.s_addr = INADDR_ANY; + pim->global_scope.current_bsr_prio = 0; + pim->global_scope.current_bsr_first_ts = 0; + pim->global_scope.current_bsr_last_ts = 0; + pim->global_scope.bsm_frag_tag = 0; + list_delete_all_node(pim->global_scope.bsm_list); + + pim_bs_timer_stop(&pim->global_scope); + + for (rn = route_top(pim->global_scope.bsrp_table); rn; + rn = route_next(rn)) { + bsgrp = rn->info; + if (!bsgrp) + continue; + + rpnode = route_node_lookup(pim->rp_table, &bsgrp->group); + + if (!rpnode) { + pim_free_bsgrp_node(bsgrp->scope->bsrp_table, + &bsgrp->group); + pim_free_bsgrp_data(bsgrp); + continue; + } + + rp_info = (struct rp_info *)rpnode->info; + + if ((!rp_info) || (rp_info->rp_src != RP_SRC_BSR)) { + pim_free_bsgrp_node(bsgrp->scope->bsrp_table, + &bsgrp->group); + pim_free_bsgrp_data(bsgrp); + continue; + } + + /* Deregister addr with Zebra NHT */ + nht_p.family = AF_INET; + nht_p.prefixlen = IPV4_MAX_BITLEN; + nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4; + + if (PIM_DEBUG_PIM_NHT_RP) { + zlog_debug("%s: Deregister RP addr %pFX with Zebra ", + __func__, &nht_p); + } + + pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false); + + if (!str2prefix("224.0.0.0/4", &g_all)) + return; + + rp_all = pim_rp_find_match_group(pim, &g_all); + + if (rp_all == rp_info) { + rp_all->rp.rpf_addr.family = AF_INET; + rp_all->rp.rpf_addr.u.prefix4.s_addr = INADDR_NONE; + rp_all->i_am_rp = 0; + } else { + /* Delete the rp_info from rp-list */ + listnode_delete(pim->rp_list, rp_info); + + /* Delete the rp node from rp_table */ + rpnode->info = NULL; + route_unlock_node(rpnode); + route_unlock_node(rpnode); + } + + XFREE(MTYPE_PIM_RP, rp_info); + + pim_free_bsgrp_node(bsgrp->scope->bsrp_table, &bsgrp->group); + pim_free_bsgrp_data(bsgrp); + } + pim_rp_refresh_group_to_rp_mapping(pim); + + + frr_each (rb_pim_upstream, &pim->upstream_head, up) { + /* Find the upstream (*, G) whose upstream address is same as + * the RP + */ + if (up->sg.src.s_addr != INADDR_ANY) + continue; + + struct prefix grp; + struct rp_info *trp_info; + + grp.family = AF_INET; + grp.prefixlen = IPV4_MAX_BITLEN; + grp.u.prefix4 = up->sg.grp; + + trp_info = pim_rp_find_match_group(pim, &grp); + + /* RP not found for the group grp */ + if (pim_rpf_addr_is_inaddr_none(&trp_info->rp)) { + pim_upstream_rpf_clear(pim, up); + pim_rp_set_upstream_addr(pim, &up->upstream_addr, + up->sg.src, up->sg.grp); + } else { + /* RP found for the group grp */ + pim_upstream_update(pim, up); + } + } +} + + +DEFUN (clear_ip_pim_bsr_db, + clear_ip_pim_bsr_db_cmd, + "clear ip pim [vrf 
NAME] bsr-data", + CLEAR_STR + IP_STR + CLEAR_IP_PIM_STR + VRF_CMD_HELP_STR + "Reset pim bsr data\n") +{ + int idx = 2; + struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx); + + if (!vrf) + return CMD_WARNING; + + clear_pim_bsr_db(vrf->info); + + return CMD_SUCCESS; +} + DEFUN (show_ip_igmp_interface, show_ip_igmp_interface_cmd, "show ip igmp [vrf NAME] interface [detail|WORD] [json]", @@ -10927,9 +11073,9 @@ static void pim_show_vxlan_sg_entry(struct pim_vxlan_sg *vxlan_sg, } } -static void pim_show_vxlan_sg_hash_entry(struct hash_bucket *backet, void *arg) +static void pim_show_vxlan_sg_hash_entry(struct hash_bucket *bucket, void *arg) { - pim_show_vxlan_sg_entry((struct pim_vxlan_sg *)backet->data, + pim_show_vxlan_sg_entry((struct pim_vxlan_sg *)bucket->data, (struct pim_sg_cache_walk_data *)arg); } @@ -11447,6 +11593,7 @@ void pim_cmd_init(void) install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd); install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd); install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd); + install_element(ENABLE_NODE, &clear_ip_pim_bsr_db_cmd); install_element(ENABLE_NODE, &show_debugging_pim_cmd); diff --git a/pimd/pim_hello.c b/pimd/pim_hello.c index e50504ec10..6f5c4174e2 100644 --- a/pimd/pim_hello.c +++ b/pimd/pim_hello.c @@ -95,22 +95,6 @@ static void tlv_trace_uint32_hex(const char *label, const char *tlv_name, } } -#if 0 -static void tlv_trace(const char *label, const char *tlv_name, - const char *ifname, struct in_addr src_addr, - int isset) -{ - if (isset) { - char src_str[INET_ADDRSTRLEN]; - pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str)); - zlog_debug("%s: PIM hello option from %s on interface %s: %s", - label, - src_str, ifname, - tlv_name); - } -} -#endif - static void tlv_trace_list(const char *label, const char *tlv_name, const char *ifname, struct in_addr src_addr, int isset, struct list *addr_list) diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c index e7ff434f4b..cdaf7bcdd4 100644 --- a/pimd/pim_ifchannel.c +++ b/pimd/pim_ifchannel.c @@ -550,8 +550,21 @@ struct pim_ifchannel *pim_ifchannel_add(struct interface *ifp, struct pim_upstream *up; ch = pim_ifchannel_find(ifp, sg); - if (ch) + if (ch) { + if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_PIM) + PIM_IF_FLAG_SET_PROTO_PIM(ch->flags); + + if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_IGMP) + PIM_IF_FLAG_SET_PROTO_IGMP(ch->flags); + + if (ch->upstream) + ch->upstream->flags |= up_flags; + else if (PIM_DEBUG_EVENTS) + zlog_debug("%s:%s No Upstream found", __func__, + pim_str_sg_dump(sg)); + return ch; + } pim_ifp = ifp->info; @@ -642,6 +655,12 @@ static void ifjoin_to_noinfo(struct pim_ifchannel *ch, bool ch_del) { pim_forward_stop(ch, !ch_del); pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_NOINFO); + + if (ch->upstream) + PIM_UPSTREAM_FLAG_UNSET_SRC_PIM(ch->upstream->flags); + + PIM_IF_FLAG_UNSET_PROTO_PIM(ch->flags); + if (ch_del) delete_on_noinfo(ch); } @@ -710,6 +729,21 @@ static int on_ifjoin_prune_pending_timer(struct thread *t) pim_jp_agg_single_upstream_send(&parent->rpf, parent, true); + /* + * SGRpt prune pending expiry has to install + * SG entry with empty olist to drop the SG + * traffic incase no other intf exists. + * On that scenario, SG entry wouldn't have + * got installed until Prune pending timer + * expired. So install now. 
+ */ + pim_channel_del_oif( + ch->upstream->channel_oil, ifp, + PIM_OIF_FLAG_PROTO_STAR, __func__); + if (!ch->upstream->channel_oil->installed) + pim_upstream_mroute_add( + ch->upstream->channel_oil, + __PRETTY_FUNCTION__); } } /* from here ch may have been deleted */ @@ -1094,6 +1128,24 @@ void pim_ifchannel_prune(struct interface *ifp, struct in_addr upstream, case PIM_IFJOIN_PRUNE: if (source_flags & PIM_ENCODE_RPT_BIT) { THREAD_OFF(ch->t_ifjoin_prune_pending_timer); + /* + * While in Prune State, Receive SGRpt Prune. + * RFC 7761 Sec 4.5.3: + * The (S,G,rpt) downstream state machine on interface I + * remains in Prune state. The Expiry Timer (ET) is + * restarted and is then set to the maximum of its + * current value and the HoldTime from the triggering + * Join/Prune message. + */ + if (ch->t_ifjoin_expiry_timer) { + unsigned long rem = thread_timer_remain_second( + ch->t_ifjoin_expiry_timer); + + if (rem > holdtime) + return; + THREAD_OFF(ch->t_ifjoin_expiry_timer); + } + thread_add_timer(router->master, on_ifjoin_expiry_timer, ch, holdtime, &ch->t_ifjoin_expiry_timer); @@ -1272,6 +1324,13 @@ void pim_ifchannel_local_membership_del(struct interface *ifp, * parent' delete_no_info */ } } + + /* Resettng the IGMP flags here */ + if (orig->upstream) + PIM_UPSTREAM_FLAG_UNSET_SRC_IGMP(orig->upstream->flags); + + PIM_IF_FLAG_UNSET_PROTO_IGMP(orig->flags); + delete_on_noinfo(orig); } diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c index 9924e335b0..73e42e9d83 100644 --- a/pimd/pim_igmp.c +++ b/pimd/pim_igmp.c @@ -558,8 +558,8 @@ int pim_igmp_packet(struct igmp_sock *igmp, char *buf, size_t len) igmp_msg, igmp_msg_len); case PIM_IGMP_V2_LEAVE_GROUP: - return igmp_v2_recv_leave(igmp, ip_hdr->ip_src, from_str, - igmp_msg, igmp_msg_len); + return igmp_v2_recv_leave(igmp, ip_hdr, from_str, igmp_msg, + igmp_msg_len); case PIM_IGMP_MTRACE_RESPONSE: return igmp_mtrace_recv_response(igmp, ip_hdr, ip_hdr->ip_src, diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c index d836c66cbb..7f3c7a0f8c 100644 --- a/pimd/pim_igmpv2.c +++ b/pimd/pim_igmpv2.c @@ -158,12 +158,13 @@ int igmp_v2_recv_report(struct igmp_sock *igmp, struct in_addr from, return 0; } -int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from, +int igmp_v2_recv_leave(struct igmp_sock *igmp, struct ip *ip_hdr, const char *from_str, char *igmp_msg, int igmp_msg_len) { struct interface *ifp = igmp->interface; struct in_addr group_addr; char group_str[INET_ADDRSTRLEN]; + struct in_addr from = ip_hdr->ip_src; on_trace(__func__, igmp->interface, from); @@ -184,8 +185,6 @@ int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from, return -1; } - /* Collecting IGMP Rx stats */ - igmp->rx_stats.leave_v2++; memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr)); @@ -195,6 +194,32 @@ int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from, zlog_debug("Recv IGMPv2 LEAVE from %s on %s for %s", from_str, ifp->name, group_str); } + /* + * As per RFC 2236, section 9: + Message Type Destination Group + ------------ ----------------- + General Query ALL-SYSTEMS (224.0.0.1) + Group-Specific Query The group being queried + Membership Report The group being reported + Leave Message ALL-ROUTERS (224.0.0.2) + + Note: in older (i.e., non-standard and now obsolete) versions of + IGMPv2, hosts send Leave Messages to the group being left. A + router SHOULD accept Leave Messages addressed to the group being + left in the interests of backwards compatibility with such hosts. 
+ In all cases, however, hosts MUST send to the ALL-ROUTERS address + to be compliant with this specification. + */ + if ((ntohl(ip_hdr->ip_dst.s_addr) != INADDR_ALLRTRS_GROUP) + && (ip_hdr->ip_dst.s_addr != group_addr.s_addr)) { + if (PIM_DEBUG_IGMP_EVENTS) + zlog_debug( + "IGMPv2 Leave message is ignored since received on address other than ALL-ROUTERS or Group-address"); + return -1; + } + + /* Collecting IGMP Rx stats */ + igmp->rx_stats.leave_v2++; /* * RFC 3376 diff --git a/pimd/pim_igmpv2.h b/pimd/pim_igmpv2.h index f0a6fdc5fb..29591ff16c 100644 --- a/pimd/pim_igmpv2.h +++ b/pimd/pim_igmpv2.h @@ -29,7 +29,7 @@ void igmp_v2_send_query(struct igmp_group *group, int fd, const char *ifname, int igmp_v2_recv_report(struct igmp_sock *igmp, struct in_addr from, const char *from_str, char *igmp_msg, int igmp_msg_len); -int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from, +int igmp_v2_recv_leave(struct igmp_sock *igmp, struct ip *ip_hdr, const char *from_str, char *igmp_msg, int igmp_msg_len); #endif /* PIM_IGMPV2_H */ diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index b7e49078ef..019048abf1 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -71,6 +71,8 @@ static void pim_instance_terminate(struct pim_instance *pim) XFREE(MTYPE_PIM_PLIST_NAME, pim->spt.plist); XFREE(MTYPE_PIM_PLIST_NAME, pim->register_plist); + + pim->vrf = NULL; XFREE(MTYPE_PIM_PIM_INSTANCE, pim); } @@ -153,10 +155,16 @@ static int pim_vrf_delete(struct vrf *vrf) { struct pim_instance *pim = vrf->info; + if (!pim) + return 0; + zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id); pim_ssmpingd_destroy(pim); pim_instance_terminate(pim); + + vrf->info = NULL; + return 0; } diff --git a/pimd/pim_jp_agg.c b/pimd/pim_jp_agg.c index 5279a00855..d95d9dd25d 100644 --- a/pimd/pim_jp_agg.c +++ b/pimd/pim_jp_agg.c @@ -360,11 +360,9 @@ void pim_jp_agg_switch_interface(struct pim_rpf *orpf, struct pim_rpf *nrpf, void pim_jp_agg_single_upstream_send(struct pim_rpf *rpf, struct pim_upstream *up, bool is_join) { - static struct list *groups = NULL; - static struct pim_jp_agg_group jag; - static struct pim_jp_sources js; - - static bool first = true; + struct list groups, sources; + struct pim_jp_agg_group jag; + struct pim_jp_sources js; /* skip JP upstream messages if source is directly connected */ if (!up || !rpf->source_nexthop.interface || @@ -373,19 +371,19 @@ void pim_jp_agg_single_upstream_send(struct pim_rpf *rpf, if_is_loopback_or_vrf(rpf->source_nexthop.interface)) return; - if (first) { - groups = list_new(); - jag.sources = list_new(); - - listnode_add(groups, &jag); - listnode_add(jag.sources, &js); + memset(&groups, 0, sizeof(groups)); + memset(&sources, 0, sizeof(sources)); + jag.sources = &sources; - first = false; - } + listnode_add(&groups, &jag); + listnode_add(jag.sources, &js); jag.group.s_addr = up->sg.grp.s_addr; js.up = up; js.is_join = is_join; - pim_joinprune_send(rpf, groups); + pim_joinprune_send(rpf, &groups); + + list_delete_all_node(jag.sources); + list_delete_all_node(&groups); } diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c index 0bccba397b..afd38face3 100644 --- a/pimd/pim_mroute.c +++ b/pimd/pim_mroute.c @@ -628,7 +628,7 @@ static int pim_mroute_msg(struct pim_instance *pim, const char *buf, ifaddr = connected_src->u.prefix4; igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->igmp_socket_list, ifaddr); - if (PIM_DEBUG_MROUTE) { + if (PIM_DEBUG_IGMP_PACKETS) { zlog_debug( "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4", __func__, pim->vrf->name, ifp->name, 
igmp, @@ -1221,10 +1221,9 @@ void pim_mroute_update_counters(struct channel_oil *c_oil) sg.src = c_oil->oil.mfcc_origin; sg.grp = c_oil->oil.mfcc_mcastgrp; - if (PIM_DEBUG_MROUTE) - zlog_debug( - "Channel%s is not installed no need to collect data from kernel", - pim_str_sg_dump(&sg)); + zlog_debug( + "Channel%s is not installed no need to collect data from kernel", + pim_str_sg_dump(&sg)); } return; } diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c index 7620cd5792..ddd8dc6bf9 100644 --- a/pimd/pim_msdp_socket.c +++ b/pimd/pim_msdp_socket.c @@ -44,7 +44,7 @@ static void pim_msdp_update_sock_send_buffer_size(int fd) if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &optval, &optlen) < 0) { flog_err_sys(EC_LIB_SOCKET, - "getsockopt of SO_SNDBUF failed %s\n", + "getsockopt of SO_SNDBUF failed %s", safe_strerror(errno)); return; } @@ -53,7 +53,7 @@ static void pim_msdp_update_sock_send_buffer_size(int fd) if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) < 0) { flog_err_sys(EC_LIB_SOCKET, - "Couldn't increase send buffer: %s\n", + "Couldn't increase send buffer: %s", safe_strerror(errno)); } } diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index ba044de2f8..4bc78529a8 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -545,7 +545,7 @@ static int pim_cmd_igmp_start(struct interface *ifp) pim_ifp = ifp->info; if (!pim_ifp) { - (void)pim_if_new(ifp, true, false, false, false); + pim_ifp = pim_if_new(ifp, true, false, false, false); need_startup = 1; } else { if (!PIM_IF_TEST_IGMP(pim_ifp->options)) { @@ -553,6 +553,7 @@ static int pim_cmd_igmp_start(struct interface *ifp) need_startup = 1; } } + pim_if_create_pimreg(pim_ifp->pim); /* 'ip igmp' executed multiple times, with need_startup * avoid multiple if add all and membership refresh diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index fa5d6f37bf..dbba6b66d8 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -242,7 +242,7 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim, if (!rn) { flog_err( EC_LIB_DEVELOPMENT, - "%s: BUG We should have found default group information\n", + "%s: BUG We should have found default group information", __func__); return best; } @@ -271,7 +271,7 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim, * * This is a placeholder function for now. */ -static void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim) +void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim) { pim_msdp_i_am_rp_changed(pim); pim_upstream_reeval_use_rpt(pim); diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h index 8a12cb076c..dd7cd5d75e 100644 --- a/pimd/pim_rp.h +++ b/pimd/pim_rp.h @@ -86,4 +86,5 @@ int pim_rp_list_cmp(void *v1, void *v2); struct rp_info *pim_rp_find_match_group(struct pim_instance *pim, const struct prefix *group); void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up); +void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim); #endif diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c index d95b092d94..9899172e6c 100644 --- a/pimd/pim_upstream.c +++ b/pimd/pim_upstream.c @@ -749,6 +749,13 @@ void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up, bool send_xg_jp = false; forward_off(up); + /* + * RFC 4601 Sec 4.5.7: + * JoinDesired(S,G) -> False, set SPTbit to false. 
+ */ + if (up->sg.src.s_addr != INADDR_ANY) + up->sptbit = PIM_UPSTREAM_SPTBIT_FALSE; + if (old_state == PIM_UPSTREAM_JOINED) pim_msdp_up_join_state_changed(pim, up); diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c index 380c97a97c..6a12c7fb13 100644 --- a/pimd/pim_vxlan.c +++ b/pimd/pim_vxlan.c @@ -497,10 +497,10 @@ static void pim_vxlan_orig_mr_del(struct pim_vxlan_sg *vxlan_sg) pim_vxlan_orig_mr_up_del(vxlan_sg); } -static void pim_vxlan_orig_mr_iif_update(struct hash_bucket *backet, void *arg) +static void pim_vxlan_orig_mr_iif_update(struct hash_bucket *bucket, void *arg) { struct interface *ifp; - struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data; + struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data; struct interface *old_iif = vxlan_sg->iif; if (!pim_vxlan_is_orig_mroute(vxlan_sg)) @@ -812,11 +812,11 @@ bool pim_vxlan_do_mlag_reg(void) * to the MLAG peer which may mroute it over the underlay if there are any * interested receivers. */ -static void pim_vxlan_sg_peerlink_oif_update(struct hash_bucket *backet, +static void pim_vxlan_sg_peerlink_oif_update(struct hash_bucket *bucket, void *arg) { struct interface *new_oif = (struct interface *)arg; - struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data; + struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data; if (!pim_vxlan_is_orig_mroute(vxlan_sg)) return; @@ -950,10 +950,10 @@ static void pim_vxlan_up_cost_update(struct pim_instance *pim, } } -static void pim_vxlan_term_mr_cost_update(struct hash_bucket *backet, void *arg) +static void pim_vxlan_term_mr_cost_update(struct hash_bucket *bucket, void *arg) { struct interface *old_peerlink_rif = (struct interface *)arg; - struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data; + struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data; struct pim_upstream *up; struct listnode *listnode; struct pim_upstream *child; @@ -975,11 +975,11 @@ static void pim_vxlan_term_mr_cost_update(struct hash_bucket *backet, void *arg) old_peerlink_rif); } -static void pim_vxlan_sg_peerlink_rif_update(struct hash_bucket *backet, +static void pim_vxlan_sg_peerlink_rif_update(struct hash_bucket *bucket, void *arg) { - pim_vxlan_orig_mr_iif_update(backet, NULL); - pim_vxlan_term_mr_cost_update(backet, arg); + pim_vxlan_orig_mr_iif_update(bucket, NULL); + pim_vxlan_term_mr_cost_update(bucket, arg); } static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim, @@ -1032,10 +1032,10 @@ static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim, } } -static void pim_vxlan_term_mr_oif_update(struct hash_bucket *backet, void *arg) +static void pim_vxlan_term_mr_oif_update(struct hash_bucket *bucket, void *arg) { struct interface *ifp = (struct interface *)arg; - struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data; + struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)bucket->data; if (pim_vxlan_is_orig_mroute(vxlan_sg)) return; diff --git a/pimd/test_igmpv3_join.c b/pimd/test_igmpv3_join.c index bf44f3c94a..3c26517e88 100644 --- a/pimd/test_igmpv3_join.c +++ b/pimd/test_igmpv3_join.c @@ -54,11 +54,6 @@ static int iface_solve_index(const char *ifname) } for (i = 0; ini[i].if_index; ++i) { -#if 0 - fprintf(stderr, - "%s: interface=%s matching against local ifname=%s ifindex=%d\n", - prog_name, ifname, ini[i].if_name, ini[i].if_index); -#endif if (!strcmp(ini[i].if_name, ifname)) { ifindex = ini[i].if_index; break; diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index 4ebc504b8a..02c272f47c 100644 --- 
a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -27,6 +27,7 @@ %{!?with_vrrpd: %global with_vrrpd 1 } %{!?with_rtadv: %global with_rtadv 1 } %{!?with_watchfrr: %global with_watchfrr 1 } +%{!?with_pathd: %global with_pathd 1 } # user and group %{!?frr_user: %global frr_user frr } @@ -87,7 +88,7 @@ %{!?frr_gid: %global frr_gid 92 } %{!?vty_gid: %global vty_gid 85 } -%define daemon_list zebra ripd ospfd bgpd isisd ripngd ospf6d pbrd staticd bfdd fabricd +%define daemon_list zebra ripd ospfd bgpd isisd ripngd ospf6d pbrd staticd bfdd fabricd pathd %if %{with_ldpd} %define daemon_ldpd ldpd @@ -143,7 +144,13 @@ %define daemon_bfdd "" %endif -%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_pimd} %{daemon_nhrpd} %{daemon_eigrpd} %{daemon_babeld} %{daemon_watchfrr} %{daemon_pbrd} %{daemon_bfdd} %{daemon_vrrpd} +%if %{with_pathd} + %define daemon_pathd pathd +%else + %define daemon_pathd "" +%endif + +%define all_daemons %{daemon_list} %{daemon_ldpd} %{daemon_pimd} %{daemon_nhrpd} %{daemon_eigrpd} %{daemon_babeld} %{daemon_watchfrr} %{daemon_pbrd} %{daemon_bfdd} %{daemon_vrrpd} %{daemon_pathd} #release sub-revision (the two digits after the CONFDATE) %{!?release_rev: %global release_rev 01 } @@ -398,6 +405,11 @@ routing state through standard SNMP MIBs. %else --disable-bfdd \ %endif +%if %{with_pathd} + --enable-pathd \ +%else + --disable-pathd \ +%endif --enable-snmp # end @@ -526,6 +538,9 @@ zebra_spec_add_service fabricd 2618/tcp "Fabricd vty" %if %{with_vrrpd} zebra_spec_add_service vrrpd 2619/tcp "VRRPd vty" %endif +%if %{with_pathd} + zebra_spec_add_service pathd 2620/tcp "Pathd vty" +%endif %if "%{initsystem}" == "systemd" for daemon in %all_daemons ; do @@ -681,6 +696,9 @@ fi %if %{with_bfdd} %{_sbindir}/bfdd %endif +%if %{with_pathd} + %{_sbindir}/pathd +%endif %{_libdir}/libfrr.so* %{_libdir}/libfrrcares* %{_libdir}/libfrrospf* @@ -798,6 +816,9 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons - migrate route-maps to use northbound interface - plus countless bug fixes and other improvements +* Mon Jun 15 2020 Sascha Kattelmann <sascha@netdef.org> +- Add Pathd support + * Wed May 06 2020 David Lamparter <equinox@opensourcerouting.org> - 7.3.1 - upstream 7.3.1 diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c index c601ab4047..89e7e5dc17 100644 --- a/ripd/rip_interface.c +++ b/ripd/rip_interface.c @@ -208,39 +208,6 @@ static void rip_request_interface(struct interface *ifp) rip_request_interface_send(ifp, RIPv2); } -#if 0 -/* Send RIP request to the neighbor. */ -static void -rip_request_neighbor (struct in_addr addr) -{ - struct sockaddr_in to; - - memset (&to, 0, sizeof(struct sockaddr_in)); - to.sin_port = htons (RIP_PORT_DEFAULT); - to.sin_addr = addr; - - rip_request_send (&to, NULL, rip->version_send, NULL); -} - -/* Request routes at all interfaces. */ -static void -rip_request_neighbor_all (void) -{ - struct route_node *rp; - - if (! rip) - return; - - if (IS_RIP_DEBUG_EVENT) - zlog_debug ("request to the all neighbor"); - - /* Send request to all neighbor. */ - for (rp = route_top (rip->neighbor); rp; rp = route_next (rp)) - if (rp->info) - rip_request_neighbor (rp->p.u.prefix4); -} -#endif - /* Multicast packet receive socket. 
*/ static int rip_multicast_join(struct interface *ifp, int sock) { diff --git a/ripd/rip_snmp.c b/ripd/rip_snmp.c index be222c7a5f..4e6ed1400f 100644 --- a/ripd/rip_snmp.c +++ b/ripd/rip_snmp.c @@ -547,18 +547,7 @@ static uint8_t *rip2PeerTable(struct variable *v, oid name[], size_t *length, return (uint8_t *)&domain; case RIP2PEERLASTUPDATE: -#if 0 - /* We don't know the SNMP agent startup time. We have two choices here: - * - assume ripd startup time equals SNMP agent startup time - * - don't support this variable, at all - * Currently, we do the latter... - */ - *val_len = sizeof(time_t); - uptime = peer->uptime; /* now - snmp_agent_startup - peer->uptime */ - return (uint8_t *) &uptime; -#else return (uint8_t *)NULL; -#endif case RIP2PEERVERSION: *val_len = sizeof(int); diff --git a/ripd/ripd.c b/ripd/ripd.c index 82dd401f96..a276dedec8 100644 --- a/ripd/ripd.c +++ b/ripd/ripd.c @@ -2858,23 +2858,6 @@ void rip_event(struct rip *rip, enum rip_event event, int sock) } } -#if 0 -static void -rip_update_default_metric (void) -{ - struct route_node *np; - struct rip_info *rinfo = NULL; - struct list *list = NULL; - struct listnode *listnode = NULL; - - for (np = route_top (rip->table); np; np = route_next (np)) - if ((list = np->info) != NULL) - for (ALL_LIST_ELEMENTS_RO (list, listnode, rinfo)) - if (rinfo->type != ZEBRA_ROUTE_RIP && rinfo->type != ZEBRA_ROUTE_CONNECT) - rinfo->metric = rip->default_metric; -} -#endif - struct rip_distance *rip_distance_new(void) { return XCALLOC(MTYPE_RIP_DISTANCE, sizeof(struct rip_distance)); diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c index 1ebdae43fb..a9f570598f 100644 --- a/ripngd/ripngd.c +++ b/ripngd/ripngd.c @@ -2217,136 +2217,6 @@ DEFUN (show_ipv6_ripng_status, return CMD_SUCCESS; } -#if 0 -/* RIPng update timer setup. */ -DEFUN (ripng_update_timer, - ripng_update_timer_cmd, - "update-timer SECOND", - "Set RIPng update timer in seconds\n" - "Seconds\n") -{ - unsigned long update; - char *endptr = NULL; - - update = strtoul (argv[0], &endptr, 10); - if (update == ULONG_MAX || *endptr != '\0') - { - vty_out (vty, "update timer value error\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - ripng->update_time = update; - - ripng_event (RIPNG_UPDATE_EVENT, 0); - return CMD_SUCCESS; -} - -DEFUN (no_ripng_update_timer, - no_ripng_update_timer_cmd, - "no update-timer SECOND", - NO_STR - "Unset RIPng update timer in seconds\n" - "Seconds\n") -{ - ripng->update_time = RIPNG_UPDATE_TIMER_DEFAULT; - ripng_event (RIPNG_UPDATE_EVENT, 0); - return CMD_SUCCESS; -} - -/* RIPng timeout timer setup. */ -DEFUN (ripng_timeout_timer, - ripng_timeout_timer_cmd, - "timeout-timer SECOND", - "Set RIPng timeout timer in seconds\n" - "Seconds\n") -{ - unsigned long timeout; - char *endptr = NULL; - - timeout = strtoul (argv[0], &endptr, 10); - if (timeout == ULONG_MAX || *endptr != '\0') - { - vty_out (vty, "timeout timer value error\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - ripng->timeout_time = timeout; - - return CMD_SUCCESS; -} - -DEFUN (no_ripng_timeout_timer, - no_ripng_timeout_timer_cmd, - "no timeout-timer SECOND", - NO_STR - "Unset RIPng timeout timer in seconds\n" - "Seconds\n") -{ - ripng->timeout_time = RIPNG_TIMEOUT_TIMER_DEFAULT; - return CMD_SUCCESS; -} - -/* RIPng garbage timer setup. 
*/ -DEFUN (ripng_garbage_timer, - ripng_garbage_timer_cmd, - "garbage-timer SECOND", - "Set RIPng garbage timer in seconds\n" - "Seconds\n") -{ - unsigned long garbage; - char *endptr = NULL; - - garbage = strtoul (argv[0], &endptr, 10); - if (garbage == ULONG_MAX || *endptr != '\0') - { - vty_out (vty, "garbage timer value error\n"); - return CMD_WARNING_CONFIG_FAILED; - } - - ripng->garbage_time = garbage; - - return CMD_SUCCESS; -} - -DEFUN (no_ripng_garbage_timer, - no_ripng_garbage_timer_cmd, - "no garbage-timer SECOND", - NO_STR - "Unset RIPng garbage timer in seconds\n" - "Seconds\n") -{ - ripng->garbage_time = RIPNG_GARBAGE_TIMER_DEFAULT; - return CMD_SUCCESS; -} -#endif /* 0 */ - -#if 0 -DEFUN (show_ipv6_protocols, - show_ipv6_protocols_cmd, - "show ipv6 protocols", - SHOW_STR - IPV6_STR - "Routing protocol information\n") -{ - if (! ripng) - return CMD_SUCCESS; - - vty_out (vty, "Routing Protocol is \"ripng\"\n"); - - vty_out (vty, "Sending updates every %ld seconds, next due in %d seconds\n", - ripng->update_time, 0); - - vty_out (vty, "Timerout after %ld seconds, garbage correct %ld\n", - ripng->timeout_time, - ripng->garbage_time); - - vty_out (vty, "Outgoing update filter list for all interfaces is not set"); - vty_out (vty, "Incoming update filter list for all interfaces is not set"); - - return CMD_SUCCESS; -} -#endif - /* Update ECMP routes to zebra when ECMP is disabled. */ void ripng_ecmp_disable(struct ripng *ripng) { @@ -2847,16 +2717,6 @@ void ripng_init(void) install_default(RIPNG_NODE); -#if 0 - install_element (VIEW_NODE, &show_ipv6_protocols_cmd); - install_element (RIPNG_NODE, &ripng_update_timer_cmd); - install_element (RIPNG_NODE, &no_ripng_update_timer_cmd); - install_element (RIPNG_NODE, &ripng_timeout_timer_cmd); - install_element (RIPNG_NODE, &no_ripng_timeout_timer_cmd); - install_element (RIPNG_NODE, &ripng_garbage_timer_cmd); - install_element (RIPNG_NODE, &no_ripng_garbage_timer_cmd); -#endif /* 0 */ - ripng_if_init(); ripng_debug_init(); diff --git a/ripngd/ripngd.h b/ripngd/ripngd.h index a42c32ebb7..14ac29b3fe 100644 --- a/ripngd/ripngd.h +++ b/ripngd/ripngd.h @@ -230,35 +230,6 @@ struct ripng_info { struct agg_node *rp; }; -#ifdef notyet -#if 0 -/* RIPng tag structure. */ -struct ripng_tag -{ - /* Tag value. */ - uint16_t tag; - - /* Port. */ - uint16_t port; - - /* Multicast group. */ - struct in6_addr maddr; - - /* Table number. */ - int table; - - /* Distance. */ - int distance; - - /* Split horizon. */ - uint8_t split_horizon; - - /* Poison reverse. */ - uint8_t poison_reverse; -}; -#endif /* 0 */ -#endif /* not yet */ - typedef enum { RIPNG_NO_SPLIT_HORIZON = 0, RIPNG_SPLIT_HORIZON, @@ -294,13 +265,6 @@ struct ripng_interface { /* Route-map. */ struct route_map *routemap[RIPNG_FILTER_MAX]; -#ifdef notyet -#if 0 - /* RIPng tag configuration. */ - struct ripng_tag *rtag; -#endif /* 0 */ -#endif /* notyet */ - /* Default information originate. */ uint8_t default_originate; diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c index 4445bc0132..fed732b843 100644 --- a/sharpd/sharp_zebra.c +++ b/sharpd/sharp_zebra.c @@ -539,6 +539,7 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg, struct zapi_nhg api_nhg = {}; struct zapi_nexthop *api_nh; struct nexthop *nh; + bool is_valid = true; api_nhg.id = id; for (ALL_NEXTHOPS_PTR(nhg, nh)) { @@ -549,12 +550,25 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg, break; } + /* Unresolved nexthops will lead to failure - only send + * nexthops that zebra will consider valid. 
+ */ + if (nh->ifindex == 0) + continue; + api_nh = &api_nhg.nexthops[api_nhg.nexthop_num]; zapi_nexthop_from_nexthop(api_nh, nh); api_nhg.nexthop_num++; } + if (api_nhg.nexthop_num == 0) { + zlog_debug("%s: nhg %u not sent: no valid nexthops", + __func__, id); + is_valid = false; + goto done; + } + if (backup_nhg) { for (ALL_NEXTHOPS_PTR(backup_nhg, nh)) { if (api_nhg.backup_nexthop_num >= MULTIPATH_NUM) { @@ -563,6 +577,20 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg, __func__); break; } + + /* Unresolved nexthop: will be rejected by zebra. + * That causes a problem, since the primary nexthops + * rely on array indexing into the backup nexthops. If + * that array isn't valid, the backup indexes won't be + * valid. + */ + if (nh->ifindex == 0) { + zlog_debug("%s: nhg %u: invalid backup nexthop", + __func__, id); + is_valid = false; + break; + } + api_nh = &api_nhg.backup_nexthops [api_nhg.backup_nexthop_num]; @@ -571,7 +599,9 @@ void nhg_add(uint32_t id, const struct nexthop_group *nhg, } } - zclient_nhg_send(zclient, ZEBRA_NHG_ADD, &api_nhg); +done: + if (is_valid) + zclient_nhg_send(zclient, ZEBRA_NHG_ADD, &api_nhg); } void nhg_del(uint32_t id) diff --git a/snapcraft/snapcraft.yaml.in b/snapcraft/snapcraft.yaml.in index c0c31b5fd1..1836f34979 100644 --- a/snapcraft/snapcraft.yaml.in +++ b/snapcraft/snapcraft.yaml.in @@ -259,7 +259,7 @@ parts: - usr/lib/x86_64-linux-gnu/libssh.so* source: https://github.com/rtrlib/rtrlib.git source-type: git - source-tag: v0.6.3 + source-tag: v0.7.0 plugin: cmake configflags: - -DCMAKE_BUILD_TYPE=Release @@ -392,6 +392,6 @@ parts: passthrough: layout: - /usr/lib/x86_64-linux-gnu/libyang: - bind: $SNAP/usr/lib/x86_64-linux-gnu/libyang + /usr/lib/x86_64-linux-gnu/libyang1: + bind: $SNAP/usr/lib/x86_64-linux-gnu/libyang1 diff --git a/staticd/static_nb.c b/staticd/static_nb.c index 51704426f0..a2a14751cf 100644 --- a/staticd/static_nb.c +++ b/staticd/static_nb.c @@ -47,12 +47,6 @@ const struct frr_yang_module_info frr_staticd_info = { } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/table-id", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_table_id_modify, - } - }, - { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop", .cbs = { .apply_finish = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_apply_finish, @@ -72,7 +66,6 @@ const struct frr_yang_module_info frr_staticd_info = { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/onlink", .cbs = { .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_destroy, } }, { @@ -132,12 +125,6 @@ const struct frr_yang_module_info frr_staticd_info = { } }, { - .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/table-id", - .cbs = { - .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_table_id_modify, - } - }, - { .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop", .cbs = { .apply_finish = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_apply_finish, @@ -157,7 +144,6 @@ const struct frr_yang_module_info frr_staticd_info = { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop/onlink", .cbs = { .modify = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify, - .destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_destroy, } }, { diff --git a/staticd/static_nb.h b/staticd/static_nb.h index d145c31f77..e85e1d0e9f 100644 --- a/staticd/static_nb.h +++ b/staticd/static_nb.h @@ -31,8 +31,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa struct nb_cb_destroy_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_tag_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_table_id_modify( - struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_create( struct nb_cb_create_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_destroy( @@ -43,8 +41,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa struct nb_cb_destroy_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_destroy( - struct nb_cb_destroy_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_color_modify( struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_color_destroy( @@ -75,8 +71,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr struct nb_cb_destroy_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_tag_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_table_id_modify( - struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create( struct nb_cb_create_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_destroy( @@ -87,8 +81,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr struct nb_cb_destroy_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify( struct nb_cb_modify_args *args); -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_destroy( - struct nb_cb_destroy_args 
*args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_modify( struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_color_destroy( @@ -134,13 +126,11 @@ int routing_control_plane_protocols_name_validate( "/frr-routing:routing/control-plane-protocols/" \ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \ - "path-list[distance='%u']" + "path-list[table-id='%u'][distance='%u']" #define FRR_STATIC_ROUTE_PATH_TAG_XPATH "/tag" -#define FRR_STATIC_ROUTE_PATH_TABLEID_XPATH "/table-id" - /* route-list/frr-nexthops */ #define FRR_STATIC_ROUTE_NH_KEY_XPATH \ "/frr-nexthops/" \ @@ -161,7 +151,7 @@ int routing_control_plane_protocols_name_validate( "/frr-routing:routing/control-plane-protocols/" \ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ "frr-staticd:staticd/route-list[prefix='%s'][afi-safi='%s']/" \ - "src-list[src-prefix='%s']/path-list[distance='%u']" + "src-list[src-prefix='%s']/path-list[table-id='%u'][distance='%u']" /* route-list/frr-nexthops */ #define FRR_DEL_S_ROUTE_NH_KEY_XPATH \ diff --git a/staticd/static_nb_config.c b/staticd/static_nb_config.c index 6e59f50a00..bf669957bf 100644 --- a/staticd/static_nb_config.c +++ b/staticd/static_nb_config.c @@ -35,17 +35,40 @@ static int static_path_list_create(struct nb_cb_create_args *args) { struct route_node *rn; struct static_path *pn; + const struct lyd_node *vrf_dnode; + const char *vrf; uint8_t distance; + uint32_t table_id; switch (args->event) { case NB_EV_VALIDATE: + vrf_dnode = yang_dnode_get_parent(args->dnode, + "control-plane-protocol"); + vrf = yang_dnode_get_string(vrf_dnode, "./vrf"); + table_id = yang_dnode_get_uint32(args->dnode, "./table-id"); + + /* + * TableId is not applicable for VRF. Consider the case of + * l3mdev, there is one uint32_t space to work with. + * A l3mdev device points at a specific table that it + * relates to and a set of interfaces it belongs to. 
+ */ + if (table_id && (strcmp(vrf, vrf_get_default_name()) != 0) + && !vrf_is_backend_netns()) { + snprintf( + args->errmsg, args->errmsg_len, + "%% table param only available when running on netns-based vrfs"); + return NB_ERR_VALIDATION; + } + break; case NB_EV_ABORT: case NB_EV_PREPARE: break; case NB_EV_APPLY: rn = nb_running_get_entry(args->dnode, NULL, true); distance = yang_dnode_get_uint8(args->dnode, "./distance"); - pn = static_add_path(rn, distance); + table_id = yang_dnode_get_uint32(args->dnode, "./table-id"); + pn = static_add_path(rn, table_id, distance); nb_running_set_entry(args->dnode, pn); } @@ -80,44 +103,6 @@ static void static_path_list_tag_modify(struct nb_cb_modify_args *args, static_install_path(rn, pn, info->safi, info->svrf); } -static int static_path_list_tableid_modify(struct nb_cb_modify_args *args, - const struct lyd_node *rn_dnode, - struct stable_info *info) -{ - struct static_path *pn; - struct route_node *rn; - uint32_t table_id; - const struct lyd_node *vrf_dnode; - const char *vrf; - - switch (args->event) { - case NB_EV_VALIDATE: - vrf_dnode = yang_dnode_get_parent(args->dnode, - "control-plane-protocol"); - vrf = yang_dnode_get_string(vrf_dnode, "./vrf"); - table_id = yang_dnode_get_uint32(args->dnode, NULL); - if (table_id && (strcmp(vrf, vrf_get_default_name()) != 0) - && !vrf_is_backend_netns()) { - snprintf(args->errmsg, args->errmsg_len, - "%% table param only available when running on netns-based vrfs"); - return NB_ERR_VALIDATION; - } - break; - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - table_id = yang_dnode_get_uint32(args->dnode, NULL); - pn = nb_running_get_entry(args->dnode, NULL, true); - pn->table_id = table_id; - rn = nb_running_get_entry(rn_dnode, NULL, true); - static_install_path(rn, pn, info->safi, info->svrf); - break; - } - - return NB_OK; -} - static bool static_nexthop_create(struct nb_cb_create_args *args, const struct lyd_node *rn_dnode, struct stable_info *info) @@ -302,9 +287,27 @@ static int static_nexthop_mpls_label_modify(struct nb_cb_modify_args *args) static int static_nexthop_onlink_modify(struct nb_cb_modify_args *args) { struct static_nexthop *nh; + static_types nh_type; - nh = nb_running_get_entry(args->dnode, NULL, true); - nh->onlink = yang_dnode_get_bool(args->dnode, NULL); + switch (args->event) { + case NB_EV_VALIDATE: + nh_type = yang_dnode_get_enum(args->dnode, "../nh-type"); + if ((nh_type != STATIC_IPV4_GATEWAY_IFNAME) + && (nh_type != STATIC_IPV6_GATEWAY_IFNAME)) { + snprintf( + args->errmsg, args->errmsg_len, + "nexthop type is not the ipv4 or ipv6 interface type"); + return NB_ERR_VALIDATION; + } + break; + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + nh = nb_running_get_entry(args->dnode, NULL, true); + nh->onlink = yang_dnode_get_bool(args->dnode, NULL); + break; + } return NB_OK; } @@ -332,9 +335,25 @@ static int static_nexthop_color_destroy(struct nb_cb_destroy_args *args) static int static_nexthop_bh_type_modify(struct nb_cb_modify_args *args) { struct static_nexthop *nh; + static_types nh_type; - nh = nb_running_get_entry(args->dnode, NULL, true); - nh->bh_type = yang_dnode_get_enum(args->dnode, NULL); + switch (args->event) { + case NB_EV_VALIDATE: + nh_type = yang_dnode_get_enum(args->dnode, "../nh-type"); + if (nh_type != STATIC_BLACKHOLE) { + snprintf(args->errmsg, args->errmsg_len, + "nexthop type is not the blackhole type"); + return NB_ERR_VALIDATION; + } + break; + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + 
nh = nb_running_get_entry(args->dnode, NULL, true); + nh->bh_type = yang_dnode_get_enum(args->dnode, NULL); + break; + } return NB_OK; } @@ -580,38 +599,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/table-id - */ -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_table_id_modify( - struct nb_cb_modify_args *args) -{ - struct route_node *rn; - const struct lyd_node *rn_dnode; - struct stable_info *info; - - switch (args->event) { - case NB_EV_VALIDATE: - if (static_path_list_tableid_modify(args, NULL, NULL) != NB_OK) - return NB_ERR_VALIDATION; - break; - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - rn_dnode = yang_dnode_get_parent(args->dnode, "route-list"); - rn = nb_running_get_entry(rn_dnode, NULL, true); - info = route_table_get_info(rn->table); - - if (static_path_list_tableid_modify(args, rn_dnode, info) - != NB_OK) - return NB_ERR_VALIDATION; - break; - } - return NB_OK; -} - -/* - * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop */ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_create( @@ -636,7 +623,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa info = route_table_get_info(rn->table); if (static_nexthop_create(args, rn_dnode, info) != NB_OK) - return NB_ERR_VALIDATION; + return NB_ERR_INCONSISTENCY; break; } return NB_OK; @@ -673,17 +660,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_bh_type_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_bh_type_modify(args) != NB_OK) - return NB_ERR; - break; - } - return NB_OK; + return static_nexthop_bh_type_modify(args); } int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_bh_type_destroy( @@ -709,33 +686,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_pa int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_onlink_modify(args) != NB_OK) - return NB_ERR; - - break; - } - return NB_OK; -} -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_onlink_destroy( - struct nb_cb_destroy_args *args) -{ - /* onlink has a boolean type with default value, - * so no need to do any operations in destroy callback - */ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; - } - return NB_OK; + return static_nexthop_onlink_modify(args); } /* @@ -1023,41 +974,6 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr /* * XPath: - * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/table-id - */ -int 
routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_table_id_modify( - struct nb_cb_modify_args *args) -{ - struct route_node *rn; - const struct lyd_node *rn_dnode; - const struct lyd_node *src_dnode; - struct stable_info *info; - - switch (args->event) { - case NB_EV_VALIDATE: - if (static_path_list_tableid_modify(args, NULL, NULL) != NB_OK) - return NB_ERR_VALIDATION; - break; - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - src_dnode = yang_dnode_get_parent(args->dnode, "src-list"); - rn_dnode = yang_dnode_get_parent(src_dnode, "route-list"); - rn = nb_running_get_entry(rn_dnode, NULL, true); - info = route_table_get_info(rn->table); - - if (static_path_list_tableid_modify(args, src_dnode, info) - != NB_OK) - return NB_ERR_VALIDATION; - - break; - } - return NB_OK; -} - -/* - * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list/path-list/frr-nexthops/nexthop */ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_create( @@ -1124,17 +1040,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_bh_type_modify(args) != NB_OK) - return NB_ERR; - break; - } - return NB_OK; + return static_nexthop_bh_type_modify(args); } int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_bh_type_destroy( @@ -1161,35 +1067,7 @@ int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_sr int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_modify( struct nb_cb_modify_args *args) { - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; - case NB_EV_APPLY: - if (static_nexthop_onlink_modify(args) != NB_OK) - return NB_ERR; - - break; - } - return NB_OK; -} - - -int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_path_list_frr_nexthops_nexthop_onlink_destroy( - struct nb_cb_destroy_args *args) -{ - /* onlink has a boolean type with default value, - * so no need to do any operations in destroy callback - */ - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - case NB_EV_APPLY: - break; - } - return NB_OK; + return static_nexthop_onlink_modify(args); } /* diff --git a/staticd/static_routes.c b/staticd/static_routes.c index 05355c48fe..1c436a66b0 100644 --- a/staticd/static_routes.c +++ b/staticd/static_routes.c @@ -161,7 +161,8 @@ bool static_add_nexthop_validate(struct static_vrf *svrf, static_types type, return true; } -struct static_path *static_add_path(struct route_node *rn, uint8_t distance) +struct static_path *static_add_path(struct route_node *rn, uint32_t table_id, + uint8_t distance) { struct static_path *pn; struct static_route_info *si; @@ -172,6 +173,7 @@ struct static_path *static_add_path(struct route_node *rn, uint8_t distance) pn = XCALLOC(MTYPE_STATIC_PATH, sizeof(struct static_path)); pn->distance = distance; + pn->table_id = table_id; static_nexthop_list_init(&(pn->nexthop_list)); si = rn->info; 
diff --git a/staticd/static_routes.h b/staticd/static_routes.h index bd2cd78fd9..470afb605d 100644 --- a/staticd/static_routes.h +++ b/staticd/static_routes.h @@ -187,7 +187,7 @@ extern void static_del_route(struct route_node *rn, safi_t safi, struct static_vrf *svrf); extern struct static_path *static_add_path(struct route_node *rn, - uint8_t distance); + uint32_t table_id, uint8_t distance); extern void static_del_path(struct route_node *rn, struct static_path *pn, safi_t safi, struct static_vrf *svrf); diff --git a/staticd/static_vty.c b/staticd/static_vty.c index c3c453f42d..1488cc1775 100644 --- a/staticd/static_vty.c +++ b/staticd/static_vty.c @@ -68,7 +68,6 @@ static int static_route_leak(struct vty *vty, const char *svrf, char buf_src_prefix[PREFIX_STRLEN]; char buf_nh_type[PREFIX_STRLEN]; char buf_tag[PREFIX_STRLEN]; - char buf_tableid[PREFIX_STRLEN]; uint8_t label_stack_id = 0; const char *buf_gate_str; uint8_t distance = ZEBRA_STATIC_DISTANCE_DEFAULT; @@ -162,14 +161,14 @@ static int static_route_leak(struct vty *vty, const char *svrf, "frr-staticd:staticd", "staticd", svrf, buf_prefix, yang_afi_safi_value2identity(afi, safi), - buf_src_prefix, distance); + buf_src_prefix, table_id, distance); else snprintf(xpath_prefix, sizeof(xpath_prefix), FRR_STATIC_ROUTE_INFO_KEY_XPATH, "frr-staticd:staticd", "staticd", svrf, buf_prefix, yang_afi_safi_value2identity(afi, safi), - distance); + table_id, distance); nb_cli_enqueue_change(vty, xpath_prefix, NB_OP_CREATE, NULL); @@ -180,12 +179,6 @@ static int static_route_leak(struct vty *vty, const char *svrf, sizeof(ab_xpath)); nb_cli_enqueue_change(vty, ab_xpath, NB_OP_MODIFY, buf_tag); - /* Table-Id processing */ - snprintf(buf_tableid, sizeof(buf_tableid), "%u", table_id); - strlcpy(ab_xpath, xpath_prefix, sizeof(ab_xpath)); - strlcat(ab_xpath, FRR_STATIC_ROUTE_PATH_TABLEID_XPATH, - sizeof(ab_xpath)); - nb_cli_enqueue_change(vty, ab_xpath, NB_OP_MODIFY, buf_tableid); /* nexthop processing */ snprintf(ab_xpath, sizeof(ab_xpath), @@ -289,16 +282,16 @@ static int static_route_leak(struct vty *vty, const char *svrf, "frr-staticd:staticd", "staticd", svrf, buf_prefix, yang_afi_safi_value2identity(afi, safi), - buf_src_prefix, distance, buf_nh_type, nh_svrf, - buf_gate_str, ifname); + buf_src_prefix, table_id, distance, + buf_nh_type, nh_svrf, buf_gate_str, ifname); else snprintf(ab_xpath, sizeof(ab_xpath), FRR_DEL_S_ROUTE_NH_KEY_XPATH, "frr-staticd:staticd", "staticd", svrf, buf_prefix, yang_afi_safi_value2identity(afi, safi), - distance, buf_nh_type, nh_svrf, buf_gate_str, - ifname); + table_id, distance, buf_nh_type, nh_svrf, + buf_gate_str, ifname); dnode = yang_dnode_get(vty->candidate_config->dnode, ab_xpath); if (!dnode) diff --git a/tests/.gitignore b/tests/.gitignore index 5e809a81e6..ca20b0ecac 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -47,7 +47,9 @@ /lib/test_ttable /lib/test_typelist /lib/test_versioncmp +/lib/test_xref /lib/test_zlog /lib/test_zmq /ospf6d/test_lsdb /ospf6d/test_lsdb_clippy.c +/zebra/test_lm_plugin
\ No newline at end of file diff --git a/tests/bgpd/test_aspath.c b/tests/bgpd/test_aspath.c index 439891b559..936ffaaad5 100644 --- a/tests/bgpd/test_aspath.c +++ b/tests/bgpd/test_aspath.c @@ -1265,7 +1265,8 @@ int main(void) { int i = 0; qobj_init(); - bgp_master_init(thread_master_create(NULL), BGP_SOCKET_SNDBUF_SIZE); + bgp_master_init(thread_master_create(NULL), BGP_SOCKET_SNDBUF_SIZE, + list_new()); master = bm->master; bgp_option_set(BGP_OPT_NO_LISTEN); bgp_attr_init(); diff --git a/tests/bgpd/test_capability.c b/tests/bgpd/test_capability.c index 1b3f90434b..153b83897d 100644 --- a/tests/bgpd/test_capability.c +++ b/tests/bgpd/test_capability.c @@ -912,7 +912,7 @@ int main(void) qobj_init(); master = thread_master_create(NULL); - bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); + bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); vrf_init(NULL, NULL, NULL, NULL, NULL); bgp_option_set(BGP_OPT_NO_LISTEN); diff --git a/tests/bgpd/test_mp_attr.c b/tests/bgpd/test_mp_attr.c index 7fabaad7fa..f510760913 100644 --- a/tests/bgpd/test_mp_attr.c +++ b/tests/bgpd/test_mp_attr.c @@ -1086,7 +1086,7 @@ int main(void) cmd_init(0); bgp_vty_init(); master = thread_master_create("test mp attr"); - bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); + bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); vrf_init(NULL, NULL, NULL, NULL, NULL); bgp_option_set(BGP_OPT_NO_LISTEN); bgp_attr_init(); diff --git a/tests/bgpd/test_mpath.c b/tests/bgpd/test_mpath.c index 520c460f15..92efd4c3d6 100644 --- a/tests/bgpd/test_mpath.c +++ b/tests/bgpd/test_mpath.c @@ -393,7 +393,7 @@ static int global_test_init(void) qobj_init(); master = thread_master_create(NULL); zclient = zclient_new(master, &zclient_options_default); - bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); + bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); vrf_init(NULL, NULL, NULL, NULL, NULL); bgp_option_set(BGP_OPT_NO_LISTEN); diff --git a/tests/bgpd/test_packet.c b/tests/bgpd/test_packet.c index d2c093fbea..db5918745d 100644 --- a/tests/bgpd/test_packet.c +++ b/tests/bgpd/test_packet.c @@ -59,7 +59,7 @@ int main(int argc, char *argv[]) qobj_init(); bgp_attr_init(); master = thread_master_create(NULL); - bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); + bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); vrf_init(NULL, NULL, NULL, NULL, NULL); bgp_option_set(BGP_OPT_NO_LISTEN); diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c index 0a15886c10..123d97bc97 100644 --- a/tests/bgpd/test_peer_attr.c +++ b/tests/bgpd/test_peer_attr.c @@ -1399,7 +1399,7 @@ static void bgp_startup(void) master = thread_master_create(NULL); yang_init(true); nb_init(master, bgpd_yang_modules, array_size(bgpd_yang_modules), false); - bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE); + bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); bgp_option_set(BGP_OPT_NO_LISTEN); vrf_init(NULL, NULL, NULL, NULL, NULL); frr_pthread_init(); diff --git a/tests/isisd/test_common.c b/tests/isisd/test_common.c index 5fa604c749..5b2028ffd4 100644 --- a/tests/isisd/test_common.c +++ b/tests/isisd/test_common.c @@ -69,6 +69,24 @@ test_find_adjacency(const struct isis_test_node *tnode, const char *hostname) return NULL; } +mpls_label_t test_topology_node_ldp_label(const struct isis_topology *topology, + struct in_addr router_id) +{ + for (size_t i = 0; topology->nodes[i].hostname[0]; i++) { + const struct isis_test_node *tnode = &topology->nodes[i]; + struct in_addr node_router_id; + + if (!tnode->router_id) + continue; + + 
(void)inet_pton(AF_INET, tnode->router_id, &node_router_id); + if (IPV4_ADDR_SAME(&router_id, &node_router_id)) + return (50000 + (i + 1) * 100); + } + + return MPLS_INVALID_LABEL; +} + static struct isis_lsp *lsp_add(struct lspdb_head *lspdb, struct isis_area *area, int level, const uint8_t *sysid, uint8_t pseudonode_id) diff --git a/tests/isisd/test_common.h b/tests/isisd/test_common.h index 6fd0d3813e..3359a893ac 100644 --- a/tests/isisd/test_common.h +++ b/tests/isisd/test_common.h @@ -70,6 +70,9 @@ test_topology_find_node(const struct isis_topology *topology, const char *hostname, uint8_t pseudonode_id); extern const struct isis_topology * test_topology_find(struct isis_topology *test_topologies, uint16_t number); +extern mpls_label_t +test_topology_node_ldp_label(const struct isis_topology *topology, + struct in_addr router_id); extern int test_topology_load(const struct isis_topology *topology, struct isis_area *area, struct lspdb_head lspdb[]); diff --git a/tests/isisd/test_isis_spf.c b/tests/isisd/test_isis_spf.c index 36ef93669b..e06944a037 100644 --- a/tests/isisd/test_isis_spf.c +++ b/tests/isisd/test_isis_spf.c @@ -31,6 +31,7 @@ #include "isisd/isisd.h" #include "isisd/isis_dynhn.h" #include "isisd/isis_misc.h" +#include "isisd/isis_route.h" #include "isisd/isis_spf.h" #include "isisd/isis_spf_private.h" @@ -40,6 +41,7 @@ enum test_type { TEST_SPF = 1, TEST_REVERSE_SPF, TEST_LFA, + TEST_RLFA, TEST_TI_LFA, }; @@ -105,6 +107,86 @@ static void test_run_lfa(struct vty *vty, const struct isis_topology *topology, isis_spftree_del(spftree_self); } +static void test_run_rlfa(struct vty *vty, const struct isis_topology *topology, + const struct isis_test_node *root, + struct isis_area *area, struct lspdb_head *lspdb, + int level, int tree, + struct lfa_protected_resource *protected_resource) +{ + struct isis_spftree *spftree_self; + struct isis_spftree *spftree_reverse; + struct isis_spftree *spftree_pc; + struct isis_spf_node *spf_node, *node; + struct rlfa *rlfa; + uint8_t flags; + + /* Run forward SPF in the root node. */ + flags = F_SPFTREE_NO_ADJACENCIES; + spftree_self = isis_spftree_new(area, lspdb, root->sysid, level, tree, + SPF_TYPE_FORWARD, flags); + isis_run_spf(spftree_self); + + /* Run reverse SPF in the root node. */ + spftree_reverse = isis_spf_reverse_run(spftree_self); + + /* Run forward SPF on all adjacent routers. */ + isis_spf_run_neighbors(spftree_self); + + /* Compute the local LFA repair paths. */ + isis_lfa_compute(area, NULL, spftree_self, protected_resource); + + /* Compute the remote LFA repair paths. */ + spftree_pc = isis_rlfa_compute(area, spftree_self, spftree_reverse, 0, + protected_resource); + + /* Print the extended P-space and Q-space. */ + vty_out(vty, "P-space (self):\n"); + RB_FOREACH (node, isis_spf_nodes, &spftree_pc->lfa.p_space) + vty_out(vty, " %s\n", print_sys_hostname(node->sysid)); + vty_out(vty, "\n"); + RB_FOREACH (spf_node, isis_spf_nodes, &spftree_self->adj_nodes) { + if (RB_EMPTY(isis_spf_nodes, &spf_node->lfa.p_space)) + continue; + vty_out(vty, "P-space (%s):\n", + print_sys_hostname(spf_node->sysid)); + RB_FOREACH (node, isis_spf_nodes, &spf_node->lfa.p_space) + vty_out(vty, " %s\n", print_sys_hostname(node->sysid)); + vty_out(vty, "\n"); + } + vty_out(vty, "Q-space:\n"); + RB_FOREACH (node, isis_spf_nodes, &spftree_pc->lfa.q_space) + vty_out(vty, " %s\n", print_sys_hostname(node->sysid)); + vty_out(vty, "\n"); + + /* Print the post-convergence SPT. 
*/ + isis_print_spftree(vty, spftree_pc); + + /* + * Activate the computed RLFAs (if any) using artificial LDP labels for + * the PQ nodes. + */ + frr_each_safe (rlfa_tree, &spftree_self->lfa.remote.rlfas, rlfa) { + struct zapi_rlfa_response response = {}; + + response.pq_label = test_topology_node_ldp_label( + topology, rlfa->pq_address); + assert(response.pq_label != MPLS_INVALID_LABEL); + isis_rlfa_activate(spftree_self, rlfa, &response); + } + + /* Print the SPT and the corresponding main/backup routing tables. */ + isis_print_spftree(vty, spftree_self); + vty_out(vty, "Main:\n"); + isis_print_routes(vty, spftree_self, false, false); + vty_out(vty, "Backup:\n"); + isis_print_routes(vty, spftree_self, false, true); + + /* Cleanup everything. */ + isis_spftree_del(spftree_self); + isis_spftree_del(spftree_reverse); + isis_spftree_del(spftree_pc); +} + static void test_run_ti_lfa(struct vty *vty, const struct isis_topology *topology, const struct isis_test_node *root, @@ -242,6 +324,11 @@ static int test_run(struct vty *vty, const struct isis_topology *topology, &area->lspdb[level - 1], level, tree, &protected_resource); break; + case TEST_RLFA: + test_run_rlfa(vty, topology, root, area, + &area->lspdb[level - 1], level, + tree, &protected_resource); + break; case TEST_TI_LFA: test_run_ti_lfa(vty, topology, root, area, &area->lspdb[level - 1], level, @@ -266,6 +353,7 @@ DEFUN(test_isis, test_isis_cmd, spf\ |reverse-spf\ |lfa system-id WORD [pseudonode-id <1-255>]\ + |remote-lfa system-id WORD [pseudonode-id <1-255>]\ |ti-lfa system-id WORD [pseudonode-id <1-255>] [node-protection]\ >\ [display-lspdb] [<ipv4-only|ipv6-only>] [<level-1-only|level-2-only>]", @@ -282,6 +370,11 @@ DEFUN(test_isis, test_isis_cmd, "System ID\n" "Pseudonode-ID\n" "Pseudonode-ID\n" + "Remote LFA\n" + "System ID\n" + "System ID\n" + "Pseudonode-ID\n" + "Pseudonode-ID\n" "Topology Independent LFA\n" "System ID\n" "System ID\n" @@ -335,6 +428,14 @@ DEFUN(test_isis, test_isis_cmd, fail_pseudonode_id = strtoul(argv[idx + 1]->arg, NULL, 10); protection_type = LFA_LINK_PROTECTION; + } else if (argv_find(argv, argc, "remote-lfa", &idx)) { + test_type = TEST_RLFA; + + fail_sysid_str = argv[idx + 2]->arg; + if (argv_find(argv, argc, "pseudonode-id", &idx)) + fail_pseudonode_id = + strtoul(argv[idx + 1]->arg, NULL, 10); + protection_type = LFA_LINK_PROTECTION; } else if (argv_find(argv, argc, "ti-lfa", &idx)) { test_type = TEST_TI_LFA; diff --git a/tests/isisd/test_isis_spf.in b/tests/isisd/test_isis_spf.in index 93e18124e6..f8f65ffdf7 100644 --- a/tests/isisd/test_isis_spf.in +++ b/tests/isisd/test_isis_spf.in @@ -31,6 +31,18 @@ test isis topology 14 root rt1 lfa system-id rt1 pseudonode-id 1 test isis topology 14 root rt1 lfa system-id rt2 test isis topology 14 root rt5 lfa system-id rt4 +test isis topology 1 root rt1 remote-lfa system-id rt2 +test isis topology 2 root rt5 remote-lfa system-id rt1 pseudonode-id 1 +test isis topology 3 root rt5 remote-lfa system-id rt4 ipv4-only +test isis topology 3 root rt5 remote-lfa system-id rt3 ipv4-only +test isis topology 5 root rt1 remote-lfa system-id rt2 ipv4-only +test isis topology 6 root rt4 remote-lfa system-id rt3 ipv4-only +test isis topology 7 root rt11 remote-lfa system-id rt8 ipv4-only +test isis topology 7 root rt6 remote-lfa system-id rt5 ipv4-only +test isis topology 8 root rt2 remote-lfa system-id rt5 ipv4-only +test isis topology 11 root rt2 remote-lfa system-id rt4 +test isis topology 13 root rt1 remote-lfa system-id rt3 ipv4-only + test isis topology 1 root rt1 
ti-lfa system-id rt2 test isis topology 2 root rt1 ti-lfa system-id rt3 test isis topology 2 root rt1 ti-lfa system-id rt1 pseudonode-id 1 diff --git a/tests/isisd/test_isis_spf.refout b/tests/isisd/test_isis_spf.refout index dced6fb103..024f7256e0 100644 --- a/tests/isisd/test_isis_spf.refout +++ b/tests/isisd/test_isis_spf.refout @@ -1807,6 +1807,1227 @@ IS-IS L1 IPv6 routing table: 2001:db8::4/128 60 - rt3 -
test#
+test# test isis topology 1 root rt1 remote-lfa system-id rt2
+P-space (self):
+ rt3
+ rt5
+
+P-space (rt3):
+ rt3
+ rt5
+ rt6
+
+Q-space:
+ rt2
+ rt4
+ rt6
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+10.0.255.1/32 IP internal 0 rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt6 TE-IS 30 rt3 - rt5(4)
+10.0.255.5/32 IP TE 30 rt3 - rt5(4)
+rt4 TE-IS 40 rt3 - rt6(4)
+10.0.255.6/32 IP TE 40 rt3 - rt6(4)
+rt2 TE-IS 50 rt3 - rt4(4)
+10.0.255.4/32 IP TE 50 rt3 - rt4(4)
+10.0.255.2/32 IP TE 60 rt3 - rt2(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+10.0.255.1/32 IP internal 0 rt1(4)
+rt2 TE-IS 10 rt2 - rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt4 TE-IS 20 rt2 - rt2(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+10.0.255.2/32 IP TE 20 rt2 - rt2(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt6 TE-IS 30 rt2 - rt4(4)
+ rt3 - rt5(4)
+10.0.255.4/32 IP TE 30 rt2 - rt4(4)
+10.0.255.5/32 IP TE 30 rt3 - rt5(4)
+10.0.255.6/32 IP TE 40 rt2 - rt6(4)
+ rt3 -
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 0 - - -
+ 10.0.255.2/32 20 - rt2 implicit-null
+ 10.0.255.3/32 20 - rt3 implicit-null
+ 10.0.255.4/32 30 - rt2 16040
+ 10.0.255.5/32 30 - rt3 16050
+ 10.0.255.6/32 40 - rt2 16060
+ - rt3 16060
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ --------------------------------------------------------
+ 10.0.255.2/32 60 - rt3 50600/16020
+ 10.0.255.4/32 50 - rt3 50600/16040
+
+P-space (self):
+ rt3
+ rt5
+
+P-space (rt3):
+ rt3
+ rt5
+ rt6
+
+Q-space:
+ rt2
+ rt4
+ rt6
+
+IS-IS paths to level-1 routers that speak IPv6
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+2001:db8::1/128 IP6 internal 0 rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+2001:db8::3/128 IP6 internal 20 rt3 - rt3(4)
+rt6 TE-IS 30 rt3 - rt5(4)
+2001:db8::5/128 IP6 internal 30 rt3 - rt5(4)
+rt4 TE-IS 40 rt3 - rt6(4)
+2001:db8::6/128 IP6 internal 40 rt3 - rt6(4)
+rt2 TE-IS 50 rt3 - rt4(4)
+2001:db8::4/128 IP6 internal 50 rt3 - rt4(4)
+2001:db8::2/128 IP6 internal 60 rt3 - rt2(4)
+
+IS-IS paths to level-1 routers that speak IPv6
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+2001:db8::1/128 IP6 internal 0 rt1(4)
+rt2 TE-IS 10 rt2 - rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt4 TE-IS 20 rt2 - rt2(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+2001:db8::2/128 IP6 internal 20 rt2 - rt2(4)
+2001:db8::3/128 IP6 internal 20 rt3 - rt3(4)
+rt6 TE-IS 30 rt2 - rt4(4)
+ rt3 - rt5(4)
+2001:db8::4/128 IP6 internal 30 rt2 - rt4(4)
+2001:db8::5/128 IP6 internal 30 rt3 - rt5(4)
+2001:db8::6/128 IP6 internal 40 rt2 - rt6(4)
+ rt3 -
+
+Main:
+IS-IS L1 IPv6 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ------------------------------------------------------------
+ 2001:db8::1/128 0 - - -
+ 2001:db8::2/128 20 - rt2 implicit-null
+ 2001:db8::3/128 20 - rt3 implicit-null
+ 2001:db8::4/128 30 - rt2 16041
+ 2001:db8::5/128 30 - rt3 16051
+ 2001:db8::6/128 40 - rt2 16061
+ - rt3 16061
+
+Backup:
+IS-IS L1 IPv6 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 2001:db8::2/128 60 - rt3 50600/16021
+ 2001:db8::4/128 50 - rt3 50600/16041
+
+test# test isis topology 2 root rt5 remote-lfa system-id rt1 pseudonode-id 1
+P-space (self):
+ rt6
+
+P-space (rt3):
+ rt1
+ rt2
+ rt3
+ rt4
+
+P-space (rt6):
+ rt4
+ rt6
+
+Q-space:
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+10.0.255.5/32 IP internal 0 rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt4 TE-IS 20 rt6 - rt6(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt1 pseudo_TE-IS 30 rt6 - rt4(4)
+rt1 TE-IS 30 rt6 - rt1(2)
+10.0.255.4/32 IP TE 30 rt6 - rt4(4)
+rt3 TE-IS 40 rt3 - rt5(4)
+10.0.255.1/32 IP TE 40 rt6 - rt1(4)
+rt2 TE-IS 45 rt6 - rt1(4)
+10.0.255.3/32 IP TE 50 rt3 - rt3(4)
+10.0.255.2/32 IP TE 55 rt6 - rt2(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+10.0.255.5/32 IP internal 0 rt5(4)
+rt1 TE-IS 10 rt1 - rt5(4)
+rt4 TE-IS 10 rt4 - rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt1 pseudo_TE-IS 20 rt1 - rt1(4)
+ rt4 - rt4(4)
+10.0.255.1/32 IP TE 20 rt1 - rt1(4)
+10.0.255.4/32 IP TE 20 rt4 - rt4(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt2 TE-IS 25 rt1 - rt1(4)
+10.0.255.2/32 IP TE 35 rt1 - rt2(4)
+rt3 TE-IS 40 rt3 - rt5(4)
+ rt1 - rt1(4)
+10.0.255.3/32 IP TE 50 rt3 - rt3(4)
+ rt1 -
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 20 - rt1 implicit-null
+ 10.0.255.2/32 35 - rt1 16020
+ 10.0.255.3/32 50 - rt3 implicit-null
+ - rt1 implicit-null
+ 10.0.255.4/32 20 - rt4 implicit-null
+ 10.0.255.5/32 0 - - -
+ 10.0.255.6/32 20 - rt6 implicit-null
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ --------------------------------------------------------
+ 10.0.255.1/32 40 - rt6 50400/16010
+ 10.0.255.2/32 55 - rt6 50400/16020
+ 10.0.255.4/32 30 - rt6 50400/16040
+
+P-space (self):
+ rt6
+
+P-space (rt3):
+ rt1
+ rt2
+ rt3
+ rt4
+
+P-space (rt6):
+ rt4
+ rt6
+
+Q-space:
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+IS-IS paths to level-1 routers that speak IPv6
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+2001:db8::5/128 IP6 internal 0 rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt4 TE-IS 20 rt6 - rt6(4)
+2001:db8::6/128 IP6 internal 20 rt6 - rt6(4)
+rt1 pseudo_TE-IS 30 rt6 - rt4(4)
+rt1 TE-IS 30 rt6 - rt1(2)
+2001:db8::4/128 IP6 internal 30 rt6 - rt4(4)
+rt3 TE-IS 40 rt3 - rt5(4)
+2001:db8::1/128 IP6 internal 40 rt6 - rt1(4)
+rt2 TE-IS 45 rt6 - rt1(4)
+2001:db8::3/128 IP6 internal 50 rt3 - rt3(4)
+2001:db8::2/128 IP6 internal 55 rt6 - rt2(4)
+
+IS-IS paths to level-1 routers that speak IPv6
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+2001:db8::5/128 IP6 internal 0 rt5(4)
+rt1 TE-IS 10 rt1 - rt5(4)
+rt4 TE-IS 10 rt4 - rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt1 pseudo_TE-IS 20 rt1 - rt1(4)
+ rt4 - rt4(4)
+2001:db8::1/128 IP6 internal 20 rt1 - rt1(4)
+2001:db8::4/128 IP6 internal 20 rt4 - rt4(4)
+2001:db8::6/128 IP6 internal 20 rt6 - rt6(4)
+rt2 TE-IS 25 rt1 - rt1(4)
+2001:db8::2/128 IP6 internal 35 rt1 - rt2(4)
+rt3 TE-IS 40 rt3 - rt5(4)
+ rt1 - rt1(4)
+2001:db8::3/128 IP6 internal 50 rt3 - rt3(4)
+ rt1 -
+
+Main:
+IS-IS L1 IPv6 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ------------------------------------------------------------
+ 2001:db8::1/128 20 - rt1 implicit-null
+ 2001:db8::2/128 35 - rt1 16021
+ 2001:db8::3/128 50 - rt3 implicit-null
+ - rt1 implicit-null
+ 2001:db8::4/128 20 - rt4 implicit-null
+ 2001:db8::5/128 0 - - -
+ 2001:db8::6/128 20 - rt6 implicit-null
+
+Backup:
+IS-IS L1 IPv6 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 2001:db8::1/128 40 - rt6 50400/16011
+ 2001:db8::2/128 55 - rt6 50400/16021
+ 2001:db8::4/128 30 - rt6 50400/16041
+
+test# test isis topology 3 root rt5 remote-lfa system-id rt4 ipv4-only
+P-space (self):
+ rt6
+
+P-space (rt3):
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+P-space (rt6):
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+Q-space:
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+10.0.255.5/32 IP internal 0 rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt4 TE-IS 20 rt6 - rt6(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt3 TE-IS 30 rt3 - rt5(4)
+rt2 TE-IS 30 rt6 - rt4(4)
+10.0.255.4/32 IP TE 30 rt6 - rt4(4)
+rt1 TE-IS 40 rt3 - rt3(4)
+ rt6 - rt2(4)
+10.0.255.3/32 IP TE 40 rt3 - rt3(4)
+10.0.255.2/32 IP TE 40 rt6 - rt2(4)
+10.0.255.1/32 IP TE 50 rt3 - rt1(4)
+ rt6 -
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+10.0.255.5/32 IP internal 0 rt5(4)
+rt4 TE-IS 10 rt4 - rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt2 TE-IS 20 rt4 - rt4(4)
+10.0.255.4/32 IP TE 20 rt4 - rt4(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt3 TE-IS 30 rt3 - rt5(4)
+ rt4 - rt2(4)
+rt1 TE-IS 30 rt4 - rt2(4)
+10.0.255.2/32 IP TE 30 rt4 - rt2(4)
+10.0.255.3/32 IP TE 40 rt3 - rt3(4)
+ rt4 -
+10.0.255.1/32 IP TE 40 rt4 - rt1(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 40 - rt4 16010
+ 10.0.255.2/32 30 - rt4 16020
+ 10.0.255.3/32 40 - rt3 implicit-null
+ - rt4 implicit-null
+ 10.0.255.4/32 20 - rt4 implicit-null
+ 10.0.255.5/32 0 - - -
+ 10.0.255.6/32 20 - rt6 implicit-null
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------
+ 10.0.255.1/32 40 - rt3 16010
+ - rt6 16010
+ 10.0.255.2/32 30 - rt3 16020
+ - rt6 16020
+ 10.0.255.4/32 20 - rt3 16040
+ - rt6 16040
+
+test# test isis topology 3 root rt5 remote-lfa system-id rt3 ipv4-only
+P-space (self):
+ rt1
+ rt2
+ rt4
+ rt6
+
+P-space (rt4):
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+P-space (rt6):
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+Q-space:
+ rt1
+ rt2
+ rt3
+ rt4
+ rt6
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+10.0.255.5/32 IP internal 0 rt5(4)
+rt4 TE-IS 10 rt4 - rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt2 TE-IS 20 rt4 - rt4(4)
+10.0.255.4/32 IP TE 20 rt4 - rt4(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt1 TE-IS 30 rt4 - rt2(4)
+rt3 TE-IS 30 rt4 - rt2(4)
+10.0.255.2/32 IP TE 30 rt4 - rt2(4)
+10.0.255.1/32 IP TE 40 rt4 - rt1(4)
+10.0.255.3/32 IP TE 40 rt4 - rt3(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt5
+10.0.255.5/32 IP internal 0 rt5(4)
+rt4 TE-IS 10 rt4 - rt5(4)
+rt6 TE-IS 10 rt6 - rt5(4)
+rt2 TE-IS 20 rt4 - rt4(4)
+10.0.255.4/32 IP TE 20 rt4 - rt4(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt3 TE-IS 30 rt3 - rt5(4)
+ rt4 - rt2(4)
+rt1 TE-IS 30 rt4 - rt2(4)
+10.0.255.2/32 IP TE 30 rt4 - rt2(4)
+10.0.255.3/32 IP TE 40 rt3 - rt3(4)
+ rt4 -
+10.0.255.1/32 IP TE 40 rt4 - rt1(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 40 - rt4 16010
+ 10.0.255.2/32 30 - rt4 16020
+ 10.0.255.3/32 40 - rt3 implicit-null
+ - rt4 implicit-null
+ 10.0.255.4/32 20 - rt4 implicit-null
+ 10.0.255.5/32 0 - - -
+ 10.0.255.6/32 20 - rt6 implicit-null
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+test# test isis topology 5 root rt1 remote-lfa system-id rt2 ipv4-only
+P-space (self):
+ rt3
+ rt5
+ rt7
+
+P-space (rt3):
+ rt3
+ rt5
+ rt7
+ rt8
+
+Q-space:
+ rt2
+ rt4
+ rt6
+ rt8
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+10.0.255.1/32 IP internal 0 rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt7 TE-IS 30 rt3 - rt5(4)
+10.0.255.5/32 IP TE 30 rt3 - rt5(4)
+rt8 TE-IS 40 rt3 - rt7(4)
+10.0.255.7/32 IP TE 40 rt3 - rt7(4)
+rt6 TE-IS 50 rt3 - rt8(4)
+10.0.255.8/32 IP TE 50 rt3 - rt8(4)
+rt4 TE-IS 60 rt3 - rt6(4)
+10.0.255.6/32 IP TE 60 rt3 - rt6(4)
+rt2 TE-IS 70 rt3 - rt4(4)
+10.0.255.4/32 IP TE 70 rt3 - rt4(4)
+10.0.255.2/32 IP TE 80 rt3 - rt2(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+10.0.255.1/32 IP internal 0 rt1(4)
+rt2 TE-IS 10 rt2 - rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt4 TE-IS 20 rt2 - rt2(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+10.0.255.2/32 IP TE 20 rt2 - rt2(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt6 TE-IS 30 rt2 - rt4(4)
+rt7 TE-IS 30 rt3 - rt5(4)
+10.0.255.4/32 IP TE 30 rt2 - rt4(4)
+10.0.255.5/32 IP TE 30 rt3 - rt5(4)
+rt8 TE-IS 40 rt2 - rt6(4)
+ rt3 - rt7(4)
+10.0.255.6/32 IP TE 40 rt2 - rt6(4)
+10.0.255.7/32 IP TE 40 rt3 - rt7(4)
+10.0.255.8/32 IP TE 50 rt2 - rt8(4)
+ rt3 -
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 0 - - -
+ 10.0.255.2/32 20 - rt2 implicit-null
+ 10.0.255.3/32 20 - rt3 implicit-null
+ 10.0.255.4/32 30 - rt2 16040
+ 10.0.255.5/32 30 - rt3 16050
+ 10.0.255.6/32 40 - rt2 16060
+ 10.0.255.7/32 40 - rt3 16070
+ 10.0.255.8/32 50 - rt2 16080
+ - rt3 16080
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ --------------------------------------------------------
+ 10.0.255.2/32 80 - rt3 50800/16020
+ 10.0.255.4/32 70 - rt3 50800/16040
+ 10.0.255.6/32 60 - rt3 50800/16060
+
+test# test isis topology 6 root rt4 remote-lfa system-id rt3 ipv4-only
+P-space (self):
+ rt2
+ rt5
+ rt6
+ rt7
+ rt8
+
+P-space (rt2):
+ rt1
+ rt2
+
+P-space (rt6):
+ rt5
+ rt6
+ rt7
+ rt8
+
+Q-space:
+ rt1
+ rt3
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt4
+10.0.255.4/32 IP internal 0 rt4(4)
+rt2 TE-IS 10 rt2 - rt4(4)
+rt6 TE-IS 10 rt6 - rt4(4)
+rt1 TE-IS 20 rt2 - rt2(4)
+rt5 TE-IS 20 rt6 - rt6(4)
+rt8 TE-IS 20 rt6 - rt6(4)
+10.0.255.2/32 IP TE 20 rt2 - rt2(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt3 TE-IS 30 rt2 - rt1(4)
+rt7 TE-IS 30 rt6 - rt5(4)
+ rt8(4)
+10.0.255.1/32 IP TE 30 rt2 - rt1(4)
+10.0.255.5/32 IP TE 30 rt6 - rt5(4)
+10.0.255.8/32 IP TE 30 rt6 - rt8(4)
+10.0.255.3/32 IP TE 40 rt2 - rt3(4)
+10.0.255.7/32 IP TE 40 rt6 - rt7(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt4
+10.0.255.4/32 IP internal 0 rt4(4)
+rt2 TE-IS 10 rt2 - rt4(4)
+rt3 TE-IS 10 rt3 - rt4(4)
+rt6 TE-IS 10 rt6 - rt4(4)
+rt1 TE-IS 20 rt2 - rt2(4)
+ rt3 - rt3(4)
+rt5 TE-IS 20 rt6 - rt6(4)
+rt8 TE-IS 20 rt6 - rt6(4)
+10.0.255.2/32 IP TE 20 rt2 - rt2(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+10.0.255.6/32 IP TE 20 rt6 - rt6(4)
+rt7 TE-IS 30 rt6 - rt5(4)
+ rt8(4)
+10.0.255.1/32 IP TE 30 rt2 - rt1(4)
+ rt3 -
+10.0.255.5/32 IP TE 30 rt6 - rt5(4)
+10.0.255.8/32 IP TE 30 rt6 - rt8(4)
+10.0.255.7/32 IP TE 40 rt6 - rt7(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 30 - rt2 16010
+ - rt3 16010
+ 10.0.255.2/32 20 - rt2 implicit-null
+ 10.0.255.3/32 20 - rt3 implicit-null
+ 10.0.255.4/32 0 - - -
+ 10.0.255.5/32 30 - rt6 16050
+ 10.0.255.6/32 20 - rt6 implicit-null
+ 10.0.255.7/32 40 - rt6 16070
+ 10.0.255.8/32 30 - rt6 16080
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ --------------------------------------------------------
+ 10.0.255.3/32 40 - rt2 50100/16030
+
+test# test isis topology 7 root rt11 remote-lfa system-id rt8 ipv4-only
+P-space (self):
+ rt10
+ rt12
+
+P-space (rt10):
+ rt1
+ rt4
+ rt7
+ rt10
+
+P-space (rt12):
+ rt9
+ rt12
+
+Q-space:
+ rt1
+ rt2
+ rt3
+ rt4
+ rt5
+ rt6
+ rt7
+ rt8
+ rt9
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt11
+10.0.255.11/32 IP internal 0 rt11(4)
+rt10 TE-IS 10 rt10 - rt11(4)
+rt12 TE-IS 10 rt12 - rt11(4)
+rt9 TE-IS 20 rt12 - rt12(4)
+10.0.255.10/32 IP TE 20 rt10 - rt10(4)
+10.0.255.12/32 IP TE 20 rt12 - rt12(4)
+rt7 TE-IS 30 rt10 - rt10(4)
+rt8 TE-IS 30 rt12 - rt9(4)
+10.0.255.9/32 IP TE 30 rt12 - rt9(4)
+rt4 TE-IS 40 rt10 - rt7(4)
+rt5 TE-IS 40 rt12 - rt8(4)
+10.0.255.7/32 IP TE 40 rt10 - rt7(4)
+10.0.255.8/32 IP TE 40 rt12 - rt8(4)
+rt6 TE-IS 50 rt12 - rt9(4)
+ rt5(4)
+rt1 TE-IS 50 rt10 - rt4(4)
+rt2 TE-IS 50 rt12 - rt5(4)
+10.0.255.4/32 IP TE 50 rt10 - rt4(4)
+10.0.255.5/32 IP TE 50 rt12 - rt5(4)
+rt3 TE-IS 60 rt12 - rt6(4)
+ rt2(4)
+10.0.255.6/32 IP TE 60 rt12 - rt6(4)
+10.0.255.1/32 IP TE 60 rt10 - rt1(4)
+10.0.255.2/32 IP TE 60 rt12 - rt2(4)
+10.0.255.3/32 IP TE 70 rt12 - rt3(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt11
+10.0.255.11/32 IP internal 0 rt11(4)
+rt8 TE-IS 10 rt8 - rt11(4)
+rt10 TE-IS 10 rt10 - rt11(4)
+rt12 TE-IS 10 rt12 - rt11(4)
+rt5 TE-IS 20 rt8 - rt8(4)
+rt7 TE-IS 20 rt8 - rt8(4)
+rt9 TE-IS 20 rt8 - rt8(4)
+ rt12 - rt12(4)
+10.0.255.8/32 IP TE 20 rt8 - rt8(4)
+10.0.255.10/32 IP TE 20 rt10 - rt10(4)
+10.0.255.12/32 IP TE 20 rt12 - rt12(4)
+rt2 TE-IS 30 rt8 - rt5(4)
+rt4 TE-IS 30 rt8 - rt5(4)
+ rt7(4)
+rt6 TE-IS 30 rt8 - rt5(4)
+10.0.255.5/32 IP TE 30 rt8 - rt5(4)
+10.0.255.7/32 IP TE 30 rt8 - rt7(4)
+10.0.255.9/32 IP TE 30 rt8 - rt9(4)
+ rt12 -
+rt3 TE-IS 40 rt8 - rt2(4)
+ rt6(4)
+rt1 TE-IS 40 rt8 - rt4(4)
+10.0.255.2/32 IP TE 40 rt8 - rt2(4)
+10.0.255.4/32 IP TE 40 rt8 - rt4(4)
+10.0.255.6/32 IP TE 40 rt8 - rt6(4)
+10.0.255.3/32 IP TE 50 rt8 - rt3(4)
+10.0.255.1/32 IP TE 50 rt8 - rt1(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------------
+ 10.0.255.1/32 50 - rt8 16010
+ 10.0.255.2/32 40 - rt8 16020
+ 10.0.255.3/32 50 - rt8 16030
+ 10.0.255.4/32 40 - rt8 16040
+ 10.0.255.5/32 30 - rt8 16050
+ 10.0.255.6/32 40 - rt8 16060
+ 10.0.255.7/32 30 - rt8 16070
+ 10.0.255.8/32 20 - rt8 implicit-null
+ 10.0.255.9/32 30 - rt8 16090
+ - rt12 16090
+ 10.0.255.10/32 20 - rt10 implicit-null
+ 10.0.255.11/32 0 - - -
+ 10.0.255.12/32 20 - rt12 implicit-null
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ --------------------------------------------------------
+ 10.0.255.1/32 50 - rt10 16010
+ 10.0.255.2/32 60 - rt12 50900/16020
+ 10.0.255.3/32 70 - rt12 50900/16030
+ 10.0.255.4/32 40 - rt10 16040
+ 10.0.255.5/32 50 - rt12 50900/16050
+ 10.0.255.6/32 60 - rt12 50900/16060
+ 10.0.255.7/32 30 - rt10 16070
+ 10.0.255.8/32 40 - rt12 50900/16080
+
+test# test isis topology 7 root rt6 remote-lfa system-id rt5 ipv4-only
+P-space (self):
+ rt3
+
+P-space (rt3):
+ rt2
+ rt3
+
+P-space (rt9):
+ rt1
+ rt2
+ rt4
+ rt5
+ rt7
+ rt8
+ rt9
+ rt10
+ rt11
+ rt12
+
+Q-space:
+ rt1
+ rt2
+ rt4
+ rt5
+ rt7
+ rt8
+ rt9
+ rt10
+ rt11
+ rt12
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt6
+10.0.255.6/32 IP internal 0 rt6(4)
+rt3 TE-IS 10 rt3 - rt6(4)
+rt2 TE-IS 20 rt3 - rt3(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt9 TE-IS 30 rt9 - rt6(4)
+rt5 TE-IS 30 rt3 - rt2(4)
+10.0.255.2/32 IP TE 30 rt3 - rt2(4)
+rt8 TE-IS 40 rt9 - rt9(4)
+ rt3 - rt5(4)
+rt12 TE-IS 40 rt9 - rt9(4)
+rt4 TE-IS 40 rt3 - rt5(4)
+10.0.255.9/32 IP TE 40 rt9 - rt9(4)
+10.0.255.5/32 IP TE 40 rt3 - rt5(4)
+rt7 TE-IS 50 rt9 - rt8(4)
+ rt3 - rt4(4)
+rt11 TE-IS 50 rt9 - rt8(4)
+ rt3 - rt12(4)
+rt1 TE-IS 50 rt3 - rt4(4)
+10.0.255.8/32 IP TE 50 rt9 - rt8(4)
+ rt3 -
+10.0.255.12/32 IP TE 50 rt9 - rt12(4)
+10.0.255.4/32 IP TE 50 rt3 - rt4(4)
+rt10 TE-IS 60 rt9 - rt11(4)
+ rt3 -
+10.0.255.7/32 IP TE 60 rt9 - rt7(4)
+ rt3 -
+10.0.255.11/32 IP TE 60 rt9 - rt11(4)
+ rt3 -
+10.0.255.1/32 IP TE 60 rt3 - rt1(4)
+10.0.255.10/32 IP TE 70 rt9 - rt10(4)
+ rt3 -
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt6
+10.0.255.6/32 IP internal 0 rt6(4)
+rt3 TE-IS 10 rt3 - rt6(4)
+rt5 TE-IS 10 rt5 - rt6(4)
+rt2 TE-IS 20 rt3 - rt3(4)
+ rt5 - rt5(4)
+rt4 TE-IS 20 rt5 - rt5(4)
+rt8 TE-IS 20 rt5 - rt5(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+10.0.255.5/32 IP TE 20 rt5 - rt5(4)
+rt9 TE-IS 30 rt9 - rt6(4)
+ rt5 - rt8(4)
+rt1 TE-IS 30 rt5 - rt4(4)
+rt7 TE-IS 30 rt5 - rt4(4)
+ rt8(4)
+rt11 TE-IS 30 rt5 - rt8(4)
+10.0.255.2/32 IP TE 30 rt3 - rt2(4)
+ rt5 -
+10.0.255.4/32 IP TE 30 rt5 - rt4(4)
+10.0.255.8/32 IP TE 30 rt5 - rt8(4)
+rt12 TE-IS 40 rt9 - rt9(4)
+ rt5 - rt11(4)
+rt10 TE-IS 40 rt5 - rt11(4)
+10.0.255.9/32 IP TE 40 rt9 - rt9(4)
+ rt5 -
+10.0.255.1/32 IP TE 40 rt5 - rt1(4)
+10.0.255.7/32 IP TE 40 rt5 - rt7(4)
+10.0.255.11/32 IP TE 40 rt5 - rt11(4)
+10.0.255.12/32 IP TE 50 rt9 - rt12(4)
+ rt5 -
+10.0.255.10/32 IP TE 50 rt5 - rt10(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------------
+ 10.0.255.1/32 40 - rt5 16010
+ 10.0.255.2/32 30 - rt3 16020
+ - rt5 16020
+ 10.0.255.3/32 20 - rt3 implicit-null
+ 10.0.255.4/32 30 - rt5 16040
+ 10.0.255.5/32 20 - rt5 implicit-null
+ 10.0.255.6/32 0 - - -
+ 10.0.255.7/32 40 - rt5 16070
+ 10.0.255.8/32 30 - rt5 16080
+ 10.0.255.9/32 40 - rt9 implicit-null
+ - rt5 implicit-null
+ 10.0.255.10/32 50 - rt5 16100
+ 10.0.255.11/32 40 - rt5 16110
+ 10.0.255.12/32 50 - rt9 16120
+ - rt5 16120
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ------------------------------------------------------
+ 10.0.255.1/32 70 - rt9 16010
+ 10.0.255.4/32 60 - rt9 16040
+ 10.0.255.5/32 50 - rt9 16050
+ 10.0.255.7/32 50 - rt9 16070
+ 10.0.255.8/32 40 - rt9 16080
+ 10.0.255.10/32 60 - rt9 16100
+ 10.0.255.11/32 50 - rt9 16110
+
+test# test isis topology 8 root rt2 remote-lfa system-id rt5 ipv4-only
+P-space (self):
+ rt1
+ rt3
+ rt4
+ rt7
+ rt10
+
+P-space (rt1):
+ rt1
+ rt4
+ rt7
+ rt10
+
+P-space (rt3):
+ rt3
+ rt6
+
+Q-space:
+ rt5
+ rt6
+ rt8
+ rt9
+ rt11
+ rt12
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt2
+10.0.255.2/32 IP internal 0 rt2(4)
+rt1 TE-IS 10 rt1 - rt2(4)
+rt3 TE-IS 10 rt3 - rt2(4)
+rt4 TE-IS 20 rt1 - rt1(4)
+rt6 TE-IS 20 rt3 - rt3(4)
+10.0.255.1/32 IP TE 20 rt1 - rt1(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt7 TE-IS 30 rt1 - rt4(4)
+rt5 TE-IS 30 rt3 - rt6(4)
+10.0.255.4/32 IP TE 30 rt1 - rt4(4)
+10.0.255.6/32 IP TE 30 rt3 - rt6(4)
+rt10 TE-IS 40 rt1 - rt7(4)
+rt8 TE-IS 40 rt3 - rt5(4)
+10.0.255.7/32 IP TE 40 rt1 - rt7(4)
+10.0.255.5/32 IP TE 40 rt3 - rt5(4)
+rt9 TE-IS 50 rt3 - rt8(4)
+rt11 TE-IS 50 rt3 - rt8(4)
+10.0.255.10/32 IP TE 50 rt1 - rt10(4)
+10.0.255.8/32 IP TE 50 rt3 - rt8(4)
+rt12 TE-IS 60 rt3 - rt9(4)
+ rt11(4)
+10.0.255.9/32 IP TE 60 rt3 - rt9(4)
+10.0.255.11/32 IP TE 60 rt3 - rt11(4)
+10.0.255.12/32 IP TE 70 rt3 - rt12(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt2
+10.0.255.2/32 IP internal 0 rt2(4)
+rt1 TE-IS 10 rt1 - rt2(4)
+rt3 TE-IS 10 rt3 - rt2(4)
+rt5 TE-IS 10 rt5 - rt2(4)
+rt4 TE-IS 20 rt1 - rt1(4)
+rt6 TE-IS 20 rt3 - rt3(4)
+ rt5 - rt5(4)
+rt8 TE-IS 20 rt5 - rt5(4)
+10.0.255.1/32 IP TE 20 rt1 - rt1(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+10.0.255.5/32 IP TE 20 rt5 - rt5(4)
+rt7 TE-IS 30 rt1 - rt4(4)
+rt9 TE-IS 30 rt5 - rt8(4)
+rt11 TE-IS 30 rt5 - rt8(4)
+10.0.255.4/32 IP TE 30 rt1 - rt4(4)
+10.0.255.6/32 IP TE 30 rt3 - rt6(4)
+ rt5 -
+10.0.255.8/32 IP TE 30 rt5 - rt8(4)
+rt10 TE-IS 40 rt1 - rt7(4)
+rt12 TE-IS 40 rt5 - rt9(4)
+ rt11(4)
+10.0.255.7/32 IP TE 40 rt1 - rt7(4)
+10.0.255.9/32 IP TE 40 rt5 - rt9(4)
+10.0.255.11/32 IP TE 40 rt5 - rt11(4)
+10.0.255.10/32 IP TE 50 rt1 - rt10(4)
+10.0.255.12/32 IP TE 50 rt5 - rt12(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ -----------------------------------------------------------
+ 10.0.255.1/32 20 - rt1 implicit-null
+ 10.0.255.2/32 0 - - -
+ 10.0.255.3/32 20 - rt3 implicit-null
+ 10.0.255.4/32 30 - rt1 16040
+ 10.0.255.5/32 20 - rt5 implicit-null
+ 10.0.255.6/32 30 - rt3 16060
+ - rt5 16060
+ 10.0.255.7/32 40 - rt1 16070
+ 10.0.255.8/32 30 - rt5 16080
+ 10.0.255.9/32 40 - rt5 16090
+ 10.0.255.10/32 50 - rt1 16100
+ 10.0.255.11/32 40 - rt5 16110
+ 10.0.255.12/32 50 - rt5 16120
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ---------------------------------------------------------
+ 10.0.255.5/32 40 - rt3 50600/16050
+ 10.0.255.8/32 50 - rt3 50600/16080
+ 10.0.255.9/32 60 - rt3 50600/16090
+ 10.0.255.11/32 60 - rt3 50600/16110
+ 10.0.255.12/32 70 - rt3 50600/16120
+
+test# test isis topology 11 root rt2 remote-lfa system-id rt4
+P-space (self):
+
+P-space (rt1):
+ rt1
+ rt3
+ rt5
+
+P-space (rt3):
+ rt1
+ rt3
+ rt5
+ rt6
+
+Q-space:
+ rt1
+ rt3
+ rt4
+ rt5
+ rt6
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt2
+10.0.255.2/32 IP internal 0 rt2(4)
+rt1 TE-IS 50 rt1 - rt2(4)
+rt3 TE-IS 50 rt3 - rt2(4)
+rt2
+rt5 TE-IS 60 rt3 - rt3(4)
+10.0.255.1/32 IP TE 60 rt1 - rt1(4)
+10.0.255.3/32 IP TE 60 rt3 - rt3(4)
+rt4 TE-IS 70 rt3 - rt5(4)
+rt6 TE-IS 70 rt3 - rt5(4)
+10.0.255.5/32 IP TE 70 rt3 - rt5(4)
+10.0.255.4/32 IP TE 80 rt3 - rt4(4)
+10.0.255.6/32 IP TE 80 rt3 - rt6(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt2
+10.0.255.2/32 IP internal 0 rt2(4)
+rt4 TE-IS 10 rt4 - rt2(4)
+rt5 TE-IS 20 rt4 - rt4(4)
+rt6 TE-IS 20 rt4 - rt4(4)
+10.0.255.4/32 IP TE 20 rt4 - rt4(4)
+rt3 TE-IS 30 rt4 - rt5(4)
+10.0.255.5/32 IP TE 30 rt4 - rt5(4)
+10.0.255.6/32 IP TE 30 rt4 - rt6(4)
+rt2
+rt1 TE-IS 40 rt4 - rt2(2)
+10.0.255.3/32 IP TE 40 rt4 - rt3(4)
+10.0.255.1/32 IP TE 50 rt4 - rt1(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 50 - rt4 16010
+ 10.0.255.2/32 0 - - -
+ 10.0.255.3/32 40 - rt4 16030
+ 10.0.255.4/32 20 - rt4 implicit-null
+ 10.0.255.5/32 30 - rt4 16050
+ 10.0.255.6/32 30 - rt4 16060
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 50 - rt1 implicit-null
+ - rt3 16010
+ 10.0.255.3/32 50 - rt1 16030
+ - rt3 implicit-null
+ 10.0.255.4/32 80 - rt3 50500/16040
+ 10.0.255.5/32 60 - rt1 16050
+ - rt3 16050
+ 10.0.255.6/32 70 - rt3 16060
+
+P-space (self):
+
+P-space (rt1):
+ rt1
+ rt3
+ rt5
+
+P-space (rt3):
+ rt1
+ rt3
+ rt5
+ rt6
+
+Q-space:
+ rt1
+ rt3
+ rt4
+ rt5
+ rt6
+
+IS-IS paths to level-1 routers that speak IPv6
+Vertex Type Metric Next-Hop Interface Parent
+rt2
+2001:db8::2/128 IP6 internal 0 rt2(4)
+rt1 TE-IS 50 rt1 - rt2(4)
+rt3 TE-IS 50 rt3 - rt2(4)
+rt2
+rt5 TE-IS 60 rt3 - rt3(4)
+2001:db8::1/128 IP6 internal 60 rt1 - rt1(4)
+2001:db8::3/128 IP6 internal 60 rt3 - rt3(4)
+rt4 TE-IS 70 rt3 - rt5(4)
+rt6 TE-IS 70 rt3 - rt5(4)
+2001:db8::5/128 IP6 internal 70 rt3 - rt5(4)
+2001:db8::4/128 IP6 internal 80 rt3 - rt4(4)
+2001:db8::6/128 IP6 internal 80 rt3 - rt6(4)
+
+IS-IS paths to level-1 routers that speak IPv6
+Vertex Type Metric Next-Hop Interface Parent
+rt2
+2001:db8::2/128 IP6 internal 0 rt2(4)
+rt4 TE-IS 10 rt4 - rt2(4)
+rt5 TE-IS 20 rt4 - rt4(4)
+rt6 TE-IS 20 rt4 - rt4(4)
+2001:db8::4/128 IP6 internal 20 rt4 - rt4(4)
+rt3 TE-IS 30 rt4 - rt5(4)
+2001:db8::5/128 IP6 internal 30 rt4 - rt5(4)
+2001:db8::6/128 IP6 internal 30 rt4 - rt6(4)
+rt2
+rt1 TE-IS 40 rt4 - rt2(2)
+2001:db8::3/128 IP6 internal 40 rt4 - rt3(4)
+2001:db8::1/128 IP6 internal 50 rt4 - rt1(4)
+
+Main:
+IS-IS L1 IPv6 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ------------------------------------------------------------
+ 2001:db8::1/128 50 - rt4 16011
+ 2001:db8::2/128 0 - - -
+ 2001:db8::3/128 40 - rt4 16031
+ 2001:db8::4/128 20 - rt4 implicit-null
+ 2001:db8::5/128 30 - rt4 16051
+ 2001:db8::6/128 30 - rt4 16061
+
+Backup:
+IS-IS L1 IPv6 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ------------------------------------------------------------
+ 2001:db8::1/128 50 - rt1 implicit-null
+ - rt3 16011
+ 2001:db8::3/128 50 - rt1 16031
+ - rt3 implicit-null
+ 2001:db8::4/128 80 - rt3 50500/16041
+ 2001:db8::5/128 60 - rt1 16051
+ - rt3 16051
+ 2001:db8::6/128 70 - rt3 16061
+
+test# test isis topology 13 root rt1 remote-lfa system-id rt3 ipv4-only
+P-space (self):
+ rt2
+
+P-space (rt2):
+ rt2
+ rt4
+
+Q-space:
+ rt3
+ rt4
+ rt5
+ rt6
+ rt7
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+10.0.255.1/32 IP internal 0 rt1(4)
+rt2 TE-IS 10 rt2 - rt1(4)
+rt4 TE-IS 20 rt2 - rt2(4)
+10.0.255.2/32 IP TE 20 rt2 - rt2(4)
+rt3 TE-IS 30 rt2 - rt4(4)
+10.0.255.4/32 IP TE 30 rt2 - rt4(4)
+rt5 TE-IS 40 rt2 - rt3(4)
+rt6 TE-IS 40 rt2 - rt3(4)
+10.0.255.3/32 IP TE 40 rt2 - rt3(4)
+rt7 TE-IS 50 rt2 - rt5(4)
+ rt6(4)
+10.0.255.5/32 IP TE 50 rt2 - rt5(4)
+10.0.255.6/32 IP TE 50 rt2 - rt6(4)
+10.0.255.7/32 IP TE 60 rt2 - rt7(4)
+
+IS-IS paths to level-1 routers that speak IP
+Vertex Type Metric Next-Hop Interface Parent
+rt1
+10.0.255.1/32 IP internal 0 rt1(4)
+rt2 TE-IS 10 rt2 - rt1(4)
+rt3 TE-IS 10 rt3 - rt1(4)
+rt4 TE-IS 20 rt2 - rt2(4)
+ rt3 - rt3(4)
+rt5 TE-IS 20 rt3 - rt3(4)
+rt6 TE-IS 20 rt3 - rt3(4)
+10.0.255.2/32 IP TE 20 rt2 - rt2(4)
+10.0.255.3/32 IP TE 20 rt3 - rt3(4)
+rt7 TE-IS 30 rt3 - rt5(4)
+ rt6(4)
+10.0.255.4/32 IP TE 30 rt2 - rt4(4)
+ rt3 -
+10.0.255.5/32 IP TE 30 rt3 - rt5(4)
+10.0.255.6/32 IP TE 30 rt3 - rt6(4)
+10.0.255.7/32 IP TE 40 rt3 - rt7(4)
+
+Main:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ ----------------------------------------------------------
+ 10.0.255.1/32 0 - - -
+ 10.0.255.2/32 20 - rt2 implicit-null
+ 10.0.255.3/32 20 - rt3 implicit-null
+ 10.0.255.4/32 30 - rt2 16040
+ - rt3 16040
+ 10.0.255.5/32 30 - rt3 16050
+ 10.0.255.6/32 30 - rt3 16060
+ 10.0.255.7/32 40 - rt3 16070
+
+Backup:
+IS-IS L1 IPv4 routing table:
+
+ Prefix Metric Interface Nexthop Label(s)
+ --------------------------------------------------------
+ 10.0.255.3/32 40 - rt2 50400/16030
+ 10.0.255.5/32 50 - rt2 50400/16050
+ 10.0.255.6/32 50 - rt2 50400/16060
+ 10.0.255.7/32 60 - rt2 50400/16070
+
+test#
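
The remote-LFA runs above all follow the same pattern: a PQ node is a router that appears both in one of the printed P-spaces and in the Q-space, and the resulting backup routes carry a two-label stack, a label towards the PQ node followed by the destination's prefix label (e.g. 50400/16070 for 10.0.255.7/32 in the topology-13 run). The sketch below is illustrative only and is not isisd code; the node names and the 50400/16070 values are taken from that run as sample data, everything else is assumed.

/*
 * Illustrative sketch, not part of this patch: pick a PQ node as the
 * intersection of a P-space and the Q-space and print the resulting
 * two-label backup stack.
 */
#include <stdio.h>
#include <string.h>

struct lfa_node {
	char name[16];
	unsigned int label;	/* label used to forward towards this node */
};

/* Return true if a node with the given name is part of the space. */
static int in_space(const struct lfa_node *space, int count, const char *name)
{
	for (int i = 0; i < count; i++)
		if (strcmp(space[i].name, name) == 0)
			return 1;
	return 0;
}

/* Pick the first router that is in both the P-space and the Q-space. */
static const struct lfa_node *select_pq_node(const struct lfa_node *p_space,
					     int p_count,
					     const struct lfa_node *q_space,
					     int q_count)
{
	for (int i = 0; i < p_count; i++)
		if (in_space(q_space, q_count, p_space[i].name))
			return &p_space[i];
	return NULL;
}

int main(void)
{
	/* P-space of neighbor rt2 and Q-space of the topology-13 run above. */
	struct lfa_node p_space[] = { { "rt2", 0 /* unused here */ },
				      { "rt4", 50400 } };
	struct lfa_node q_space[] = { { "rt3", 0 }, { "rt4", 0 }, { "rt5", 0 },
				      { "rt6", 0 }, { "rt7", 0 } };
	unsigned int dest_label = 16070;	/* prefix label of 10.0.255.7/32 */

	const struct lfa_node *pq = select_pq_node(p_space, 2, q_space, 5);

	if (pq)
		/* -> "PQ node rt4, backup label stack 50400/16070" */
		printf("PQ node %s, backup label stack %u/%u\n", pq->name,
		       pq->label, dest_label);
	else
		printf("no PQ node found, remote LFA not applicable\n");

	return 0;
}
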
test# test isis topology 1 root rt1 ti-lfa system-id rt2
P-space (self):
rt3
diff --git a/tests/lib/test_xref.c b/tests/lib/test_xref.c new file mode 100644 index 0000000000..700950de1f --- /dev/null +++ b/tests/lib/test_xref.c @@ -0,0 +1,140 @@ +/* + * xref tests + * Copyright (C) 2020 David Lamparter for NetDEF, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <zebra.h> +#include "xref.h" +#include "log.h" + +/* + * "lib/test_xref.c" (only 1 directory component included) + * "logging call" + * 0x00000003 (network byte order - LOG_ERR) + * 0x00000000 (network byte order - EC / zero here) + * + * note there are no '\0' terminators included for the strings + * + * SHA256 + * => 71a65ce6e81517f642c8f55fb2af6f181f7df54357913b5b577aa61a663fdd4c + * & 0f -> 0x01 'H' + * & f001 -> 0x07 '7' + * & 3e -> 0x13 'K' + * & c007 -> 0x12 'J' + * & f8 -> 0x0b 'B' + * etc. + * (for reference: base32ch[] = "0123456789ABCDEFGHJKMNPQRSTVWXYZ") + * + * (bits are consumed starting with the lowest bit, and the first character + * only consumes 4 bits and has the 5th bit at 1) + */ + +static const char *expect_uid = "H7KJB-67TBH"; +static bool test_logcall(void) +{ + zlog_err("logging call"); + + return true; +} + +static void check_xref(const struct xref *xref, bool *found, bool *error) +{ + const char *file = xref->file, *p; + + p = strrchr(file, '/'); + if (p) + file = p + 1; + + if (strcmp(file, "test_xref.c")) + return; + if (xref->type != XREFT_LOGMSG) + return; + if (strcmp(xref->func, "test_logcall")) + return; + + printf("xref: %s:%d %s() type=%d uid=%s\n", + xref->file, xref->line, xref->func, xref->type, + xref->xrefdata ? 
xref->xrefdata->uid : "--"); + + if (*found) { + printf("duplicate xref!\n"); + *error = true; + } + + const struct xref_logmsg *logmsg; + + logmsg = container_of(xref, struct xref_logmsg, xref); + if (strcmp(logmsg->fmtstring, "logging call")) { + printf("log message mismatch!\n"); + *error = true; + } + if (logmsg->priority != LOG_ERR || logmsg->ec != 0) { + printf("metadata mismatch!\n"); + *error = true; + } + + *found = true; + + if (!xref->xrefdata) { + printf("no unique ID?\n"); + *error = true; + return; + } + + if (strcmp(xref->xrefdata->uid, expect_uid)) { + printf("unique ID mismatch, expected %s, got %s\n", + expect_uid, xref->xrefdata->uid); + *error = true; + } +} + +static bool test_lookup(void) +{ + struct xref_block *xb; + bool found = false, error = false; + + for (xb = xref_blocks; xb; xb = xb->next) { + const struct xref * const *xrefp; + + for (xrefp = xb->start; xrefp < xb->stop; xrefp++) { + const struct xref *xref = *xrefp; + + if (!xref) + continue; + + check_xref(xref, &found, &error); + } + } + return found && !error; +} + +bool (*tests[])(void) = { + test_lookup, + test_logcall, +}; + +XREF_SETUP() + +int main(int argc, char **argv) +{ + zlog_aux_init("NONE: ", ZLOG_DISABLED); + + for (unsigned int i = 0; i < array_size(tests); i++) + if (!tests[i]()) + return 1; + return 0; +} diff --git a/tests/lib/test_xref.py b/tests/lib/test_xref.py new file mode 100644 index 0000000000..8c3db3e182 --- /dev/null +++ b/tests/lib/test_xref.py @@ -0,0 +1,6 @@ +import frrtest + +class TestXref(frrtest.TestMultiOut): + program = './test_xref' + +TestXref.exit_cleanly() diff --git a/tests/ospfd/.gitignore b/tests/ospfd/.gitignore new file mode 100644 index 0000000000..c659b645db --- /dev/null +++ b/tests/ospfd/.gitignore @@ -0,0 +1,3 @@ +/*_afl/* +test_ospf_spf +core diff --git a/tests/ospfd/common.c b/tests/ospfd/common.c new file mode 100644 index 0000000000..eb30c4016e --- /dev/null +++ b/tests/ospfd/common.c @@ -0,0 +1,248 @@ +#include <zebra.h> + +#include "lib/stream.h" +#include "lib/vty.h" +#include "lib/mpls.h" +#include "lib/if.h" +#include "lib/table.h" + +#include "ospfd/ospfd.h" +#include "ospfd/ospf_route.h" +#include "ospfd/ospf_spf.h" +#include "ospfd/ospf_flood.h" +#include "ospfd/ospf_lsa.h" +#include "ospfd/ospf_lsdb.h" +#include "ospfd/ospf_interface.h" +#include "ospfd/ospf_sr.h" + +#include "common.h" + +struct thread_master *master; +struct zebra_privs_t ospfd_privs; + + +struct ospf_topology *test_find_topology(const char *name) +{ + if (strmatch(name, "topo1")) + return &topo1; + else if (strmatch(name, "topo2")) + return &topo2; + else if (strmatch(name, "topo3")) + return &topo3; + else if (strmatch(name, "topo4")) + return &topo4; + else if (strmatch(name, "topo5")) + return &topo5; + + return NULL; +} + +int sort_paths(const void **path1, const void **path2) +{ + const struct ospf_path *p1 = *path1; + const struct ospf_path *p2 = *path2; + + return (p1->nexthop.s_addr - p2->nexthop.s_addr); +} + +void print_route_table(struct vty *vty, struct route_table *rt) +{ + struct route_node *rn; + struct ospf_route * or ; + struct listnode *pnode; + struct ospf_path *path; + struct mpls_label_stack *label_stack; + char buf[MPLS_LABEL_STRLEN]; + + for (rn = route_top(rt); rn; rn = route_next(rn)) { + if ((or = rn->info) == NULL) + continue; + + vty_out(vty, "N %-18pFX %-15pI4 %d\n", &rn->p, + & or->u.std.area_id, or->cost); + + list_sort(or->paths, sort_paths); + + for (ALL_LIST_ELEMENTS_RO(or->paths, pnode, path)) { + if (path->nexthop.s_addr == 0) + continue; 
+ + vty_out(vty, " -> %pI4 with adv router %pI4", + &path->nexthop, &path->adv_router); + + if (path->srni.backup_label_stack) { + label_stack = path->srni.backup_label_stack; + mpls_label2str(label_stack->num_labels, + label_stack->label, buf, + MPLS_LABEL_STRLEN, true); + vty_out(vty, " and backup path %s", buf); + } + vty_out(vty, "\n"); + } + } +} + +struct ospf_test_node *test_find_node(struct ospf_topology *topology, + const char *hostname) +{ + for (int i = 0; topology->nodes[i].hostname[0]; i++) + if (strmatch(hostname, topology->nodes[i].hostname)) + return &topology->nodes[i]; + + return NULL; +} + +static void inject_router_lsa(struct vty *vty, struct ospf *ospf, + struct ospf_topology *topology, + struct ospf_test_node *root, + struct ospf_test_node *tnode) +{ + struct ospf_area *area; + struct in_addr router_id; + struct in_addr adj_router_id; + struct prefix_ipv4 prefix; + struct in_addr data; + struct stream *s; + struct lsa_header *lsah; + struct ospf_lsa *new; + int length; + unsigned long putp; + uint16_t link_count; + struct ospf_test_node *tfound_adj_node; + struct ospf_test_adj *tadj; + bool is_self_lsa = false; + + area = ospf->backbone; + inet_aton(tnode->router_id, &router_id); + + if (strncmp(root->router_id, tnode->router_id, 256) == 0) + is_self_lsa = true; + + s = stream_new(OSPF_MAX_LSA_SIZE); + lsa_header_set(s, LSA_OPTIONS_GET(area) | LSA_OPTIONS_NSSA_GET(area), + OSPF_ROUTER_LSA, router_id, router_id); + + stream_putc(s, router_lsa_flags(area)); + stream_putc(s, 0); + + putp = stream_get_endp(s); + stream_putw(s, 0); + + for (link_count = 0; tnode->adjacencies[link_count].hostname[0]; + link_count++) { + tadj = &tnode->adjacencies[link_count]; + tfound_adj_node = test_find_node(topology, tadj->hostname); + str2prefix_ipv4(tnode->adjacencies[link_count].network, + &prefix); + + inet_aton(tfound_adj_node->router_id, &adj_router_id); + data.s_addr = prefix.prefix.s_addr; + link_info_set(&s, adj_router_id, data, + LSA_LINK_TYPE_POINTOPOINT, 0, tadj->metric); + + masklen2ip(prefix.prefixlen, &data); + link_info_set(&s, prefix.prefix, data, LSA_LINK_TYPE_STUB, 0, + tadj->metric); + } + + /* Don't forget the node itself (just a stub) */ + str2prefix_ipv4(tnode->router_id, &prefix); + data.s_addr = 0xffffffff; + link_info_set(&s, prefix.prefix, data, LSA_LINK_TYPE_STUB, 0, 0); + + /* Take twice the link count (for P2P and stub) plus the local stub */ + stream_putw_at(s, putp, (2 * link_count) + 1); + + length = stream_get_endp(s); + lsah = (struct lsa_header *)STREAM_DATA(s); + lsah->length = htons(length); + + new = ospf_lsa_new_and_data(length); + new->area = area; + new->vrf_id = area->ospf->vrf_id; + + if (is_self_lsa) + SET_FLAG(new->flags, OSPF_LSA_SELF | OSPF_LSA_SELF_CHECKED); + + memcpy(new->data, lsah, length); + stream_free(s); + + ospf_lsdb_add(area->lsdb, new); + + if (is_self_lsa) { + ospf_lsa_unlock(&area->router_lsa_self); + area->router_lsa_self = ospf_lsa_lock(new); + } +} + +static void inject_sr_db_entry(struct vty *vty, struct ospf_test_node *tnode, + struct ospf_topology *topology) +{ + struct ospf_test_node *tfound_adj_node; + struct ospf_test_adj *tadj; + struct in_addr router_id; + struct in_addr remote_id; + struct sr_node *srn; + struct sr_prefix *srp; + struct sr_link *srl; + int link_count; + + inet_aton(tnode->router_id, &router_id); + + srn = ospf_sr_node_create(&router_id); + + srn->srgb.range_size = 8000; + srn->srgb.lower_bound = 16000; + srn->msd = 16; + + srn->srlb.range_size = 1000; + srn->srlb.lower_bound = 15000; + + /* Prefix 
SID */ + srp = XCALLOC(MTYPE_OSPF_SR_PARAMS, sizeof(struct sr_prefix)); + srp->adv_router = router_id; + srp->sid = tnode->label; + srp->srn = srn; + + listnode_add(srn->ext_prefix, srp); + + /* Adjacency SIDs for all adjacencies */ + for (link_count = 0; tnode->adjacencies[link_count].hostname[0]; + link_count++) { + tadj = &tnode->adjacencies[link_count]; + tfound_adj_node = test_find_node(topology, tadj->hostname); + + srl = XCALLOC(MTYPE_OSPF_SR_PARAMS, sizeof(struct sr_link)); + srl->adv_router = router_id; + + inet_aton(tfound_adj_node->router_id, &remote_id); + srl->remote_id = remote_id; + + srl->type = ADJ_SID; + srl->sid[0] = srn->srlb.lower_bound + tadj->label; + srl->srn = srn; + + listnode_add(srn->ext_link, srl); + } +} + +int topology_load(struct vty *vty, struct ospf_topology *topology, + struct ospf_test_node *root, struct ospf *ospf) +{ + struct ospf_test_node *tnode; + + for (int i = 0; topology->nodes[i].hostname[0]; i++) { + tnode = &topology->nodes[i]; + + /* Inject a router LSA for each node, used for SPF */ + inject_router_lsa(vty, ospf, topology, root, tnode); + + /* + * SR information could also be inected via LSAs, but directly + * filling the SR DB with labels is just easier. + */ + inject_sr_db_entry(vty, tnode, topology); + } + + return 0; +} diff --git a/tests/ospfd/common.h b/tests/ospfd/common.h new file mode 100644 index 0000000000..6d3e63e359 --- /dev/null +++ b/tests/ospfd/common.h @@ -0,0 +1,47 @@ +#ifndef _COMMON_OSPF_H +#define _COMMON_OSPF_H + +#define MAX_ADJACENCIES 8 +#define MAX_NODES 12 + +struct ospf_test_adj { + char hostname[256]; + char network[256]; + uint32_t metric; + mpls_label_t label; +}; + +struct ospf_test_node { + char hostname[256]; + const char *router_id; + mpls_label_t label; + struct ospf_test_adj adjacencies[MAX_ADJACENCIES + 1]; +}; + +struct ospf_topology { + struct ospf_test_node nodes[MAX_NODES + 1]; +}; + +/* Prototypes. */ +extern struct ospf_topology *test_find_topology(const char *name); +extern struct ospf_test_node *test_find_node(struct ospf_topology *topology, + const char *hostname); +extern int topology_load(struct vty *vty, struct ospf_topology *topology, + struct ospf_test_node *root, struct ospf *ospf); + +/* Global variables. 
*/ +extern struct thread_master *master; +extern struct ospf_topology topo1; +extern struct ospf_topology topo2; +extern struct ospf_topology topo3; +extern struct ospf_topology topo4; +extern struct ospf_topology topo5; +extern struct zebra_privs_t ospfd_privs; + +/* For stable order in unit tests */ +extern int sort_paths(const void **path1, const void **path2); + +/* Print the routing table */ +extern void print_route_table(struct vty *vty, struct route_table *rt); + +#endif /* _COMMON_OSPF_H */ diff --git a/tests/ospfd/test_ospf_spf.c b/tests/ospfd/test_ospf_spf.c new file mode 100644 index 0000000000..a85f7e14ec --- /dev/null +++ b/tests/ospfd/test_ospf_spf.c @@ -0,0 +1,303 @@ +#include <zebra.h> + +#include "getopt.h" +#include "thread.h" +#include <lib/version.h> +#include "vty.h" +#include "command.h" +#include "log.h" +#include "vrf.h" +#include "table.h" +#include "mpls.h" + +#include "ospfd/ospfd.h" +#include "ospfd/ospf_asbr.h" +#include "ospfd/ospf_lsa.h" +#include "ospfd/ospf_route.h" +#include "ospfd/ospf_spf.h" +#include "ospfd/ospf_ti_lfa.h" +#include "ospfd/ospf_vty.h" +#include "ospfd/ospf_dump.h" +#include "ospfd/ospf_sr.h" + +#include "common.h" + +DECLARE_RBTREE_UNIQ(p_spaces, struct p_space, p_spaces_item, + p_spaces_compare_func) +DECLARE_RBTREE_UNIQ(q_spaces, struct q_space, q_spaces_item, + q_spaces_compare_func) + +static struct ospf *test_init(struct ospf_test_node *root) +{ + struct ospf *ospf; + struct ospf_area *area; + struct in_addr area_id; + struct in_addr router_id; + + ospf = ospf_new_alloc(0, NULL); + + area_id.s_addr = OSPF_AREA_BACKBONE; + area = ospf_area_new(ospf, area_id); + listnode_add_sort(ospf->areas, area); + + inet_aton(root->router_id, &router_id); + ospf->router_id = router_id; + ospf->router_id_static = router_id; + ospf->ti_lfa_enabled = true; + + return ospf; +} + +static void test_run_spf(struct vty *vty, struct ospf *ospf, + enum protection_type protection_type, bool verbose) +{ + struct route_table *new_table, *new_rtrs; + struct ospf_area *area; + struct p_space *p_space; + struct q_space *q_space; + char label_buf[MPLS_LABEL_STRLEN]; + char res_buf[PROTECTED_RESOURCE_STRLEN]; + + /* Just use the backbone for testing */ + area = ospf->backbone; + + new_table = route_table_init(); + new_rtrs = route_table_init(); + + /* dryrun true, root_node false */ + ospf_spf_calculate(area, area->router_lsa_self, new_table, new_rtrs, + true, false); + + if (verbose) { + vty_out(vty, "SPF Tree without TI-LFA backup paths:\n\n"); + ospf_spf_print(vty, area->spf, 0); + + vty_out(vty, + "\nRouting Table without TI-LFA backup paths:\n\n"); + print_route_table(vty, new_table); + } + + if (verbose) + vty_out(vty, "\n... 
generating TI-LFA backup paths ...\n"); + + /* TI-LFA testrun */ + ospf_ti_lfa_generate_p_spaces(area, protection_type); + ospf_ti_lfa_insert_backup_paths(area, new_table); + + /* Print P/Q space information */ + if (verbose) { + vty_out(vty, "\nP and Q space info:\n"); + frr_each (p_spaces, area->p_spaces, p_space) { + ospf_print_protected_resource( + p_space->protected_resource, res_buf); + vty_out(vty, "\nP Space for root %pI4 and %s\n", + &p_space->root->id, res_buf); + ospf_spf_print(vty, p_space->root, 0); + + frr_each (q_spaces, p_space->q_spaces, q_space) { + vty_out(vty, + "\nQ Space for destination %pI4:\n", + &q_space->root->id); + ospf_spf_print(vty, q_space->root, 0); + if (q_space->label_stack) { + mpls_label2str( + q_space->label_stack + ->num_labels, + q_space->label_stack->label, + label_buf, MPLS_LABEL_STRLEN, + true); + vty_out(vty, "\nLabel stack: %s\n", + label_buf); + } else { + vty_out(vty, + "\nLabel stack not generated!\n"); + } + } + + vty_out(vty, "\nPost-convergence SPF Tree:\n"); + ospf_spf_print(vty, p_space->pc_spf, 0); + } + } + + /* Cleanup */ + ospf_ti_lfa_free_p_spaces(area); + ospf_spf_cleanup(area->spf, area->spf_vertex_list); + + /* + * Print the new routing table which is augmented with TI-LFA backup + * paths (label stacks). + */ + if (verbose) + vty_out(vty, + "\n\nFinal Routing Table including backup paths:\n\n"); + + print_route_table(vty, new_table); +} + +static int test_run(struct vty *vty, struct ospf_topology *topology, + struct ospf_test_node *root, + enum protection_type protection_type, bool verbose) +{ + struct ospf *ospf; + + ospf = test_init(root); + + /* Inject LSAs into the OSPF backbone according to the topology */ + if (topology_load(vty, topology, root, ospf)) { + vty_out(vty, "%% Failed to load topology\n"); + return CMD_WARNING; + } + + if (verbose) { + vty_out(vty, "\n"); + show_ip_ospf_database_summary(vty, ospf, 0, NULL); + } + + test_run_spf(vty, ospf, protection_type, verbose); + + return 0; +} + +DEFUN(test_ospf, test_ospf_cmd, + "test ospf topology WORD root HOSTNAME ti-lfa [node-protection] [verbose]", + "Test mode\n" + "Choose OSPF for SPF testing\n" + "Network topology to choose\n" + "Name of the network topology to choose\n" + "Root node to choose\n" + "Hostname of the root node to choose\n" + "Use Topology-Independent LFA\n" + "Use node protection (default is link protection)\n" + "Verbose output\n") +{ + struct ospf_topology *topology; + struct ospf_test_node *root; + enum protection_type protection_type = OSPF_TI_LFA_LINK_PROTECTION; + int idx = 0; + bool verbose = false; + + /* Parse topology. */ + argv_find(argv, argc, "topology", &idx); + topology = test_find_topology(argv[idx + 1]->arg); + if (!topology) { + vty_out(vty, "%% Topology not found\n"); + return CMD_WARNING; + } + + argv_find(argv, argc, "root", &idx); + root = test_find_node(topology, argv[idx + 1]->arg); + if (!root) { + vty_out(vty, "%% Root not found\n"); + return CMD_WARNING; + } + + if (argv_find(argv, argc, "node-protection", &idx)) + protection_type = OSPF_TI_LFA_NODE_PROTECTION; + + if (argv_find(argv, argc, "verbose", &idx)) + verbose = true; + + return test_run(vty, topology, root, protection_type, verbose); +} + +static void vty_do_exit(int isexit) +{ + printf("\nend.\n"); + + cmd_terminate(); + vty_terminate(); + thread_master_free(master); + + if (!isexit) + exit(0); +} + +struct option longopts[] = {{"help", no_argument, NULL, 'h'}, + {"debug", no_argument, NULL, 'd'}, + {0} }; + +/* Help information display. 
*/ +static void usage(char *progname, int status) +{ + if (status != 0) + fprintf(stderr, "Try `%s --help' for more information.\n", + progname); + else { + printf("Usage : %s [OPTION...]\n\ +ospfd SPF test program.\n\n\ +-u, --debug Enable debugging\n\ +-h, --help Display this help and exit\n\ +\n\ +Report bugs to %s\n", + progname, FRR_BUG_ADDRESS); + } + exit(status); +} + +int main(int argc, char **argv) +{ + char *p; + char *progname; + struct thread thread; + bool debug = false; + + /* Set umask before anything for security */ + umask(0027); + + /* get program name */ + progname = ((p = strrchr(argv[0], '/')) ? ++p : argv[0]); + + while (1) { + int opt; + + opt = getopt_long(argc, argv, "hd", longopts, 0); + + if (opt == EOF) + break; + + switch (opt) { + case 0: + break; + case 'd': + debug = true; + break; + case 'h': + usage(progname, 0); + break; + default: + usage(progname, 1); + break; + } + } + + /* master init. */ + master = thread_master_create(NULL); + + /* Library inits. */ + cmd_init(1); + cmd_hostname_set("test"); + vty_init(master, false); + if (debug) + zlog_aux_init("NONE: ", LOG_DEBUG); + else + zlog_aux_init("NONE: ", ZLOG_DISABLED); + + /* Install test command. */ + install_element(VIEW_NODE, &test_ospf_cmd); + + /* needed for SR DB init */ + ospf_vty_init(); + ospf_sr_init(); + + term_debug_ospf_ti_lfa = 1; + + /* Read input from .in file. */ + vty_stdio(vty_do_exit); + + /* Fetch next active thread. */ + while (thread_fetch(master, &thread)) + thread_call(&thread); + + /* Not reached. */ + exit(0); +} diff --git a/tests/ospfd/test_ospf_spf.in b/tests/ospfd/test_ospf_spf.in new file mode 100644 index 0000000000..f1e746745f --- /dev/null +++ b/tests/ospfd/test_ospf_spf.in @@ -0,0 +1,10 @@ +test ospf topology topo1 root rt1 ti-lfa +test ospf topology topo1 root rt1 ti-lfa node-protection +test ospf topology topo2 root rt1 ti-lfa +test ospf topology topo2 root rt1 ti-lfa node-protection +test ospf topology topo3 root rt1 ti-lfa +test ospf topology topo3 root rt1 ti-lfa node-protection +test ospf topology topo4 root rt1 ti-lfa +test ospf topology topo4 root rt1 ti-lfa node-protection +test ospf topology topo5 root rt1 ti-lfa +test ospf topology topo5 root rt1 ti-lfa node-protection diff --git a/tests/ospfd/test_ospf_spf.py b/tests/ospfd/test_ospf_spf.py new file mode 100644 index 0000000000..92a1c6a145 --- /dev/null +++ b/tests/ospfd/test_ospf_spf.py @@ -0,0 +1,4 @@ +import frrtest + +class TestOspfSPF(frrtest.TestRefOut): + program = './test_ospf_spf' diff --git a/tests/ospfd/test_ospf_spf.refout b/tests/ospfd/test_ospf_spf.refout new file mode 100644 index 0000000000..d1e3c7bc65 --- /dev/null +++ b/tests/ospfd/test_ospf_spf.refout @@ -0,0 +1,130 @@ +test# test ospf topology topo1 root rt1 ti-lfa +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002 +N 3.3.3.3/32 0.0.0.0 10 + -> 10.0.3.2 with adv router 3.3.3.3 and backup path 15001 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 20 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002 + -> 10.0.3.2 with adv router 3.3.3.3 and backup path 15001 +N 10.0.3.0/24 0.0.0.0 10 +test# test ospf topology topo1 root rt1 ti-lfa node-protection +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 +N 3.3.3.3/32 0.0.0.0 10 + -> 10.0.3.2 with adv router 3.3.3.3 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 20 + -> 10.0.1.2 with adv router 2.2.2.2 + -> 10.0.3.2 with adv router 3.3.3.3 +N 10.0.3.0/24 0.0.0.0 10 +test# test ospf topology 
topo2 root rt1 ti-lfa +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.1.2 with adv router 3.3.3.3 and backup path 15002 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 20 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 15002 +N 10.0.3.0/24 0.0.0.0 30 +test# test ospf topology topo2 root rt1 ti-lfa node-protection +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.1.2 with adv router 3.3.3.3 and backup path 15002 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 20 + -> 10.0.1.2 with adv router 2.2.2.2 +N 10.0.3.0/24 0.0.0.0 30 +test# test ospf topology topo3 root rt1 ti-lfa +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001 +N 4.4.4.4/32 0.0.0.0 10 + -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 30 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030 +N 10.0.3.0/24 0.0.0.0 20 + -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004 +N 10.0.4.0/24 0.0.0.0 10 +test# test ospf topology topo3 root rt1 ti-lfa node-protection +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001 +N 4.4.4.4/32 0.0.0.0 10 + -> 10.0.4.2 with adv router 4.4.4.4 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 30 + -> 10.0.1.2 with adv router 2.2.2.2 +N 10.0.3.0/24 0.0.0.0 20 + -> 10.0.4.2 with adv router 4.4.4.4 +N 10.0.4.0/24 0.0.0.0 10 +test# test ospf topology topo4 root rt1 ti-lfa +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030/15006 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004 +N 4.4.4.4/32 0.0.0.0 10 + -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 60 + -> 10.0.1.2 with adv router 2.2.2.2 and backup path 16030/15006 +N 10.0.3.0/24 0.0.0.0 20 + -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004 +N 10.0.4.0/24 0.0.0.0 10 +test# test ospf topology topo4 root rt1 ti-lfa node-protection +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 10 + -> 10.0.1.2 with adv router 2.2.2.2 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004 +N 4.4.4.4/32 0.0.0.0 10 + -> 10.0.4.2 with adv router 4.4.4.4 +N 10.0.1.0/24 0.0.0.0 10 +N 10.0.2.0/24 0.0.0.0 60 + -> 10.0.1.2 with adv router 2.2.2.2 +N 10.0.3.0/24 0.0.0.0 20 + -> 10.0.4.2 with adv router 4.4.4.4 +N 10.0.4.0/24 0.0.0.0 10 +test# test ospf topology topo5 root rt1 ti-lfa +N 1.1.1.1/32 0.0.0.0 0 +N 2.2.2.2/32 0.0.0.0 30 + -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004 +N 4.4.4.4/32 0.0.0.0 10 + -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004/15006 +N 10.0.1.0/24 0.0.0.0 40 + -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001 +N 10.0.2.0/24 0.0.0.0 30 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004 +N 10.0.3.0/24 0.0.0.0 20 + -> 10.0.4.2 with adv router 4.4.4.4 and backup path 15001/15004/15006 +N 10.0.4.0/24 0.0.0.0 10 +test# test ospf topology topo5 root rt1 ti-lfa node-protection +N 1.1.1.1/32 0.0.0.0 0 +N 
2.2.2.2/32 0.0.0.0 30 + -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001 +N 3.3.3.3/32 0.0.0.0 20 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004 +N 4.4.4.4/32 0.0.0.0 10 + -> 10.0.4.2 with adv router 4.4.4.4 +N 10.0.1.0/24 0.0.0.0 40 + -> 10.0.4.2 with adv router 2.2.2.2 and backup path 15001 +N 10.0.2.0/24 0.0.0.0 30 + -> 10.0.4.2 with adv router 3.3.3.3 and backup path 15001/15004 +N 10.0.3.0/24 0.0.0.0 20 + -> 10.0.4.2 with adv router 4.4.4.4 +N 10.0.4.0/24 0.0.0.0 10 +test# +end. diff --git a/tests/ospfd/topologies.c b/tests/ospfd/topologies.c new file mode 100644 index 0000000000..2dc611ce96 --- /dev/null +++ b/tests/ospfd/topologies.c @@ -0,0 +1,575 @@ +#include <zebra.h> + +#include "mpls.h" +#include "if.h" + +#include "ospfd/ospfd.h" + +#include "common.h" + +/* + * +---------+ +---------+ + * | | | | + * | RT1 |eth-rt2 eth-rt1| RT2 | + * | 1.1.1.1 +---------------------+ 2.2.2.2 | + * | | 10.0.1.0/24 | | + * +---------+ +---------+ + * |eth-rt3 eth-rt3| + * | | + * |10.0.3.0/24 | + * | | + * |eth-rt1 | + * +---------+ | + * | |eth-rt2 10.0.2.0/24| + * | RT3 +--------------------------+ + * | 3.3.3.3 | + * | | + * +---------+ + * + * Link Protection: + * P and Q spaces overlap here, hence just one P/Q node regardless of which + * link is protected. Hence the backup label stack just has one label. + * + * Node Protection: + * Obviously no backup paths involved. + */ +struct ospf_topology topo1 = { + .nodes = + { + { + .hostname = "rt1", + .router_id = "1.1.1.1", + .label = 10, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.1.1/24", + .metric = 10, + .label = 1, + }, + { + .hostname = "rt3", + .network = + "10.0.3.1/24", + .metric = 10, + .label = 2, + }, + }, + }, + { + .hostname = "rt2", + .router_id = "2.2.2.2", + .label = 20, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.1.2/24", + .metric = 10, + .label = 3, + }, + { + .hostname = "rt3", + .network = + "10.0.2.1/24", + .metric = 10, + .label = 4, + }, + }, + }, + { + .hostname = "rt3", + .router_id = "3.3.3.3", + .label = 30, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.3.2/24", + .metric = 10, + .label = 5, + }, + { + .hostname = "rt2", + .network = + "10.0.2.2/24", + .metric = 10, + .label = 6, + }, + }, + }, + }, +}; + + +/* + * +---------+ +---------+ + * | | | | + * | RT1 |eth-rt2 eth-rt1| RT2 | + * | 1.1.1.1 +---------------------+ 2.2.2.2 | + * | | 10.0.1.0/24 (10) | | + * +---------+ +---------+ + * |eth-rt3 eth-rt3| + * | | + * |10.0.3.0/24 (30) | + * | | + * |eth-rt1 | + * +---------+ | + * | |eth-rt2 10.0.2.0/24|(10) + * | RT3 +--------------------------+ + * | 3.3.3.3 | + * | | + * +---------+ + * + * Link Protection: + * Regarding the subnet 10.0.1.0/24, the P space of RT1 is just RT1 itself + * while the Q space of RT3 consists of RT3 and RT2. Hence the P and Q + * nodes are disjunct (tricky: the root node is the P node here). For the + * backup label stack just one label is necessary. + * + * Node Protection: + * For protected node RT2 and route from RT1 to RT3 there is just the backup + * path consisting of the label 15002. 
+ */ +struct ospf_topology topo2 = { + .nodes = + { + { + .hostname = "rt1", + .router_id = "1.1.1.1", + .label = 10, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.1.1/24", + .metric = 10, + .label = 1, + }, + { + .hostname = "rt3", + .network = + "10.0.3.1/24", + .metric = 30, + .label = 2, + }, + }, + }, + { + .hostname = "rt2", + .router_id = "2.2.2.2", + .label = 20, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.1.2/24", + .metric = 10, + .label = 3, + }, + { + .hostname = "rt3", + .network = + "10.0.2.1/24", + .metric = 10, + .label = 4, + }, + }, + }, + { + .hostname = "rt3", + .router_id = "3.3.3.3", + .label = 30, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.3.2/24", + .metric = 30, + .label = 5, + }, + { + .hostname = "rt2", + .network = + "10.0.2.2/24", + .metric = 10, + .label = 6, + }, + }, + }, + }, +}; + +/* + * +---------+ +---------+ + * | | | | + * | RT1 |eth-rt4 eth-rt1| RT4 | + * | 1.1.1.1 +---------------------+ 4.4.4.4 | + * | | 10.0.4.0/24 (10) | | + * +---------+ +---------+ + * |eth-rt2 eth-rt3| + * | | + * |10.0.1.0/24 (10) | + * | 10.0.3.0/24 (10) | + * |eth-rt1 eth-rt4| + * +---------+ +---------+ + * | |eth-rt3 eth-rt2| | + * | RT2 +---------------------+ RT3 | + * | 2.2.2.2 | 10.0.2.0/24 (20) | 3.3.3.3 | + * | | | | + * +---------+ +---------+ + * + * Link Protection: + * Regarding the protected subnet 10.0.4.0/24, the P and Q spaces for root RT1 + * and destination RT4 are disjunct and the P node is RT2 while RT3 is the Q + * node. Hence the backup label stack here is 16020/15004. Note that here the + * P and Q nodes are neither the root nor the destination nodes, so this is a + * case where you really need a label stack consisting of two labels. + * + * Node Protection: + * For the protected node RT4 and the route from RT1 to RT3 there is a backup + * path with the single label 15001. 
+ */ +struct ospf_topology topo3 = { + .nodes = + { + { + .hostname = "rt1", + .router_id = "1.1.1.1", + .label = 10, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.1.1/24", + .metric = 10, + .label = 1, + }, + { + .hostname = "rt4", + .network = + "10.0.4.1/24", + .metric = 10, + .label = 2, + }, + }, + }, + { + .hostname = "rt2", + .router_id = "2.2.2.2", + .label = 20, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.1.2/24", + .metric = 10, + .label = 3, + }, + { + .hostname = "rt3", + .network = + "10.0.2.1/24", + .metric = 20, + .label = 4, + }, + }, + }, + { + .hostname = "rt3", + .router_id = "3.3.3.3", + .label = 30, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.2.2/24", + .metric = 20, + .label = 5, + }, + { + .hostname = "rt4", + .network = + "10.0.3.1/24", + .metric = 10, + .label = 6, + }, + }, + }, + { + .hostname = "rt4", + .router_id = "4.4.4.4", + .label = 40, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.4.2/24", + .metric = 10, + .label = 7, + }, + { + .hostname = "rt3", + .network = + "10.0.3.2/24", + .metric = 10, + .label = 8, + }, + }, + }, + }, +}; + +/* + * +---------+ +---------+ + * | | | | + * | RT1 |eth-rt4 eth-rt1| RT4 | + * | 1.1.1.1 +---------------------+ 4.4.4.4 | + * | | 10.0.4.0/24 (10) | | + * +---------+ +---------+ + * |eth+rt2 eth-rt3| + * | | + * |10.0.1.0/24 (10) | + * | 10.0.3.0/24 (10) | + * |eth-rt1 eth-rt4| + * +---------+ +---------+ + * | |eth-rt3 eth-rt2| | + * | RT2 +---------------------+ RT3 | + * | 2.2.2.2 | 10.0.2.0/24 (40) | 3.3.3.3 | + * | | | | + * +---------+ +---------+ + * + * This case was specifically created for Node Protection with RT4 as + * protected node from the perspective of RT1. Note the weight of 40 + * on the link between RT2 and RT3. + * The P space of RT1 is just RT2 while the Q space of RT3 is empty. + * This means that the P and Q spaces are disjunct and there are two + * labels needed to get from RT1 to RT3. 
+ */ +struct ospf_topology topo4 = { + .nodes = + { + { + .hostname = "rt1", + .router_id = "1.1.1.1", + .label = 10, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.1.1/24", + .metric = 10, + .label = 1, + }, + { + .hostname = "rt4", + .network = + "10.0.4.1/24", + .metric = 10, + .label = 2, + }, + }, + }, + { + .hostname = "rt2", + .router_id = "2.2.2.2", + .label = 20, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.1.2/24", + .metric = 10, + .label = 3, + }, + { + .hostname = "rt3", + .network = + "10.0.2.1/24", + .metric = 50, + .label = 4, + }, + }, + }, + { + .hostname = "rt3", + .router_id = "3.3.3.3", + .label = 30, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.2.2/24", + .metric = 50, + .label = 5, + }, + { + .hostname = "rt4", + .network = + "10.0.3.1/24", + .metric = 10, + .label = 6, + }, + }, + }, + { + .hostname = "rt4", + .router_id = "4.4.4.4", + .label = 40, + .adjacencies = + { + { + .hostname = "rt3", + .network = + "10.0.3.2/24", + .metric = 10, + .label = 7, + }, + { + .hostname = "rt1", + .network = + "10.0.4.2/24", + .metric = 10, + .label = 8, + }, + }, + }, + }, +}; + +/* + * +---------+ +---------+ + * | | | | + * | RT1 |eth-rt4 eth-rt1| RT4 | + * | 1.1.1.1 +---------------------+ 4.4.4.4 | + * | | 10.0.4.0/24 | | + * +---------+ +---------+ + * |eth+rt2 eth-rt3| + * | | + * |10.0.1.0/24 | + * | 10.0.3.0/24| + * |eth-rt1 eth-rt4| + * +---------+ +---------+ + * | |eth-rt3 eth-rt2| | + * | RT2 +---------------------+ RT3 | + * | 2.2.2.2 | 10.0.2.0/24 | 3.3.3.3 | + * | | | | + * +---------+ +---------+ + * + * Weights: + * - clockwise: 10 + * - counterclockwise: 40 + * + * This is an example where 3 (!) labels are needed for the protected + * link RT1<->RT2, e.g. the subnet 10.0.1.0/24, to reach RT4. + * + * Because the initial P and Q spaces will not be overlapping or + * adjacent for this case the TI-LFA will be applied recursively. 
+ */ +struct ospf_topology topo5 = { + .nodes = + { + { + .hostname = "rt1", + .router_id = "1.1.1.1", + .label = 10, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.1.1/24", + .metric = 40, + .label = 1, + }, + { + .hostname = "rt4", + .network = + "10.0.4.1/24", + .metric = 10, + .label = 2, + }, + }, + }, + { + .hostname = "rt2", + .router_id = "2.2.2.2", + .label = 20, + .adjacencies = + { + { + .hostname = "rt1", + .network = + "10.0.1.2/24", + .metric = 10, + .label = 3, + }, + { + .hostname = "rt3", + .network = + "10.0.2.1/24", + .metric = 40, + .label = 4, + }, + }, + }, + { + .hostname = "rt3", + .router_id = "3.3.3.3", + .label = 30, + .adjacencies = + { + { + .hostname = "rt2", + .network = + "10.0.2.2/24", + .metric = 10, + .label = 5, + }, + { + .hostname = "rt4", + .network = + "10.0.3.1/24", + .metric = 40, + .label = 6, + }, + }, + }, + { + .hostname = "rt4", + .router_id = "4.4.4.4", + .label = 40, + .adjacencies = + { + { + .hostname = "rt3", + .network = + "10.0.3.2/24", + .metric = 10, + .label = 7, + }, + { + .hostname = "rt1", + .network = + "10.0.4.2/24", + .metric = 40, + .label = 8, + }, + }, + }, + }, +}; diff --git a/tests/subdir.am b/tests/subdir.am index 211814c1c3..370e6a49a9 100644 --- a/tests/subdir.am +++ b/tests/subdir.am @@ -31,6 +31,14 @@ TESTS_ISISD = IGNORE_ISISD = --ignore=isisd/ endif +if OSPFD +TESTS_OSPFD = \ + tests/ospfd/test_ospf_spf \ + # end +else +TESTS_OSPFD = +endif + if OSPF6D TESTS_OSPF6D = \ tests/ospf6d/test_lsdb \ @@ -41,6 +49,16 @@ TESTS_OSPF6D = IGNORE_OSPF6D = --ignore=ospf6d/ endif +if ZEBRA +TESTS_ZEBRA = \ + tests/zebra/test_lm_plugin \ + #end +IGNORE_ZEBRA = +else +TESTS_ZEBRA = +IGNORE_ZEBRA = --ignore=zebra/ +endif + clippy_scan += \ tests/lib/cli/test_cli.c \ tests/ospf6d/test_lsdb.c \ @@ -73,6 +91,7 @@ check_PROGRAMS = \ tests/lib/test_ttable \ tests/lib/test_typelist \ tests/lib/test_versioncmp \ + tests/lib/test_xref \ tests/lib/test_zlog \ tests/lib/test_graph \ tests/lib/cli/test_cli \ @@ -80,7 +99,9 @@ check_PROGRAMS = \ tests/lib/northbound/test_oper_data \ $(TESTS_BGPD) \ $(TESTS_ISISD) \ + $(TESTS_OSPFD) \ $(TESTS_OSPF6D) \ + $(TESTS_ZEBRA) \ # end if ZEROMQ @@ -115,6 +136,7 @@ noinst_HEADERS += \ tests/lib/cli/common_cli.h \ tests/lib/test_typelist.h \ tests/isisd/test_common.h \ + tests/ospfd/common.h \ # end # @@ -134,7 +156,9 @@ TESTS_CFLAGS = \ ALL_TESTS_LDADD = lib/libfrr.la $(LIBCAP) BGP_TEST_LDADD = bgpd/libbgp.a $(RFPLDADD) $(ALL_TESTS_LDADD) -lm ISISD_TEST_LDADD = isisd/libisis.a $(ALL_TESTS_LDADD) +OSPFD_TEST_LDADD = ospfd/libfrrospf.a $(ALL_TESTS_LDADD) OSPF6_TEST_LDADD = ospf6d/libospf6.a $(ALL_TESTS_LDADD) +ZEBRA_TEST_LDADD = zebra/label_manager.o $(ALL_TESTS_LDADD) tests_bgpd_test_aspath_CFLAGS = $(TESTS_CFLAGS) tests_bgpd_test_aspath_CPPFLAGS = $(TESTS_CPPFLAGS) @@ -201,6 +225,11 @@ tests_isisd_test_isis_vertex_queue_CPPFLAGS = $(TESTS_CPPFLAGS) tests_isisd_test_isis_vertex_queue_LDADD = $(ISISD_TEST_LDADD) tests_isisd_test_isis_vertex_queue_SOURCES = tests/isisd/test_isis_vertex_queue.c tests/isisd/test_common.c +tests_ospfd_test_ospf_spf_CFLAGS = $(TESTS_CFLAGS) +tests_ospfd_test_ospf_spf_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_ospfd_test_ospf_spf_LDADD = $(OSPFD_TEST_LDADD) +tests_ospfd_test_ospf_spf_SOURCES = tests/ospfd/test_ospf_spf.c tests/ospfd/common.c tests/ospfd/topologies.c + tests_lib_cxxcompat_CFLAGS = $(TESTS_CFLAGS) $(CXX_COMPAT_CFLAGS) $(WERROR) tests_lib_cxxcompat_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_cxxcompat_SOURCES = tests/lib/cxxcompat.c @@ -322,6 +351,10 @@ 
tests_lib_test_versioncmp_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_versioncmp_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_versioncmp_LDADD = $(ALL_TESTS_LDADD) tests_lib_test_versioncmp_SOURCES = tests/lib/test_versioncmp.c +tests_lib_test_xref_CFLAGS = $(TESTS_CFLAGS) +tests_lib_test_xref_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_lib_test_xref_LDADD = $(ALL_TESTS_LDADD) +tests_lib_test_xref_SOURCES = tests/lib/test_xref.c tests_lib_test_zlog_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_zlog_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_zlog_LDADD = $(ALL_TESTS_LDADD) @@ -336,6 +369,11 @@ tests_ospf6d_test_lsdb_CPPFLAGS = $(TESTS_CPPFLAGS) tests_ospf6d_test_lsdb_LDADD = $(OSPF6_TEST_LDADD) tests_ospf6d_test_lsdb_SOURCES = tests/ospf6d/test_lsdb.c tests/lib/cli/common_cli.c +tests_zebra_test_lm_plugin_CFLAGS = $(TESTS_CFLAGS) +tests_zebra_test_lm_plugin_CPPFLAGS = $(TESTS_CPPFLAGS) +tests_zebra_test_lm_plugin_LDADD = $(ZEBRA_TEST_LDADD) +tests_zebra_test_lm_plugin_SOURCES = tests/zebra/test_lm_plugin.c + EXTRA_DIST += \ tests/runtests.py \ tests/bgpd/test_aspath.py \ @@ -353,6 +391,9 @@ EXTRA_DIST += \ tests/isisd/test_isis_spf.in \ tests/isisd/test_isis_spf.refout \ tests/isisd/test_isis_vertex_queue.py \ + tests/ospfd/test_ospf_spf.py \ + tests/ospfd/test_ospf_spf.in \ + tests/ospfd/test_ospf_spf.refout \ tests/lib/cli/test_commands.in \ tests/lib/cli/test_commands.py \ tests/lib/cli/test_commands.refout \ @@ -377,12 +418,15 @@ EXTRA_DIST += \ tests/lib/test_ttable.refout \ tests/lib/test_typelist.py \ tests/lib/test_versioncmp.py \ + tests/lib/test_xref.py \ tests/lib/test_zlog.py \ tests/lib/test_graph.py \ tests/lib/test_graph.refout \ tests/ospf6d/test_lsdb.py \ tests/ospf6d/test_lsdb.in \ tests/ospf6d/test_lsdb.refout \ + tests/zebra/test_lm_plugin.py \ + tests/zebra/test_lm_plugin.refout \ # end .PHONY: tests/tests.xml diff --git a/tests/topotests/all-protocol-startup/r1/ip_nht.ref b/tests/topotests/all-protocol-startup/r1/ip_nht.ref index 098e3bf387..1da4da4df5 100644 --- a/tests/topotests/all-protocol-startup/r1/ip_nht.ref +++ b/tests/topotests/all-protocol-startup/r1/ip_nht.ref @@ -39,6 +39,18 @@ 4.4.4.2 unresolved Client list: pbr(fd XX) +6.6.6.1 + unresolved + Client list: pbr(fd XX) +6.6.6.2 + unresolved + Client list: pbr(fd XX) +6.6.6.3 + unresolved + Client list: pbr(fd XX) +6.6.6.4 + unresolved + Client list: pbr(fd XX) 192.168.0.2 resolved via connected is directly connected, r1-eth0 diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py index 08378d9b58..5942aca71d 100644 --- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py +++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py @@ -83,6 +83,9 @@ class NetworkTopo(Topo): ##################################################### +@pytest.mark.isis +@pytest.mark.ospf +@pytest.mark.rip def setup_module(module): global topo, net global fatal_error @@ -342,7 +345,7 @@ def test_converge_protocols(): actual = ( net["r%s" % i] .cmd( - 'vtysh -c "show ip route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null' + 'vtysh -c "show ip route" | sed -e \'/^Codes: /,/^\s*$/d\' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null' ) .rstrip() ) @@ -373,7 +376,7 @@ def test_converge_protocols(): actual = ( net["r%s" % i] .cmd( - 'vtysh -c "show ipv6 route" | /usr/bin/tail -n +7 | env LC_ALL=en_US.UTF-8 sort 2> /dev/null' + 'vtysh -c "show ipv6 route" | sed -e \'/^Codes: /,/^\s*$/d\' | env LC_ALL=en_US.UTF-8 sort 2> /dev/null' 
) .rstrip() ) @@ -535,6 +538,51 @@ def test_nexthop_groups(): verify_route_nexthop_group("5.5.5.1/32") + ## 4-way ECMP Routes Pointing to Each Other + + # This is to check for a bug with NH resolution where + # routes would infintely resolve to each other blowing + # up the resolved-> nexthop pointer. + + net["r1"].cmd( + 'vtysh -c "c t" -c "nexthop-group infinite-recursive" -c "nexthop 6.6.6.1" -c "nexthop 6.6.6.2" \ + -c "nexthop 6.6.6.3" -c "nexthop 6.6.6.4"' + ) + + # static route nexthops can recurse to + + net["r1"].cmd('vtysh -c "c t" -c "ip route 6.6.6.0/24 1.1.1.1"') + + # Make routes that point to themselves in ecmp + + net["r1"].cmd( + 'vtysh -c "sharp install routes 6.6.6.4 nexthop-group infinite-recursive 1"' + ) + + net["r1"].cmd( + 'vtysh -c "sharp install routes 6.6.6.3 nexthop-group infinite-recursive 1"' + ) + + net["r1"].cmd( + 'vtysh -c "sharp install routes 6.6.6.2 nexthop-group infinite-recursive 1"' + ) + + net["r1"].cmd( + 'vtysh -c "sharp install routes 6.6.6.1 nexthop-group infinite-recursive 1"' + ) + + # Get routes and test if has too many (duplicate) nexthops + nhg_id = route_get_nhg_id("6.6.6.1/32") + output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id) + + dups = re.findall(r"(via 1\.1\.1\.1)", output) + + # Should find 3, itself is inactive + assert len(dups) == 3, ( + "Route 6.6.6.1/32 with Nexthop Group ID=%d has wrong number of resolved nexthops" + % nhg_id + ) + ##CLI(net) ## Remove all NHG routes @@ -546,6 +594,8 @@ def test_nexthop_groups(): net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.1 1"') net["r1"].cmd('vtysh -c "sharp remove routes 4.4.4.2 1"') net["r1"].cmd('vtysh -c "sharp remove routes 5.5.5.1 1"') + net["r1"].cmd('vtysh -c "sharp remove routes 6.6.6.1 4"') + net["r1"].cmd('vtysh -c "c t" -c "no ip route 6.6.6.0/24 1.1.1.1"') def test_rip_status(): @@ -1016,6 +1066,7 @@ def test_bgp_ipv6_summary(): # For debugging after starting FRR daemons, uncomment the next line # CLI(net) + def test_nht(): print("\n\n**** Test that nexthop tracking is at least nominally working ****\n") @@ -1026,13 +1077,16 @@ def test_nht(): expected = open(nhtFile).read().rstrip() expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) - actual = (net["r%s" %i].cmd('vtysh -c "show ip nht" 2> /dev/null').rstrip()) + actual = net["r%s" % i].cmd('vtysh -c "show ip nht" 2> /dev/null').rstrip() actual = re.sub(r"fd [0-9][0-9]", "fd XX", actual) actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) - diff = topotest.get_textdiff(actual, expected, - title1="Actual `show ip nht`", - title2="Expected `show ip nht`") + diff = topotest.get_textdiff( + actual, + expected, + title1="Actual `show ip nht`", + title2="Expected `show ip nht`", + ) if diff: assert 0, "r%s failed ip nht check:\n%s\n" % (i, diff) @@ -1043,19 +1097,23 @@ def test_nht(): expected = open(nhtFile).read().rstrip() expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1) - actual = (net["r%s" %i].cmd('vtysh -c "show ipv6 nht" 2> /dev/null').rstrip()) + actual = net["r%s" % i].cmd('vtysh -c "show ipv6 nht" 2> /dev/null').rstrip() actual = re.sub(r"fd [0-9][0-9]", "fd XX", actual) actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1) - diff = topotest.get_textdiff(actual, expected, - title1="Actual `show ip nht`", - title2="Expected `show ip nht`") + diff = topotest.get_textdiff( + actual, + expected, + title1="Actual `show ip nht`", + title2="Expected `show ip nht`", + ) if diff: assert 0, "r%s failed ipv6 nht check:\n%s\n" % (i, diff) else: 
print("show ipv6 nht is ok\n") + def test_bgp_ipv4(): global fatal_error global net diff --git a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py index 595132214b..cc1c1e3a0c 100644 --- a/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py +++ b/tests/topotests/bfd-bgp-cbit-topo3/test_bfd_bgp_cbit_topo3.py @@ -64,7 +64,7 @@ class BFDTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r3"]) - +@pytest.mark.bfd def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(BFDTopo, mod.__name__) @@ -74,7 +74,8 @@ def setup_module(mod): for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)), + TopoRouter.RD_ZEBRA, + os.path.join(CWD, "{}/zebra.conf".format(rname)), ) router.load_config( TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) diff --git a/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py b/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py index 1adfec76d8..23da7ed850 100644 --- a/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py +++ b/tests/topotests/bfd-isis-topo1/test_bfd_isis_topo1.py @@ -127,7 +127,8 @@ class TemplateTopo(Topo): switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") - +@pytest.mark.bfd +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(TemplateTopo, mod.__name__) diff --git a/tests/topotests/bfd-ospf-topo1/__init__.py b/tests/topotests/bfd-ospf-topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/__init__.py diff --git a/tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf new file mode 100644 index 0000000000..610a20f88a --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/bfdd.conf @@ -0,0 +1,9 @@ +log file bfdd.log +log timestamp precision 3 +! +debug bfd network +debug bfd peer +debug bfd zebra +! +bfd +! diff --git a/tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf new file mode 100644 index 0000000000..18def599b4 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/ospf6d.conf @@ -0,0 +1,21 @@ +log file ospf6d.log +log timestamp precision 3 +! +hostname rt1 +! +password 1 +! +interface eth-rt2 + ipv6 ospf6 network broadcast + ipv6 ospf6 bfd +! +interface eth-rt3 + ipv6 ospf6 network broadcast + ipv6 ospf6 bfd +! +router ospf6 + ospf6 router-id 1.1.1.1 + interface eth-rt2 area 0.0.0.0 + interface eth-rt3 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf new file mode 100644 index 0000000000..07b42f9885 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/ospfd.conf @@ -0,0 +1,26 @@ +log file ospfd.log +log timestamp precision 3 +! +hostname rt1 +! +password 1 +! +debug ospf event +debug ospf zebra +! +interface lo + ip ospf area 0.0.0.0 +! +interface eth-rt2 + ip ospf area 0.0.0.0 + ip ospf bfd +! +interface eth-rt3 + ip ospf area 0.0.0.0 + ip ospf bfd +! +router ospf + ospf router-id 1.1.1.1 + passive interface lo + router-info area 0.0.0.0 +! 
diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref new file mode 100644 index 0000000000..f354eff697 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ip_route.ref @@ -0,0 +1,74 @@ +{ + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..6465efb8b5 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step1/show_ipv6_route.ref @@ -0,0 +1,70 @@ +{ + "::ffff:202:202\/128":[ + { + "prefix":"::ffff:202:202\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "::ffff:303:303\/128":[ + { + "prefix":"::ffff:303:303\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:404:404\/128":[ + { + "prefix":"::ffff:404:404\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:505:505\/128":[ + { + "prefix":"::ffff:505:505\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref b/tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref new file mode 100644 index 0000000000..63f0d50784 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step2/show_bfd_peers.ref @@ -0,0 +1,26 @@ +[ + { + "interface": "eth-rt3", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt2", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt3", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt2", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + } +] diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref new file mode 100644 index 0000000000..42051f9582 
--- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_healthy.ref @@ -0,0 +1,28 @@ +[ + { + "peer": "10.0.2.2", + "interface": "eth-rt3", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "peer": "10.0.1.2", + "interface": "eth-rt2", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt3", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt2", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + } +] diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref new file mode 100644 index 0000000000..d844ee6813 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt2_down.ref @@ -0,0 +1,15 @@ +[ + { + "peer": "10.0.2.2", + "interface": "eth-rt3", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt3", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + } +] diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref new file mode 100644 index 0000000000..32799084fb --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_bfd_peers_rt3_down.ref @@ -0,0 +1,15 @@ +[ + { + "peer": "10.0.1.2", + "interface": "eth-rt2", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt2", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + } +] diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref new file mode 100644 index 0000000000..f354eff697 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_healthy.ref @@ -0,0 +1,74 @@ +{ + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref new file mode 100644 index 0000000000..43eecd0b7a --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt2_down.ref @@ -0,0 +1,74 @@ +{ + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true 
+ } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref new file mode 100644 index 0000000000..409af6308b --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ip_route_rt3_down.ref @@ -0,0 +1,74 @@ +{ + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"ospf", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref new file mode 100644 index 0000000000..6465efb8b5 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_healthy.ref @@ -0,0 +1,70 @@ +{ + "::ffff:202:202\/128":[ + { + "prefix":"::ffff:202:202\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "::ffff:303:303\/128":[ + { + "prefix":"::ffff:303:303\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:404:404\/128":[ + { + "prefix":"::ffff:404:404\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:505:505\/128":[ + { + "prefix":"::ffff:505:505\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git 
a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref new file mode 100644 index 0000000000..cfb1ef1bb6 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt2_down.ref @@ -0,0 +1,70 @@ +{ + "::ffff:202:202\/128":[ + { + "prefix":"::ffff:202:202\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:303:303\/128":[ + { + "prefix":"::ffff:303:303\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:404:404\/128":[ + { + "prefix":"::ffff:404:404\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ], + "::ffff:505:505\/128":[ + { + "prefix":"::ffff:505:505\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref new file mode 100644 index 0000000000..58b44da5c2 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/step3/show_ipv6_route_rt3_down.ref @@ -0,0 +1,70 @@ +{ + "::ffff:202:202\/128":[ + { + "prefix":"::ffff:202:202\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "::ffff:303:303\/128":[ + { + "prefix":"::ffff:303:303\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "::ffff:404:404\/128":[ + { + "prefix":"::ffff:404:404\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ], + "::ffff:505:505\/128":[ + { + "prefix":"::ffff:505:505\/128", + "protocol":"ospf6", + "selected":true, + "destSelected":true, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/bfd-ospf-topo1/rt1/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt1/zebra.conf new file mode 100644 index 0000000000..6003125b6b --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt1/zebra.conf @@ -0,0 +1,25 @@ +log file zebra.log +log timestamp precision 3 +! +hostname rt1 +! +debug zebra kernel +debug zebra packet +debug zebra events +debug zebra rib +! +interface lo + ip address 1.1.1.1/32 + ipv6 address ::ffff:0101:0101/128 +! +interface eth-rt2 + ip address 10.0.1.1/24 +! +interface eth-rt3 + ip address 10.0.2.1/24 +! +ip forwarding +ipv6 forwarding +! +line vty +! diff --git a/tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf new file mode 100644 index 0000000000..437f063d8f --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt2/bfdd.conf @@ -0,0 +1,7 @@ +! 
+debug bfd network +debug bfd peer +debug bfd zebra +! +bfd +! diff --git a/tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf new file mode 100644 index 0000000000..2f35099564 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt2/ospf6d.conf @@ -0,0 +1,19 @@ +log file ospf6d.log +! +hostname rt2 +! +password 1 +! +interface eth-rt1 + ipv6 ospf6 network broadcast + ipv6 ospf6 bfd +! +interface eth-rt5 + ipv6 ospf6 network broadcast +! +router ospf6 + ospf6 router-id 2.2.2.2 + interface eth-rt1 area 0.0.0.0 + interface eth-rt5 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf new file mode 100644 index 0000000000..a05d8b58c8 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt2/ospfd.conf @@ -0,0 +1,24 @@ +log file ospfd.log +! +hostname rt2 +! +password 1 +! +debug ospf event +debug ospf zebra +! +interface lo + ip ospf area 0.0.0.0 +! +interface eth-rt1 + ip ospf area 0.0.0.0 + ip ospf bfd +! +interface eth-rt5 + ip ospf area 0.0.0.0 +! +router ospf + ospf router-id 2.2.2.2 + passive interface lo + router-info area 0.0.0.0 +! diff --git a/tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref b/tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref new file mode 100644 index 0000000000..d6df1ebfb2 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt2/step2/show_bfd_peers.ref @@ -0,0 +1,14 @@ +[ + { + "interface": "eth-rt1", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt1", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + } +] diff --git a/tests/topotests/bfd-ospf-topo1/rt2/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt2/zebra.conf new file mode 100644 index 0000000000..5fc7fc5b28 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt2/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt2 +! +debug zebra kernel +debug zebra packet +! +interface lo + ip address 2.2.2.2/32 + ipv6 address ::ffff:0202:0202/128 +! +interface eth-rt1 + ip address 10.0.1.2/24 +! +interface eth-rt5 + ip address 10.0.3.1/24 +! +ip forwarding +ipv6 forwarding +! +line vty +! diff --git a/tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf new file mode 100644 index 0000000000..437f063d8f --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt3/bfdd.conf @@ -0,0 +1,7 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! +bfd +! diff --git a/tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf new file mode 100644 index 0000000000..3e8777019e --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt3/ospf6d.conf @@ -0,0 +1,19 @@ +log file ospf6d.log +! +hostname rt3 +! +password 1 +! +interface eth-rt1 + ipv6 ospf6 network broadcast + ipv6 ospf6 bfd +! +interface eth-rt4 + ipv6 ospf6 network broadcast +! +router ospf6 + ospf6 router-id 3.3.3.3 + interface eth-rt1 area 0.0.0.0 + interface eth-rt4 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf new file mode 100644 index 0000000000..1196e6d189 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt3/ospfd.conf @@ -0,0 +1,24 @@ +log file ospfd.log +! +hostname rt3 +! +password 1 +! +debug ospf event +debug ospf zebra +! +interface lo + ip ospf area 0.0.0.0 +! +interface eth-rt1 + ip ospf area 0.0.0.0 + ip ospf bfd +! 
+interface eth-rt4 + ip ospf area 0.0.0.0 +! +router ospf + ospf router-id 3.3.3.3 + passive interface lo + router-info area 0.0.0.0 +! diff --git a/tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref b/tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref new file mode 100644 index 0000000000..d6df1ebfb2 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt3/step2/show_bfd_peers.ref @@ -0,0 +1,14 @@ +[ + { + "interface": "eth-rt1", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + }, + { + "interface": "eth-rt1", + "status": "up", + "diagnostic": "ok", + "remote-diagnostic": "ok" + } +] diff --git a/tests/topotests/bfd-ospf-topo1/rt3/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt3/zebra.conf new file mode 100644 index 0000000000..d368de9bbe --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt3/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt3 +! +debug zebra kernel +debug zebra packet +! +interface lo + ip address 3.3.3.3/32 + ipv6 address ::ffff:0303:0303/128 +! +interface eth-rt1 + ip address 10.0.2.2/24 +! +interface eth-rt4 + ip address 10.0.4.1/24 +! +ip forwarding +ipv6 forwarding +! +line vty +! diff --git a/tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf new file mode 100644 index 0000000000..f35e772790 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt4/bfdd.conf @@ -0,0 +1,5 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! diff --git a/tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf new file mode 100644 index 0000000000..bccd1e75bd --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt4/ospf6d.conf @@ -0,0 +1,18 @@ +log file ospf6d.log +! +hostname rt4 +! +password 1 +! +interface eth-rt3 + ipv6 ospf6 network broadcast +! +interface eth-rt5 + ipv6 ospf6 network broadcast +! +router ospf6 + ospf6 router-id 4.4.4.4 + interface eth-rt3 area 0.0.0.0 + interface eth-rt5 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf new file mode 100644 index 0000000000..3a2568b4ab --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt4/ospfd.conf @@ -0,0 +1,23 @@ +log file ospfd.log +! +hostname rt4 +! +password 1 +! +debug ospf event +debug ospf zebra +! +interface lo + ip ospf area 0.0.0.0 +! +interface eth-rt3 + ip ospf area 0.0.0.0 +! +interface eth-rt5 + ip ospf area 0.0.0.0 +! +router ospf + ospf router-id 4.4.4.4 + passive interface lo + router-info area 0.0.0.0 +! diff --git a/tests/topotests/bfd-ospf-topo1/rt4/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt4/zebra.conf new file mode 100644 index 0000000000..7b053bac35 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt4/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt4 +! +debug zebra kernel +debug zebra packet +! +interface lo + ip address 4.4.4.4/32 + ipv6 address ::ffff:0404:0404/128 +! +interface eth-rt3 + ip address 10.0.4.2/24 +! +interface eth-rt5 + ip address 10.0.5.1/24 +! +ip forwarding +ipv6 forwarding +! +line vty +! diff --git a/tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf b/tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf new file mode 100644 index 0000000000..f35e772790 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt5/bfdd.conf @@ -0,0 +1,5 @@ +! +debug bfd network +debug bfd peer +debug bfd zebra +! 
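Only rt1, rt2 and rt3 enable "ip ospf bfd" / "ipv6 ospf6 bfd" on their shared links, which is why the step-2 check in the test script below inspects just those three routers. The expected array sizes in the show_bfd_peers.ref files follow directly from that: rt1 tracks rt2 and rt3 over both ospfd (IPv4) and ospf6d (IPv6), so it reports four sessions, while rt2 and rt3 each track only rt1 and report two. The helper below is a hedged sketch of that head count (vtysh_cmd(..., isjson=True) is the helper used elsewhere in this suite; the function name and the hard-coded counts are illustrative only):

# Illustrative sketch: count established BFD sessions on one router.
from lib.topogen import get_topogen

def bfd_sessions_up(router):
    # "show bfd peers json" returns a list of peer objects carrying a
    # "status" field, as seen in the .ref files above.
    peers = router.vtysh_cmd("show bfd peers json", isjson=True)
    return sum(1 for peer in peers if peer.get("status") == "up")

# Expected steady state for this topology (assumed, matching the .ref files):
#   bfd_sessions_up(get_topogen().gears["rt1"]) == 4
#   bfd_sessions_up(get_topogen().gears["rt2"]) == 2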
diff --git a/tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf b/tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf new file mode 100644 index 0000000000..766862276c --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt5/ospf6d.conf @@ -0,0 +1,18 @@ +log file ospf6d.log +! +hostname rt5 +! +password 1 +! +interface eth-rt2 + ipv6 ospf6 network broadcast +! +interface eth-rt4 + ipv6 ospf6 network broadcast +! +router ospf6 + ospf6 router-id 5.5.5.5 + interface eth-rt2 area 0.0.0.0 + interface eth-rt4 area 0.0.0.0 + redistribute connected +! diff --git a/tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf b/tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf new file mode 100644 index 0000000000..a35de5f45f --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt5/ospfd.conf @@ -0,0 +1,23 @@ +log file ospfd.log +! +hostname rt5 +! +password 1 +! +debug ospf event +debug ospf zebra +! +interface lo + ip ospf area 0.0.0.0 +! +interface eth-rt2 + ip ospf area 0.0.0.0 +! +interface eth-rt4 + ip ospf area 0.0.0.0 +! +router ospf + ospf router-id 5.5.5.5 + passive interface lo + router-info area 0.0.0.0 +! diff --git a/tests/topotests/bfd-ospf-topo1/rt5/zebra.conf b/tests/topotests/bfd-ospf-topo1/rt5/zebra.conf new file mode 100644 index 0000000000..0b7c9e02f3 --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/rt5/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt5 +! +debug zebra kernel +debug zebra packet +! +interface lo + ip address 5.5.5.5/32 + ipv6 address ::ffff:0505:0505/128 +! +interface eth-rt2 + ip address 10.0.3.2/24 +! +interface eth-rt4 + ip address 10.0.5.2/24 +! +ip forwarding +ipv6 forwarding +! +line vty +! diff --git a/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py b/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py new file mode 100755 index 0000000000..1cec62789b --- /dev/null +++ b/tests/topotests/bfd-ospf-topo1/test_bfd_ospf_topo1.py @@ -0,0 +1,307 @@ +#!/usr/bin/env python + +# +# test_bfd_ospf_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +test_bfd_ospf_topo1.py: + + +---------+ + | | + eth-rt2 (.1) | RT1 | eth-rt3 (.1) + +----------+ 1.1.1.1 +----------+ + | | | | + | +---------+ | + | | + | 10.0.2.0/24 | + | | + | eth-rt1 | (.2) + | 10.0.1.0/24 +----+----+ + | | | + | | RT3 | + | | 3.3.3.3 | + | | | + (.2) | eth-rt1 +----+----+ + +----+----+ eth-rt4 | (.1) + | | | + | RT2 | | + | 2.2.2.2 | 10.0.4.0/24 | + | | | + +----+----+ | + (.1) | eth-rt5 eth-rt3 | (.2) + | +----+----+ + | | | + | | RT4 | + | | 4.4.4.4 | + | | | + | +----+----+ + | 10.0.3.0/24 eth-rt5 | (.1) + | | + | | + | 10.0.5.0/24 | + | | + | +---------+ | + | | | | + +----------+ RT5 +----------+ + eth-rt2 (.2) | 5.5.5.5 | eth-rt4 (.2) + | | + +---------+ + +""" + +import os +import sys +import pytest +import json +import re +from time import sleep +from time import time +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1") + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1") + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt2") + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3") + + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. 
+ tgen.stop_topology() + + +def print_cmd_result(rname, command): + print(get_topogen().gears[rname].vtysh_cmd(command, isjson=False)) + + +def router_compare_json_output(rname, command, reference, count=120, wait=0.5): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 60 seconds. + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=count, wait=wait) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +## TEST STEPS + + +def test_rib_ospf_step1(): + logger.info("Test (step 1): verify RIB for OSPF") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_compare_json_output( + "rt1", "show ip route ospf json", "step1/show_ip_route.ref" + ) + router_compare_json_output( + "rt1", "show ipv6 route ospf json", "step1/show_ipv6_route.ref" + ) + + +def test_bfd_ospf_sessions_step2(): + logger.info("Test (step 2): verify BFD peers for OSPF") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # BFD is just used on three routers + for rt in ["rt1", "rt2", "rt3"]: + router_compare_json_output( + rt, "show bfd peers json", "step2/show_bfd_peers.ref" + ) + + +def test_bfd_ospf_interface_failure_rt2_step3(): + logger.info("Test (step 3): Check failover handling with RT2 down") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Let's kill the interface on rt2 and see what happens with the RIB and BFD on rt1 + tgen.gears["rt2"].link_enable("eth-rt1", enabled=False) + + # By default BFD provides a recovery time of 900ms plus jitter, so let's wait + # initial 2 seconds to let the CI not suffer. + # TODO: add check for array size + sleep(2) + router_compare_json_output( + "rt1", "show ip route ospf json", "step3/show_ip_route_rt2_down.ref", 1, 0 + ) + router_compare_json_output( + "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_rt2_down.ref", 1, 0 + ) + router_compare_json_output( + "rt1", "show bfd peers json", "step3/show_bfd_peers_rt2_down.ref", 1, 0 + ) + + # Check recovery, this can take some time + tgen.gears["rt2"].link_enable("eth-rt1", enabled=True) + + router_compare_json_output( + "rt1", "show ip route ospf json", "step3/show_ip_route_healthy.ref" + ) + router_compare_json_output( + "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_healthy.ref" + ) + router_compare_json_output( + "rt1", "show bfd peers json", "step3/show_bfd_peers_healthy.ref" + ) + + +def test_bfd_ospf_interface_failure_rt3_step3(): + logger.info("Test (step 3): Check failover handling with RT3 down") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Let's kill the interface on rt3 and see what happens with the RIB and BFD on rt1 + tgen.gears["rt3"].link_enable("eth-rt1", enabled=False) + + # By default BFD provides a recovery time of 900ms plus jitter, so let's wait + # initial 2 seconds to let the CI not suffer. 
+ # TODO: add check for array size + sleep(2) + router_compare_json_output( + "rt1", "show ip route ospf json", "step3/show_ip_route_rt3_down.ref", 1, 0 + ) + router_compare_json_output( + "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_rt3_down.ref", 1, 0 + ) + router_compare_json_output( + "rt1", "show bfd peers json", "step3/show_bfd_peers_rt3_down.ref", 1, 0 + ) + + # Check recovery, this can take some time + tgen.gears["rt3"].link_enable("eth-rt1", enabled=True) + + router_compare_json_output( + "rt1", "show ip route ospf json", "step3/show_ip_route_healthy.ref" + ) + router_compare_json_output( + "rt1", "show ipv6 route ospf json", "step3/show_ipv6_route_healthy.ref" + ) + router_compare_json_output( + "rt1", "show bfd peers json", "step3/show_bfd_peers_healthy.ref" + ) + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py b/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py index bd3b876eeb..6283f03ddf 100644 --- a/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py +++ b/tests/topotests/bfd-profiles-topo1/test_bfd_profiles_topo1.py @@ -77,7 +77,8 @@ class BFDProfTopo(Topo): switch.add_link(tgen.gears["r1"]) switch.add_link(tgen.gears["r6"]) - +@pytest.mark.bfd +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(BFDProfTopo, mod.__name__) diff --git a/tests/topotests/bfd-topo1/test_bfd_topo1.py b/tests/topotests/bfd-topo1/test_bfd_topo1.py index 6e589d55eb..4c13fdcfc5 100644 --- a/tests/topotests/bfd-topo1/test_bfd_topo1.py +++ b/tests/topotests/bfd-topo1/test_bfd_topo1.py @@ -69,7 +69,7 @@ class BFDTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r4"]) - +@pytest.mark.bfd def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(BFDTopo, mod.__name__) diff --git a/tests/topotests/bfd-topo2/test_bfd_topo2.py b/tests/topotests/bfd-topo2/test_bfd_topo2.py index feb4576bd3..5181a40f47 100644 --- a/tests/topotests/bfd-topo2/test_bfd_topo2.py +++ b/tests/topotests/bfd-topo2/test_bfd_topo2.py @@ -70,7 +70,7 @@ class BFDTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r4"]) - +@pytest.mark.bfd def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(BFDTopo, mod.__name__) diff --git a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py index 5fed135f8d..956b526583 100644 --- a/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py +++ b/tests/topotests/bfd-vrf-topo1/test_bfd_vrf_topo1.py @@ -70,7 +70,7 @@ class BFDTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r4"]) - +@pytest.mark.bfd def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(BFDTopo, mod.__name__) diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py index b3b7256ac4..b701a0d61e 100644 --- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py @@ -282,10 +282,26 @@ def test_BGP_config_with_invalid_ASN_p2(request): # Api call to modify AS 
number input_dict = { - "r1": {"bgp": {"local_as": 0,}}, - "r2": {"bgp": {"local_as": 0,}}, - "r3": {"bgp": {"local_as": 0,}}, - "r4": {"bgp": {"local_as": 64000,}}, + "r1": { + "bgp": { + "local_as": 0, + } + }, + "r2": { + "bgp": { + "local_as": 0, + } + }, + "r3": { + "bgp": { + "local_as": 0, + } + }, + "r4": { + "bgp": { + "local_as": 64000, + } + }, } result = modify_as_number(tgen, topo, input_dict) try: @@ -819,7 +835,11 @@ def test_bgp_with_loopback_interface(request): # Adding ['source_link'] = 'lo' key:value pair topo["routers"][routerN]["bgp"]["address_family"]["ipv4"]["unicast"][ "neighbor" - ][bgp_neighbor]["dest_link"] = {"lo": {"source_link": "lo",}} + ][bgp_neighbor]["dest_link"] = { + "lo": { + "source_link": "lo", + } + } # Creating configuration from JSON build_config_from_json(tgen, topo) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py index 54a3c699f3..353df0684b 100644 --- a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py @@ -273,8 +273,20 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): "r3": { "bgp": { "address_family": { - "ipv4": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}}, - "ipv6": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}}, + "ipv4": { + "unicast": { + "maximum_paths": { + "ebgp": ecmp_num, + } + } + }, + "ipv6": { + "unicast": { + "maximum_paths": { + "ebgp": ecmp_num, + } + } + }, } } } @@ -303,7 +315,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): input_dict_1, next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)], protocol=protocol, - count_only=True + count_only=True, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -312,9 +324,9 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): write_test_footer(tc_name) - +@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"]) @pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) -def test_ecmp_after_clear_bgp(request, test_type): +def test_ecmp_after_clear_bgp(request, ecmp_num, test_type): """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors""" tc_name = request.node.name @@ -337,7 +349,7 @@ def test_ecmp_after_clear_bgp(request, test_type): addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], + next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)], protocol=protocol, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -360,7 +372,7 @@ def test_ecmp_after_clear_bgp(request, test_type): addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], + next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)], protocol=protocol, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -371,8 +383,8 @@ def test_ecmp_after_clear_bgp(request, test_type): def test_ecmp_remove_redistribute_static(request): - """ Verify routes are cleared from BGP and RIB table of DUT when - redistribute static configuration is removed.""" + """Verify routes are cleared from BGP and RIB table of DUT when + redistribute static configuration is removed.""" tc_name = request.node.name write_test_header(tc_name) @@ -481,8 +493,8 @@ def test_ecmp_remove_redistribute_static(request): @pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) def test_ecmp_shut_bgp_neighbor(request, test_type): - """ Shut BGP neigbors one by one and verify BGP and routing table updated - accordingly in DUT """ + """Shut BGP neigbors one by one and verify BGP and 
routing table updated + accordingly in DUT""" tc_name = request.node.name write_test_header(tc_name) diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py index 73724ac069..2f73bdb1b8 100644 --- a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py +++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py @@ -274,8 +274,20 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): "r3": { "bgp": { "address_family": { - "ipv4": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}}, - "ipv6": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}}, + "ipv4": { + "unicast": { + "maximum_paths": { + "ibgp": ecmp_num, + } + } + }, + "ipv6": { + "unicast": { + "maximum_paths": { + "ibgp": ecmp_num, + } + } + }, } } } @@ -304,7 +316,7 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): input_dict_1, next_hop=NEXT_HOPS[addr_type][: int(ecmp_num)], protocol=protocol, - count_only=True + count_only=True, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -313,9 +325,9 @@ def test_modify_ecmp_max_paths(request, ecmp_num, test_type): write_test_footer(tc_name) - +@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"]) @pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) -def test_ecmp_after_clear_bgp(request, test_type): +def test_ecmp_after_clear_bgp(request, ecmp_num, test_type): """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors""" tc_name = request.node.name @@ -338,7 +350,7 @@ def test_ecmp_after_clear_bgp(request, test_type): addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], + next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)], protocol=protocol, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -361,7 +373,7 @@ def test_ecmp_after_clear_bgp(request, test_type): addr_type, dut, input_dict_1, - next_hop=NEXT_HOPS[addr_type], + next_hop=NEXT_HOPS[addr_type][:int(ecmp_num)], protocol=protocol, ) assert result is True, "Testcase {} : Failed \n Error: {}".format( @@ -372,8 +384,8 @@ def test_ecmp_after_clear_bgp(request, test_type): def test_ecmp_remove_redistribute_static(request): - """ Verify routes are cleared from BGP and RIB table of DUT when - redistribute static configuration is removed.""" + """Verify routes are cleared from BGP and RIB table of DUT when + redistribute static configuration is removed.""" tc_name = request.node.name write_test_header(tc_name) @@ -482,8 +494,8 @@ def test_ecmp_remove_redistribute_static(request): @pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"]) def test_ecmp_shut_bgp_neighbor(request, test_type): - """ Shut BGP neigbors one by one and verify BGP and routing table updated - accordingly in DUT """ + """Shut BGP neigbors one by one and verify BGP and routing table updated + accordingly in DUT""" tc_name = request.node.name write_test_header(tc_name) diff --git a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py index 4c56d1a02d..61be947a71 100644 --- a/tests/topotests/bgp-evpn-mh/test_evpn_mh.py +++ b/tests/topotests/bgp-evpn-mh/test_evpn_mh.py @@ -35,6 +35,8 @@ import json import platform from functools import partial +pytestmark = pytest.mark.pimd + # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) @@ -658,10 +660,11 @@ def test_evpn_mac(): assertmsg = '"{}" remote MAC content incorrect'.format(tor.name) assert result is None, assertmsg + def check_df_role(dut, esi, role): - ''' + """ Return error string if the df role on the dut is different - ''' + """ es_json = dut.vtysh_cmd("show evpn es %s json" % esi) es = json.loads(es_json) @@ -676,12 +679,13 @@ def check_df_role(dut, esi, role): return None + def test_evpn_df(): - ''' + """ 1. Check the DF role on all the PEs on rack-1. 2. Increase the DF preference on the non-DF and check if it becomes the DF winner. - ''' + """ tgen = get_topogen() @@ -720,10 +724,11 @@ def test_evpn_df(): # tgen.mininet_cli() + def check_protodown_rc(dut, protodown_rc): - ''' + """ check if specified protodown reason code is set - ''' + """ out = dut.vtysh_cmd("show evpn json") @@ -739,13 +744,14 @@ def check_protodown_rc(dut, protodown_rc): return None + def test_evpn_uplink_tracking(): - ''' + """ 1. Wait for access ports to come out of startup-delay 2. disable uplinks and check if access ports have been protodowned 3. enable uplinks and check if access ports have been moved out of protodown - ''' + """ tgen = get_topogen() @@ -778,6 +784,7 @@ def test_evpn_uplink_tracking(): assertmsg = '"{}" protodown rc incorrect'.format(dut_name) assert result is None, assertmsg + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf index c7a76a98ed..991a1e7e56 100644 --- a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/bgpd.conf @@ -8,3 +8,4 @@ router bgp 65000 address-family l2vpn evpn neighbor 10.30.30.30 activate advertise-all-vni + advertise-svi-ip diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json index 2937504244..e500a1d85c 100644 --- a/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE1/evpn.vni.json @@ -6,8 +6,8 @@ "vtepIp":"10.10.10.10", "mcastGroup":"0.0.0.0", "advertiseGatewayMacip":"No", - "numMacs":5, - "numArpNd":3, + "numMacs":6, + "numArpNd":6, "numRemoteVteps":[ "10.30.30.30" ] diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf index 0a24158bb8..52f8687bc1 100644 --- a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/bgpd.conf @@ -9,3 +9,4 @@ router bgp 65000 address-family l2vpn evpn neighbor 10.10.10.10 activate advertise-all-vni + advertise-svi-ip diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json index 0853147a00..0a56a235bd 100644 --- a/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json +++ b/tests/topotests/bgp-evpn-vxlan_topo1/PE2/evpn.vni.json @@ -6,8 +6,8 @@ "vtepIp":"10.30.30.30", "mcastGroup":"0.0.0.0", "advertiseGatewayMacip":"No", - "numMacs":5, - "numArpNd":3, + "numMacs":6, + "numArpNd":6, "numRemoteVteps":[ "10.10.10.10" ] diff --git a/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py b/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py index 5098808d55..9a38158b2b 100755 --- a/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py +++ b/tests/topotests/bgp-evpn-vxlan_topo1/test_bgp_evpn_vxlan.py @@ -310,8 
+310,10 @@ def ip_learn_test(tgen, host, local, remote, ip_addr): assertmsg = "local learned mac wrong type: {} ".format(mac_type) assert mac_type == "local", assertmsg - assertmsg = "learned address mismatch with configured address host: {} learned: {}".format( - ip_addr, learned_ip + assertmsg = ( + "learned address mismatch with configured address host: {} learned: {}".format( + ip_addr, learned_ip + ) ) assert ip_addr == learned_ip, assertmsg diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py index 607b036c6a..a9541a55c5 100644 --- a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py +++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py @@ -238,9 +238,10 @@ def test_next_hop_attribute(request): result = verify_rib( tgen, addr_type, dut, input_dict, protocol=protocol, expected=False ) - assert result is not True, ( - "Testcase {} : Failed \n Error: " - "{} routes are not present in RIB".format(addr_type, tc_name) + assert ( + result is not True + ), "Testcase {} : Failed \n Error: " "{} routes are not present in RIB".format( + addr_type, tc_name ) # Configure next-hop-self to bgp neighbor diff --git a/tests/topotests/bgp-route-map/test_route_map_topo1.py b/tests/topotests/bgp-route-map/test_route_map_topo1.py index 1aa951edaa..0158e24d31 100644 --- a/tests/topotests/bgp-route-map/test_route_map_topo1.py +++ b/tests/topotests/bgp-route-map/test_route_map_topo1.py @@ -807,7 +807,9 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): "prefix_lists": "pf_list_2_{}".format(addr_type) } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, }, { "action": "permit", @@ -906,7 +908,17 @@ def test_route_map_multiple_seq_different_match_set_clause_p0(request): dut = "r3" protocol = "bgp" input_dict = { - "r3": {"route_maps": {"rmap_match_pf_list1": [{"set": {"metric": 50,}}],}} + "r3": { + "route_maps": { + "rmap_match_pf_list1": [ + { + "set": { + "metric": 50, + } + } + ], + } + } } static_routes = [NETWORK[adt][0]] @@ -1093,7 +1105,14 @@ def test_route_map_set_only_no_match_p0(request): input_dict_4 = { "r3": { "route_maps": { - "rmap_match_pf_1": [{"action": "permit", "set": {"metric": 50,}}] + "rmap_match_pf_1": [ + { + "action": "permit", + "set": { + "metric": 50, + }, + } + ] } } } @@ -1210,7 +1229,13 @@ def test_route_map_match_only_no_set_p0(request): "r1": { "route_maps": { "rmap_match_pf_1_{}".format(addr_type): [ - {"action": "permit", "set": {"metric": 50, "locPrf": 150,}} + { + "action": "permit", + "set": { + "metric": 50, + "locPrf": 150, + }, + } ] } } diff --git a/tests/topotests/bgp-route-map/test_route_map_topo2.py b/tests/topotests/bgp-route-map/test_route_map_topo2.py index 3056aa29f3..958eceba62 100644 --- a/tests/topotests/bgp-route-map/test_route_map_topo2.py +++ b/tests/topotests/bgp-route-map/test_route_map_topo2.py @@ -268,12 +268,20 @@ def test_rmap_match_prefix_list_permit_in_and_outbound_prefixes_p0(): "prefix_lists": { "ipv4": { "pf_list_1_ipv4": [ - {"seqid": 10, "network": "any", "action": "permit",} + { + "seqid": 10, + "network": "any", + "action": "permit", + } ] }, "ipv6": { "pf_list_1_ipv6": [ - {"seqid": 10, "network": "any", "action": "permit",} + { + "seqid": 10, + "network": "any", + "action": "permit", + } ] }, } @@ -472,7 +480,11 @@ def test_modify_set_match_clauses_in_rmap_p0(): "prefix_lists": { "ipv4": { "pf_list_1_ipv4": [ - {"seqid": 10, "network": "any", "action": 
"permit",} + { + "seqid": 10, + "network": "any", + "action": "permit", + } ], "pf_list_2_ipv4": [ {"seqid": 10, "network": "any", "action": "permit"} @@ -480,7 +492,11 @@ def test_modify_set_match_clauses_in_rmap_p0(): }, "ipv6": { "pf_list_1_ipv6": [ - {"seqid": 10, "network": "any", "action": "permit",} + { + "seqid": 10, + "network": "any", + "action": "permit", + } ], "pf_list_2_ipv6": [ {"seqid": 10, "network": "any", "action": "permit"} @@ -506,7 +522,9 @@ def test_modify_set_match_clauses_in_rmap_p0(): "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, } ], "rmap_match_pf_2_{}".format(addr_type): [ @@ -666,7 +684,9 @@ def test_modify_set_match_clauses_in_rmap_p0(): "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": {"locPrf": 1000,}, + "set": { + "locPrf": 1000, + }, } ], "rmap_match_pf_2_{}".format(addr_type): [ @@ -816,12 +836,20 @@ def test_modify_prefix_list_referenced_by_rmap_p0(): "prefix_lists": { "ipv4": { "pf_list_1_ipv4": [ - {"seqid": 10, "network": "any", "action": "permit",} + { + "seqid": 10, + "network": "any", + "action": "permit", + } ] }, "ipv6": { "pf_list_1_ipv6": [ - {"seqid": 100, "network": "any", "action": "permit",} + { + "seqid": 100, + "network": "any", + "action": "permit", + } ] }, } @@ -1090,7 +1118,9 @@ def test_remove_prefix_list_referenced_by_rmap_p0(): "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, } ], "rmap_match_pf_2_{}".format(addr_type): [ @@ -1894,7 +1924,9 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, } ] } @@ -1921,7 +1953,9 @@ def test_multiple_match_statement_in_route_map_logical_ANDed_p1(): } } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, } ] } @@ -2048,7 +2082,9 @@ def test_add_remove_rmap_to_specific_neighbor_p0(): "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, } ] } @@ -3505,7 +3541,9 @@ def test_create_rmap_match_prefix_list_to_deny_in_and_outbound_prefixes_p0(): "prefix_lists": "pf_list_1_{}".format(addr_type) } }, - "set": {"locPrf": 150,}, + "set": { + "locPrf": 150, + }, } ], "rmap_match_pf_2_{}".format(addr_type): [ diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf new file mode 100644 index 0000000000..b598666dfb --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/bgpd.conf @@ -0,0 +1,12 @@ +log file /tmp/bgpd.log debugging +! +router bgp 65001 + timers bgp 3 9 + bgp router-id 192.168.100.10 + neighbor 192.168.100.20 remote-as 65001 + neighbor 192.168.100.20 update-source 192.168.100.10 + ! + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf new file mode 100644 index 0000000000..36218d3538 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.5.5.5:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf new file mode 100644 index 0000000000..8ad2ddc48c --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce1/zebra.conf @@ -0,0 +1,19 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface ce1-eth0 + ip address 192.168.100.10/24 + ipv6 address 2000:1:1:100::10/64 +! +! +interface lo + ip address 10.5.5.5/32 + ipv6 address 2000:5:5:5::5/128 +! +! +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf new file mode 100644 index 0000000000..e388ccba8a --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/bgpd.conf @@ -0,0 +1,12 @@ +log file /tmp/bgpd.log debugging +! +router bgp 65001 + bgp router-id 192.168.200.10 + timers bgp 3 9 + neighbor 192.168.200.20 remote-as 65001 + neighbor 192.168.200.20 update-source 192.168.200.10 + ! + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf new file mode 100644 index 0000000000..714585cb9b --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.6.6.6:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf new file mode 100644 index 0000000000..fa2e968e55 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce2/zebra.conf @@ -0,0 +1,19 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface ce2-eth0 + ip address 192.168.200.10/24 + ipv6 address 2000:1:1:200::10/64 +! +! +interface lo + ip address 10.6.6.6/32 + ipv6 address 2000:6:6:6::6/128 +! +! +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf new file mode 100644 index 0000000000..e388ccba8a --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/bgpd.conf @@ -0,0 +1,12 @@ +log file /tmp/bgpd.log debugging +! +router bgp 65001 + bgp router-id 192.168.200.10 + timers bgp 3 9 + neighbor 192.168.200.20 remote-as 65001 + neighbor 192.168.200.20 update-source 192.168.200.10 + ! + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
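The snmpd.conf files added for this topology (the CE ones above, r1's further down) bind the Net-SNMP agent to each router's loopback with the "public" community and enable "master agentx"; that AgentX socket is what the "agentx" line in r1/bgpd.conf (below) attaches to so bgpd can serve the MPLS L3VPN MIB. A hedged sketch of poking r1's agent from inside the topotest follows; snmpwalk and its options are standard Net-SNMP and the address/community match r1/snmpd.conf below, but the choice of MPLS-L3VPN-STD-MIB::mplsL3VpnVrfName as the object to walk, and the use of gears[...].run() as the shell helper, are assumptions for illustration:

# Illustrative sketch only: walk r1's SNMP agent (address and community taken
# from r1/snmpd.conf further down in this diff; the OID queried is assumed).
from lib.topogen import get_topogen

def snmp_walk_r1(oid="MPLS-L3VPN-STD-MIB::mplsL3VpnVrfName"):
    r1 = get_topogen().gears["r1"]
    return r1.run("snmpwalk -v 2c -c public udp:10.1.1.1:161 {}".format(oid))

# e.g. the VRF name table should mention both VRFs configured on r1:
#   out = snmp_walk_r1()
#   assert "VRF-a" in out and "VRF-b" in out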
diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf new file mode 100644 index 0000000000..36218d3538 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.5.5.5:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf new file mode 100644 index 0000000000..ea91e21bad --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce3/zebra.conf @@ -0,0 +1,19 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface ce3-eth0 + ip address 192.168.200.10/24 + ipv6 address 2000:1:1:200::10/64 +! +! +interface lo + ip address 10.7.7.7/32 + ipv6 address 2000:7:7:7::7/128 +! +! +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf new file mode 100644 index 0000000000..e388ccba8a --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/bgpd.conf @@ -0,0 +1,12 @@ +log file /tmp/bgpd.log debugging +! +router bgp 65001 + bgp router-id 192.168.200.10 + timers bgp 3 9 + neighbor 192.168.200.20 remote-as 65001 + neighbor 192.168.200.20 update-source 192.168.200.10 + ! + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf new file mode 100644 index 0000000000..36218d3538 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.5.5.5:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf new file mode 100644 index 0000000000..0866fa9759 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/ce4/zebra.conf @@ -0,0 +1,19 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface ce4-eth0 + ip address 192.168.34.10/24 + ipv6 address 2000:1:1:300::10/64 +! +! +interface lo + ip address 10.8.8.8/32 + ipv6 address 2000:8:8:8::8/128 +! +! +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf new file mode 100644 index 0000000000..098e55d0ed --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/bgpd.conf @@ -0,0 +1,48 @@ +log file /tmp/bgpd.log debugging +! +router bgp 65000 + bgp router-id 10.1.1.1 + neighbor 10.4.4.4 remote-as 65000 + neighbor 10.4.4.4 update-source 10.1.1.1 + neighbor 10.4.4.4 timers connect 10 + ! + address-family ipv4 vpn + neighbor 10.4.4.4 activate + exit-address-family + +! +router bgp 65001 vrf VRF-a + bgp router-id 192.168.100.20 + timers bgp 3 9 + neighbor 192.168.100.10 remote-as 65001 + neighbor 192.168.100.10 update-source 192.168.100.20 + neighbor 192.168.200.10 remote-as 65001 + neighbor 192.168.200.10 update-source 192.168.200.20 + ! 
+ address-family ipv4 unicast + redistribute connected + redistribute isis + label vpn export 1111 + rd vpn export 10:1 + rt vpn both 1:1 + export vpn + import vpn + exit-address-family + +router bgp 65002 vrf VRF-b + bgp router-id 192.168.10.20 + timers bgp 3 9 + neighbor 192.168.10.10 remote-as 65003 + neighbor 192.168.10.10 update-source 192.168.10.20 +! + address-family ipv4 unicast + redistribute connected + redistribute isis + label vpn export 6666 + rd vpn export 10:2 + rt vpn both 1:2 + export vpn + import vpn + exit-address-family + +agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf new file mode 100644 index 0000000000..b5ca993da3 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/isisd.conf @@ -0,0 +1,46 @@ +log stdout debugging +! +debug isis route-events +debug isis events +! +interface r1-eth0 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface r1-eth1 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface r1-eth2 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface lo + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + isis passive + no isis hello padding +! +router isis ISIS1 + net 01.1111.0000.0000.0001.00 + is-type level-1 + topology ipv6-unicast +! +line vty +! diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf new file mode 100644 index 0000000000..c903c1ad2e --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/snmpd.conf @@ -0,0 +1,17 @@ +agentAddress udp:10.1.1.1:161 + +com2sec public 10.1.1.1 public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx + +noRangeCheck yes
\ No newline at end of file diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf new file mode 100644 index 0000000000..7228ae6bd2 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r1/zebra.conf @@ -0,0 +1,33 @@ +log file zebra.log +! +interface r1-eth0 + ip address 192.168.12.12/24 + ipv6 address 2000:1:1:12::12/64 +! +interface r1-eth1 + ip address 192.168.13.13/24 + ipv6 address 2000:1:1:13::13/64 +! +interface r1-eth2 + ip address 192.168.14.14/24 + ipv6 address 2000:1:1:14::14/64 +! +interface r1-eth3 vrf VRF-a + ip address 192.168.100.20/24 + ipv6 address 2000:1:1:100::20/64 +! +interface r1-eth4 vrf VRF-a + ip address 192.168.200.20/24 + ipv6 address 2000:1:1:200::20/64 +! +interface r1-eth5 vrf VRF-b + ip address 192.168.300.20/24 + ipv6 address 2000:1:1:300::20/64 + +interface lo + ip address 10.1.1.1/32 + ipv6 address 2000:1:1:1::1/128 +! +! +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf new file mode 100644 index 0000000000..3dfa43831a --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r2/isisd.conf @@ -0,0 +1,37 @@ +log stdout debugging +! +debug isis route-events +debug isis events +! +interface r2-eth0 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface r2-eth1 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface lo + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + isis passive + no isis hello padding +! +router isis ISIS1 + net 01.1111.0000.0000.0002.00 + is-type level-1 + topology ipv6-unicast +! +line vty +! diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf new file mode 100644 index 0000000000..0cfebc7238 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r2/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.2.2.2:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf new file mode 100644 index 0000000000..9bc4331bae --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r2/zebra.conf @@ -0,0 +1,24 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface r2-eth0 + ip address 192.168.12.21/24 + ipv6 address 2000:1:1:12::21/64 +! +interface r2-eth1 + ip address 192.168.23.23/24 + ipv6 address 2000:1:1:23::23/64 +! +! +interface lo + ip address 10.2.2.2/32 + ipv6 address 2000:2:2:2::2/128 +! +! +! +line vty +! diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf new file mode 100644 index 0000000000..578ebafad6 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r3/isisd.conf @@ -0,0 +1,45 @@ +log stdout debugging +! +debug isis route-events +debug isis events +! +interface r3-eth0 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! 
+interface r3-eth1 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface r3-eth2 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface lo + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + isis passive + no isis hello padding +! +router isis ISIS1 + net 01.1111.0000.0000.0003.00 + is-type level-1 + topology ipv6-unicast +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf new file mode 100644 index 0000000000..b9eb00ea52 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r3/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.3.3.3:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf new file mode 100644 index 0000000000..4d2007e787 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r3/zebra.conf @@ -0,0 +1,27 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface r3-eth0 + ip address 192.168.13.31/24 + ipv6 address 2000:1:1:13::31/64 +! +interface r3-eth1 + ip address 192.168.23.32/24 + ipv6 address 2000:1:1:23::32/64 +! +interface r3-eth2 + ip address 192.168.34.34/24 + ipv6 address 2000:1:1:34::34/64 +! +! +interface lo + ip address 10.3.3.3/32 + ipv6 address 2000:3:3:3::3/128 +! +! +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf new file mode 100644 index 0000000000..2a834c799e --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/bgpd.conf @@ -0,0 +1,43 @@ +log file /tmp/bgpd.log debugging +! +router bgp 65000 + bgp router-id 10.4.4.4 + timers bgp 3 9 + neighbor 10.1.1.1 remote-as 65000 + neighbor 10.1.1.1 update-source 10.4.4.4 + neighbor 10.1.1.1 timers connect 10 + ! + address-family ipv4 vpn + neighbor 10.1.1.1 activate + exit-address-family +! + + address-family ipv6 vpn + neighbor 10.1.1.1 activate + exit-address-family +! +router bgp 65001 vrf VRF-a + bgp router-id 192.168.200.20 + timers bgp 3 9 + neighbor 192.168.200.10 remote-as 65001 + neighbor 192.168.200.10 update-source 192.168.200.20 + ! + address-family ipv4 unicast + redistribute connected + redistribute isis + label vpn export 1111 + rd vpn export 10:3 + rt vpn both 1:1 + export vpn + import vpn + exit-address-family + + address-family ipv6 unicast + redistribute connected + redistribute isis + label vpn export 1111 + rd vpn export 10:3 + rt vpn both 1:2 + export vpn + import vpn + exit-address-family diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf new file mode 100644 index 0000000000..3e9e9af45f --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/isisd.conf @@ -0,0 +1,36 @@ +log stdout debugging +! +debug isis route-events +debug isis events +! +interface r4-eth0 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! 
+interface r4-eth1 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface lo + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + isis passive + no isis hello padding +! +router isis ISIS1 + net 01.1111.0000.0000.0004.00 + is-type level-1 + topology ipv6-unicast +! +line vty diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf new file mode 100644 index 0000000000..ec35f9f9c9 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:10.4.4.4:161 + +com2sec public localhost public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf b/tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf new file mode 100644 index 0000000000..c48407c108 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/r4/zebra.conf @@ -0,0 +1,27 @@ +log file /tmp/zebra.log +log stdout +! +debug zebra events +debug zebra dplane +! +! +interface r4-eth0 + ip address 192.168.14.41/24 + ipv6 address 2000:1:1:14::41/64 +! +interface r4-eth1 + ip address 192.168.34.43/24 + ipv6 address 2000:1:1:34::43/64 +! +interface r4-eth2 vrf aaa + ip address 192.168.200.20/24 + ipv6 address 2000:1:1:200::20/64 +! +! +interface lo + ip address 10.4.4.4/32 + ipv6 address 2000:4:4:4::4/128 +! +! +line vty +! diff --git a/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py b/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py new file mode 100755 index 0000000000..5eb1738632 --- /dev/null +++ b/tests/topotests/bgp-snmp-mplsl3vpn/test_bgp_snmp_mplsvpn.py @@ -0,0 +1,738 @@ +#!/usr/bin/env python + +# +# test_bgp_snmp_mplsl3vpn.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_snmp_mplsl3vpn.py: Test mplsL3Vpn MIB [RFC4382]. +""" + +import os +import sys +import json +from functools import partial +from time import sleep +import pytest +import re + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.snmptest import SnmpTester + +# Required to instantiate the topology builder class. 
+from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # This function only purpose is to define allocation and relationship + # between routers, switches and hosts. + # + # + # Create routers + tgen.add_router("r1") + tgen.add_router("r2") + tgen.add_router("r3") + tgen.add_router("r4") + tgen.add_router("ce1") + tgen.add_router("ce2") + tgen.add_router("ce3") + tgen.add_router("ce4") + + # r1-r2 + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + # r1-r3 + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r3"]) + + # r1-r4 + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r4"]) + + # r1-ce1 + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["ce1"]) + + # r1-ce3 + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["ce3"]) + + # r1-ce4 + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["ce4"]) + + # r1-dangling + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["r1"]) + + # r2-r3 + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + # r3-r4 + switch = tgen.add_switch("s9") + switch.add_link(tgen.gears["r3"]) + switch.add_link(tgen.gears["r4"]) + + # r4-ce2 + switch = tgen.add_switch("s10") + switch.add_link(tgen.gears["r4"]) + switch.add_link(tgen.gears["ce2"]) + + +def setup_module(mod): + "Sets up the pytest environment" + + # skip tests is SNMP not installed + snmpd = os.system("which snmpd") + if snmpd: + error_msg = "SNMP not installed - skipping" + pytest.skip(error_msg) + + # This function initiates the topology build with Topogen... + tgen = Topogen(TemplateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. 
+ tgen.start_topology() + + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] + r3 = tgen.gears["r3"] + r4 = tgen.gears["r4"] + + # setup VRF-a in r1 + r1.run("ip link add VRF-a type vrf table 1001") + r1.run("ip link set up dev VRF-a") + r1.run("ip link add VRF-b type vrf table 1002") + r1.run("ip link set up dev VRF-b") + r4.run("ip link add VRF-a type vrf table 1001") + r4.run("ip link set up dev VRF-a") + + # enslave vrf interfaces + r1.run("ip link set r1-eth3 master VRF-a") + r1.run("ip link set r1-eth4 master VRF-a") + r1.run("ip link set r1-eth5 master VRF-b") + r4.run("ip link set r4-eth1 master VRF-a") + + r1.run("sysctl -w net.ipv4.ip_forward=1") + r2.run("sysctl -w net.ipv4.ip_forward=1") + r3.run("sysctl -w net.ipv4.ip_forward=1") + r4.run("sysctl -w net.ipv4.ip_forward=1") + r1.run("sysctl -w net.mpls.conf.r1-eth0.input=1") + r1.run("sysctl -w net.mpls.conf.r1-eth1.input=1") + r1.run("sysctl -w net.mpls.conf.r1-eth2.input=1") + r2.run("sysctl -w net.mpls.conf.r1-eth0.input=1") + r2.run("sysctl -w net.mpls.conf.r1-eth1.input=1") + r3.run("sysctl -w net.mpls.conf.r1-eth0.input=1") + r3.run("sysctl -w net.mpls.conf.r1-eth1.input=1") + r3.run("sysctl -w net.mpls.conf.r1-eth2.input=1") + r4.run("sysctl -w net.mpls.conf.r1-eth0.input=1") + r4.run("sysctl -w net.mpls.conf.r1-eth1.input=1") + + router_list = tgen.routers() + + # For all registred routers, load the zebra configuration file + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, + os.path.join(CWD, "{}/bgpd.conf".format(rname)), + "-M snmp", + ) + router.load_config( + TopoRouter.RD_SNMP, + os.path.join(CWD, "{}/snmpd.conf".format(rname)), + "-Le -Ivacm_conf,usmConf,iquery -V -DAgentX,trap", + ) + + # After loading the configurations, this function loads configured daemons. + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. 
+ tgen.stop_topology() + + +# SNMP utilities - maybe move to lib +def snmp_uint32_to_oid(val): + oid1 = int(val / 16777216) % 256 + oid2 = int(val / 65536) % 256 + oid3 = int(val / 256) % 256 + oid4 = int(val) % 256 + return "%(oid1)s.%(oid2)s.%(oid3)s.%(oid4)s" % locals() + + +def snmp_oid_to_uint32(oid): + values = oid.split(".") + return ( + (int(values[0]) * 16777216) + + (int(values[1]) * 65536) + + (int(values[2]) * 256) + + int(values[3]) + ) + + +def snmp_str_to_oid(str): + out_oid = "" + for char in str: + out_oid += "{}.".format(ord(char)) + return out_oid.rstrip(".") + + +def snmp_oid_to_str(oid): + out_str = "" + oids = oid.split(".") + for char in oids: + out_str += "{}".format(chr(int(char))) + return out_str + + +def snmp_rte_oid(vrf, dtype, dest, plen, policy, ntype, nhop=0): + oid_1 = snmp_str_to_oid(vrf) + oid_2 = dtype + oid_3 = dest + oid_4 = plen + oid_5 = "0.{}".format(policy) + oid_6 = ntype + if ntype == 0: + oid_7 = "" + else: + oid_7 = ".{}".format(nhop) + + return "{}.{}.{}.{}.{}.{}{}".format(oid_1, oid_2, oid_3, oid_4, oid_5, oid_6, oid_7) + + +def test_pe1_converge_evpn(): + "Wait for protocol convergence" + tgen = get_topogen() + + r1 = tgen.net.get("r1") + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + assertmsg = "BGP SNMP does not seem to be running" + assert r1_snmp.test_oid("bgpVersion", "10"), assertmsg + count = 0 + passed = False + while count < 125: + if r1_snmp.test_oid_walk("bgpPeerLocalAddr.10.4.4.4", ["10.1.1.1"]): + passed = True + break + count += 1 + sleep(1) + #tgen.mininet_cli() + assertmsg = "BGP Peer 10.4.4.4 did not connect" + assert passed, assertmsg + + +interfaces_up_test = { + "mplsL3VpnConfiguredVrfs": "2", + "mplsL3VpnActiveVrfs": "2", + "mplsL3VpnConnectedInterfaces": "3", + "mplsL3VpnNotificationEnable": "true(1)", + "mplsL3VpnVrfConfMaxPossRts": "0", + "mplsL3VpnVrfConfRteMxThrshTime": "0 seconds", + "mplsL3VpnIlllblRcvThrsh": "0", +} + +interfaces_down_test = { + "mplsL3VpnConfiguredVrfs": "2", + "mplsL3VpnActiveVrfs": "1", + "mplsL3VpnConnectedInterfaces": "3", + "mplsL3VpnNotificationEnable": "true(1)", + "mplsL3VpnVrfConfMaxPossRts": "0", + "mplsL3VpnVrfConfRteMxThrshTime": "0 seconds", + "mplsL3VpnIlllblRcvThrsh": "0", +} + + +def test_r1_mplsvpn_scalars(): + "check scalar values" + tgen = get_topogen() + r1 = tgen.net.get("r1") + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + for item in interfaces_up_test.keys(): + assertmsg = "{} should be {}: value {}".format( + item, interfaces_up_test[item], r1_snmp.get_next(item) + ) + assert r1_snmp.test_oid(item, interfaces_up_test[item]), assertmsg + + +def test_r1_mplsvpn_scalars_interface(): + "check scalar interface changing values" + tgen = get_topogen() + r1 = tgen.net.get("r1") + r1_cmd = tgen.gears["r1"] + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + r1_cmd.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown") + r1_cmd.vtysh_cmd("conf t\ninterface r1-eth4\nshutdown") + + for item in interfaces_up_test.keys(): + assertmsg = "{} should be {}: value {}".format( + item, interfaces_down_test[item], r1_snmp.get_next(item) + ) + assert r1_snmp.test_oid(item, interfaces_down_test[item]), assertmsg + + r1_cmd.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown") + r1_cmd.vtysh_cmd("conf t\ninterface r1-eth4\nno shutdown") + + for item in interfaces_up_test.keys(): + assertmsg = "{} should be {}: value {}".format( + item, interfaces_up_test[item], r1_snmp.get_next(item) + ) + assert r1_snmp.test_oid(item, interfaces_up_test[item]), assertmsg + + +def 
router_interface_get_ifindex(router, interface): + ifindex = 0 + r_int_output = router.vtysh_cmd( + "show interface {}-{}".format(router.name, interface) + ) + int_lines = r_int_output.splitlines() + for line in int_lines: + line_items = line.lstrip().split(" ") + if "index" in line_items[0]: + ifindex = line_items[1] + return ifindex + + +def generate_vrf_ifindex_oid(vrf, ifindex): + + intoid = snmp_uint32_to_oid(int(ifindex)) + vrfoid = snmp_str_to_oid(vrf) + oid = "{}.{}".format(vrfoid, intoid) + + return oid + + +def generate_vrf_index_type_oid(vrf, index, type): + vrfoid = snmp_str_to_oid(vrf) + intoid = snmp_uint32_to_oid(int(index)) + oid = "{}.{}.{}".format(vrfoid, intoid, type) + + return oid + + +iftable_up_test = { + "mplsL3VpnIfVpnClassification": ["enterprise(2)", "enterprise(2)", "enterprise(2)"], + "mplsL3VpnIfConfStorageType": ["volatile(2)", "volatile(2)", "volatile(2)"], + "mplsL3VpnIfConfRowStatus": ["active(1)", "active(1)", "active(1)"], +} + + +def get_timetick_val(time): + return int(time.split(" ")[0].lstrip("(").rstrip(")")) + + +def test_r1_mplsvpn_IfTable(): + "mplsL3VpnIf table values" + + tgen = get_topogen() + r1 = tgen.net.get("r1") + r1r = tgen.gears["r1"] + + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + # tgen.mininet_cli() + eth3_ifindex = router_interface_get_ifindex(r1r, "eth3") + eth4_ifindex = router_interface_get_ifindex(r1r, "eth4") + eth5_ifindex = router_interface_get_ifindex(r1r, "eth5") + + # get ifindex and make sure the oid is correct + + oids = [] + # generate oid + oids.append(generate_vrf_ifindex_oid("VRF-a", eth3_ifindex)) + oids.append(generate_vrf_ifindex_oid("VRF-a", eth4_ifindex)) + oids.append(generate_vrf_ifindex_oid("VRF-b", eth5_ifindex)) + + for item in iftable_up_test.keys(): + assertmsg = "{} should be {} oids {} full dict {}:".format( + item, iftable_up_test[item], oids, r1_snmp.walk(item) + ) + assert r1_snmp.test_oid_walk(item, iftable_up_test[item], oids), assertmsg + + # an inactive vrf should not affect these values + r1.cmd("ip link set r1-eth5 down") + + for item in iftable_up_test.keys(): + assertmsg = "{} should be {} oids {} full dict {}:".format( + item, iftable_up_test[item], oids, r1_snmp.walk(item) + ) + assert r1_snmp.test_oid_walk(item, iftable_up_test[item], oids), assertmsg + + r1.cmd("ip link set r1-eth5 up") + + +vrftable_test = { + "mplsL3VpnVrfDescription": ["VRF-a", "VRF-b"], + "mplsL3VpnVrfRD": ['"10:1"', '"10:2"'], + "mplsL3VpnVrfOperStatus": ["up(1)", "up(1)"], + "mplsL3VpnVrfActiveInterfaces": ["2", "1"], + "mplsL3VpnVrfAssociatedInterfaces": ["2", "1"], + "mplsL3VpnVrfConfMidRteThresh": ["0", "0"], + "mplsL3VpnVrfConfHighRteThresh": ["0", "0"], + "mplsL3VpnVrfConfMaxRoutes": ["0", "0"], + "mplsL3VpnVrfConfRowStatus": ["active(1)", "active(1)"], + "mplsL3VpnVrfConfAdminStatus": ["up(1)", "up(1)"], + "mplsL3VpnVrfConfStorageType": ["volatile(2)", "volatile(2)"], +} + + +def test_r1_mplsvpn_VrfTable(): + tgen = get_topogen() + + r1 = tgen.net.get("r1") + r1r = tgen.gears["r1"] + + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + # tgen.mininet_cli() + + oids = [] + + oids.append(snmp_str_to_oid("VRF-a")) + oids.append(snmp_str_to_oid("VRF-b")) + + # check items + for item in vrftable_test.keys(): + assertmsg = "{} should be {} oids {} full dict {}:".format( + item, vrftable_test[item], oids, r1_snmp.walk(item) + ) + assert r1_snmp.test_oid_walk(item, vrftable_test[item], oids), assertmsg + + # check timetick set and stable + ts_a = 
r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-a"))) + ts_b = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-b"))) + ts_val_a1 = get_timetick_val(ts_a) + ts_val_b1 = get_timetick_val(ts_b) + ts_a = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-a"))) + ts_b = r1_snmp.get("mplsL3VpnVrfCreationTime.{}".format(snmp_str_to_oid("VRF-b"))) + ts_val_a2 = get_timetick_val(ts_a) + ts_val_b2 = get_timetick_val(ts_b) + + assertmsg = "timestamp values for VRF-a do not match {} {}".format( + ts_val_a1, ts_val_a2 + ) + assert ts_val_a1 == ts_val_a2, assertmsg + assertmsg = "timestamp values for VRF-b do not match {} {}".format( + ts_val_b1, ts_val_b2 + ) + assert ts_val_b1 == ts_val_b2, assertmsg + + # take Last changed time, fiddle with active interfaces, ensure + # time changes and active interfaces change + ts_last = r1_snmp.get( + "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a")) + ) + ts_val_last_1 = get_timetick_val(ts_last) + r1r.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown") + active_int = r1_snmp.get( + "mplsL3VpnVrfActiveInterfaces.{}".format(snmp_str_to_oid("VRF-a")) + ) + assertmsg = "mplsL3VpnVrfActiveInterfaces incorrect should be 1 value {}".format( + active_int + ) + assert active_int == "1", assertmsg + + ts_last = r1_snmp.get( + "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a")) + ) + ts_val_last_2 = get_timetick_val(ts_last) + assertmsg = "mplsL3VpnVrfConfLastChanged does not update on interface change" + assert ts_val_last_2 > ts_val_last_1, assertmsg + r1r.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown") + + # take Last changed time, fiddle with associated interfaces, ensure + # time changes and active interfaces change + ts_last = r1_snmp.get( + "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a")) + ) + ts_val_last_1 = get_timetick_val(ts_last) + r1.cmd("ip link set r1-eth6 master VRF-a") + r1.cmd("ip link set r1-eth6 up") + + associated_int = r1_snmp.get( + "mplsL3VpnVrfAssociatedInterfaces.{}".format(snmp_str_to_oid("VRF-a")) + ) + assertmsg = "mplsL3VpnVrfAssociatedInterfaces incorrect should be 3 value {}".format( + associated_int + ) + + assert associated_int == "3", assertmsg + ts_last = r1_snmp.get( + "mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a")) + ) + ts_val_last_2 = get_timetick_val(ts_last) + assertmsg = "mplsL3VpnVrfConfLastChanged does not update on interface change" + assert ts_val_last_2 > ts_val_last_1, assertmsg + r1.cmd("ip link del r1-eth6 master VRF-a") + r1.cmd("ip link set r1-eth6 down") + + +rt_table_test = { + "mplsL3VpnVrfRT": ['"1:1"', '"1:2"'], + "mplsL3VpnVrfRTDescr": ["RT both for VRF VRF-a", "RT both for VRF VRF-b"], + "mplsL3VpnVrfRTRowStatus": ["active(1)", "active(1)"], + "mplsL3VpnVrfRTStorageType": ["volatile(2)", "volatile(2)"], +} + + +def test_r1_mplsvpn_VrfRT_table(): + tgen = get_topogen() + + r1 = tgen.net.get("r1") + r1r = tgen.gears["r1"] + + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + oids = [] + oids.append(generate_vrf_index_type_oid("VRF-a", 1, 3)) + oids.append(generate_vrf_index_type_oid("VRF-b", 1, 3)) + + # check items + for item in rt_table_test.keys(): + print(item) + assertmsg = "{} should be {} oids {} full dict {}:".format( + item, rt_table_test[item], oids, r1_snmp.walk(item) + ) + assert r1_snmp.test_oid_walk(item, rt_table_test[item], oids), assertmsg + + +def test_r1_mplsvpn_perf_table(): + tgen = get_topogen() + + r1 = tgen.net.get("r1") + r1r = tgen.gears["r1"] + + r1_snmp 
= SnmpTester(r1, "10.1.1.1", "public", "2c") + + # tgen.mininet_cli() + oid_a = snmp_str_to_oid("VRF-a") + oid_b = snmp_str_to_oid("VRF-b") + + # poll for 10 seconds for routes to appear + count = 0 + passed = False + while count < 60: + if r1_snmp.test_oid_walk( + "mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_a), ["7"] + ): + passed = True + break + count += 1 + sleep(1) + # tgen.mininet_cli() + assertmsg = "mplsL3VpnVrfPerfCurrNumRoutes shouold be 7 got {}".format( + r1_snmp.get("mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_a)) + ) + assert passed, assertmsg + curr_a = int(r1_snmp.get("mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_a))) + del_a = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesDeleted.{}".format(oid_a))) + add_a = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesAdded.{}".format(oid_a))) + + assertmsg = "FAIL curr{} does not equal added{} - deleted {}".format( + curr_a, add_a, del_a + ) + assert curr_a == (add_a - del_a), assertmsg + curr_b = int(r1_snmp.get("mplsL3VpnVrfPerfCurrNumRoutes.{}".format(oid_b))) + del_b = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesDeleted.{}".format(oid_b))) + add_b = int(r1_snmp.get("mplsL3VpnVrfPerfRoutesAdded.{}".format(oid_b))) + assertmsg = "FAIL curr{} does not equal added{} - deleted {}".format( + curr_b, add_b, del_b + ) + assert curr_b == (add_b - del_b), assertmsg + + +rte_table_test = { + "mplsL3VpnVrfRteInetCidrDestType": [ + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + ], + "mplsL3VpnVrfRteInetCidrDest": [ + "0A 05 05 05", + "0A 07 07 07", + "C0 A8 22 00", + "C0 A8 64 00", + "C0 A8 64 00", + "C0 A8 C8 00", + "C0 A8 C8 00", + ], + "mplsL3VpnVrfRteInetCidrPfxLen": ["32", "32", "24", "24", "24", "24", "24"], + "mplsL3VpnVrfRteInetCidrNHopType": [ + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + "ipv4(1)", + "unknown(0)", + "ipv4(1)", + "unknown(0)", + ], + "mplsL3VpnVrfRteInetCidrNextHop": [ + "C0 A8 64 0A", + "C0 A8 C8 0A", + "0A 04 04 04", + "C0 A8 64 0A", + '""', + "C0 A8 C8 0A", + '""', + ], + "mplsL3VpnVrfRteInetCidrIfIndex": ["5", "6", "4", "5", "0", "6", "0"], + "mplsL3VpnVrfRteInetCidrType": [ + "local(3)", + "local(3)", + "remote(4)", + "local(3)", + "other(1)", + "local(3)", + "other(1)", + ], + "mplsL3VpnVrfRteInetCidrProto": [ + "bgp(14)", + "bgp(14)", + "bgp(14)", + "bgp(14)", + "local(2)", + "bgp(14)", + "local(2)", + ], + "mplsL3VpnVrfRteInetCidrNextHopAS": ["65001", "65001", "0", "65001", "0", "65001", "0"], + "mplsL3VpnVrfRteInetCidrMetric1": ["0", "0", "20", "0", "0", "0", "0"], + "mplsL3VpnVrfRteInetCidrMetric2": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"], + "mplsL3VpnVrfRteInetCidrMetric3": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"], + "mplsL3VpnVrfRteInetCidrMetric4": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"], + "mplsL3VpnVrfRteInetCidrMetric5": ["-1", "-1", "-1", "-1", "-1", "-1", "-1"], + "mplsL3VpnVrfRteXCPointer": ["00", "00", "00", "00", "00", "00", "00"], + "mplsL3VpnVrfRteInetCidrStatus": [ + "active(1)", + "active(1)", + "active(1)", + "active(1)", + "active(1)", + "active(1)", + "active(1)", + ], +} + + +def test_r1_mplsvpn_rte_table(): + tgen = get_topogen() + + r1 = tgen.net.get("r1") + r1r = tgen.gears["r1"] + + r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c") + + # tgen.mininet_cli() + oid_1 = snmp_rte_oid("VRF-a", 1, "10.5.5.5", 32, 0, 1, "192.168.100.10") + oid_2 = snmp_rte_oid("VRF-a", 1, "10.7.7.7", 32, 0, 1, "192.168.200.10") + oid_3 = snmp_rte_oid("VRF-a", 1, "192.168.34.0", 24, 0, 1, "10.4.4.4") + oid_4 = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 1, 1, "192.168.100.10") 
+ oid_4_a = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 0, 1, "192.168.100.10") + oid_5 = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 0, 0) + oid_5_a = snmp_rte_oid("VRF-a", 1, "192.168.100.0", 24, 1, 0) + oid_6 = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 1, 1, "192.168.200.10") + oid_6_a = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 0, 1, "192.168.200.10") + oid_7 = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 0, 0) + oid_7_a = snmp_rte_oid("VRF-a", 1, "192.168.200.0", 24, 1, 0) + + oid_lists = [ + [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6, oid_7], + [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6, oid_7], + [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6_a, oid_7_a], + [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6_a, oid_7_a], + [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6, oid_7], + [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6, oid_7], + [oid_1, oid_2, oid_3, oid_4, oid_5, oid_6_a, oid_7_a], + [oid_1, oid_2, oid_3, oid_4_a, oid_5_a, oid_6_a, oid_7_a], + ] + + # check items + + passed = False + for oid_list in oid_lists: + passed = True + for item in rte_table_test.keys(): + print(item) + assertmsg = "{} should be {} oids {} full dict {}:".format( + item, rte_table_test[item], oid_list, r1_snmp.walk(item) + ) + if not r1_snmp.test_oid_walk(item, rte_table_test[item], oid_list): + passed = False + break + print( + "{} should be {} oids {} full dict {}:".format( + item, rte_table_test[item], oid_list, r1_snmp.walk(item) + ) + ) + if passed: + break + print("passed {}".format(passed)) + assert passed, assertmsg + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py index 36f1d8cd56..71f64e9b70 100644 --- a/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py +++ b/tests/topotests/bgp-vrf-route-leak-basic/test_bgp-vrf-route-leak-basic.py @@ -90,7 +90,11 @@ def test_vrf_route_leak(): # Test DONNA VRF. expect = { - "10.0.0.0/24": [{"protocol": "connected",}], + "10.0.0.0/24": [ + { + "protocol": "connected", + } + ], "10.0.1.0/24": [ {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], @@ -111,11 +115,19 @@ def test_vrf_route_leak(): "10.0.0.0/24": [ {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], - "10.0.1.0/24": [{"protocol": "connected",}], + "10.0.1.0/24": [ + { + "protocol": "connected", + } + ], "10.0.2.0/24": [ {"protocol": "bgp", "selected": True, "nexthops": [{"fib": True}]} ], - "10.0.3.0/24": [{"protocol": "connected",}], + "10.0.3.0/24": [ + { + "protocol": "connected", + } + ], } test_func = partial( diff --git a/tests/topotests/bgp_blackhole_community/__init__.py b/tests/topotests/bgp_blackhole_community/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/__init__.py diff --git a/tests/topotests/bgp_blackhole_community/r1/bgpd.conf b/tests/topotests/bgp_blackhole_community/r1/bgpd.conf new file mode 100644 index 0000000000..574d199aeb --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r1/bgpd.conf @@ -0,0 +1,14 @@ +! 
+router bgp 65001 + timers bgp 3 9 + no bgp ebgp-requires-policy + neighbor r1-eth0 interface remote-as external + address-family ipv4 unicast + redistribute connected + neighbor r1-eth0 route-map r2 out + exit-address-family + ! +! +route-map r2 permit 10 + set community blackhole +! diff --git a/tests/topotests/bgp_blackhole_community/r1/zebra.conf b/tests/topotests/bgp_blackhole_community/r1/zebra.conf new file mode 100644 index 0000000000..70dc5e516d --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r1/zebra.conf @@ -0,0 +1,10 @@ +! +interface lo + ip address 172.16.255.254/32 +! +interface r1-eth0 + ip address 192.168.0.1/24 +! +ip forwarding +! + diff --git a/tests/topotests/bgp_blackhole_community/r2/bgpd.conf b/tests/topotests/bgp_blackhole_community/r2/bgpd.conf new file mode 100644 index 0000000000..2260613253 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r2/bgpd.conf @@ -0,0 +1,8 @@ +! +router bgp 65002 + no bgp ebgp-requires-policy + timers bgp 3 9 + neighbor r2-eth0 interface remote-as external + neighbor r2-eth1 interface remote-as external + neighbor r2-eth2 interface remote-as internal +! diff --git a/tests/topotests/bgp_blackhole_community/r2/zebra.conf b/tests/topotests/bgp_blackhole_community/r2/zebra.conf new file mode 100644 index 0000000000..cf6fb6d984 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r2/zebra.conf @@ -0,0 +1,12 @@ +! +interface r2-eth0 + ip address 192.168.0.2/24 +! +interface r2-eth1 + ip address 192.168.1.1/24 +! +interface r2-eth2 + ip address 192.168.2.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_blackhole_community/r3/bgpd.conf b/tests/topotests/bgp_blackhole_community/r3/bgpd.conf new file mode 100644 index 0000000000..d0c4b400f7 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r3/bgpd.conf @@ -0,0 +1,6 @@ +! +router bgp 65003 + timers bgp 3 9 + no bgp ebgp-requires-policy + neighbor r3-eth0 interface remote-as external +! diff --git a/tests/topotests/bgp_blackhole_community/r3/zebra.conf b/tests/topotests/bgp_blackhole_community/r3/zebra.conf new file mode 100644 index 0000000000..05ab56d6f1 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r3/zebra.conf @@ -0,0 +1,6 @@ +! +interface r3-eth0 + ip address 192.168.1.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_blackhole_community/r4/bgpd.conf b/tests/topotests/bgp_blackhole_community/r4/bgpd.conf new file mode 100644 index 0000000000..0ac963e642 --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r4/bgpd.conf @@ -0,0 +1,6 @@ +! +router bgp 65002 + timers bgp 3 9 + no bgp ebgp-requires-policy + neighbor r4-eth0 interface remote-as internal +! diff --git a/tests/topotests/bgp_blackhole_community/r4/zebra.conf b/tests/topotests/bgp_blackhole_community/r4/zebra.conf new file mode 100644 index 0000000000..e2ccaed52a --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/r4/zebra.conf @@ -0,0 +1,6 @@ +! +interface r4-eth0 + ip address 192.168.2.2/24 +! +ip forwarding +! 
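With the configuration above, r1 attaches the well-known BLACKHOLE community to its redistributed prefixes ("route-map r2 permit 10" with "set community blackhole"), and the test added next verifies that r2 keeps 172.16.255.254/32 off its eBGP advertisements while still passing it to its iBGP peer. Like the other topotests in this series, it polls for state with functools.partial plus topotest.run_and_expect; a minimal sketch of that idiom follows, where _check_established() and the router/peer names are placeholders rather than part of the test:

import functools
import json

from lib import topotest

def _check_established(router, peer):
    # Placeholder convergence check in the style used by these tests:
    # json_cmp() returns None when the expected subset matches, an error otherwise.
    output = json.loads(router.vtysh_cmd("show ip bgp neighbor {} json".format(peer)))
    expected = {peer: {"bgpState": "Established"}}
    return topotest.json_cmp(output, expected)

# test_func = functools.partial(_check_established, tgen.gears["r2"], "192.168.0.1")
# _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
# assert result is None, "peer did not converge"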
diff --git a/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py new file mode 100644 index 0000000000..a856c9278f --- /dev/null +++ b/tests/topotests/bgp_blackhole_community/test_bgp_blackhole_community.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2021 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if 172.16.255.254/32 tagged with BLACKHOLE community is not +re-advertised downstream outside local AS. +""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo +from lib.common_config import step + + +class TemplateTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + for routern in range(1, 5): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) + + +def setup_module(mod): + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_blackhole_community(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_converge(): + output = json.loads( + tgen.gears["r2"].vtysh_cmd("show ip bgp 172.16.255.254/32 json") + ) + expected = {"paths": [{"community": {"list": ["blackhole", "noExport"]}}]} + return topotest.json_cmp(output, expected) + + def _bgp_no_advertise_ebgp(): + output = json.loads( + tgen.gears["r2"].vtysh_cmd( + "show ip bgp neighbor r2-eth1 advertised-routes json" + ) + ) + expected = { + "advertisedRoutes": {}, + "totalPrefixCounter": 0, + "filteredPrefixCounter": 0, + } + + return topotest.json_cmp(output, expected) + + def _bgp_no_advertise_ibgp(): + output = json.loads( + tgen.gears["r2"].vtysh_cmd( + "show ip bgp neighbor r2-eth2 advertised-routes json" + ) + ) + expected = { + "advertisedRoutes": {"172.16.255.254/32": {}}, + "totalPrefixCounter": 2, + } + + return 
topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r2"]) + + step("Check if 172.16.255.254/32 is not advertised to eBGP peers") + + test_func = functools.partial(_bgp_no_advertise_ebgp) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert ( + result is None + ), 'Advertised blackhole tagged prefix to eBGP peers in "{}"'.format( + tgen.gears["r2"] + ) + + step("Check if 172.16.255.254/32 is advertised to iBGP peers") + + test_func = functools.partial(_bgp_no_advertise_ibgp) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert ( + result is None + ), 'Withdrawn blackhole tagged prefix to iBGP peers in "{}"'.format( + tgen.gears["r2"] + ) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_community_change_update/__init__.py b/tests/topotests/bgp_community_change_update/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/__init__.py diff --git a/tests/topotests/bgp_community_change_update/c1/bgpd.conf b/tests/topotests/bgp_community_change_update/c1/bgpd.conf new file mode 100644 index 0000000000..24cf9dffb9 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/c1/bgpd.conf @@ -0,0 +1,11 @@ +! +debug bgp updates +! +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 10.0.1.2 remote-as external + neighbor 10.0.1.2 timers 3 10 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp_community_change_update/c1/zebra.conf b/tests/topotests/bgp_community_change_update/c1/zebra.conf new file mode 100644 index 0000000000..e3dbbc051d --- /dev/null +++ b/tests/topotests/bgp_community_change_update/c1/zebra.conf @@ -0,0 +1,6 @@ +! +interface c1-eth0 + ip address 10.0.1.1/30 +! +ip forwarding +! diff --git a/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py new file mode 100644 index 0000000000..5fc4310266 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/test_bgp_community_change_update.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python + +# Copyright (c) 2020 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Reference: https://www.cmand.org/communityexploration + + --y2-- + / | \ + c1 ---- x1 ---- y1 | z1 + \ | / + --y3-- + +1. z1 announces 192.168.255.254/32 to y2, y3. +2. y2 and y3 tags this prefix at ingress with appropriate +communities 65004:2 (y2) and 65004:3 (y3). 
+3. x1 filters all communities at the egress to c1. +4. Shutdown the link between y1 and y2. +5. y1 will generate a BGP UPDATE message regarding the next-hop change. +6. x1 will generate a BGP UPDATE message regarding community change. + +To avoid sending duplicate BGP UPDATE messages we should make sure +we send only actual route updates. In this example, x1 will skip +BGP UPDATE to c1 because the actual route is the same +(filtered communities - nothing changes). +""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + +from lib.common_config import step +from time import sleep + + +class TemplateTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + tgen.add_router("z1") + tgen.add_router("y1") + tgen.add_router("y2") + tgen.add_router("y3") + tgen.add_router("x1") + tgen.add_router("c1") + + # 10.0.1.0/30 + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["c1"]) + switch.add_link(tgen.gears["x1"]) + + # 10.0.2.0/30 + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["x1"]) + switch.add_link(tgen.gears["y1"]) + + # 10.0.3.0/30 + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["y1"]) + switch.add_link(tgen.gears["y2"]) + + # 10.0.4.0/30 + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["y1"]) + switch.add_link(tgen.gears["y3"]) + + # 10.0.5.0/30 + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["y2"]) + switch.add_link(tgen.gears["y3"]) + + # 10.0.6.0/30 + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["y2"]) + switch.add_link(tgen.gears["z1"]) + + # 10.0.7.0/30 + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["y3"]) + switch.add_link(tgen.gears["z1"]) + + +def setup_module(mod): + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_community_update_path_change(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_converge_initial(): + output = json.loads( + tgen.gears["c1"].vtysh_cmd("show ip bgp neighbor 10.0.1.2 json") + ) + expected = { + "10.0.1.2": { + "bgpState": "Established", + "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 8}}, + } + } + return topotest.json_cmp(output, expected) + + step("Check if an initial topology is converged") + test_func = functools.partial(_bgp_converge_initial) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see bgp convergence in c1" + + step("Disable link between y1 and y2") + tgen.gears["y1"].run("ip link set dev y1-eth1 down") + + def _bgp_converge_link_disabled(): + output = json.loads(tgen.gears["y1"].vtysh_cmd("show ip bgp nei 10.0.3.2 json")) + expected = {"10.0.3.2": {"bgpState": "Active"}} + return topotest.json_cmp(output, expected) + + step("Check if a topology is converged after a link down between y1 
and y2") + test_func = functools.partial(_bgp_converge_link_disabled) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see bgp convergence in y1" + + def _bgp_check_for_duplicate_updates(): + duplicate = False + i = 0 + while i < 5: + if ( + len( + tgen.gears["c1"].run( + 'grep "10.0.1.2 rcvd 192.168.255.254/32 IPv4 unicast...duplicate ignored" bgpd.log' + ) + ) + > 0 + ): + duplicate = True + i += 1 + sleep(0.5) + return duplicate + + step("Check if we see duplicate BGP UPDATE message in c1 (suppress-duplicates)") + assert ( + _bgp_check_for_duplicate_updates() == False + ), "Seen duplicate BGP UPDATE message in c1 from x1" + + step("Disable bgp suppress-duplicates at x1") + tgen.gears["x1"].run( + "vtysh -c 'conf' -c 'router bgp' -c 'no bgp suppress-duplicates'" + ) + + step("Enable link between y1 and y2") + tgen.gears["y1"].run("ip link set dev y1-eth1 up") + + def _bgp_converge_link_enabled(): + output = json.loads(tgen.gears["y1"].vtysh_cmd("show ip bgp nei 10.0.3.2 json")) + expected = { + "10.0.3.2": { + "bgpState": "Established", + "addressFamilyInfo": { + "ipv4Unicast": {"acceptedPrefixCounter": 5, "sentPrefixCounter": 4} + }, + } + } + return topotest.json_cmp(output, expected) + + step("Check if a topology is converged after a link up between y1 and y2") + test_func = functools.partial(_bgp_converge_link_enabled) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Failed to see bgp convergence in y1" + + step( + "Check if we see duplicate BGP UPDATE message in c1 (no bgp suppress-duplicates)" + ) + assert ( + _bgp_check_for_duplicate_updates() == True + ), "Didn't see duplicate BGP UPDATE message in c1 from x1" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_community_change_update/x1/bgpd.conf b/tests/topotests/bgp_community_change_update/x1/bgpd.conf new file mode 100644 index 0000000000..8d7bcb948b --- /dev/null +++ b/tests/topotests/bgp_community_change_update/x1/bgpd.conf @@ -0,0 +1,20 @@ +! +debug bgp updates +! +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 10.0.1.1 remote-as external + neighbor 10.0.1.1 timers 3 10 + neighbor 10.0.2.2 remote-as external + neighbor 10.0.2.2 timers 3 10 + address-family ipv4 unicast + redistribute connected + neighbor 10.0.1.1 route-map c1 out + exit-address-family +! +bgp community-list standard c1 seq 1 permit 65004:2 +bgp community-list standard c1 seq 2 permit 65004:3 +! +route-map c1 permit 10 + set comm-list c1 delete +! diff --git a/tests/topotests/bgp_community_change_update/x1/zebra.conf b/tests/topotests/bgp_community_change_update/x1/zebra.conf new file mode 100644 index 0000000000..8b7c03ffbf --- /dev/null +++ b/tests/topotests/bgp_community_change_update/x1/zebra.conf @@ -0,0 +1,9 @@ +! +interface x1-eth0 + ip address 10.0.1.2/30 +! +interface x1-eth1 + ip address 10.0.2.1/30 +! +ip forwarding +! 
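x1's egress policy toward c1 (the community-list c1 entries for 65004:2 and 65004:3 together with "route-map c1 permit 10" and "set comm-list c1 delete") strips whatever tag y2 or y3 attached at ingress, so after the y1-y2 link flap the route c1 receives is unchanged and, with FRR's default bgp suppress-duplicates, x1 should not send a fresh UPDATE; that is what the test above greps c1's bgpd.log for. A small illustrative check of the stripping itself, not part of the test and using a hypothetical helper name, could look like this:

def _c1_communities_stripped(c1):
    # With x1's "set comm-list c1 delete" applied on egress, neither 65004:2
    # nor 65004:3 should be visible on the prefix as received by c1.
    output = c1.vtysh_cmd("show ip bgp 192.168.255.254/32")
    return "65004:" not in output

# assert _c1_communities_stripped(tgen.gears["c1"])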
diff --git a/tests/topotests/bgp_community_change_update/y1/bgpd.conf b/tests/topotests/bgp_community_change_update/y1/bgpd.conf new file mode 100644 index 0000000000..a010085835 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/y1/bgpd.conf @@ -0,0 +1,12 @@ +router bgp 65003 + no bgp ebgp-requires-policy + neighbor 10.0.2.1 remote-as external + neighbor 10.0.2.1 timers 3 10 + neighbor 10.0.3.2 remote-as internal + neighbor 10.0.3.2 timers 3 10 + neighbor 10.0.4.2 remote-as internal + neighbor 10.0.4.2 timers 3 10 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp_community_change_update/y1/zebra.conf b/tests/topotests/bgp_community_change_update/y1/zebra.conf new file mode 100644 index 0000000000..4096f2a2e3 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/y1/zebra.conf @@ -0,0 +1,12 @@ +! +interface y1-eth0 + ip address 10.0.2.2/30 +! +interface y1-eth1 + ip address 10.0.3.1/30 +! +interface y1-eth2 + ip address 10.0.4.1/30 +! +ip forwarding +! diff --git a/tests/topotests/bgp_community_change_update/y2/bgpd.conf b/tests/topotests/bgp_community_change_update/y2/bgpd.conf new file mode 100644 index 0000000000..27f1e81f7d --- /dev/null +++ b/tests/topotests/bgp_community_change_update/y2/bgpd.conf @@ -0,0 +1,18 @@ +router bgp 65003 + no bgp ebgp-requires-policy + neighbor 10.0.3.1 remote-as internal + neighbor 10.0.3.1 timers 3 10 + neighbor 10.0.5.2 remote-as internal + neighbor 10.0.5.2 timers 3 10 + neighbor 10.0.6.2 remote-as external + neighbor 10.0.6.2 timers 3 10 + address-family ipv4 unicast + redistribute connected + neighbor 10.0.3.1 route-reflector-client + neighbor 10.0.5.2 route-reflector-client + neighbor 10.0.6.2 route-map z1 in + exit-address-family +! +route-map z1 permit 10 + set community 65004:2 +! diff --git a/tests/topotests/bgp_community_change_update/y2/zebra.conf b/tests/topotests/bgp_community_change_update/y2/zebra.conf new file mode 100644 index 0000000000..7e9458a45c --- /dev/null +++ b/tests/topotests/bgp_community_change_update/y2/zebra.conf @@ -0,0 +1,12 @@ +! +interface y2-eth0 + ip address 10.0.3.2/30 +! +interface y2-eth1 + ip address 10.0.5.1/30 +! +interface y2-eth2 + ip address 10.0.6.1/30 +! +ip forwarding +! diff --git a/tests/topotests/bgp_community_change_update/y3/bgpd.conf b/tests/topotests/bgp_community_change_update/y3/bgpd.conf new file mode 100644 index 0000000000..8e57284929 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/y3/bgpd.conf @@ -0,0 +1,18 @@ +router bgp 65003 + no bgp ebgp-requires-policy + neighbor 10.0.4.1 remote-as internal + neighbor 10.0.4.1 timers 3 10 + neighbor 10.0.5.1 remote-as internal + neighbor 10.0.5.1 timers 3 10 + neighbor 10.0.7.2 remote-as external + neighbor 10.0.7.2 timers 3 10 + address-family ipv4 unicast + redistribute connected + neighbor 10.0.4.1 route-reflector-client + neighbor 10.0.5.1 route-reflector-client + neighbor 10.0.7.2 route-map z1 in + exit-address-family +! +route-map z1 permit 10 + set community 65004:3 +! diff --git a/tests/topotests/bgp_community_change_update/y3/zebra.conf b/tests/topotests/bgp_community_change_update/y3/zebra.conf new file mode 100644 index 0000000000..93dd87dbd2 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/y3/zebra.conf @@ -0,0 +1,12 @@ +! +interface y3-eth0 + ip address 10.0.4.2/30 +! +interface y3-eth1 + ip address 10.0.5.2/30 +! +interface y3-eth2 + ip address 10.0.7.1/30 +! +ip forwarding +! 
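y2 and y3 tag z1's announcement at ingress ("route-map z1 in" with "set community 65004:2" and "set community 65004:3" respectively) and reflect it to y1, which is a route-reflector client of both. An illustrative way to confirm the tagging on y1 before x1 strips it, again not part of the test and with the tag on the selected path depending on which reflector wins best-path:

output = tgen.gears["y1"].vtysh_cmd("show ip bgp 192.168.255.254/32")
# Both reflected paths are normally present on y1, each carrying its
# reflector's tag; after the y1-y2 link goes down only 65004:3 should remain.
assert "65004:2" in output or "65004:3" in output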
diff --git a/tests/topotests/bgp_community_change_update/z1/bgpd.conf b/tests/topotests/bgp_community_change_update/z1/bgpd.conf new file mode 100644 index 0000000000..2f470f18b8 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/z1/bgpd.conf @@ -0,0 +1,10 @@ +router bgp 65004 + no bgp ebgp-requires-policy + neighbor 10.0.6.1 remote-as external + neighbor 10.0.6.1 timers 3 10 + neighbor 10.0.7.1 remote-as external + neighbor 10.0.7.1 timers 3 10 + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp_community_change_update/z1/zebra.conf b/tests/topotests/bgp_community_change_update/z1/zebra.conf new file mode 100644 index 0000000000..7f6bbb66b3 --- /dev/null +++ b/tests/topotests/bgp_community_change_update/z1/zebra.conf @@ -0,0 +1,12 @@ +! +interface lo + ip address 192.168.255.254/32 +! +interface z1-eth0 + ip address 10.0.6.2/30 +! +interface z1-eth1 + ip address 10.0.7.2/30 +! +ip forwarding +! diff --git a/tests/topotests/bgp_features/exabgp.env b/tests/topotests/bgp_features/exabgp.env new file mode 100644 index 0000000000..6c554f5fa8 --- /dev/null +++ b/tests/topotests/bgp_features/exabgp.env @@ -0,0 +1,53 @@ + +[exabgp.api] +encoder = text +highres = false +respawn = false +socket = '' + +[exabgp.bgp] +openwait = 60 + +[exabgp.cache] +attributes = true +nexthops = true + +[exabgp.daemon] +daemonize = true +pid = '/var/run/exabgp/exabgp.pid' +user = 'exabgp' + +[exabgp.log] +all = false +configuration = true +daemon = true +destination = '/var/log/exabgp.log' +enable = true +level = INFO +message = false +network = true +packets = false +parser = false +processes = true +reactor = true +rib = false +routes = false +short = false +timers = false + +[exabgp.pdb] +enable = false + +[exabgp.profile] +enable = false +file = '' + +[exabgp.reactor] +speed = 1.0 + +[exabgp.tcp] +acl = false +bind = '' +delay = 0 +once = false +port = 179 diff --git a/tests/topotests/bgp_features/peer1/exa_readpipe.py b/tests/topotests/bgp_features/peer1/exa_readpipe.py new file mode 100644 index 0000000000..dba1536388 --- /dev/null +++ b/tests/topotests/bgp_features/peer1/exa_readpipe.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +"Helper script to read api commands from a pipe and feed them to ExaBGP" + +import sys + +if len(sys.argv) != 2: + sys.exit(1) +fifo = sys.argv[1] + +while True: + pipe = open(fifo, 'r') + with pipe: + line = pipe.readline().strip() + if line != "": + sys.stdout.write("{}\n".format(line)) + sys.stdout.flush() + pipe.close() + +sys.exit(0) diff --git a/tests/topotests/bgp_features/peer1/exabgp.cfg b/tests/topotests/bgp_features/peer1/exabgp.cfg new file mode 100644 index 0000000000..2e95252cf6 --- /dev/null +++ b/tests/topotests/bgp_features/peer1/exabgp.cfg @@ -0,0 +1,12 @@ +group exabgp { + process announce-routes { + run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer1.in"; + encoder text; + } + neighbor 192.168.101.1 { + router-id 192.168.101.3; + local-address 192.168.101.3; + local-as 65403; + peer-as 65000; + } +} diff --git a/tests/topotests/bgp_features/peer2/exa_readpipe.py b/tests/topotests/bgp_features/peer2/exa_readpipe.py new file mode 100644 index 0000000000..dba1536388 --- /dev/null +++ b/tests/topotests/bgp_features/peer2/exa_readpipe.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +"Helper script to read api commands from a pipe and feed them to ExaBGP" + +import sys + +if len(sys.argv) != 2: + sys.exit(1) +fifo = sys.argv[1] + +while True: + pipe = open(fifo, 'r') + with pipe: + line = 
pipe.readline().strip() + if line != "": + sys.stdout.write("{}\n".format(line)) + sys.stdout.flush() + pipe.close() + +sys.exit(0) diff --git a/tests/topotests/bgp_features/peer2/exabgp.cfg b/tests/topotests/bgp_features/peer2/exabgp.cfg new file mode 100644 index 0000000000..1f65547bc5 --- /dev/null +++ b/tests/topotests/bgp_features/peer2/exabgp.cfg @@ -0,0 +1,12 @@ +group exabgp { + process announce-routes { + run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer2.in"; + encoder text; + } + neighbor 192.168.101.1 { + router-id 192.168.101.4; + local-address 192.168.101.4; + local-as 65404; + peer-as 65000; + } +} diff --git a/tests/topotests/bgp_features/peer3/exa_readpipe.py b/tests/topotests/bgp_features/peer3/exa_readpipe.py new file mode 100644 index 0000000000..dba1536388 --- /dev/null +++ b/tests/topotests/bgp_features/peer3/exa_readpipe.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +"Helper script to read api commands from a pipe and feed them to ExaBGP" + +import sys + +if len(sys.argv) != 2: + sys.exit(1) +fifo = sys.argv[1] + +while True: + pipe = open(fifo, 'r') + with pipe: + line = pipe.readline().strip() + if line != "": + sys.stdout.write("{}\n".format(line)) + sys.stdout.flush() + pipe.close() + +sys.exit(0) diff --git a/tests/topotests/bgp_features/peer3/exabgp.cfg b/tests/topotests/bgp_features/peer3/exabgp.cfg new file mode 100644 index 0000000000..8632cc86c5 --- /dev/null +++ b/tests/topotests/bgp_features/peer3/exabgp.cfg @@ -0,0 +1,12 @@ +group exabgp { + process announce-routes { + run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer3.in"; + encoder text; + } + neighbor 192.168.101.1 { + router-id 192.168.101.5; + local-address 192.168.101.5; + local-as 65405; + peer-as 65000; + } +} diff --git a/tests/topotests/bgp_features/peer4/exa_readpipe.py b/tests/topotests/bgp_features/peer4/exa_readpipe.py new file mode 100644 index 0000000000..dba1536388 --- /dev/null +++ b/tests/topotests/bgp_features/peer4/exa_readpipe.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +"Helper script to read api commands from a pipe and feed them to ExaBGP" + +import sys + +if len(sys.argv) != 2: + sys.exit(1) +fifo = sys.argv[1] + +while True: + pipe = open(fifo, 'r') + with pipe: + line = pipe.readline().strip() + if line != "": + sys.stdout.write("{}\n".format(line)) + sys.stdout.flush() + pipe.close() + +sys.exit(0) diff --git a/tests/topotests/bgp_features/peer4/exabgp.cfg b/tests/topotests/bgp_features/peer4/exabgp.cfg new file mode 100644 index 0000000000..06bc0d6e64 --- /dev/null +++ b/tests/topotests/bgp_features/peer4/exabgp.cfg @@ -0,0 +1,12 @@ +group exabgp { + process announce-routes { + run "/etc/exabgp/exa_readpipe.py /var/run/exabgp_peer4.in"; + encoder text; + } + neighbor 192.168.101.1 { + router-id 192.168.101.6; + local-address 192.168.101.6; + local-as 65406; + peer-as 65000; + } +} diff --git a/tests/topotests/bgp_features/r1/bgp_damp_announced.json b/tests/topotests/bgp_features/r1/bgp_damp_announced.json new file mode 100644 index 0000000000..cb4a2c9b2f --- /dev/null +++ b/tests/topotests/bgp_features/r1/bgp_damp_announced.json @@ -0,0 +1,21 @@ +{ + "localAS":65000, + "routes":{ + "192.168.31.0/24": [ { "valid":true, "network":"192.168.31.0\/24", "peerId":"192.168.101.3" } ], + "192.168.32.0/24": [ { "valid":true, "network":"192.168.32.0\/24", "peerId":"192.168.101.3" } ], + "192.168.33.0/24": [ { "valid":true, "network":"192.168.33.0\/24", "peerId":"192.168.101.3" } ], + "192.168.34.0/24": [ { "valid":true, "network":"192.168.34.0\/24", "peerId":"192.168.101.3" } ], + 
"192.168.41.0/24": [ { "valid":true, "network":"192.168.41.0\/24", "peerId":"192.168.101.4" } ], + "192.168.42.0/24": [ { "valid":true, "network":"192.168.42.0\/24", "peerId":"192.168.101.4" } ], + "192.168.43.0/24": [ { "valid":true, "network":"192.168.43.0\/24", "peerId":"192.168.101.4" } ], + "192.168.44.0/24": [ { "valid":true, "network":"192.168.44.0\/24", "peerId":"192.168.101.4" } ], + "192.168.51.0/24": [ { "valid":true, "network":"192.168.51.0\/24", "peerId":"192.168.101.5" } ], + "192.168.52.0/24": [ { "valid":true, "network":"192.168.52.0\/24", "peerId":"192.168.101.5" } ], + "192.168.53.0/24": [ { "valid":true, "network":"192.168.53.0\/24", "peerId":"192.168.101.5" } ], + "192.168.54.0/24": [ { "valid":true, "network":"192.168.54.0\/24", "peerId":"192.168.101.5" } ], + "192.168.61.0/24": [ { "valid":true, "network":"192.168.61.0\/24", "peerId":"192.168.101.6" } ], + "192.168.62.0/24": [ { "valid":true, "network":"192.168.62.0\/24", "peerId":"192.168.101.6" } ], + "192.168.63.0/24": [ { "valid":true, "network":"192.168.63.0\/24", "peerId":"192.168.101.6" } ], + "192.168.64.0/24": [ { "valid":true, "network":"192.168.64.0\/24", "peerId":"192.168.101.6" } ] + } +} diff --git a/tests/topotests/bgp_features/r1/bgp_damp_setup.json b/tests/topotests/bgp_features/r1/bgp_damp_setup.json new file mode 100644 index 0000000000..f9f89db894 --- /dev/null +++ b/tests/topotests/bgp_features/r1/bgp_damp_setup.json @@ -0,0 +1,10 @@ +{ + "ipv4Unicast":{ + "peers":{ + "192.168.101.3":{"remoteAs":65403, "state":"Established"}, + "192.168.101.4":{"remoteAs":65404, "state":"Established"}, + "192.168.101.5":{"remoteAs":65405, "state":"Established"}, + "192.168.101.6":{"remoteAs":65406, "state":"Established"} + } + } +} diff --git a/tests/topotests/bgp_features/r2/bgp_damp_announced.json b/tests/topotests/bgp_features/r2/bgp_damp_announced.json new file mode 100644 index 0000000000..9394358f82 --- /dev/null +++ b/tests/topotests/bgp_features/r2/bgp_damp_announced.json @@ -0,0 +1,21 @@ +{ + "localAS":65000, + "routes":{ + "192.168.31.0/24": [ { "network":"192.168.31.0\/24", "peerId":"192.168.0.1" } ], + "192.168.32.0/24": [ { "network":"192.168.32.0\/24", "peerId":"192.168.0.1" } ], + "192.168.33.0/24": [ { "network":"192.168.33.0\/24", "peerId":"192.168.0.1" } ], + "192.168.34.0/24": [ { "network":"192.168.34.0\/24", "peerId":"192.168.0.1" } ], + "192.168.41.0/24": [ { "network":"192.168.41.0\/24", "peerId":"192.168.0.1" } ], + "192.168.42.0/24": [ { "network":"192.168.42.0\/24", "peerId":"192.168.0.1" } ], + "192.168.43.0/24": [ { "network":"192.168.43.0\/24", "peerId":"192.168.0.1" } ], + "192.168.44.0/24": [ { "network":"192.168.44.0\/24", "peerId":"192.168.0.1" } ], + "192.168.51.0/24": [ { "network":"192.168.51.0\/24", "peerId":"192.168.0.1" } ], + "192.168.52.0/24": [ { "network":"192.168.52.0\/24", "peerId":"192.168.0.1" } ], + "192.168.53.0/24": [ { "network":"192.168.53.0\/24", "peerId":"192.168.0.1" } ], + "192.168.54.0/24": [ { "network":"192.168.54.0\/24", "peerId":"192.168.0.1" } ], + "192.168.61.0/24": [ { "network":"192.168.61.0\/24", "peerId":"192.168.0.1" } ], + "192.168.62.0/24": [ { "network":"192.168.62.0\/24", "peerId":"192.168.0.1" } ], + "192.168.63.0/24": [ { "network":"192.168.63.0\/24", "peerId":"192.168.0.1" } ], + "192.168.64.0/24": [ { "network":"192.168.64.0\/24", "peerId":"192.168.0.1" } ] + } +} diff --git a/tests/topotests/bgp_features/r2/bgp_damp_withdrawn.json b/tests/topotests/bgp_features/r2/bgp_damp_withdrawn.json new file mode 100644 index 0000000000..f3c54a70a1 
--- /dev/null +++ b/tests/topotests/bgp_features/r2/bgp_damp_withdrawn.json @@ -0,0 +1,18 @@ +{ + "192.168.31.0/24": null, + "192.168.32.0/24": null, + "192.168.33.0/24": null, + "192.168.34.0/24": null, + "192.168.41.0/24": null, + "192.168.42.0/24": null, + "192.168.43.0/24": null, + "192.168.44.0/24": null, + "192.168.51.0/24": null, + "192.168.52.0/24": null, + "192.168.53.0/24": null, + "192.168.54.0/24": null, + "192.168.61.0/24": null, + "192.168.62.0/24": null, + "192.168.63.0/24": null, + "192.168.64.0/24": null +} diff --git a/tests/topotests/bgp_features/test_bgp_features.py b/tests/topotests/bgp_features/test_bgp_features.py index bc821bd7a0..3d963b4cf6 100644 --- a/tests/topotests/bgp_features/test_bgp_features.py +++ b/tests/topotests/bgp_features/test_bgp_features.py @@ -33,6 +33,7 @@ import sys import pytest import re import time +from time import sleep # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) @@ -64,6 +65,14 @@ class BGPFeaturesTopo1(Topo): for rtrNum in range(1, 6): tgen.add_router("r{}".format(rtrNum)) + # create ExaBGP peers + for peer_num in range(1, 5): + tgen.add_exabgp_peer( + "peer{}".format(peer_num), + ip="192.168.101.{}".format(peer_num + 2), + defaultRoute="via 192.168.101.1", + ) + # Setup Switches and connections for swNum in range(1, 11): tgen.add_switch("sw{}".format(swNum)) @@ -89,6 +98,12 @@ class BGPFeaturesTopo1(Topo): tgen.gears["r2"].add_link(tgen.gears["sw5"]) tgen.gears["r5"].add_link(tgen.gears["sw5"]) + # Add ExaBGP peers to sw4 + tgen.gears["peer1"].add_link(tgen.gears["sw4"]) + tgen.gears["peer2"].add_link(tgen.gears["sw4"]) + tgen.gears["peer3"].add_link(tgen.gears["sw4"]) + tgen.gears["peer4"].add_link(tgen.gears["sw4"]) + ##################################################### # @@ -753,41 +768,71 @@ def test_bgp_delayopen_without(): pytest.skip(tgen.errors) # part 1: no delay r1 <=> no delay r4 - logger.info("Starting optional test of BGP functionality without DelayOpenTimer enabled to establish a reference for following tests") + logger.info( + "Starting optional test of BGP functionality without DelayOpenTimer enabled to establish a reference for following tests" + ) # 1.1 enable peering shutdown logger.info("Enable shutdown of peering between r1 and r4") - tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.2 shutdown"') - tgen.net["r4"].cmd('vtysh -c "conf t" -c "router bgp 65100" -c "neighbor 192.168.101.1 shutdown"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.2 shutdown"' + ) + tgen.net["r4"].cmd( + 'vtysh -c "conf t" -c "router bgp 65100" -c "neighbor 192.168.101.1 shutdown"' + ) # 1.2 wait for peers to shut down (poll output) for router_num in [1, 4]: - logger.info("Checking BGP summary after enabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after enabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_shutdown.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_shutdown.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = 
topotest.run_and_expect(test_func, None, count=3, wait=1) assertmsg = "BGP session on r{} did not shut down peer".format(router_num) assert res is None, assertmsg # 1.3 disable peering shutdown logger.info("Disable shutdown of peering between r1 and r4") - tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.2 shutdown"') - tgen.net["r4"].cmd('vtysh -c "conf t" -c "router bgp 65100" -c "no neighbor 192.168.101.1 shutdown"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.2 shutdown"' + ) + tgen.net["r4"].cmd( + 'vtysh -c "conf t" -c "router bgp 65100" -c "no neighbor 192.168.101.1 shutdown"' + ) # 1.4 wait for peers to establish connection (poll output) for router_num in [1, 4]: - logger.info("Checking BGP summary after disabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after disabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_established.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_established.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=5, wait=1) - assertmsg = "BGP session on r{} did not establish a connection with peer".format(router_num) + assertmsg = ( + "BGP session on r{} did not establish a connection with peer".format( + router_num + ) + ) assert res is None, assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() # end test_bgp_delayopen_without @@ -800,65 +845,107 @@ def test_bgp_delayopen_singular(): pytest.skip(tgen.errors) # part 2: delay 240s r1 <=> no delay r4 - logger.info("Starting test of BGP functionality and behaviour with DelayOpenTimer enabled on one side of the peering") + logger.info( + "Starting test of BGP functionality and behaviour with DelayOpenTimer enabled on one side of the peering" + ) # 2.1 enable peering shutdown logger.info("Enable shutdown of peering between r1 and r4") - tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.2 shutdown"') - tgen.net["r4"].cmd('vtysh -c "conf t" -c "router bgp 65100" -c "neighbor 192.168.101.1 shutdown"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.2 shutdown"' + ) + tgen.net["r4"].cmd( + 'vtysh -c "conf t" -c "router bgp 65100" -c "neighbor 192.168.101.1 shutdown"' + ) # 2.2 wait for peers to shut down (poll output) for router_num in [1, 4]: - logger.info("Checking BGP summary after disabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after disabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_shutdown.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_shutdown.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = 
topotest.run_and_expect(test_func, None, count=3, wait=1) assertmsg = "BGP session on r{} did not shut down peer".format(router_num) assert res is None, assertmsg # 2.3 set delayopen on R1 to 240 logger.info("Setting DelayOpenTime for neighbor r4 to 240 seconds on r1") - tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.2 timers delayopen 240"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.2 timers delayopen 240"' + ) # 2.4 check config (poll output) logger.info("Checking BGP neighbor configuration after setting DelayOpenTime on r1") router = tgen.gears["r1"] reffile = os.path.join(CWD, "r1/bgp_delayopen_neighbor.json") expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show bgp neighbors json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show bgp neighbors json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) assertmsg = "BGP session on r1 failed to set DelayOpenTime for r4" assert res is None, assertmsg # 2.5 disable peering shutdown logger.info("Disable shutdown of peering between r1 and r4") - tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.2 shutdown"') - tgen.net["r4"].cmd('vtysh -c "conf t" -c "router bgp 65100" -c "no neighbor 192.168.101.1 shutdown"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.2 shutdown"' + ) + tgen.net["r4"].cmd( + 'vtysh -c "conf t" -c "router bgp 65100" -c "no neighbor 192.168.101.1 shutdown"' + ) # 2.6 wait for peers to establish connection (poll output) for router_num in [1, 4]: - logger.info("Checking BGP summary after disabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after disabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_established.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_established.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=5, wait=1) - assertmsg = "BGP session on r{} did not establish a connection with peer".format(router_num) + assertmsg = ( + "BGP session on r{} did not establish a connection with peer".format( + router_num + ) + ) assert res is None, assertmsg # 2.7 unset delayopen on R1 logger.info("Disabling DelayOpenTimer for neighbor r4 on r1") - tgen.net["r1"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.2 timers delayopen"') + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.2 timers delayopen"' + ) # 2.8 check config (poll output) - logger.info("Checking BGP neighbor configuration after disabling DelayOpenTimer on r1") - delayopen_cfg = tgen.net["r1"].cmd('vtysh -c "show bgp neighbors json" | grep "DelayOpenTimeMsecs"').rstrip() + logger.info( + "Checking BGP neighbor configuration after disabling DelayOpenTimer on r1" + ) + delayopen_cfg = ( + tgen.net["r1"] + .cmd('vtysh -c "show bgp neighbors json" | grep "DelayOpenTimeMsecs"') + .rstrip() + ) assertmsg = "BGP session on r1 
failed disable DelayOpenTimer for peer r4" assert delayopen_cfg == "", assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() # end test_bgp_delayopen_singular @@ -870,67 +957,119 @@ def test_bgp_delayopen_dual(): pytest.skip(tgen.errors) # part 3: delay 60s R2 <=> delay 30s R5 - logger.info("Starting test of BGP functionality and behaviour with DelayOpenTimer enabled on both sides of the peering with different timer intervals") + logger.info( + "Starting test of BGP functionality and behaviour with DelayOpenTimer enabled on both sides of the peering with different timer intervals" + ) # 3.1 enable peering shutdown logger.info("Enable shutdown of peering between r2 and r5") - tgen.net["r2"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.201.2 shutdown"') - tgen.net["r5"].cmd('vtysh -c "conf t" -c "router bgp 65200" -c "neighbor 192.168.201.1 shutdown"') + tgen.net["r2"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.201.2 shutdown"' + ) + tgen.net["r5"].cmd( + 'vtysh -c "conf t" -c "router bgp 65200" -c "neighbor 192.168.201.1 shutdown"' + ) # 3.2 wait for peers to shut down (pool output) for router_num in [2, 5]: - logger.info("Checking BGP summary after disabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after disabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_shutdown.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_shutdown.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) assertmsg = "BGP session on r{} did not shut down peer".format(router_num) assert res is None, assertmsg # 3.3 set delayopen on R2 to 60s and on R5 to 30s logger.info("Setting DelayOpenTime for neighbor r5 to 60 seconds on r2") - tgen.net["r2"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.201.2 timers delayopen 60"') + tgen.net["r2"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.201.2 timers delayopen 60"' + ) logger.info("Setting DelayOpenTime for neighbor r2 to 30 seconds on r5") - tgen.net["r5"].cmd('vtysh -c "conf t" -c "router bgp 65200" -c "neighbor 192.168.201.1 timers delayopen 30"') + tgen.net["r5"].cmd( + 'vtysh -c "conf t" -c "router bgp 65200" -c "neighbor 192.168.201.1 timers delayopen 30"' + ) # 3.4 check config (poll output) for router_num in [2, 5]: - logger.info("Checking BGP neighbor configuration after setting DelayOpenTime on r{}i".format(router_num)) + logger.info( + "Checking BGP neighbor configuration after setting DelayOpenTime on r{}i".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_neighbor.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_neighbor.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show bgp neighbors json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show bgp neighbors json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) assertmsg = "BGP 
session on r{} failed to set DelayOpenTime".format(router_num) assert res is None, assertmsg ## 3.5 disable peering shutdown logger.info("Disable shutdown of peering between r2 and r5") - tgen.net["r2"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.201.2 shutdown"') - tgen.net["r5"].cmd('vtysh -c "conf t" -c "router bgp 65200" -c "no neighbor 192.168.201.1 shutdown"') + tgen.net["r2"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.201.2 shutdown"' + ) + tgen.net["r5"].cmd( + 'vtysh -c "conf t" -c "router bgp 65200" -c "no neighbor 192.168.201.1 shutdown"' + ) ## 3.6 wait for peers to reach connect or active state (poll output) delay_start = int(time.time()) for router_num in [2, 5]: - logger.info("Checking BGP summary after disabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after disabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_connect.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_connect.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=3, wait=1) - assertmsg = "BGP session on r{} did not enter Connect state with peer".format(router_num) + assertmsg = "BGP session on r{} did not enter Connect state with peer".format( + router_num + ) assert res is None, assertmsg ## 3.7 wait for peers to establish connection (poll output) for router_num in [2, 5]: - logger.info("Checking BGP summary after disabling shutdown of peering on r{}".format(router_num)) + logger.info( + "Checking BGP summary after disabling shutdown of peering on r{}".format( + router_num + ) + ) router = tgen.gears["r{}".format(router_num)] - reffile = os.path.join(CWD, "r{}/bgp_delayopen_summary_established.json".format(router_num)) + reffile = os.path.join( + CWD, "r{}/bgp_delayopen_summary_established.json".format(router_num) + ) expected = json.loads(open(reffile).read()) - test_func = functools.partial(topotest.router_json_cmp, router, "show ip bgp summary json", expected) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) _, res = topotest.run_and_expect(test_func, None, count=35, wait=1) - assertmsg = "BGP session on r{} did not establish a connection with peer".format(router_num) + assertmsg = ( + "BGP session on r{} did not establish a connection with peer".format( + router_num + ) + ) assert res is None, assertmsg delay_stop = int(time.time()) @@ -939,22 +1078,692 @@ def test_bgp_delayopen_dual(): # 3.8 unset delayopen on R2 and R5 logger.info("Disabling DelayOpenTimer for neighbor r5 on r2") - tgen.net["r2"].cmd('vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.201.2 timers delayopen"') + tgen.net["r2"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.201.2 timers delayopen"' + ) logger.info("Disabling DelayOpenTimer for neighbor r2 on r5") - tgen.net["r5"].cmd('vtysh -c "conf t" -c "router bgp 65200" -c "no neighbor 192.168.201.1 timers delayopen"') + tgen.net["r5"].cmd( + 'vtysh -c "conf t" -c "router bgp 65200" -c "no neighbor 192.168.201.1 timers delayopen"' + ) # 3.9 check config 
(poll output) for router_num in [2, 5]: - logger.info("Checking BGP neighbor configuration after disabling DelayOpenTimer on r{}".format(router_num)) - delayopen_cfg = tgen.net["r{}".format(router_num)].cmd('vtysh -c "show bgp neighbors json" | grep "DelayOpenTimeMsecs"').rstrip() - assertmsg = "BGP session on r{} failed disable DelayOpenTimer".format(router_num) + logger.info( + "Checking BGP neighbor configuration after disabling DelayOpenTimer on r{}".format( + router_num + ) + ) + delayopen_cfg = ( + tgen.net["r{}".format(router_num)] + .cmd('vtysh -c "show bgp neighbors json" | grep "DelayOpenTimeMsecs"') + .rstrip() + ) + assertmsg = "BGP session on r{} failed disable DelayOpenTimer".format( + router_num + ) assert delayopen_cfg == "", assertmsg - #tgen.mininet_cli() + # tgen.mininet_cli() # end test_bgp_delayopen_dual +def test_bgp_dampening_setup(): + "BGP route-flap dampening test setup" + + # This test starts four ExaBGP peers, adds them as neighbors to the + # configuration of router r1 and checks if connections get established. + + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting BGP route-flap dampening test setup") + + # Start ExaBGP peers connected to r1 via switch 4 + logger.info("Starting ExaBGP peers") + for peer_num in range(1, 5): + logger.info("Creating named pipe for ExaBGP peer peer{}".format(peer_num)) + fifo_in = "/var/run/exabgp_peer{}.in".format(peer_num) + if os.path.exists(fifo_in): + os.remove(fifo_in) + os.mkfifo(fifo_in, 0o777) + logger.info("Starting ExaBGP on peer peer{}".format(peer_num)) + peer = tgen.gears["peer{}".format(peer_num)] + peer_dir = os.path.join(CWD, "peer{}".format(peer_num)) + env_file = os.path.join(CWD, "exabgp.env") + peer.start(peer_dir, env_file) + + # Add ExaBGP peers to configuration of router r2 + logger.info("Adding ExaBGP peers as neighbors to configuration of router r2") + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.3 remote-as 65403"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.3 route-map testmap-in"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.3 route-map testmap-out"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.4 remote-as 65404"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.4 route-map testmap-in"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.4 route-map testmap-out"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.5 remote-as 65405"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.5 route-map testmap-in"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.5 route-map testmap-out"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.6 remote-as 65406"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.6 route-map testmap-in"' + ) + tgen.net["r1"].cmd( + 'vtysh -c "conf t" -c 
"router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.6 route-map testmap-out"' + ) + + # Check if exabgp peers are up and running + logger.info("Checking for established connections to ExaBGP peers on router r1") + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/bgp_damp_setup.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp summary json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) + assertmsg = ( + "BGP session on r1 did not establish connections with one ore more ExaBGP peers" + ) + assert res is None, assertmsg + + # end test_bgp_dampening_setup + + +def test_bgp_dampening_route_announce(): + "Test of BGP route-flap dampening route announcement" + + # This test checks if the four ExaBGP peers can announce routes to router + # r1 and if these routes get forwarded to router r2. + + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting test of BGP route-flap dampening route announcement") + + # Announce routes on exabgp peers to r2 + logger.info("Announcing routes on ExaBGP peers to r1") + for prefix_iter in range(1, 5): + for peer_num in range(1, 5): + pipe = open("/run/exabgp_peer{}.in".format(peer_num), "w") + with pipe: + pipe.write( + "announce route 192.168.{}{}.0/24 next-hop 192.168.101.{}\n".format( + (peer_num + 2), prefix_iter, (peer_num + 2) + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + + # Check if routes announced by ExaBGP peers are present in RIB of router r1 + logger.info( + "Checking if routes announced by ExaBGP peers are present in RIB of router r1" + ) + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/bgp_damp_announced.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) + assertmsg = ( + "BGP session on router r1 did not receive routes announced by ExaBGP peers" + ) + assert res is None, assertmsg + + # Check if routes announced by ExaBGP peers to router r1 have been forwarded + # and are now present in RIB of router r2 + logger.info( + "Checking if forwarded routes announced by ExaBGP peers are present in RIB of router r2" + ) + router = tgen.gears["r2"] + reffile = os.path.join(CWD, "r2/bgp_damp_announced.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) + assertmsg = "BGP session on router r2 did not receive routes announced by ExaBGP peers forwarded by router r1" + assert res is None, assertmsg + + # end test_bgp_dampening_route_announce + + +def test_bgp_dampening_disabled(): + "Test of BGP route-flapping with dampening disabled" + + # This test verifies that flapped routes do not get withdrawn from the RIB + # of router r1 if dampening is disabled. 
+ + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting test of BGP route-flapping with dampening disabled") + + # Flapping routes on ExaBGP peer peer1 + logger.info( + "Flapping routes on ExaBGP peer peer1 with route-flap dampening disabled" + ) + for _ in range(1, 5): + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer1.in", "w") + with pipe: + pipe.write( + "withdraw route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + sleep(1) # Give the BGP session on router r1 time to process routes + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer1.in", "w") + with pipe: + pipe.write( + "announce route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + + # Verify flapped routes are still present in RIB of router r1 + logger.info( + "Verifying that the flapped routes are still present in RIB of router r1" + ) + router = tgen.gears["r1"] + reffile = os.path.join(CWD, "r1/bgp_damp_announced.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=1) + assertmsg = "BGP session on router r1 removed flapped routes despite route-flap dampening being disabled" + assert res is None, assertmsg + + # end test_bgp_dampening_disabled + + +def test_bgp_dampening_config(): + "Test of BGP route-flap dampening configuration" + + # This test adds peer-group group1 with peers peer1 and peer2 to the + # configuration of router r1, sets up dampening configurations with + # different profiles and verifies the configured dampening parameters. 
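    # For reference, the vtysh arguments used below follow
    #   bgp dampening <half-life> <reuse-threshold> <suppress-threshold> <max-suppress-time>
    # so the peer profile "10 100 300 30" halves a route's penalty every 10
    # minutes, un-suppresses the route once the penalty decays below 100,
    # suppresses it once the penalty exceeds 300, and never suppresses it for
    # more than 30 minutes; the group and global profiles scale the same knobs up.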
+ + tgen = get_topogen() + r_1 = tgen.net["r1"] + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting test of BGP route-flap dampening configuration") + + # Add peer-group group1 with peers peer1 and peer2 + logger.info( + "Creating peer-group group1 and adding ExaBGP peers peer1 and peer2 to it" + ) + r_1.cmd('vtysh -c "conf t" -c "router bgp 65000" -c "neighbor group1 peer-group"') + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.3 peer-group group1"' + ) # peer1 + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "neighbor 192.168.101.4 peer-group group1"' + ) # peer2 + + # Enable different dampening profiles for peer1, peer3, group1 and global + # configuration + logger.info( + "Enabling different dampening profiles for peer1, peer3, group1 and global configuration" + ) + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "bgp dampening 30 300 900 90"' + ) + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor group1 dampening 20 200 600 60"' + ) + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.3 dampening 10 100 300 30"' + ) # peer1 + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "neighbor 192.168.101.5 dampening 10 100 300 30"' + ) # peer3 + + # Verify route-flap dampening configuration + logger.info("Verifying route-flap dampening configuration on router r1") + vtyout = r_1.cmd('vtysh -c "show running-config"') + assertmsg = "BGP Session on r1 does not show enabled global route-flap dampening in running configuration" + assert re.search("bgp dampening 30 300 900 90", vtyout), assertmsg + assertmsg = "BGP Session on r1 does not show route-flap dampening enabled for peer-group group1 in running configuration" + assert re.search("neighbor group1 dampening 20 200 600 60", vtyout), assertmsg + assertmsg = "BGP Session on r1 does not show route-flap dampening enabled for peer peer1 in running configuration" + assert re.search( + "neighbor 192.168.101.3 dampening 10 100 300 30", vtyout + ), assertmsg + assertmsg = "BGP Session on r1 does not show route-flap dampening enabled for peer peer3 in running configuration" + assert re.search( + "neighbor 192.168.101.5 dampening 10 100 300 30", vtyout + ), assertmsg + + # end test_bgp_dampening_config + + +def test_bgp_dampening_profile_peer_over_group(): + "Test of BGP route-flap dampening profile preferences: peer over group" + + # This test verifies that the dampening profile of a peer takes precedence + # over the dampening profile of its peer-group by flapping the peers routes + # until dampened and comparing the reuse times to the one specified in the + # dampening configuration. 
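    # The assertions in this and the next three profile tests tell the applied
    # profile apart by the reuse timer reported in
    # "show ip bgp dampening dampened-paths": after four quick flaps the penalty
    # sits near its ceiling, so the remaining suppression time roughly equals the
    # profile's max-suppress-time, i.e. about 30 minutes for the peer profile
    # (10 100 300 30), about 60 minutes for the group profile (20 200 600 60)
    # and about 1h30 for the global profile (30 300 900 90).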
+ + tgen = get_topogen() + r_1 = tgen.net["r1"] + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info( + "Starting test of BGP route-flap dampening profile preferences: peer over group" + ) + + # Flapping routes on ExaBGP peer peer1 + logger.info( + "Flapping routes on ExaBGP peer peer1 with route-flap dampening enabled" + ) + for _ in range(1, 5): + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer1.in", "w") + with pipe: + pipe.write( + "withdraw route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + sleep(1) # Give the BGP session on router r1 time to process routes + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer1.in", "w") + with pipe: + pipe.write( + "announce route 192.168.3{}.0/24 next-hop 192.168.101.3\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + + # Check damped paths on r1 for routes of peer1 witn peer profile + logger.info( + "Checking if router r1 used the correct dampening profile on routes flapped by ExaBGP peer peer1" + ) + sleep(5) # Wait 5 seconds for paths to show up in dampened-paths list + vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') + routes = re.findall(r"\*d 192\.168\.3\d\.0\/24.*", vtyout) + assertmsg = ( + "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer1" + ) + assert len(routes) == 4, assertmsg + assertmsg = "BGP session on router r1 used wrong dampening profile for a route flapped by ExaBGP peer peer1" + for route in routes: + assert (int(route.split()[3].split(":")[0]) == 0) and ( # hours of reuse time + 35 > int(route.split()[3].split(":")[1]) > 25 + ), assertmsg # minutes of reuse time + + # end test_bgp_dampening_profile_peer_over_group + + +def test_bgp_dampening_profile_group_over_global(): + "Test of BGP route-flap dampening profile preferences: group over global" + + # This test verifies that the dampening profile of a peer-group takes + # precedence over the global dampening profile by flapping the routes of a + # peer-group member until dampened and comparing the reuse times to the one + # specified in the dampening configuration. 
+ + tgen = get_topogen() + r_1 = tgen.net["r1"] + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info( + "Starting test of BGP route-flap dampening profile preferences: group over global" + ) + + # Flapping routes on ExaBGP peer peer2 + logger.info( + "Flapping routes on ExaBGP peer peer2 with route-flap dampening enabled" + ) + for _ in range(1, 5): + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer2.in", "w") + with pipe: + pipe.write( + "withdraw route 192.168.4{}.0/24 next-hop 192.168.101.4\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + sleep(1) # Give the BGP session on router r1 time to process routes + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer2.in", "w") + with pipe: + pipe.write( + "announce route 192.168.4{}.0/24 next-hop 192.168.101.4\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + + # Check damped paths on r1 for routes of peer2 witn group profile + logger.info( + "Checking if router r1 used the correct dampening profile on routes flapped by ExaBGP peer peer2" + ) + sleep(5) # wait 5 seconds for paths to shop up in damp list + vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') + routes = re.findall(r"\*d 192\.168\.4\d\.0\/24.*", vtyout) + assertmsg = ( + "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer2" + ) + assert len(routes) == 4, assertmsg + assertmsg = "BGP session on router r1 used wrong dampening profile for a route flapped by ExaBGP peer peer2" + for route in routes: + assert (int(route.split()[3].split(":")[0]) == 0) and ( # hours of reuse time + 65 > int(route.split()[3].split(":")[1]) > 55 + ), assertmsg # minutes of reuse time + + # end test_bgp_dampening_profile_group_over_global + + +def test_bgp_dampening_profile_peer_over_global(): + "Test of BGP route-flap dampening profile preferences: peer over global" + + # This test verifies that the dampening profile of a peer takes precedence + # over the global dampening profile by flapping the routes of the peer until + # dampened and comparing the reuse times to the one specified in the + # dampening configuration. 
+ + tgen = get_topogen() + r_1 = tgen.net["r1"] + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info( + "Starting test of BGP route-flap dampening profile preferences: peer over global" + ) + + # Flapping routes on ExaBGP peer peer3 + logger.info( + "Flapping routes on ExaBGP peer peer3 with route-flap dampening enabled" + ) + for _ in range(1, 5): + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer3.in", "w") + with pipe: + pipe.write( + "withdraw route 192.168.5{}.0/24 next-hop 192.168.101.5\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + sleep(1) # Give the BGP session on router r1 time to process routes + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer3.in", "w") + with pipe: + pipe.write( + "announce route 192.168.5{}.0/24 next-hop 192.168.101.5\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + + # Check damped paths on r1 for routes of peer3 witn peer profile + logger.info( + "Checking if router r1 used the correct dampening profile on routes flapped by ExaBGP peer peer3" + ) + sleep(5) # wait 5 seconds for paths to shop up in damp list + vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') + routes = re.findall(r"\*d 192\.168\.5\d\.0\/24.*", vtyout) + assertmsg = ( + "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer3" + ) + assert len(routes) == 4, assertmsg + assertmsg = "BGP session on router r1 used wrong dampening profile for a route flapped by ExaBGP peer peer3" + for route in routes: + assert (int(route.split()[3].split(":")[0]) == 0) and ( # hours of reuse time + 35 > int(route.split()[3].split(":")[1]) > 25 + ), assertmsg # minutes of reuse time + + # end test_bgp_dampening_profile_peer_over_global + + +def test_bgp_dampening_profile_global(): + "Test of BGP route-flap dampening global profile" + + # This test verifies the application of the global dampening profile by + # flapping the routes of a peer until dampened and comparing the reuse times + # to the one specified in the dampening configuration. 
+ + tgen = get_topogen() + r_1 = tgen.net["r1"] + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting test of BGP route-flap dampening global profile") + + # Flapping routes on ExaBGP peer peer4 + logger.info( + "Flapping routes on ExaBGP peer peer4 with route-flap dampening enabled" + ) + for _ in range(1, 5): + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer4.in", "w") + with pipe: + pipe.write( + "withdraw route 192.168.6{}.0/24 next-hop 192.168.101.6\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + sleep(1) # Give the BGP session on router r1 time to process routes + for prefix_iter in range(1, 5): + pipe = open("/run/exabgp_peer4.in", "w") + with pipe: + pipe.write( + "announce route 192.168.6{}.0/24 next-hop 192.168.101.6\n".format( + prefix_iter + ) + ) + pipe.close() + sleep(0.1) # ExaBGP API command processing delay + + # Check damped paths on r1 for routes of peer4 witn global profile + logger.info( + "Checking if router r1 used the global dampening profile on routes flapped by ExaBGP peer peer4" + ) + sleep(5) # wait 5 seconds for paths to shop up in damp list + vtyout = r_1.cmd('vtysh -c "show ip bgp dampening dampened-paths"') + routes = re.findall(r"\*d 192\.168\.6\d\.0\/24.*", vtyout) + assertmsg = ( + "BGP session on router r1 did not dampen routes flapped by ExaBGP peer peer4" + ) + assert len(routes) == 4, assertmsg + assertmsg = "BGP session on router r1 did not use the global dampening profile for a route flapped by ExaBGP peer peer4" + for route in routes: + assert (int(route.split()[3].split(":")[0]) == 1) and ( # hours of reuse time + 35 > int(route.split()[3].split(":")[1]) > 25 + ), assertmsg # minutes of reuse time + + # end test_bgp_dampening_profile_global + + +def test_bgp_dampening_withdaw(): + "Test BGP route-flap dampening route withdraw" + + # This test verifies that the withrawl of dampened routes from the RIB of + # router r1 was propagated to router r2. + + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting test of BGP route-flap dampening route withdraw") + + # Check if routes dampened on router r1 have been withdrawn from the RIB on + # router r2 + logger.info( + "Checking if routes dampened on router r1 have been withdrawn of RIB on router r2" + ) + reffile = os.path.join(CWD, "r2/bgp_damp_withdrawn.json") + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, tgen.gears["r2"], "show ip bgp json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=5, wait=1) + assertmsg = "BGP session on router r2 did not receive withdraw of routes dampened on router r1" + assert res is None, assertmsg + + # end test_bgp_dampening_withdaw + + +def test_bgp_dampening_cleanup(): + "BGP route-flap dampening test cleanup" + + # This test cleans up after other tests associated with route-flap dampening + # by disabling all dampening configurations, removing added peers and + # peer-groups from the configuration on router r1, and shutting down ExaBGP + # peers peer1, peer2 and peer3. 
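    # The cleanup below removes the dampening configurations, the four ExaBGP
    # neighbors and peer-group group1 from r1, then stops all four ExaBGP peers
    # (peer1 through peer4) and deletes their named pipes.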
+ + tgen = get_topogen() + r_1 = tgen.net["r1"] + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Starting BGP route-flap dampening test cleanup") + + # Disable all dampening configurations + logger.info("Disabling all dampening configurations") + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no bgp dampening"' + ) + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no neighbor group1 dampening"' + ) + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no neighbor 192.168.101.3 dampening"' + ) + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "address-family ipv4 unicast" -c "no neighbor 192.168.101.5 dampening"' + ) + + # Remove ExaBGP peers from configuration of router r1 + logger.info("Removing ExaBGP peers from configuration of router r1") + for router_num in range(3, 7): + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor 192.168.101.{}"'.format( + router_num + ) + ) + + # Remove peer-group group1 from configuration of router r1 + logger.info("Removing peer-group group1 peers from configuration of router r1") + r_1.cmd( + 'vtysh -c "conf t" -c "router bgp 65000" -c "no neighbor group1 peer-group"' + ) + + # Stop ExaBGP peers and remove associated named pipes + logger.info("Stopping ExaBGP peers and removing associated named pipes") + for peer_num in range(1, 5): + logger.info("Terminating ExaBGP on peer peer{}".format(peer_num)) + peer = tgen.gears["peer{}".format(peer_num)] + logger.info("Removing named pipe of ExaBGP peer peer{}".format(peer_num)) + fifo_in = "/var/run/exabgp_peer{}.in".format(peer_num) + peer.stop() + if os.path.exists(fifo_in): + os.remove(fifo_in) + + # end test_bgp_dampening_cleanup + + +def test_bgp_dampening_aftermath(): + "BGP route-flap dampening aftermath test" + + # This test verifies routers r1 and r2 not being affected by the route-flap + # dampening test series. 
+ + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Check BGP Summary on routers r1 and r2 + for rtr_num in [1, 2]: + logger.info( + "Checking if BGP router on r{} remains unaffected by route-flap dampening tests".format( + rtr_num + ) + ) + router = tgen.gears["r{}".format(rtr_num)] + reffile = os.path.join(CWD, "r{}/show_bgp.json".format(rtr_num)) + expected = json.loads(open(reffile).read()) + test_func = functools.partial( + topotest.router_json_cmp, router, "show ip bgp json", expected + ) + _, res = topotest.run_and_expect(test_func, None, count=10, wait=2) + assertmsg = "BGP routes on router r{} are wrong after route-flap dampening tests".format( + rtr_num + ) + assert res is None, assertmsg + + # end test_bgp_dampening_aftermath + + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py index 097b654e77..3a2d283025 100644 --- a/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py +++ b/tests/topotests/bgp_gr_functionality_topo1/test_bgp_gr_functionality_topo1.py @@ -316,7 +316,9 @@ def test_BGP_GR_TC_46_p1(request): input_dict = { "r1": { "bgp": { - "graceful-restart": {"graceful-restart": True,}, + "graceful-restart": { + "graceful-restart": True, + }, "address_family": { "ipv4": { "unicast": { @@ -1184,8 +1186,8 @@ def test_BGP_GR_TC_53_p1(request): def test_BGP_GR_TC_4_p0(request): """ - Test Objective : Verify that the restarting node sets "R" bit while sending the - BGP open messages after the node restart, only if GR is enabled. + Test Objective : Verify that the restarting node sets "R" bit while sending the + BGP open messages after the node restart, only if GR is enabled. """ tgen = get_topogen() @@ -1368,9 +1370,9 @@ def test_BGP_GR_TC_4_p0(request): def test_BGP_GR_TC_5_1_2_p1(request): """ - Test Objective : Verify if restarting node resets R bit in BGP open message - during normal BGP session flaps as well, even when GR restarting mode is enabled. - Here link flap happen due to interface UP/DOWN. + Test Objective : Verify if restarting node resets R bit in BGP open message + during normal BGP session flaps as well, even when GR restarting mode is enabled. + Here link flap happen due to interface UP/DOWN. """ tgen = get_topogen() @@ -1486,7 +1488,7 @@ def test_BGP_GR_TC_5_1_2_p1(request): shutdown_bringup_interface(tgen, "r2", intf) # Bring up Interface - shutdown_bringup_interface(tgen, dut, intf, ifaceaction=True) + shutdown_bringup_interface(tgen, "r2", intf, ifaceaction=True) for addr_type in ADDR_TYPES: result = verify_graceful_restart( @@ -1773,7 +1775,7 @@ def test_BGP_GR_TC_6_1_2_p1(request): shutdown_bringup_interface(tgen, "r2", intf) # Bring up Interface - shutdown_bringup_interface(tgen, dut, intf, ifaceaction=True) + shutdown_bringup_interface(tgen, "r2", intf, ifaceaction=True) for addr_type in ADDR_TYPES: result = verify_graceful_restart( @@ -1815,8 +1817,8 @@ def test_BGP_GR_TC_6_1_2_p1(request): def test_BGP_GR_TC_8_p1(request): """ - Test Objective : Verify that restarting nodes set "F" bit while sending - the BGP open messages after it restarts, only when BGP GR is enabled. + Test Objective : Verify that restarting nodes set "F" bit while sending + the BGP open messages after it restarts, only when BGP GR is enabled. 
""" tgen = get_topogen() @@ -1959,8 +1961,8 @@ def test_BGP_GR_TC_8_p1(request): def test_BGP_GR_TC_17_p1(request): """ - Test Objective : Verify that only GR helper routers keep the stale - route entries, not any GR disabled router. + Test Objective : Verify that only GR helper routers keep the stale + route entries, not any GR disabled router. """ tgen = get_topogen() @@ -2145,8 +2147,8 @@ def test_BGP_GR_TC_17_p1(request): def test_BGP_GR_TC_19_p1(request): """ - Test Objective : Verify that GR helper routers keeps all the routes received - from restarting node if both the routers are configured as GR restarting node. + Test Objective : Verify that GR helper routers keeps all the routes received + from restarting node if both the routers are configured as GR restarting node. """ tgen = get_topogen() @@ -2325,8 +2327,8 @@ def test_BGP_GR_TC_19_p1(request): def test_BGP_GR_TC_20_p1(request): """ - Test Objective : Verify that GR helper routers delete all the routes - received from a node if both the routers are configured as GR helper node. + Test Objective : Verify that GR helper routers delete all the routes + received from a node if both the routers are configured as GR helper node. """ tgen = get_topogen() tc_name = request.node.name @@ -3090,8 +3092,8 @@ def test_BGP_GR_TC_31_2_p1(request): def test_BGP_GR_TC_9_p1(request): """ - Test Objective : Verify that restarting nodes reset "F" bit while sending - the BGP open messages after it's restarts, when BGP GR is **NOT** enabled. + Test Objective : Verify that restarting nodes reset "F" bit while sending + the BGP open messages after it's restarts, when BGP GR is **NOT** enabled. """ tgen = get_topogen() @@ -3264,8 +3266,8 @@ def test_BGP_GR_TC_9_p1(request): def test_BGP_GR_TC_17_p1(request): """ - Test Objective : Verify that only GR helper routers keep the stale - route entries, not any GR disabled router. + Test Objective : Verify that only GR helper routers keep the stale + route entries, not any GR disabled router. 
""" tgen = get_topogen() @@ -3467,7 +3469,13 @@ def test_BGP_GR_TC_43_p1(request): step("Configure R1 and R2 as GR restarting node in global level") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -3560,7 +3568,13 @@ def test_BGP_GR_TC_43_p1(request): step("Verify on R2 that R1 doesn't advertise any GR capabilities") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart-disable": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart-disable": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -3659,7 +3673,13 @@ def test_BGP_GR_TC_43_p1(request): step("Verify on R2 that R1 advertises GR capabilities as a restarting node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -3779,7 +3799,13 @@ def test_BGP_GR_TC_44_p1(request): step("Verify on R2 that R1 advertises GR capabilities as a helper node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart-helper": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -3849,7 +3875,13 @@ def test_BGP_GR_TC_44_p1(request): start_router_daemons(tgen, "r2", ["bgpd"]) input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart-disable": True,}}} + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart-disable": True, + } + } + } } configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2") @@ -3857,7 +3889,13 @@ def test_BGP_GR_TC_44_p1(request): step("Verify on R2 that R1 doesn't advertise any GR capabilities") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart-disable": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart-disable": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -3941,7 +3979,13 @@ def test_BGP_GR_TC_44_p1(request): step("Verify on R2 that R1 advertises GR capabilities as a helper node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart-helper": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -4108,14 +4152,28 @@ def test_BGP_GR_TC_45_p1(request): start_router_daemons(tgen, "r1", ["bgpd"]) - input_dict = {"r1": {"bgp": {"graceful-restart": {"graceful-restart": False,}}}} + input_dict = { + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart": False, + } + } + } + } configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2") step("Verify on R2 that R1 advertises GR capabilities as a helper node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart-helper": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart-helper": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -4199,14 +4257,28 @@ def test_BGP_GR_TC_45_p1(request): start_router_daemons(tgen, "r2", ["bgpd"]) - input_dict = {"r1": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}} + input_dict = { + "r1": { 
+ "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + } + } configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2") step("Verify on R2 that R1 advertises GR capabilities as a restarting node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -4307,7 +4379,9 @@ def test_BGP_GR_TC_46_p1(request): input_dict = { "r1": { "bgp": { - "graceful-restart": {"graceful-restart": True,}, + "graceful-restart": { + "graceful-restart": True, + }, "address_family": { "ipv4": { "unicast": { @@ -4559,7 +4633,9 @@ def test_BGP_GR_TC_47_p1(request): input_dict = { "r1": { "bgp": { - "graceful-restart": {"graceful-restart": True,}, + "graceful-restart": { + "graceful-restart": True, + }, "address_family": { "ipv4": { "unicast": { @@ -4698,7 +4774,13 @@ def test_BGP_GR_TC_47_p1(request): step("Verify on R2 that R1 still advertises GR capabilities as a restarting node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart": True}}}, } @@ -4814,7 +4896,9 @@ def test_BGP_GR_TC_48_p1(request): input_dict = { "r1": { "bgp": { - "graceful-restart": {"graceful-restart": True,}, + "graceful-restart": { + "graceful-restart": True, + }, "address_family": { "ipv4": { "unicast": { @@ -4960,7 +5044,13 @@ def test_BGP_GR_TC_48_p1(request): step("Verify on R2 that R1 advertises GR capabilities as a restarting node") input_dict = { - "r1": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}, + "r1": { + "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + }, "r2": {"bgp": {"graceful-restart": {"graceful-restart-helper": True}}}, } diff --git a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py index 6926121a6b..2ddeab13f6 100644 --- a/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py +++ b/tests/topotests/bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py @@ -456,7 +456,9 @@ def test_BGP_GR_TC_3_p0(request): input_dict = { "r1": { "bgp": { - "graceful-restart": {"disable-eor": True,}, + "graceful-restart": { + "disable-eor": True, + }, "address_family": { "ipv4": { "unicast": { @@ -2095,7 +2097,10 @@ def test_BGP_GR_chaos_33_p1(request): "ipv4": { "unicast": { "advertise_networks": [ - {"network": "200.0.20.1/32", "no_of_network": 2,} + { + "network": "200.0.20.1/32", + "no_of_network": 2, + } ] } }, @@ -2207,13 +2212,13 @@ def test_BGP_GR_chaos_33_p1(request): else: next_hop_6 = NEXT_HOP_6[1] - result = verify_rib(tgen, addr_type, dut, input_dict_2, next_hop_6, - expected=False) - assert result is not True,\ - "Testcase {} :Failed \n Error {}". 
\ - format(tc_name, result) - logger.info(" Expected behavior: {}".\ - format(result)) + result = verify_rib( + tgen, addr_type, dut, input_dict_2, next_hop_6, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error {}".format( + tc_name, result + ) + logger.info(" Expected behavior: {}".format(result)) logger.info("[Step 4] : Start BGPd daemon on R1 and R4..") @@ -3960,7 +3965,13 @@ def test_BGP_GR_21_p2(request): } } }, - "r2": {"bgp": {"graceful-restart": {"graceful-restart": True,}}}, + "r2": { + "bgp": { + "graceful-restart": { + "graceful-restart": True, + } + } + }, } configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r2") diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py index 40489f438f..31fbdcd4b5 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_1.py @@ -1174,6 +1174,39 @@ def test_large_community_boundary_values(request): ) +def test_large_community_invalid_chars(request): + """ + BGP canonical lcommunities must only be digits + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + input_dict = { + "r4": { + "bgp_community_lists": [ + { + "community_type": "standard", + "action": "permit", + "name": "ANY", + "value": "1:a:2", + "large": True, + } + ] + } + } + + step("Checking boundary value for community 1:a:2") + result = create_bgp_community_lists(tgen, input_dict) + assert result is not True, "Test case {} : Failed \n Error: {}".format( + tc_name, result + ) + + def test_large_community_after_clear_bgp(request): """ Clear BGP neighbor-ship and check if large community and community diff --git a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py index 9c0355a3e9..c2858a4bd0 100644 --- a/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py +++ b/tests/topotests/bgp_large_community/test_bgp_large_community_topo_2.py @@ -2132,7 +2132,11 @@ def test_large_community_lists_with_rmap_match_regex(request): { "action": "permit", "seq_id": "10", - "match": {"large_community_list": {"id": "ALL",},}, + "match": { + "large_community_list": { + "id": "ALL", + }, + }, } ] } @@ -2208,7 +2212,11 @@ def test_large_community_lists_with_rmap_match_regex(request): { "action": "permit", "seq_id": "20", - "match": {"large_community_list": {"id": "EXP_ALL",},}, + "match": { + "large_community_list": { + "id": "EXP_ALL", + }, + }, } ] } diff --git a/tests/topotests/bgp_listen_on_multiple_addresses/bgp_listen_on_multiple_addresses.json b/tests/topotests/bgp_listen_on_multiple_addresses/bgp_listen_on_multiple_addresses.json new file mode 100644 index 0000000000..95de8cc10c --- /dev/null +++ b/tests/topotests/bgp_listen_on_multiple_addresses/bgp_listen_on_multiple_addresses.json @@ -0,0 +1,154 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 24, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:DB8:F::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + 
"r2": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "1000", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "2000", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "2000", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "3000", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + } + } + } + } + } + } + } +} diff --git a/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py new file mode 100755 index 0000000000..d773e87ef6 --- /dev/null +++ b/tests/topotests/bgp_listen_on_multiple_addresses/test_bgp_listen_on_multiple_addresses.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python + +# +# test_bgp_listen_on_multiple_addresses.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2021 by Boeing Defence Australia +# Adriano Marto Reis +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_listen_on_multiple_addresses.py: Test BGP daemon listening for +connections on multiple addresses. + + +------+ +------+ +------+ +------+ + | | | | | | | | + | r1 |--------| r2 |--------| r3 |--------| r4 | + | | | | | | | | + +------+ +------+ +------+ +------+ + + | | | | + | AS 1000 | AS 2000 | AS 3000 | + | | | | + +------------+--------------------------------+-------------+ +""" + +import os +import sys +import json +import pytest + + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.common_config import start_topology +from lib.topotest import router_json_cmp, run_and_expect +from mininet.topo import Topo +from functools import partial + + +LISTEN_ADDRESSES = { + "r1": ["10.0.0.1"], + "r2": ["10.0.0.2", "10.0.1.1"], + "r3": ["10.0.1.2", "10.0.2.1"], + "r4": ["10.0.2.2"], +} + + +# Reads data from JSON File for topology and configuration creation. +jsonFile = "{}/bgp_listen_on_multiple_addresses.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + + +class TemplateTopo(Topo): + "Topology builder." + + def build(self, *_args, **_opts): + "Defines the allocation and relationship between routers and switches." + tgen = get_topogen(self) + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + "Sets up the test environment." + tgen = Topogen(TemplateTopo, mod.__name__) + + # Adds extra parameters to bgpd so they listen for connections on specific + # multiple addresses. + for router_name in tgen.routers().keys(): + tgen.net[router_name].daemons_options["bgpd"] = "-l " + " -l ".join( + LISTEN_ADDRESSES[router_name] + ) + + start_topology(tgen) + build_config_from_json(tgen, topo) + + +def teardown_module(_mod): + "Tears-down the test environment." + tgen = get_topogen() + tgen.stop_topology() + + +def test_peering(): + "Checks if the routers peer-up." + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + _bgp_converge_initial("r1", "10.0.0.2") + _bgp_converge_initial("r2", "10.0.0.1") + _bgp_converge_initial("r2", "10.0.1.2") + _bgp_converge_initial("r3", "10.0.1.1") + _bgp_converge_initial("r3", "10.0.2.2") + _bgp_converge_initial("r4", "10.0.2.1") + + +def test_listening_address(): + """ + Checks if bgpd is only listening on the specified IP addresses. + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for router in tgen.routers().values(): + # bgpd must not be listening on the default address. + output = router.run("netstat -nlt4 | grep 0.0.0.0:179") + assert output == "", "{}: bpgd is listening on 0.0.0.0:179".format(router.name) + + # bgpd must be listening on the specified addresses. + for address in LISTEN_ADDRESSES[router.name]: + output = router.run("netstat -nlt4 | grep {}:179".format(address)) + assert output != "", "{}: bpgd is not listening on {}:179".format( + router.name, address + ) + + +def _bgp_converge_initial(router_name, peer_address, timeout=180): + """ + Waits for the BGP connection between a given router and a given peer + (specified by its IP address) to be established. If the connection is + not established within a given timeout, then an exception is raised. 
+ """ + tgen = get_topogen() + router = tgen.routers()[router_name] + expected = {"ipv4Unicast": {"peers": {peer_address: {"state": "Established"}}}} + + test_func = partial(router_json_cmp, router, "show ip bgp summary json", expected) + _, result = run_and_expect(test_func, None, count=timeout, wait=1) + assert result is None, "{}: Failed to establish connection with {}".format( + router_name, peer_address + ) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_lu_topo1/R1/bgpd.conf b/tests/topotests/bgp_lu_topo1/R1/bgpd.conf new file mode 100644 index 0000000000..1bdb4c7a3e --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R1/bgpd.conf @@ -0,0 +1,21 @@ +! +debug bgp labelpool +debug bgp zebra +! +router bgp 1 + bgp router-id 10.0.0.1 + timers bgp 3 9 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.0.0.2 remote-as 2 + neighbor 10.0.0.2 solo + neighbor 10.0.0.2 timers connect 10 +! + address-family ipv4 unicast + no neighbor 10.0.0.2 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 10.0.0.2 activate + exit-address-family +! diff --git a/tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json new file mode 100644 index 0000000000..29e6c2cbf7 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json @@ -0,0 +1,8 @@ +{ + "Ledger":506, + "InUse":506, + "Requests":0, + "LabelChunks":11, + "Pending":0, + "Reconnects":0 +} diff --git a/tests/topotests/bgp_lu_topo1/R1/zebra.conf b/tests/topotests/bgp_lu_topo1/R1/zebra.conf new file mode 100644 index 0000000000..4f6fee579f --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R1/zebra.conf @@ -0,0 +1,6 @@ +debug zebra events +debug zebra dplane +debug zebra mpls +! +interface R1-eth0 + ip address 10.0.0.1/24 diff --git a/tests/topotests/bgp_lu_topo1/R2/bgpd.conf b/tests/topotests/bgp_lu_topo1/R2/bgpd.conf new file mode 100644 index 0000000000..bac608e1c3 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R2/bgpd.conf @@ -0,0 +1,23 @@ +debug bgp labelpool +debug bgp zebra +! +router bgp 2 + bgp router-id 10.0.0.2 + timers bgp 3 9 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.0.1.3 remote-as 2 + neighbor 10.0.1.3 update-source 10.0.1.2 + neighbor 10.0.1.3 timers connect 10 + neighbor 10.0.0.1 remote-as 1 + neighbor 10.0.0.1 timers connect 10 +! + address-family ipv4 unicast + neighbor 10.0.1.3 activate + no neighbor 10.0.0.1 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 10.0.0.1 activate + exit-address-family +! diff --git a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json new file mode 100644 index 0000000000..29e6c2cbf7 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json @@ -0,0 +1,8 @@ +{ + "Ledger":506, + "InUse":506, + "Requests":0, + "LabelChunks":11, + "Pending":0, + "Reconnects":0 +} diff --git a/tests/topotests/bgp_lu_topo1/R2/zebra.conf b/tests/topotests/bgp_lu_topo1/R2/zebra.conf new file mode 100644 index 0000000000..33ee53efe7 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R2/zebra.conf @@ -0,0 +1,11 @@ +! +debug zebra events +debug zebra dplane +debug zebra mpls +! +interface R2-eth0 + ip address 10.0.0.2/24 +! +interface R2-eth1 + ip address 10.0.1.2/24 +!
\ No newline at end of file diff --git a/tests/topotests/bgp_lu_topo1/R3/bgpd.conf b/tests/topotests/bgp_lu_topo1/R3/bgpd.conf new file mode 100644 index 0000000000..b42df022e0 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R3/bgpd.conf @@ -0,0 +1,523 @@ +log file /tmp/bgpd.log +! +debug bgp updates +! +router bgp 2 + bgp router-id 10.0.1.3 + timers bgp 3 9 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.0.1.2 remote-as 2 + neighbor 10.0.1.2 timers connect 10 + ! + address-family ipv4 unicast + neighbor 10.0.1.2 activate + network 11.0.0.1/32 + network 11.0.0.2/32 + network 11.0.0.3/32 + network 11.0.0.4/32 + network 11.0.0.5/32 + network 11.0.0.6/32 + network 11.0.0.7/32 + network 11.0.0.8/32 + network 11.0.0.9/32 + network 11.0.0.10/32 + network 11.0.0.11/32 + network 11.0.0.12/32 + network 11.0.0.13/32 + network 11.0.0.14/32 + network 11.0.0.15/32 + network 11.0.0.16/32 + network 11.0.0.17/32 + network 11.0.0.18/32 + network 11.0.0.19/32 + network 11.0.0.20/32 + network 11.0.0.21/32 + network 11.0.0.22/32 + network 11.0.0.23/32 + network 11.0.0.24/32 + network 11.0.0.25/32 + network 11.0.0.26/32 + network 11.0.0.27/32 + network 11.0.0.28/32 + network 11.0.0.29/32 + network 11.0.0.30/32 + network 11.0.0.31/32 + network 11.0.0.32/32 + network 11.0.0.33/32 + network 11.0.0.34/32 + network 11.0.0.35/32 + network 11.0.0.36/32 + network 11.0.0.37/32 + network 11.0.0.38/32 + network 11.0.0.39/32 + network 11.0.0.40/32 + network 11.0.0.41/32 + network 11.0.0.42/32 + network 11.0.0.43/32 + network 11.0.0.44/32 + network 11.0.0.45/32 + network 11.0.0.46/32 + network 11.0.0.47/32 + network 11.0.0.48/32 + network 11.0.0.49/32 + network 11.0.0.50/32 + network 11.0.0.51/32 + network 11.0.0.52/32 + network 11.0.0.53/32 + network 11.0.0.54/32 + network 11.0.0.55/32 + network 11.0.0.56/32 + network 11.0.0.57/32 + network 11.0.0.58/32 + network 11.0.0.59/32 + network 11.0.0.60/32 + network 11.0.0.61/32 + network 11.0.0.62/32 + network 11.0.0.63/32 + network 11.0.0.64/32 + network 11.0.0.65/32 + network 11.0.0.66/32 + network 11.0.0.67/32 + network 11.0.0.68/32 + network 11.0.0.69/32 + network 11.0.0.70/32 + network 11.0.0.71/32 + network 11.0.0.72/32 + network 11.0.0.73/32 + network 11.0.0.74/32 + network 11.0.0.75/32 + network 11.0.0.76/32 + network 11.0.0.77/32 + network 11.0.0.78/32 + network 11.0.0.79/32 + network 11.0.0.80/32 + network 11.0.0.81/32 + network 11.0.0.82/32 + network 11.0.0.83/32 + network 11.0.0.84/32 + network 11.0.0.85/32 + network 11.0.0.86/32 + network 11.0.0.87/32 + network 11.0.0.88/32 + network 11.0.0.89/32 + network 11.0.0.90/32 + network 11.0.0.91/32 + network 11.0.0.92/32 + network 11.0.0.93/32 + network 11.0.0.94/32 + network 11.0.0.95/32 + network 11.0.0.96/32 + network 11.0.0.97/32 + network 11.0.0.98/32 + network 11.0.0.99/32 + network 11.0.0.100/32 + network 11.0.0.101/32 + network 11.0.0.102/32 + network 11.0.0.103/32 + network 11.0.0.104/32 + network 11.0.0.105/32 + network 11.0.0.106/32 + network 11.0.0.107/32 + network 11.0.0.108/32 + network 11.0.0.109/32 + network 11.0.0.110/32 + network 11.0.0.111/32 + network 11.0.0.112/32 + network 11.0.0.113/32 + network 11.0.0.114/32 + network 11.0.0.115/32 + network 11.0.0.116/32 + network 11.0.0.117/32 + network 11.0.0.118/32 + network 11.0.0.119/32 + network 11.0.0.120/32 + network 11.0.0.121/32 + network 11.0.0.122/32 + network 11.0.0.123/32 + network 11.0.0.124/32 + network 11.0.0.125/32 + network 11.0.0.126/32 + network 11.0.0.127/32 + network 11.0.0.128/32 + network 11.0.0.129/32 + network 
11.0.0.130/32 + network 11.0.0.131/32 + network 11.0.0.132/32 + network 11.0.0.133/32 + network 11.0.0.134/32 + network 11.0.0.135/32 + network 11.0.0.136/32 + network 11.0.0.137/32 + network 11.0.0.138/32 + network 11.0.0.139/32 + network 11.0.0.140/32 + network 11.0.0.141/32 + network 11.0.0.142/32 + network 11.0.0.143/32 + network 11.0.0.144/32 + network 11.0.0.145/32 + network 11.0.0.146/32 + network 11.0.0.147/32 + network 11.0.0.148/32 + network 11.0.0.149/32 + network 11.0.0.150/32 + network 11.0.0.151/32 + network 11.0.0.152/32 + network 11.0.0.153/32 + network 11.0.0.154/32 + network 11.0.0.155/32 + network 11.0.0.156/32 + network 11.0.0.157/32 + network 11.0.0.158/32 + network 11.0.0.159/32 + network 11.0.0.160/32 + network 11.0.0.161/32 + network 11.0.0.162/32 + network 11.0.0.163/32 + network 11.0.0.164/32 + network 11.0.0.165/32 + network 11.0.0.166/32 + network 11.0.0.167/32 + network 11.0.0.168/32 + network 11.0.0.169/32 + network 11.0.0.170/32 + network 11.0.0.171/32 + network 11.0.0.172/32 + network 11.0.0.173/32 + network 11.0.0.174/32 + network 11.0.0.175/32 + network 11.0.0.176/32 + network 11.0.0.177/32 + network 11.0.0.178/32 + network 11.0.0.179/32 + network 11.0.0.180/32 + network 11.0.0.181/32 + network 11.0.0.182/32 + network 11.0.0.183/32 + network 11.0.0.184/32 + network 11.0.0.185/32 + network 11.0.0.186/32 + network 11.0.0.187/32 + network 11.0.0.188/32 + network 11.0.0.189/32 + network 11.0.0.190/32 + network 11.0.0.191/32 + network 11.0.0.192/32 + network 11.0.0.193/32 + network 11.0.0.194/32 + network 11.0.0.195/32 + network 11.0.0.196/32 + network 11.0.0.197/32 + network 11.0.0.198/32 + network 11.0.0.199/32 + network 11.0.0.200/32 + network 11.0.0.201/32 + network 11.0.0.202/32 + network 11.0.0.203/32 + network 11.0.0.204/32 + network 11.0.0.205/32 + network 11.0.0.206/32 + network 11.0.0.207/32 + network 11.0.0.208/32 + network 11.0.0.209/32 + network 11.0.0.210/32 + network 11.0.0.211/32 + network 11.0.0.212/32 + network 11.0.0.213/32 + network 11.0.0.214/32 + network 11.0.0.215/32 + network 11.0.0.216/32 + network 11.0.0.217/32 + network 11.0.0.218/32 + network 11.0.0.219/32 + network 11.0.0.220/32 + network 11.0.0.221/32 + network 11.0.0.222/32 + network 11.0.0.223/32 + network 11.0.0.224/32 + network 11.0.0.225/32 + network 11.0.0.226/32 + network 11.0.0.227/32 + network 11.0.0.228/32 + network 11.0.0.229/32 + network 11.0.0.230/32 + network 11.0.0.231/32 + network 11.0.0.232/32 + network 11.0.0.233/32 + network 11.0.0.234/32 + network 11.0.0.235/32 + network 11.0.0.236/32 + network 11.0.0.237/32 + network 11.0.0.238/32 + network 11.0.0.239/32 + network 11.0.0.240/32 + network 11.0.0.241/32 + network 11.0.0.242/32 + network 11.0.0.243/32 + network 11.0.0.244/32 + network 11.0.0.245/32 + network 11.0.0.246/32 + network 11.0.0.247/32 + network 11.0.0.248/32 + network 11.0.0.249/32 + network 11.0.0.250/32 + network 11.0.0.251/32 + network 11.0.0.252/32 + network 11.0.0.253/32 + network 11.0.1.1/32 + network 11.0.1.2/32 + network 11.0.1.3/32 + network 11.0.1.4/32 + network 11.0.1.5/32 + network 11.0.1.6/32 + network 11.0.1.7/32 + network 11.0.1.8/32 + network 11.0.1.9/32 + network 11.0.1.10/32 + network 11.0.1.11/32 + network 11.0.1.12/32 + network 11.0.1.13/32 + network 11.0.1.14/32 + network 11.0.1.15/32 + network 11.0.1.16/32 + network 11.0.1.17/32 + network 11.0.1.18/32 + network 11.0.1.19/32 + network 11.0.1.20/32 + network 11.0.1.21/32 + network 11.0.1.22/32 + network 11.0.1.23/32 + network 11.0.1.24/32 + network 11.0.1.25/32 + network 11.0.1.26/32 
+ network 11.0.1.27/32 + network 11.0.1.28/32 + network 11.0.1.29/32 + network 11.0.1.30/32 + network 11.0.1.31/32 + network 11.0.1.32/32 + network 11.0.1.33/32 + network 11.0.1.34/32 + network 11.0.1.35/32 + network 11.0.1.36/32 + network 11.0.1.37/32 + network 11.0.1.38/32 + network 11.0.1.39/32 + network 11.0.1.40/32 + network 11.0.1.41/32 + network 11.0.1.42/32 + network 11.0.1.43/32 + network 11.0.1.44/32 + network 11.0.1.45/32 + network 11.0.1.46/32 + network 11.0.1.47/32 + network 11.0.1.48/32 + network 11.0.1.49/32 + network 11.0.1.50/32 + network 11.0.1.51/32 + network 11.0.1.52/32 + network 11.0.1.53/32 + network 11.0.1.54/32 + network 11.0.1.55/32 + network 11.0.1.56/32 + network 11.0.1.57/32 + network 11.0.1.58/32 + network 11.0.1.59/32 + network 11.0.1.60/32 + network 11.0.1.61/32 + network 11.0.1.62/32 + network 11.0.1.63/32 + network 11.0.1.64/32 + network 11.0.1.65/32 + network 11.0.1.66/32 + network 11.0.1.67/32 + network 11.0.1.68/32 + network 11.0.1.69/32 + network 11.0.1.70/32 + network 11.0.1.71/32 + network 11.0.1.72/32 + network 11.0.1.73/32 + network 11.0.1.74/32 + network 11.0.1.75/32 + network 11.0.1.76/32 + network 11.0.1.77/32 + network 11.0.1.78/32 + network 11.0.1.79/32 + network 11.0.1.80/32 + network 11.0.1.81/32 + network 11.0.1.82/32 + network 11.0.1.83/32 + network 11.0.1.84/32 + network 11.0.1.85/32 + network 11.0.1.86/32 + network 11.0.1.87/32 + network 11.0.1.88/32 + network 11.0.1.89/32 + network 11.0.1.90/32 + network 11.0.1.91/32 + network 11.0.1.92/32 + network 11.0.1.93/32 + network 11.0.1.94/32 + network 11.0.1.95/32 + network 11.0.1.96/32 + network 11.0.1.97/32 + network 11.0.1.98/32 + network 11.0.1.99/32 + network 11.0.1.100/32 + network 11.0.1.101/32 + network 11.0.1.102/32 + network 11.0.1.103/32 + network 11.0.1.104/32 + network 11.0.1.105/32 + network 11.0.1.106/32 + network 11.0.1.107/32 + network 11.0.1.108/32 + network 11.0.1.109/32 + network 11.0.1.110/32 + network 11.0.1.111/32 + network 11.0.1.112/32 + network 11.0.1.113/32 + network 11.0.1.114/32 + network 11.0.1.115/32 + network 11.0.1.116/32 + network 11.0.1.117/32 + network 11.0.1.118/32 + network 11.0.1.119/32 + network 11.0.1.120/32 + network 11.0.1.121/32 + network 11.0.1.122/32 + network 11.0.1.123/32 + network 11.0.1.124/32 + network 11.0.1.125/32 + network 11.0.1.126/32 + network 11.0.1.127/32 + network 11.0.1.128/32 + network 11.0.1.129/32 + network 11.0.1.130/32 + network 11.0.1.131/32 + network 11.0.1.132/32 + network 11.0.1.133/32 + network 11.0.1.134/32 + network 11.0.1.135/32 + network 11.0.1.136/32 + network 11.0.1.137/32 + network 11.0.1.138/32 + network 11.0.1.139/32 + network 11.0.1.140/32 + network 11.0.1.141/32 + network 11.0.1.142/32 + network 11.0.1.143/32 + network 11.0.1.144/32 + network 11.0.1.145/32 + network 11.0.1.146/32 + network 11.0.1.147/32 + network 11.0.1.148/32 + network 11.0.1.149/32 + network 11.0.1.150/32 + network 11.0.1.151/32 + network 11.0.1.152/32 + network 11.0.1.153/32 + network 11.0.1.154/32 + network 11.0.1.155/32 + network 11.0.1.156/32 + network 11.0.1.157/32 + network 11.0.1.158/32 + network 11.0.1.159/32 + network 11.0.1.160/32 + network 11.0.1.161/32 + network 11.0.1.162/32 + network 11.0.1.163/32 + network 11.0.1.164/32 + network 11.0.1.165/32 + network 11.0.1.166/32 + network 11.0.1.167/32 + network 11.0.1.168/32 + network 11.0.1.169/32 + network 11.0.1.170/32 + network 11.0.1.171/32 + network 11.0.1.172/32 + network 11.0.1.173/32 + network 11.0.1.174/32 + network 11.0.1.175/32 + network 11.0.1.176/32 + network 11.0.1.177/32 + 
network 11.0.1.178/32 + network 11.0.1.179/32 + network 11.0.1.180/32 + network 11.0.1.181/32 + network 11.0.1.182/32 + network 11.0.1.183/32 + network 11.0.1.184/32 + network 11.0.1.185/32 + network 11.0.1.186/32 + network 11.0.1.187/32 + network 11.0.1.188/32 + network 11.0.1.189/32 + network 11.0.1.190/32 + network 11.0.1.191/32 + network 11.0.1.192/32 + network 11.0.1.193/32 + network 11.0.1.194/32 + network 11.0.1.195/32 + network 11.0.1.196/32 + network 11.0.1.197/32 + network 11.0.1.198/32 + network 11.0.1.199/32 + network 11.0.1.200/32 + network 11.0.1.201/32 + network 11.0.1.202/32 + network 11.0.1.203/32 + network 11.0.1.204/32 + network 11.0.1.205/32 + network 11.0.1.206/32 + network 11.0.1.207/32 + network 11.0.1.208/32 + network 11.0.1.209/32 + network 11.0.1.210/32 + network 11.0.1.211/32 + network 11.0.1.212/32 + network 11.0.1.213/32 + network 11.0.1.214/32 + network 11.0.1.215/32 + network 11.0.1.216/32 + network 11.0.1.217/32 + network 11.0.1.218/32 + network 11.0.1.219/32 + network 11.0.1.220/32 + network 11.0.1.221/32 + network 11.0.1.222/32 + network 11.0.1.223/32 + network 11.0.1.224/32 + network 11.0.1.225/32 + network 11.0.1.226/32 + network 11.0.1.227/32 + network 11.0.1.228/32 + network 11.0.1.229/32 + network 11.0.1.230/32 + network 11.0.1.231/32 + network 11.0.1.232/32 + network 11.0.1.233/32 + network 11.0.1.234/32 + network 11.0.1.235/32 + network 11.0.1.236/32 + network 11.0.1.237/32 + network 11.0.1.238/32 + network 11.0.1.239/32 + network 11.0.1.240/32 + network 11.0.1.241/32 + network 11.0.1.242/32 + network 11.0.1.243/32 + network 11.0.1.244/32 + network 11.0.1.245/32 + network 11.0.1.246/32 + network 11.0.1.247/32 + network 11.0.1.248/32 + network 11.0.1.249/32 + network 11.0.1.250/32 + network 11.0.1.251/32 + network 11.0.1.252/32 + network 11.0.1.253/32 + exit-address-family + ! +! diff --git a/tests/topotests/bgp_lu_topo1/R3/zebra.conf b/tests/topotests/bgp_lu_topo1/R3/zebra.conf new file mode 100644 index 0000000000..524978bff6 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/R3/zebra.conf @@ -0,0 +1,9 @@ +log file /tmp/zebra.log +! +debug zebra events +debug zebra packet detail +debug zebra mpls +! +interface R3-eth0 + ip address 10.0.1.3/24 +! diff --git a/tests/topotests/bgp_lu_topo1/test_bgp_lu.py b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py new file mode 100644 index 0000000000..61418d7a79 --- /dev/null +++ b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python + +# +# test_bgp_lu.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_lu.py: Test BGP LU label allocation +""" + +import os +import sys +import json +from functools import partial +from time import sleep +import pytest + +# Save the Current Working Directory to find configuration files. 
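The long run of /32 network statements in R3/bgpd.conf above (11.0.0.1-253 plus 11.0.1.1-253, 506 prefixes in total, which is also the Ledger/InUse count expected in the labelpool snapshots) is mechanical. A sketch of how such a block could be generated, illustrative only and not how the file was actually produced:

def bgp_lu_network_stanzas():
    # 2 * 253 = 506 host routes, matching the expected labelpool Ledger/InUse.
    lines = []
    for subnet in (0, 1):
        for host in range(1, 254):
            lines.append(" network 11.0.{}.{}/32".format(subnet, host))
    return lines

assert len(bgp_lu_network_stanzas()) == 506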
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +#Basic scenario for BGP-LU. Nodes are directly connected. +#Node 3 is advertising many routes to 2, which advertises them +#as BGP-LU to 1; this way we get routes with actual labels, as +#opposed to implicit-null routes in the 2-node case. +# +# AS1 BGP-LU AS2 iBGP AS2 +#+-----+ +-----+ +-----+ +#| |.1 .2| |.2 .3| | +#| 1 +----------------+ 2 +-----------------+ 3 | +#| | 10.0.0.0/24 | | 10.0.1.0/24 | | +#+-----+ +-----+ +-----+ + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # This function only purpose is to define allocation and relationship + # between routers, switches and hosts. + # + # + # Create routers + tgen.add_router("R1") + tgen.add_router("R2") + tgen.add_router("R3") + + # R1-R2 + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["R1"]) + switch.add_link(tgen.gears["R2"]) + + # R2-R3 + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["R2"]) + switch.add_link(tgen.gears["R3"]) + + + +def setup_module(mod): + "Sets up the pytest environment" + # This function initiates the topology build with Topogen... + tgen = Topogen(TemplateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + tgen.start_topology() + + # This is a sample of configuration loading. + router_list = tgen.routers() + + # For all registred routers, load the zebra configuration file + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + # After loading the configurations, this function loads configured daemons. + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + +def check_labelpool(router): + json_file = "{}/{}/labelpool.summ.json".format(CWD, router.name) + expected = json.loads(open(json_file).read()) + + test_func = partial(topotest.router_json_cmp, router, "show bgp labelpool summary json", expected) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assertmsg = '"{}" JSON output mismatches - Did not converge'.format(router.name) + assert result is None, assertmsg + +def test_converge_bgplu(): + "Wait for protocol convergence" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + #tgen.mininet_cli(); + r1 = tgen.gears["R1"] + r2 = tgen.gears["R2"] + + check_labelpool(r1) + check_labelpool(r2) + +def test_clear_bgplu(): + "Wait for protocol convergence" + + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + #tgen.mininet_cli(); + r1 = tgen.gears["R1"] + r2 = tgen.gears["R2"] + + r1.vtysh_cmd("clear bgp 10.0.0.2") + check_labelpool(r1) + check_labelpool(r2) + + r2.vtysh_cmd("clear bgp 10.0.1.3") + check_labelpool(r1) + check_labelpool(r2) + + r1.vtysh_cmd("clear bgp 10.0.0.2") + r2.vtysh_cmd("clear bgp 10.0.1.3") + check_labelpool(r1) + check_labelpool(r2) + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py index 9703cf8d57..19a9140c13 100644 --- a/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py +++ b/tests/topotests/bgp_multi_vrf_topo2/test_bgp_multi_vrf_topo2.py @@ -95,7 +95,7 @@ from lib.common_config import ( kill_router_daemons, start_router_daemons, stop_router, - start_router + start_router, ) from lib.topolog import logger @@ -129,7 +129,7 @@ LOOPBACK_2 = { "ipv4": "20.20.20.20/32", "ipv6": "20::20:20/128", "ipv4_mask": "255.255.255.255", - "ipv6_mask": None + "ipv6_mask": None, } MAX_PATHS = 2 @@ -724,16 +724,40 @@ def test_vrf_with_multiple_links_p1(request): "local_as": "200", "vrf": "RED_A", "address_family": { - "ipv4": {"unicast": {"maximum_paths": {"ebgp": MAX_PATHS,}}}, - "ipv6": {"unicast": {"maximum_paths": {"ebgp": MAX_PATHS,}}}, + "ipv4": { + "unicast": { + "maximum_paths": { + "ebgp": MAX_PATHS, + } + } + }, + "ipv6": { + "unicast": { + "maximum_paths": { + "ebgp": MAX_PATHS, + } + } + }, }, }, { "local_as": "200", "vrf": "BLUE_A", "address_family": { - "ipv4": {"unicast": {"maximum_paths": {"ebgp": MAX_PATHS,}}}, - "ipv6": {"unicast": {"maximum_paths": {"ebgp": MAX_PATHS,}}}, + "ipv4": { + "unicast": { + "maximum_paths": { + "ebgp": MAX_PATHS, + } + } + }, + "ipv6": { + "unicast": { + "maximum_paths": { + "ebgp": MAX_PATHS, + } + } + }, }, }, ] @@ -2148,7 +2172,7 @@ def test_restart_bgpd_daemon_p1(request): assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) result = verify_bgp_convergence(tgen, topo) - assert result is True, "Testcase () :Failed\n Error {}". format(tc_name, result) + assert result is True, "Testcase () :Failed\n Error {}".format(tc_name, result) step("Kill BGPd daemon on R1.") kill_router_daemons(tgen, "r1", ["bgpd"]) @@ -2663,6 +2687,9 @@ def test_delete_and_re_add_vrf_p1(request): } } + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {}: Failed\n Error {}".format(tc_name, result) + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_2) assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) diff --git a/tests/topotests/bgp_peer-group/__init__.py b/tests/topotests/bgp_peer-group/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_peer-group/__init__.py diff --git a/tests/topotests/bgp_peer-group/r1/bgpd.conf b/tests/topotests/bgp_peer-group/r1/bgpd.conf new file mode 100644 index 0000000000..19b490a359 --- /dev/null +++ b/tests/topotests/bgp_peer-group/r1/bgpd.conf @@ -0,0 +1,8 @@ +! 
+router bgp 65001 + neighbor PG peer-group + neighbor PG remote-as external + neighbor PG timers 3 10 + neighbor 192.168.255.3 peer-group PG + neighbor r1-eth0 interface peer-group PG +! diff --git a/tests/topotests/bgp_peer-group/r1/zebra.conf b/tests/topotests/bgp_peer-group/r1/zebra.conf new file mode 100644 index 0000000000..e2c399e536 --- /dev/null +++ b/tests/topotests/bgp_peer-group/r1/zebra.conf @@ -0,0 +1,6 @@ +! +interface r1-eth0 + ip address 192.168.255.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_peer-group/r2/bgpd.conf b/tests/topotests/bgp_peer-group/r2/bgpd.conf new file mode 100644 index 0000000000..0880ee9fae --- /dev/null +++ b/tests/topotests/bgp_peer-group/r2/bgpd.conf @@ -0,0 +1,7 @@ +! +router bgp 65002 + neighbor PG peer-group + neighbor PG remote-as external + neighbor PG timers 3 10 + neighbor r2-eth0 interface peer-group PG +! diff --git a/tests/topotests/bgp_peer-group/r2/zebra.conf b/tests/topotests/bgp_peer-group/r2/zebra.conf new file mode 100644 index 0000000000..606c17bec9 --- /dev/null +++ b/tests/topotests/bgp_peer-group/r2/zebra.conf @@ -0,0 +1,6 @@ +! +interface r2-eth0 + ip address 192.168.255.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_peer-group/r3/bgpd.conf b/tests/topotests/bgp_peer-group/r3/bgpd.conf new file mode 100644 index 0000000000..eb2fca15fb --- /dev/null +++ b/tests/topotests/bgp_peer-group/r3/bgpd.conf @@ -0,0 +1,7 @@ +! +router bgp 65003 + neighbor PG peer-group + neighbor PG remote-as external + neighbor PG timers 3 10 + neighbor 192.168.255.1 peer-group PG +! diff --git a/tests/topotests/bgp_peer-group/r3/zebra.conf b/tests/topotests/bgp_peer-group/r3/zebra.conf new file mode 100644 index 0000000000..e9fdfb70c5 --- /dev/null +++ b/tests/topotests/bgp_peer-group/r3/zebra.conf @@ -0,0 +1,6 @@ +! +interface r3-eth0 + ip address 192.168.255.3/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_peer-group/test_bgp_peer-group.py b/tests/topotests/bgp_peer-group/test_bgp_peer-group.py new file mode 100644 index 0000000000..7c7a8b87ed --- /dev/null +++ b/tests/topotests/bgp_peer-group/test_bgp_peer-group.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2021 by +# Donatas Abraitis <donatas.abraitis@gmail.com> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if peer-group works for numbered and unnumbered configurations. 
+""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from mininet.topo import Topo + + +class TemplateTopo(Topo): + def build(self, *_args, **_opts): + tgen = get_topogen(self) + + for routern in range(1, 4): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_peer_group(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_peer_group_configured(): + output = json.loads(tgen.gears["r1"].vtysh_cmd("show ip bgp neighbor json")) + expected = { + "r1-eth0": {"peerGroup": "PG", "bgpState": "Established"}, + "192.168.255.3": {"peerGroup": "PG", "bgpState": "Established"}, + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_peer_group_configured) + success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, 'Failed bgp convergence in "{}"'.format(tgen.gears["r1"]) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py index 6d7131e1e5..ceac84709b 100644 --- a/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py +++ b/tests/topotests/bgp_prefix_sid/test_bgp_prefix_sid.py @@ -101,7 +101,11 @@ def test_r1_receive_and_advertise_prefix_sid_type1(): "prefix": prefix, "advertisedTo": {"10.0.0.101": {}, "10.0.0.102": {}}, "paths": [ - {"valid": True, "remoteLabel": remoteLabel, "labelIndex": labelIndex,} + { + "valid": True, + "remoteLabel": remoteLabel, + "labelIndex": labelIndex, + } ], } return topotest.json_cmp(output, expected) diff --git a/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py b/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py index 3af944473d..3f9009967d 100644 --- a/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py +++ b/tests/topotests/bgp_recursive_route_ebgp_multi_hop/test_bgp_recursive_route_ebgp_multi_hop.py @@ -1042,9 +1042,10 @@ def test_next_hop_with_recursive_lookup_p1(request): next_hop=next_hop, expected=False, ) - assert result is not True, ( - "Testcase {} : Failed \n " - "Route is still present \n Error : {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase {} : Failed \n " "Route is still present \n Error : {}".format( + tc_name, result ) step("Re-apply redistribution on R4.") @@ -1125,9 +1126,10 @@ def test_next_hop_with_recursive_lookup_p1(request): next_hop=next_hop, expected=False, ) - assert result is not True, ( - "Testcase {} : Failed \n " 
- "Route is still present \n Error : {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase {} : Failed \n " "Route is still present \n Error : {}".format( + tc_name, result ) shutdown_bringup_interface(tgen, "r3", intf_r3_r4, True) @@ -1182,9 +1184,10 @@ def test_next_hop_with_recursive_lookup_p1(request): next_hop=next_hop, expected=False, ) - assert result is not True, ( - "Testcase {} : Failed \n " - "Route is still present \n Error : {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase {} : Failed \n " "Route is still present \n Error : {}".format( + tc_name, result ) shutdown_bringup_interface(tgen, "r4", intf_r4_r3, True) @@ -2101,8 +2104,20 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request): "r4": { "bgp": { "address_family": { - "ipv4": {"unicast": {"maximum_paths": {"ebgp": 1,}}}, - "ipv6": {"unicast": {"maximum_paths": {"ebgp": 1,}}}, + "ipv4": { + "unicast": { + "maximum_paths": { + "ebgp": 1, + } + } + }, + "ipv6": { + "unicast": { + "maximum_paths": { + "ebgp": 1, + } + } + }, } } } @@ -2131,8 +2146,20 @@ def test_BGP_active_standby_preemption_and_ecmp_p1(request): "r4": { "bgp": { "address_family": { - "ipv4": {"unicast": {"maximum_paths": {"ebgp": 2,}}}, - "ipv6": {"unicast": {"maximum_paths": {"ebgp": 2,}}}, + "ipv4": { + "unicast": { + "maximum_paths": { + "ebgp": 2, + } + } + }, + "ipv6": { + "unicast": { + "maximum_paths": { + "ebgp": 2, + } + } + }, } } } diff --git a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py index 0fabd90341..0467bf1bfb 100644 --- a/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py +++ b/tests/topotests/bgp_route_aggregation/test_bgp_aggregation.py @@ -415,9 +415,10 @@ def test_route_summarisation_with_summary_only_p1(request): result = verify_rib( tgen, addr_type, "r3", input_static, protocol="bgp", expected=False ) - assert result is not True, ( - "Testcase : Failed \n " - "Routes are still present \n Error: {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase : Failed \n " "Routes are still present \n Error: {}".format( + tc_name, result ) result = verify_rib(tgen, addr_type, "r1", input_static_agg, protocol="bgp") @@ -614,7 +615,9 @@ def test_route_summarisation_with_summary_only_p1(request): addr_type: { "unicast": { "advertise_networks": [ - {"network": NETWORK_4_1[addr_type],} + { + "network": NETWORK_4_1[addr_type], + } ] } } @@ -1014,7 +1017,11 @@ def test_route_summarisation_with_as_set_p1(request): assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result) for addr_type in ADDR_TYPES: - for pfx, seq_id, network, in zip([6, 7], [60, 70], [NETWORK_3_1, NETWORK_4_1]): + for ( + pfx, + seq_id, + network, + ) in zip([6, 7], [60, 70], [NETWORK_3_1, NETWORK_4_1]): prefix_list = { "r1": { "prefix_lists": { diff --git a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py index cf8be5f44f..c75055c26f 100644 --- a/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py +++ b/tests/topotests/bgp_suppress_fib/test_bgp_suppress_fib.py @@ -56,6 +56,7 @@ class TemplateTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r3"]) + def setup_module(mod): tgen = Topogen(TemplateTopo, mod.__name__) tgen.start_topology() @@ -114,6 +115,7 @@ def test_bgp_route(): assertmsg = '"r3" JSON output mismatches' assert result is None, assertmsg + if __name__ == "__main__": args = ["-s"] + 
sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py index 9106c163cd..6e7495d929 100644 --- a/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py +++ b/tests/topotests/bgp_vrf_dynamic_route_leak/test_bgp_vrf_dynamic_route_leak_topo2.py @@ -943,9 +943,10 @@ def test_modify_route_map_match_set_clauses_p1(request): } result = verify_bgp_rib(tgen, addr_type, "r1", input_routes_r1, expected=False) - assert result is not True, ( - "Testcase {} : Failed \n Error : Routes are still " - "present {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase {} : Failed \n Error : Routes are still " "present {}".format( + tc_name, result ) write_test_footer(tc_name) diff --git a/tests/topotests/eigrp-topo1/test_eigrp_topo1.py b/tests/topotests/eigrp-topo1/test_eigrp_topo1.py index 3ce1472ac0..bf94d39a4b 100644 --- a/tests/topotests/eigrp-topo1/test_eigrp_topo1.py +++ b/tests/topotests/eigrp-topo1/test_eigrp_topo1.py @@ -91,7 +91,7 @@ class NetworkTopo(Topo): ## ##################################################### - +@pytest.mark.eigrp def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) diff --git a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py index 265124132f..d8c0cdc2fd 100644 --- a/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py +++ b/tests/topotests/evpn-pim-1/test_evpn_pim_topo1.py @@ -34,6 +34,8 @@ import pytest import json from functools import partial +pytestmark = pytest.mark.pimd + # Save the Current Working Directory to find configuration files. 
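Module-level markers like the pytest.mark.pimd added just above let the topotest suite be filtered by daemon. A minimal illustration, not part of the patch, of how such markers are declared and then selected:

import pytest

# A module can declare one marker or several; "pytest -m pimd" (or
# "pytest -m 'bgpd or ospfd'") then collects only the matching modules.
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]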
CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) diff --git a/tests/topotests/example-test/test_template.py b/tests/topotests/example-test/test_template.py index 4305e0199f..973303b830 100644 --- a/tests/topotests/example-test/test_template.py +++ b/tests/topotests/example-test/test_template.py @@ -44,6 +44,18 @@ from lib.topolog import logger from mininet.topo import Topo +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ + pytest.mark.bgpd, + pytest.mark.ospfd, + pytest.mark.ospf6d +] # multiple markers +""" + + class TemplateTopo(Topo): "Test topology builder" diff --git a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py index f24f463b8a..cd48716905 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py +++ b/tests/topotests/example-topojson-test/test_topo_json_multiple_links/test_example_topojson_multiple_links.py @@ -53,6 +53,19 @@ from lib.topolog import logger from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json + +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ + pytest.mark.bgpd, + pytest.mark.ospfd, + pytest.mark.ospf6d +] # multiple markers +""" + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson_multiple_links.json".format(CWD) try: diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py index 3ae3c9f4fe..0c72e30044 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link/test_example_topojson.py @@ -52,6 +52,19 @@ from lib.topolog import logger from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json + +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ + pytest.mark.bgpd, + pytest.mark.ospfd, + pytest.mark.ospf6d +] # multiple markers +""" + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson.json".format(CWD) diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py index 06fa2f4626..d05ad6db21 100755 --- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py +++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py @@ -54,6 +54,19 @@ from lib.topolog import logger from lib.bgp import verify_bgp_convergence from lib.topojson import build_topo_from_json, build_config_from_json + +#TODO: select markers based on daemons used during test +# pytest module level markers +""" +pytestmark = pytest.mark.bfdd # single marker +pytestmark = [ + pytest.mark.bgpd, + pytest.mark.ospfd, + 
pytest.mark.ospf6d +] # multiple markers +""" + + # Reading the data from JSON File for topology and configuration creation jsonFile = "{}/example_topojson.json".format(CWD) diff --git a/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py b/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py index 67edcae90e..a655b418cf 100755 --- a/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py +++ b/tests/topotests/isis-lfa-topo1/test_isis_lfa_topo1.py @@ -62,7 +62,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -76,8 +76,10 @@ from mininet.topo import Topo # Global multi-dimensional dictionary containing all expected outputs outputs = {} + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -85,59 +87,58 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6', 'rt7']: + for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['rt1'], nodeif="eth-rt2") - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt1") - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt3") - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt2") - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['rt1'], nodeif="eth-rt3") - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt1") - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['rt1'], nodeif="eth-rt4") - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt1") - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['rt1'], nodeif="eth-rt5") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt1") - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['rt1'], nodeif="eth-rt6") - switch.add_link(tgen.gears['rt6'], nodeif="eth-rt1") - switch = tgen.add_switch('s7') - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt7") - switch.add_link(tgen.gears['rt7'], nodeif="eth-rt2") - switch = tgen.add_switch('s8') - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt7") - switch.add_link(tgen.gears['rt7'], nodeif="eth-rt3") - switch = tgen.add_switch('s9') - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt7") - switch.add_link(tgen.gears['rt7'], nodeif="eth-rt4") - switch = tgen.add_switch('s10') - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt7") - switch.add_link(tgen.gears['rt7'], nodeif="eth-rt5") - switch = tgen.add_switch('s11') - switch.add_link(tgen.gears['rt6'], nodeif="eth-rt7") - switch.add_link(tgen.gears['rt7'], nodeif="eth-rt6") + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1") + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt3") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt2") + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1") + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt4") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt1") + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], 
nodeif="eth-rt1") + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt1") + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt2") + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt3") + switch = tgen.add_switch("s9") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt4") + switch = tgen.add_switch("s10") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt5") + switch = tgen.add_switch("s11") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt6") # # Populate multi-dimensional dictionary containing all expected outputs # - files = ["show_ipv6_route.ref", - "show_yang_interface_isis_adjacencies.ref"] - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6', 'rt7']: + files = ["show_ipv6_route.ref", "show_yang_interface_isis_adjacencies.ref"] + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]: outputs[rname] = {} for step in range(1, 13 + 1): outputs[rname][step] = {} for file in files: if step == 1: # Get snapshots relative to the expected initial network convergence - filename = '{}/{}/step{}/{}'.format(CWD, rname, step, file) + filename = "{}/{}/step{}/{}".format(CWD, rname, step, file) outputs[rname][step][file] = open(filename).read() else: if rname != "rt1": @@ -146,20 +147,23 @@ class TemplateTopo(Topo): continue # Get diff relative to the previous step - filename = '{}/{}/step{}/{}.diff'.format(CWD, rname, step, file) + filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file) # Create temporary files in order to apply the diff f_in = tempfile.NamedTemporaryFile() f_in.write(outputs[rname][step - 1][file]) f_in.flush() f_out = tempfile.NamedTemporaryFile() - os.system("patch -s -o %s %s %s" %(f_out.name, f_in.name, filename)) + os.system( + "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename) + ) # Store the updated snapshot and remove the temporary files outputs[rname][step][file] = open(f_out.name).read() f_in.close() f_out.close() +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(TemplateTopo, mod.__name__) @@ -170,16 +174,15 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.iteritems(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_ISIS, - os.path.join(CWD, '{}/isisd.conf'.format(rname)) + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -187,6 +190,7 @@ def teardown_module(mod): # This function tears down the whole topology. tgen.stop_topology() + def router_compare_json_output(rname, command, reference): "Compare router JSON output" @@ -196,12 +200,12 @@ def router_compare_json_output(rname, command, reference): expected = json.loads(reference) # Run test function until we get an result. Wait at most 60 seconds. 
- test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + # # Step 1 # @@ -215,9 +219,13 @@ def test_isis_adjacencies_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6', 'rt7']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - outputs[rname][1]["show_yang_interface_isis_adjacencies.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + outputs[rname][1]["show_yang_interface_isis_adjacencies.ref"], + ) + def test_rib_ipv6_step1(): logger.info("Test (step 1): verify IPv6 RIB") @@ -227,9 +235,11 @@ def test_rib_ipv6_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6', 'rt7']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][1]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][1]["show_ipv6_route.ref"] + ) + # # Step 2 @@ -248,16 +258,28 @@ def test_rib_ipv6_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Disabling LFA protection on all rt1 interfaces') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt2" -c "no isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt3" -c "no isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "no isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "no isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt6" -c "no isis fast-reroute lfa"') + logger.info("Disabling LFA protection on all rt1 interfaces") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "no isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt3" -c "no isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt4" -c "no isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt5" -c "no isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt6" -c "no isis fast-reroute lfa"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][2]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][2]["show_ipv6_route.ref"]) # # Step 3 @@ -276,16 +298,28 @@ def test_rib_ipv6_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Re-enabling LFA protection on all rt1 interfaces') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt3" -c "isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt4" -c "isis fast-reroute lfa"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt5" -c "isis fast-reroute lfa"') - 
tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt6" -c "isis fast-reroute lfa"') + logger.info("Re-enabling LFA protection on all rt1 interfaces") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt3" -c "isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt4" -c "isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt5" -c "isis fast-reroute lfa"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt6" -c "isis fast-reroute lfa"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][3]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][3]["show_ipv6_route.ref"]) # # Step 4 @@ -304,12 +338,16 @@ def test_rib_ipv6_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Disabling LFA load-sharing on rt1') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "fast-reroute load-sharing disable"') + logger.info("Disabling LFA load-sharing on rt1") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "fast-reroute load-sharing disable"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][4]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][4]["show_ipv6_route.ref"]) # # Step 5 @@ -328,12 +366,16 @@ def test_rib_ipv6_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Re-enabling LFA load-sharing on rt1') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no fast-reroute load-sharing disable"') + logger.info("Re-enabling LFA load-sharing on rt1") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no fast-reroute load-sharing disable"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][5]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][5]["show_ipv6_route.ref"]) # # Step 6 @@ -352,12 +394,16 @@ def test_rib_ipv6_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Limiting backup computation to critical priority prefixes only') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "fast-reroute priority-limit critical"') + logger.info("Limiting backup computation to critical priority prefixes only") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "fast-reroute priority-limit critical"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][6]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][6]["show_ipv6_route.ref"]) # # Step 7 @@ -377,13 +423,19 @@ def test_rib_ipv6_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Configuring a prefix priority list') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "spf prefix-priority critical CRITICAL_DESTINATIONS"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "ipv6 access-list CRITICAL_DESTINATIONS seq 5 permit 2001:db8:1000::7/128"') + logger.info("Configuring a prefix priority list") 
+ tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "spf prefix-priority critical CRITICAL_DESTINATIONS"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "ipv6 access-list CRITICAL_DESTINATIONS seq 5 permit 2001:db8:1000::7/128"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][7]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][7]["show_ipv6_route.ref"]) # # Step 8 @@ -402,14 +454,22 @@ def test_rib_ipv6_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Reverting previous changes related to prefix priorities') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "no ipv6 access-list CRITICAL_DESTINATIONS seq 5 permit 2001:db8:1000::7/128"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no fast-reroute priority-limit critical"') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no spf prefix-priority critical CRITICAL_DESTINATIONS"') + logger.info("Reverting previous changes related to prefix priorities") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "no ipv6 access-list CRITICAL_DESTINATIONS seq 5 permit 2001:db8:1000::7/128"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no fast-reroute priority-limit critical"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no spf prefix-priority critical CRITICAL_DESTINATIONS"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][8]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][8]["show_ipv6_route.ref"]) # # Step 9 @@ -428,12 +488,16 @@ def test_rib_ipv6_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Excluding eth-rt6 from LFA computation for eth-rt2\'s failure') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute lfa exclude interface eth-rt6"') + logger.info("Excluding eth-rt6 from LFA computation for eth-rt2's failure") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute lfa exclude interface eth-rt6"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][9]["show_ipv6_route.ref"] + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][9]["show_ipv6_route.ref"]) # # Step 10 @@ -452,12 +516,20 @@ def test_rib_ipv6_step10(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Removing exclusion of eth-rt6 from LFA computation for eth-rt2\'s failure') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "interface eth-rt2" -c "no isis fast-reroute lfa exclude interface eth-rt6"') + logger.info( + "Removing exclusion of eth-rt6 from LFA computation for eth-rt2's failure" + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "no isis fast-reroute lfa exclude interface eth-rt6"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show ipv6 route isis json", + outputs[rname][10]["show_ipv6_route.ref"], + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][10]["show_ipv6_route.ref"]) # # Step 11 @@ -476,12 +548,18 @@ def test_rib_ipv6_step11(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Adding LFA 
tiebreaker: prefer node protecting backup path') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "fast-reroute lfa tiebreaker node-protecting index 10"') + logger.info("Adding LFA tiebreaker: prefer node protecting backup path") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "fast-reroute lfa tiebreaker node-protecting index 10"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show ipv6 route isis json", + outputs[rname][11]["show_ipv6_route.ref"], + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][11]["show_ipv6_route.ref"]) # # Step 12 @@ -500,12 +578,18 @@ def test_rib_ipv6_step12(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Adding LFA tiebreaker: prefer backup path via downstream node') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "fast-reroute lfa tiebreaker downstream index 20"') + logger.info("Adding LFA tiebreaker: prefer backup path via downstream node") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "fast-reroute lfa tiebreaker downstream index 20"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show ipv6 route isis json", + outputs[rname][12]["show_ipv6_route.ref"], + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][12]["show_ipv6_route.ref"]) # # Step 13 @@ -524,22 +608,29 @@ def test_rib_ipv6_step13(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Adding LFA tiebreaker: prefer backup path with lowest total metric') - tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "fast-reroute lfa tiebreaker lowest-backup-metric index 30"') + logger.info("Adding LFA tiebreaker: prefer backup path with lowest total metric") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "fast-reroute lfa tiebreaker lowest-backup-metric index 30"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show ipv6 route isis json", + outputs[rname][13]["show_ipv6_route.ref"], + ) - for rname in ['rt1']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][13]["show_ipv6_route.ref"]) # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-lsp-bits-topo1/__init__.py b/tests/topotests/isis-lsp-bits-topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/__init__.py diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf new file mode 100644 index 0000000000..90764a0d0f --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/isisd.conf @@ -0,0 +1,27 @@ +password 1 +hostname rt1 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis priority 100 +! 
+router isis 1 + net 49.0000.0000.0000.0001.00 + is-type level-1 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref new file mode 100644 index 0000000000..8557f4b010 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ip_route.ref @@ -0,0 +1,89 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..fa76533756 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_ipv6_route.ref @@ -0,0 +1,65 @@ +{ + "::\/0":[ + { + "prefix":"::\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::2\/128":[ + { + "prefix":"2001:db8:1000::2\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::3\/128":[ + { + "prefix":"2001:db8:1000::3\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..26f0dffa7a --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,32 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 9, + "neighbor-priority": 
64, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 9, + "neighbor-priority": 64, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref new file mode 100644 index 0000000000..c826efdcfe --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ip_route.ref @@ -0,0 +1,82 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref new file mode 100644 index 0000000000..a386b45dad --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step2/show_ipv6_route.ref @@ -0,0 +1,59 @@ +{ + "::\/0":[ + { + "prefix":"::\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::2\/128":[ + { + "prefix":"2001:db8:1000::2\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::3\/128":[ + { + "prefix":"2001:db8:1000::3\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref new file mode 100644 index 0000000000..2b281b74fb --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ip_route.ref @@ -0,0 +1,62 @@ +{ + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + 
"installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref new file mode 100644 index 0000000000..4b920eda01 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step3/show_ipv6_route.ref @@ -0,0 +1,40 @@ +{ + "2001:db8:1000::2\/128":[ + { + "prefix":"2001:db8:1000::2\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::3\/128":[ + { + "prefix":"2001:db8:1000::3\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref new file mode 100644 index 0000000000..8557f4b010 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ip_route.ref @@ -0,0 +1,89 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref new file mode 100644 index 0000000000..fa76533756 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/step4/show_ipv6_route.ref @@ -0,0 +1,65 @@ +{ + "::\/0":[ + { + "prefix":"::\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::2\/128":[ 
+ { + "prefix":"2001:db8:1000::2\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::3\/128":[ + { + "prefix":"2001:db8:1000::3\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf new file mode 100644 index 0000000000..9d71d3005f --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt1/zebra.conf @@ -0,0 +1,19 @@ +log file zebra.log +! +hostname rt1 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 1.1.1.1/32 + ipv6 address 2001:db8:1000::1/128 +! +interface eth-sw1 + ip address 10.0.1.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf new file mode 100644 index 0000000000..2bc4c4ad97 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt2/isisd.conf @@ -0,0 +1,35 @@ +hostname rt2 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 +! +interface eth-rt4 + ip router isis 2 + ipv6 router isis 2 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0002.00 + lsp-gen-interval 2 + topology ipv6-unicast +! +router isis 2 + net 49.0002.0000.0000.0002.00 + lsp-gen-interval 2 + topology ipv6-unicast +! 
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref new file mode 100644 index 0000000000..d7e886ce86 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ip_route.ref @@ -0,0 +1,77 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..a92272f6d0 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_ipv6_route.ref @@ -0,0 +1,40 @@ +{ + "2001:db8:1000::1\/128":[ + { + "prefix":"2001:db8:1000::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::3\/128":[ + { + "prefix":"2001:db8:1000::3\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..c70b44e1c9 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt2/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,58 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt4", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0001", + "hold-timer": 9, + "neighbor-priority": 100, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 9, + "neighbor-priority": 64, + "state": "up" + }, + { + "neighbor-sys-type": "level-2", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 9, + "neighbor-priority": 64, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git 
a/tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf new file mode 100644 index 0000000000..234e10efa9 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt2/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt2 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 2.2.2.2/32 + ipv6 address 2001:db8:1000::2/128 +! +interface eth-sw1 + ip address 10.0.1.2/24 +! +interface eth-rt4 + ip address 10.0.2.2/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf new file mode 100644 index 0000000000..9ad97109b5 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt3/isisd.conf @@ -0,0 +1,35 @@ +hostname rt3 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 +! +interface eth-rt5 + ip router isis 2 + ipv6 router isis 2 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0003.00 + lsp-gen-interval 2 + topology ipv6-unicast +! +router isis 2 + net 49.0002.0000.0000.0003.00 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref new file mode 100644 index 0000000000..55f0aedef5 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ip_route.ref @@ -0,0 +1,97 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..5d6dfca76a --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_ipv6_route.ref @@ -0,0 +1,59 @@ +{ + "2001:db8:1000::1\/128":[ + { + "prefix":"2001:db8:1000::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + 
"installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::2\/128":[ + { + "prefix":"2001:db8:1000::2\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2001:db8:1000::5\/128":[ + { + "prefix":"2001:db8:1000::5\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..6950086b1e --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt3/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,51 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt5", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0001", + "hold-timer": 9, + "neighbor-priority": 100, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 9, + "neighbor-priority": 64, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf new file mode 100644 index 0000000000..9a0defd62b --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt3/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt3 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 3.3.3.3/32 + ipv6 address 2001:db8:1000::3/128 +! +interface eth-sw1 + ip address 10.0.1.3/24 +! +interface eth-rt5 + ip address 10.0.4.3/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf new file mode 100644 index 0000000000..e85412a71d --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt4/isisd.conf @@ -0,0 +1,42 @@ +hostname rt4 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 4 + ipv6 router isis 4 + isis passive +! +interface eth-rt2 + ip router isis 2 + ipv6 router isis 2 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt5 + ip router isis 4 + ipv6 router isis 4 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt6 + ip router isis 4 + ipv6 router isis 4 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 2 + net 49.0002.0000.0000.0004.00 + lsp-gen-interval 2 + topology ipv6-unicast +! +router isis 4 + net 49.0004.0000.0000.0004.00 + lsp-gen-interval 2 + topology ipv6-unicast +! 
diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref new file mode 100644 index 0000000000..2cf5c40635 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ip_route.ref @@ -0,0 +1,94 @@ +{ + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5" + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + }, + { + "fib":true, + "ip":"10.0.7.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..cde7287943 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_ipv6_route.ref @@ -0,0 +1,21 @@ +{ + "2001:db8:1000::6\/128":[ + { + "prefix":"2001:db8:1000::6\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..233180ceb8 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt4/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,63 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt2", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt5", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt6", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0006", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf 
b/tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf new file mode 100644 index 0000000000..adcf433249 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt4/zebra.conf @@ -0,0 +1,25 @@ +log file zebra.log +! +hostname rt4 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 4.4.4.4/32 + ipv6 address 2001:db8:1000::4/128 +! +interface eth-rt2 + ip address 10.0.2.4/24 +! +interface eth-rt5 + ip address 10.0.6.4/24 +! +interface eth-rt6 + ip address 10.0.7.4/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf new file mode 100644 index 0000000000..2cab0c88fc --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt5/isisd.conf @@ -0,0 +1,42 @@ +hostname rt5 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 2 + ipv6 router isis 2 + isis passive +! +interface eth-rt3 + ip router isis 2 + ipv6 router isis 2 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt4 + ip router isis 4 + ipv6 router isis 4 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt6 + ip router isis 4 + ipv6 router isis 4 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 2 + net 49.0002.0000.0000.0005.00 + lsp-gen-interval 2 + topology ipv6-unicast +! +router isis 4 + net 49.0004.0000.0000.0005.00 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref new file mode 100644 index 0000000000..fe34b03890 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ip_route.ref @@ -0,0 +1,118 @@ +{ + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceIndex":2, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "table":254, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "weight":1 + } + ] + } + ] +} diff --git 
a/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..7586c73852 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_ipv6_route.ref @@ -0,0 +1,40 @@ +{ + "2001:db8:1000::4\/128":[ + { + "prefix":"2001:db8:1000::4\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "2001:db8:1000::6\/128":[ + { + "prefix":"2001:db8:1000::6\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..f939a6abff --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt5/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,63 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt3", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt4", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt6", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0006", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf new file mode 100644 index 0000000000..0f10ce921f --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt5/zebra.conf @@ -0,0 +1,25 @@ +log file zebra.log +! +hostname rt5 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 5.5.5.5/32 + ipv6 address 2001:db8:1000::5/128 +! +interface eth-rt3 + ip address 10.0.4.5/24 +! +interface eth-rt4 + ip address 10.0.6.5/24 +! +interface eth-rt6 + ip address 10.0.8.5/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf b/tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf new file mode 100644 index 0000000000..249f945e0c --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/isisd.conf @@ -0,0 +1,32 @@ +hostname rt6 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 4 + ipv6 router isis 4 + isis passive +! +interface eth-rt4 + ip router isis 4 + ipv6 router isis 4 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt5 + ip router isis 4 + ipv6 router isis 4 + isis network point-to-point + isis hello-multiplier 3 +! 
+router isis 4 + net 49.0004.0000.0000.0006.00 + is-type level-1 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref new file mode 100644 index 0000000000..2840514e6e --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ip_route.ref @@ -0,0 +1,107 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"eth-rt5" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..278129f481 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_ipv6_route.ref @@ -0,0 +1,46 @@ +{ + "::\/0":[ + { + "prefix":"::\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "2001:db8:1000::4\/128":[ + { + "prefix":"2001:db8:1000::4\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..b4e8c23189 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,44 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt4", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } 
+ } + }, + { + "name": "eth-rt5", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref new file mode 100644 index 0000000000..61efcb9e98 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ip_route.ref @@ -0,0 +1,100 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"eth-rt5" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref new file mode 100644 index 0000000000..2e982e0c37 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step2/show_ipv6_route.ref @@ -0,0 +1,40 @@ +{ + "::\/0":[ + { + "prefix":"::\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "2001:db8:1000::4\/128":[ + { + "prefix":"2001:db8:1000::4\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref new file mode 100644 index 0000000000..8fecf14687 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ip_route.ref @@ -0,0 +1,80 @@ +{ + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + 
"selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"eth-rt5" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref new file mode 100644 index 0000000000..9b53a1d760 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step3/show_ipv6_route.ref @@ -0,0 +1,21 @@ +{ + "2001:db8:1000::4\/128":[ + { + "prefix":"2001:db8:1000::4\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref new file mode 100644 index 0000000000..2840514e6e --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ip_route.ref @@ -0,0 +1,107 @@ +{ + "0.0.0.0\/0":[ + { + "prefix":"0.0.0.0\/0", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceIndex":3, + "interfaceName":"eth-rt5" + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref new file mode 100644 index 0000000000..278129f481 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/step4/show_ipv6_route.ref @@ -0,0 +1,46 @@ +{ + "::\/0":[ + { + "prefix":"::\/0", + "protocol":"isis", + "selected":true, + 
"destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "2001:db8:1000::4\/128":[ + { + "prefix":"2001:db8:1000::4\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf b/tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf new file mode 100644 index 0000000000..6084010a93 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/rt6/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt6 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 6.6.6.6/32 + ipv6 address 2001:db8:1000::6/128 +! +interface eth-rt4 + ip address 10.0.7.6/24 +! +interface eth-rt5 + ip address 10.0.8.6/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py b/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py new file mode 100755 index 0000000000..95a0d87c33 --- /dev/null +++ b/tests/topotests/isis-lsp-bits-topo1/test_isis_lsp_bits_topo1.py @@ -0,0 +1,353 @@ +#!/usr/bin/env python + +# +# test_isis_lsp_bits_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2021 by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_isis_lsp_bits_topo1.py: + + +---------+ + | | + | RT1 | + | 1.1.1.1 | + | L1 | + +---------+ + |eth-sw1 + | + | + | + +---------+ | +---------+ + | | | | | + | RT2 |eth-sw1 | eth-sw1| RT3 | + | 2.2.2.2 +----------+----------+ 3.3.3.3 | + | L1|L2 | 10.0.1.0/24 | L1|L2 | + +---------+ +---------+ + eth-rt4| eth-rt5| + | | + 10.0.2.0/24| |10.0.4.0/24 + | | + eth-rt2| eth-rt3| + +---------+ +---------+ + | | | | + | RT4 | 10.0.6.0/24 | RT5 | + | 4.4.4.4 +---------------------+ 5.5.5.5 | + | L1|L2 |eth-rt5 eth-rt4| L1|L2 | + +---------+ +---------+ + eth-rt6| |eth-rt6 + | | + 10.0.7.0/24| |10.0.8.0/24 + | +---------+ | + | | | | + | | RT6 | | + +----------+ 6.6.6.6 +-----------+ + eth-rt4| L1 |eth-rt5 + +---------+ +""" + +import os +import sys +import pytest +import json +import re +import tempfile +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, '../')) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +# Global multi-dimensional dictionary containing all expected outputs +outputs = {} + +class TemplateTopo(Topo): + "Test topology builder" + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch('s1') + switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1") + switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1") + switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1") + + switch = tgen.add_switch('s2') + switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4") + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2") + + switch = tgen.add_switch('s4') + switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5") + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3") + + switch = tgen.add_switch('s6') + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5") + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4") + + switch = tgen.add_switch('s7') + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6") + switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4") + + switch = tgen.add_switch('s8') + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6") + switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5") + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra and isisd configuration files + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, + os.path.join(CWD, '{}/zebra.conf'.format(rname)) + ) + router.load_config( + TopoRouter.RD_ISIS, + os.path.join(CWD, '{}/isisd.conf'.format(rname)) + ) + + tgen.start_router() + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get a result. Wait at most 60 seconds. 
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + +# +# Step 1 +# +# Test initial network convergence +# Attach-bit defaults to on, so expect a default route pointing to the L1|L2 routers +# +def test_isis_adjacencies_step1(): + logger.info("Test (step 1): check IS-IS adjacencies") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step1/show_yang_interface_isis_adjacencies.ref", + ) + +def test_rib_ipv4_step1(): + logger.info("Test (step 1): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + router_compare_json_output( + rname, "show ip route isis json", "step1/show_ip_route.ref" + ) + +def test_rib_ipv6_step1(): + logger.info("Test (step 1): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + router_compare_json_output( + rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref" + ) + +# +# Step 2 +# +# Action(s): +# -Disable sending the attach-bit on RT2 and RT4 +# +# Expected changes: +# -RT1 should remove the default route pointing to RT2 +# -RT6 should remove the default route pointing to RT4 +# +def test_rib_ipv4_step2(): + logger.info("Test (step 2): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info('Disabling setting the attached-bit on RT2 and RT4') + tgen.net['rt2'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"') + tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit send"') + + for rname in ['rt1', 'rt6']: + router_compare_json_output( + rname, "show ip route isis json", "step2/show_ip_route.ref" + ) + +def test_rib_ipv6_step2(): + logger.info("Test (step 2): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ['rt1', 'rt6']: + router_compare_json_output( + rname, "show ipv6 route isis json", "step2/show_ipv6_route.ref" + ) + +# +# Step 3 +# +# Action(s): +# -Re-enable sending the attach-bit on RT2 and RT4 +# -Disable processing of the attach bit in received LSPs on RT1 and RT6 +# +# Expected changes: +# -RT1 and RT6 should not install a default route +# +def test_rib_ipv4_step3(): + logger.info("Test (step 3): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info('Enable setting the attached-bit on RT2 and RT4') + tgen.net['rt2'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"') + tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit send"') + + logger.info('Disable processing received attached-bit in LSP on RT1 and RT6') + tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router 
isis 1" -c "attached-bit receive ignore"') + tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "attached-bit receive ignore"') + + for rname in ['rt1', 'rt6']: + router_compare_json_output( + rname, "show ip route isis json", "step3/show_ip_route.ref" + ) + +def test_rib_ipv6_step3(): + logger.info("Test (step 3): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ['rt1', 'rt6']: + router_compare_json_output( + rname, "show ipv6 route isis json", "step3/show_ipv6_route.ref" + ) + +# +# Step 4 +# +# Action(s): +# -restore back to default attach-bit config +# +# Expected changes: +# -RT1 and RT6 should add default route +# -no changes on other routers +# +def test_rib_ipv4_step4(): + logger.info("Test (step 4): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info('restore default processing on received attached-bit in LSP on RT1 and RT6') + tgen.net['rt1'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"') + tgen.net['rt6'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no attached-bit receive ignore"') + + for rname in ['rt1', 'rt6']: + router_compare_json_output( + rname, "show ip route isis json", "step4/show_ip_route.ref") + +def test_rib_ipv6_step4(): + logger.info("Test (step 4): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ['rt1', 'rt6']: + router_compare_json_output( + rname, "show ipv6 route isis json", "step4/show_ipv6_route.ref") + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip('Memory leak test/report is disabled') + + tgen.report_memory_leaks() + +if __name__ == '__main__': + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-rlfa-topo1/__init__.py b/tests/topotests/isis-rlfa-topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/__init__.py diff --git a/tests/topotests/isis-rlfa-topo1/rt1/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt1/isisd.conf new file mode 100644 index 0000000000..a80f30dc7b --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/isisd.conf @@ -0,0 +1,39 @@ +password 1 +hostname rt1 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt2 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point + isis fast-reroute lfa + isis fast-reroute remote-lfa tunnel mpls-ldp +! +interface eth-rt3 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point + isis fast-reroute lfa + isis fast-reroute remote-lfa tunnel mpls-ldp +! +ip prefix-list PLIST seq 5 permit 10.0.255.8/32 +! +router isis 1 + net 49.0000.0000.0000.0001.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast + fast-reroute remote-lfa prefix-list PLIST +! 
diff --git a/tests/topotests/isis-rlfa-topo1/rt1/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt1/ldpd.conf new file mode 100644 index 0000000000..f60fdb9742 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt1 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.1 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.1 + ! + interface eth-rt2 + interface eth-rt3 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::1 + ! + interface eth-rt2 + interface eth-rt3 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step1/show_ip_route.ref b/tests/topotests/isis-rlfa-topo1/rt1/step1/show_ip_route.ref new file mode 100644 index 0000000000..680b31eb8d --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step1/show_ip_route.ref @@ -0,0 +1,235 @@ +{ + "10.0.255.2\/32":[ + { + "prefix":"10.0.255.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true, + "labels":"*" + } + ] + } + ], + "10.0.255.3\/32":[ + { + "prefix":"10.0.255.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true, + "labels":"*" + } + ] + } + ], + "10.0.255.4\/32":[ + { + "prefix":"10.0.255.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true, + "labels":"*" + } + ] + } + ], + "10.0.255.5\/32":[ + { + "prefix":"10.0.255.5\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true, + "labels":"*" + } + ] + } + ], + "10.0.255.6\/32":[ + { + "prefix":"10.0.255.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true, + "labels":"*" + } + ] + } + ], + 
"10.0.255.7\/32":[ + { + "prefix":"10.0.255.7\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true, + "labels":"*" + } + ] + } + ], + "10.0.255.8\/32":[ + { + "prefix":"10.0.255.8\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":50, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.255.2", + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, + "onLink":true + }, + { + "fib":true, + "ip":"10.0.255.3", + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, + "onLink":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step1/show_ipv6_route.ref b/tests/topotests/isis-rlfa-topo1/rt1/step1/show_ipv6_route.ref new file mode 100644 index 0000000000..c487d2740d --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step1/show_ipv6_route.ref @@ -0,0 +1,207 @@ +{ + "2001:db8::2\/128":[ + { + "prefix":"2001:db8::2\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true, + "labels":"*" + } + ] + } + ], + "2001:db8::3\/128":[ + { + "prefix":"2001:db8::3\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true, + "labels":"*" + } + ] + } + ], + "2001:db8::4\/128":[ + { + "prefix":"2001:db8::4\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true, + "labels":"*" + } + ] + } + ], + "2001:db8::5\/128":[ + { + "prefix":"2001:db8::5\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true, + "labels":"*" + } + ] + } + ], + "2001:db8::6\/128":[ + { + "prefix":"2001:db8::6\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true, + "labels":"*" + } + ] + } + ], + "2001:db8::7\/128":[ + { + "prefix":"2001:db8::7\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + 
"afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true, + "backupIndex":[ + 0 + ] + } + ], + "backupNexthops":[ + { + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true, + "labels":"*" + } + ] + } + ], + "2001:db8::8\/128":[ + { + "prefix":"2001:db8::8\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":50, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis-rlfa-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..3fe2b798a0 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,44 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt2", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt3", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1-2", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 9, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step10/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step10/show_ip_route.ref.diff new file mode 100644 index 0000000000..ef5707f14a --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step10/show_ip_route.ref.diff @@ -0,0 +1,68 @@ +--- a/rt1/step9/show_ip_route.ref ++++ b/rt1/step10/show_ip_route.ref +@@ -15,7 +15,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -70,7 +83,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -125,7 +151,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step10/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step10/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..acd2ce003a --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step10/show_ipv6_route.ref.diff @@ -0,0 +1,62 @@ +--- a/rt1/step9/show_ipv6_route.ref ++++ b/rt1/step10/show_ipv6_route.ref +@@ -13,7 +13,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -62,7 +73,18 @@ + 
"fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -111,7 +133,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step2/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step2/show_ip_route.ref.diff new file mode 100644 index 0000000000..f7f31ac021 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step2/show_ip_route.ref.diff @@ -0,0 +1,134 @@ +--- a/rt1/step1/show_ip_route.ref ++++ b/rt1/step2/show_ip_route.ref +@@ -15,20 +15,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -49,20 +36,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.2", +- "afi":"ipv4", +- "interfaceName":"eth-rt2", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -83,20 +57,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -117,20 +78,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.2", +- "afi":"ipv4", +- "interfaceName":"eth-rt2", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -151,20 +99,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -185,20 +120,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.2", +- "afi":"ipv4", +- "interfaceName":"eth-rt2", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step2/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step2/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..e980031ad7 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step2/show_ipv6_route.ref.diff @@ -0,0 +1,122 @@ +--- a/rt1/step1/show_ipv6_route.ref ++++ b/rt1/step2/show_ipv6_route.ref +@@ -13,18 +13,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -43,18 +32,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true, +- 
"backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt2", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -73,18 +51,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -103,18 +70,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt2", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -133,18 +89,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -163,18 +108,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt2", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step3/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step3/show_ip_route.ref.diff new file mode 100644 index 0000000000..f3ed764f0b --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step3/show_ip_route.ref.diff @@ -0,0 +1,134 @@ +--- a/rt1/step2/show_ip_route.ref ++++ b/rt1/step3/show_ip_route.ref +@@ -15,7 +15,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -36,7 +49,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.2", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -57,7 +83,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -78,7 +117,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.2", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -99,7 +151,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -120,7 +185,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.2", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } diff 
--git a/tests/topotests/isis-rlfa-topo1/rt1/step3/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step3/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..57b0b1de1a --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step3/show_ipv6_route.ref.diff @@ -0,0 +1,122 @@ +--- a/rt1/step2/show_ipv6_route.ref ++++ b/rt1/step3/show_ipv6_route.ref +@@ -13,7 +13,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -32,7 +43,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -51,7 +73,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -70,7 +103,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -89,7 +133,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -108,7 +163,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "labels":"*" + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step4/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step4/show_ip_route.ref.diff new file mode 100644 index 0000000000..107a0ba2f7 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step4/show_ip_route.ref.diff @@ -0,0 +1,68 @@ +--- a/rt1/step3/show_ip_route.ref ++++ b/rt1/step4/show_ip_route.ref +@@ -15,20 +15,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -83,20 +70,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -151,20 +125,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step4/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step4/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..9cf24082e1 --- /dev/null 
+++ b/tests/topotests/isis-rlfa-topo1/rt1/step4/show_ipv6_route.ref.diff @@ -0,0 +1,62 @@ +--- a/rt1/step3/show_ipv6_route.ref ++++ b/rt1/step4/show_ipv6_route.ref +@@ -13,18 +13,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -73,18 +62,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -133,18 +111,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step5/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step5/show_ip_route.ref.diff new file mode 100644 index 0000000000..09469501f5 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step5/show_ip_route.ref.diff @@ -0,0 +1,68 @@ +--- a/rt1/step4/show_ip_route.ref ++++ b/rt1/step5/show_ip_route.ref +@@ -36,20 +36,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.2", +- "afi":"ipv4", +- "interfaceName":"eth-rt2", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -91,20 +78,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.2", +- "afi":"ipv4", +- "interfaceName":"eth-rt2", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -146,20 +120,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.2", +- "afi":"ipv4", +- "interfaceName":"eth-rt2", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step5/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step5/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..70fb1a65c7 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step5/show_ipv6_route.ref.diff @@ -0,0 +1,62 @@ +--- a/rt1/step4/show_ipv6_route.ref ++++ b/rt1/step5/show_ipv6_route.ref +@@ -32,18 +32,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt2", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -81,18 +70,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt2", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -130,18 +108,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt2", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } diff --git 
a/tests/topotests/isis-rlfa-topo1/rt1/step6/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step6/show_ip_route.ref.diff new file mode 100644 index 0000000000..4e4a5692a4 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step6/show_ip_route.ref.diff @@ -0,0 +1,134 @@ +--- a/rt1/step5/show_ip_route.ref ++++ b/rt1/step6/show_ip_route.ref +@@ -15,7 +15,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -36,7 +49,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.2", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -57,7 +83,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -78,7 +117,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.2", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -99,7 +151,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.3", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } +@@ -120,7 +185,20 @@ + "afi":"ipv4", + "interfaceName":"eth-rt3", + "active":true, +- "onLink":true ++ "onLink":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "ip":"10.0.255.2", ++ "afi":"ipv4", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "onLink":true, ++ "labels":"*" + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step6/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step6/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..c9ebb1e3f5 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step6/show_ipv6_route.ref.diff @@ -0,0 +1,122 @@ +--- a/rt1/step5/show_ipv6_route.ref ++++ b/rt1/step6/show_ipv6_route.ref +@@ -13,7 +13,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -32,7 +43,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -51,7 +73,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -70,7 +103,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- 
"active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -89,7 +133,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt3", ++ "active":true, ++ "labels":"*" + } + ] + } +@@ -108,7 +163,18 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3", +- "active":true ++ "active":true, ++ "backupIndex":[ ++ 0 ++ ] ++ } ++ ], ++ "backupNexthops":[ ++ { ++ "afi":"ipv6", ++ "interfaceName":"eth-rt2", ++ "active":true, ++ "labels":"*" + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step7/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step7/show_ip_route.ref.diff new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step7/show_ip_route.ref.diff diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step7/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step7/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step7/show_ipv6_route.ref.diff diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step8/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step8/show_ip_route.ref.diff new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step8/show_ip_route.ref.diff diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step8/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step8/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step8/show_ipv6_route.ref.diff diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step9/show_ip_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step9/show_ip_route.ref.diff new file mode 100644 index 0000000000..33eb6577bd --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step9/show_ip_route.ref.diff @@ -0,0 +1,68 @@ +--- a/rt1/step8/show_ip_route.ref ++++ b/rt1/step9/show_ip_route.ref +@@ -15,20 +15,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -83,20 +70,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } +@@ -151,20 +125,7 @@ + "afi":"ipv4", + "interfaceName":"eth-rt2", + "active":true, +- "onLink":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "ip":"10.0.255.3", +- "afi":"ipv4", +- "interfaceName":"eth-rt3", +- "active":true, +- "onLink":true, +- "labels":"*" ++ "onLink":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/step9/show_ipv6_route.ref.diff b/tests/topotests/isis-rlfa-topo1/rt1/step9/show_ipv6_route.ref.diff new file mode 100644 index 0000000000..7aaca3354e --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/step9/show_ipv6_route.ref.diff @@ -0,0 +1,62 @@ +--- a/rt1/step8/show_ipv6_route.ref ++++ 
b/rt1/step9/show_ipv6_route.ref +@@ -13,18 +13,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -73,18 +62,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } +@@ -133,18 +111,7 @@ + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2", +- "active":true, +- "backupIndex":[ +- 0 +- ] +- } +- ], +- "backupNexthops":[ +- { +- "afi":"ipv6", +- "interfaceName":"eth-rt3", +- "active":true, +- "labels":"*" ++ "active":true + } + ] + } diff --git a/tests/topotests/isis-rlfa-topo1/rt1/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt1/zebra.conf new file mode 100644 index 0000000000..741fc2d02b --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt1/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt1 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.1/32 + ipv6 address 2001:db8::1/128 +! +interface eth-rt2 + ip address 10.0.255.1/32 +! +interface eth-rt3 + ip address 10.0.255.1/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/rt2/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt2/isisd.conf new file mode 100644 index 0000000000..7b4c6c50b9 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt2/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt2 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0002.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt2/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt2/ldpd.conf new file mode 100644 index 0000000000..0a815ef004 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt2/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt2 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.2 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.2 + ! + interface eth-rt1 + interface eth-rt4 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::2 + ! + interface eth-rt1 + interface eth-rt4 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt2/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt2/zebra.conf new file mode 100644 index 0000000000..657c69bf28 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt2/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt2 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.2/32 + ipv6 address 2001:db8::2/128 +! +interface eth-rt1 + ip address 10.0.255.2/32 +! +interface eth-rt4 + ip address 10.0.255.2/32 +! +ip forwarding +! +line vty +! 
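Unlike the LSP-bits test, which ships a full reference snapshot for every step, the RLFA test stores only step 1 in full and records each later step as a unified diff against the previous step (the rt1/stepN/*.ref.diff files above). One way to rebuild a later-step reference is to write the previous snapshot and the diff to temporary files and apply the standard patch(1) utility; the helper below is a sketch under that assumption, not the literal code from the test script.

# Sketch: reconstruct a step's expected output from the previous step's
# snapshot plus its .ref.diff. Assumes GNU patch(1) is installed; the helper
# name and the use of subprocess are illustrative.
import subprocess
import tempfile


def apply_ref_diff(prev_ref_text, ref_diff_text):
    """Return prev_ref_text with the unified diff ref_diff_text applied."""
    with tempfile.NamedTemporaryFile(mode="w", suffix=".ref") as f_in, \
         tempfile.NamedTemporaryFile(mode="w", suffix=".diff") as f_diff, \
         tempfile.NamedTemporaryFile(mode="r", suffix=".out") as f_out:
        f_in.write(prev_ref_text)
        f_in.flush()
        f_diff.write(ref_diff_text)
        f_diff.flush()
        # patch -s: silent, -o FILE: write the patched result to FILE
        subprocess.check_call(["patch", "-s", "-o", f_out.name, f_in.name, f_diff.name])
        return open(f_out.name).read()


# Example: step2 = apply_ref_diff(step1_ref,
#                                 open("rt1/step2/show_ip_route.ref.diff").read())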
diff --git a/tests/topotests/isis-rlfa-topo1/rt3/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt3/isisd.conf new file mode 100644 index 0000000000..17d58a9d15 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt3/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt3 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0003.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt3/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt3/ldpd.conf new file mode 100644 index 0000000000..40f1f5587a --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt3/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt3 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.3 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.3 + ! + interface eth-rt1 + interface eth-rt5 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::3 + ! + interface eth-rt1 + interface eth-rt5 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt3/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt3/zebra.conf new file mode 100644 index 0000000000..86f5d2871a --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt3/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt3 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.3/32 + ipv6 address 2001:db8::3/128 +! +interface eth-rt1 + ip address 10.0.255.3/32 +! +interface eth-rt5 + ip address 10.0.255.3/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/rt4/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt4/isisd.conf new file mode 100644 index 0000000000..1519fd4c16 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt4/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt4 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt2 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0004.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt4/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt4/ldpd.conf new file mode 100644 index 0000000000..569ecf733e --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt4/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt4 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.4 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.4 + ! 
+ interface eth-rt2 + interface eth-rt6 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::4 + ! + interface eth-rt2 + interface eth-rt6 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt4/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt4/zebra.conf new file mode 100644 index 0000000000..1dd09bf83b --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt4/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt4 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.4/32 + ipv6 address 2001:db8::4/128 +! +interface eth-rt2 + ip address 10.0.255.4/32 +! +interface eth-rt6 + ip address 10.0.255.4/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/rt5/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt5/isisd.conf new file mode 100644 index 0000000000..caf7477073 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt5/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt5 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt3 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface eth-rt7 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0005.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt5/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt5/ldpd.conf new file mode 100644 index 0000000000..519c3d3628 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt5/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt5 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.5 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.5 + ! + interface eth-rt3 + interface eth-rt7 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::5 + ! + interface eth-rt3 + interface eth-rt7 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt5/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt5/zebra.conf new file mode 100644 index 0000000000..7117a2a2e3 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt5/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt5 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.5/32 + ipv6 address 2001:db8::5/128 +! +interface eth-rt3 + ip address 10.0.255.5/32 +! +interface eth-rt7 + ip address 10.0.255.5/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/rt6/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt6/isisd.conf new file mode 100644 index 0000000000..cdf6267236 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt6/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt6 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! 
+interface eth-rt8 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0006.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt6/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt6/ldpd.conf new file mode 100644 index 0000000000..a5b7062bec --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt6/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt6 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.6 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.6 + ! + interface eth-rt4 + interface eth-rt8 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::6 + ! + interface eth-rt4 + interface eth-rt8 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt6/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt6/zebra.conf new file mode 100644 index 0000000000..c6344870b7 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt6/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt6 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.6/32 + ipv6 address 2001:db8::6/128 +! +interface eth-rt4 + ip address 10.0.255.6/32 +! +interface eth-rt8 + ip address 10.0.255.6/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/rt7/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt7/isisd.conf new file mode 100644 index 0000000000..8ab8fcb232 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt7/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt7 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface eth-rt8 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0007.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt7/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt7/ldpd.conf new file mode 100644 index 0000000000..26d428c4c6 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt7/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt7 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.7 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.7 + ! + interface eth-rt5 + interface eth-rt8 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::7 + ! + interface eth-rt5 + interface eth-rt8 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt7/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt7/zebra.conf new file mode 100644 index 0000000000..4c5e0f1126 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt7/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt7 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! 
+interface lo + ip address 10.0.255.7/32 + ipv6 address 2001:db8::7/128 +! +interface eth-rt5 + ip address 10.0.255.7/32 +! +interface eth-rt8 + ip address 10.0.255.7/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/rt8/isisd.conf b/tests/topotests/isis-rlfa-topo1/rt8/isisd.conf new file mode 100644 index 0000000000..abdc6a53a5 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt8/isisd.conf @@ -0,0 +1,32 @@ +password 1 +hostname rt8 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface eth-rt7 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 + isis network point-to-point +! +router isis 1 + net 49.0000.0000.0000.0008.00 + is-type level-1-2 + lsp-gen-interval 2 + topology ipv6-unicast +! diff --git a/tests/topotests/isis-rlfa-topo1/rt8/ldpd.conf b/tests/topotests/isis-rlfa-topo1/rt8/ldpd.conf new file mode 100644 index 0000000000..1629f82de1 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt8/ldpd.conf @@ -0,0 +1,30 @@ +log file ldpd.log +! +hostname rt8 +! +debug mpls ldp messages recv +debug mpls ldp messages sent +debug mpls ldp zebra +! +mpls ldp + router-id 10.0.255.8 + dual-stack transport-connection prefer ipv4 + ! + address-family ipv4 + label local allocate host-routes + discovery targeted-hello accept + discovery transport-address 10.0.255.8 + ! + interface eth-rt6 + interface eth-rt7 + ! + ! + address-family ipv6 + label local allocate host-routes + discovery transport-address 2001:db8::8 + ! + interface eth-rt6 + interface eth-rt7 + ! + ! +! diff --git a/tests/topotests/isis-rlfa-topo1/rt8/zebra.conf b/tests/topotests/isis-rlfa-topo1/rt8/zebra.conf new file mode 100644 index 0000000000..f3f10f649a --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/rt8/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname rt8 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 10.0.255.8/32 + ipv6 address 2001:db8::8/128 +! +interface eth-rt6 + ip address 10.0.255.8/32 +! +interface eth-rt7 + ip address 10.0.255.8/32 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py b/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py new file mode 100755 index 0000000000..bb43e6b114 --- /dev/null +++ b/tests/topotests/isis-rlfa-topo1/test_isis_rlfa_topo1.py @@ -0,0 +1,662 @@ +#!/usr/bin/env python + +# +# test_isis_rlfa_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +test_isis_rlfa_topo1.py: + + +---------+ +---------+ + | | | | + | RT1 | | RT2 | + | +---------------------+ | + | | | | + +---+-----+ +------+--+ + | | + | | + | | + +---+-----+ +------+--+ + | | | | + | RT3 | | RT4 | + | | | | + | | | | + +---+-----+ +------+--+ + | | + | | + | | + +---+-----+ +------+--+ + | | | | + | RT5 | | RT6 | + | | | | + | | | | + +---+-----+ +------+--+ + | | + | | + | | + +---+-----+ +------+--+ + | | | | + | RT7 | | RT8 | + | +---------------------+ | + | | | | + +---------+ +---------+ +""" + +import os +import sys +import pytest +import json +import re +import tempfile +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + +# Global multi-dimensional dictionary containing all expected outputs +outputs = {} + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7", "rt8"]: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1") + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1") + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2") + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3") + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4") + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt7") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt5") + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt8") + switch.add_link(tgen.gears["rt8"], nodeif="eth-rt6") + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt7"], nodeif="eth-rt8") + switch.add_link(tgen.gears["rt8"], nodeif="eth-rt7") + + # + # Populate multi-dimensional dictionary containing all expected outputs + # + files = [ + "show_ip_route.ref", + "show_ipv6_route.ref", + "show_yang_interface_isis_adjacencies.ref", + ] + for rname in ["rt1"]: + outputs[rname] = {} + for step in range(1, 10 + 1): + outputs[rname][step] = {} + for file in files: + if step == 1: + # Get snapshots relative to the expected initial network convergence + filename = "{}/{}/step{}/{}".format(CWD, rname, step, file) + outputs[rname][step][file] = open(filename).read() + else: + if file == "show_yang_interface_isis_adjacencies.ref": + continue + + # Get diff relative to the previous step + filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file) + + # Create temporary files in order to apply the diff + f_in = tempfile.NamedTemporaryFile() + f_in.write(outputs[rname][step - 1][file]) + f_in.flush() + f_out = tempfile.NamedTemporaryFile() + 
os.system( + "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename) + ) + + # Store the updated snapshot and remove the temporary files + outputs[rname][step][file] = open(f_out.name).read() + f_in.close() + f_out.close() + +@pytest.mark.isis +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + expected = json.loads(reference) + + # Run test function until we get an result. Wait at most 60 seconds. + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +# +# Step 1 +# +# Test initial network convergence +# +def test_isis_adjacencies_step1(): + logger.info("Test (step 1): check IS-IS adjacencies") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + outputs[rname][1]["show_yang_interface_isis_adjacencies.ref"], + ) + + +def test_rib_ipv4_step1(): + logger.info("Test (step 1): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][1]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step1(): + logger.info("Test (step 1): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][1]["show_ipv6_route.ref"] + ) + + +# +# Step 2 +# +# Action(s): +# -Configure rt8 (rt1's PQ router) to not accept targeted hello messages +# +# Expected changes: +# -All rt1 backup routes should be uninstalled +# +def test_rib_ipv4_step2(): + logger.info("Test (step 2): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Configuring rt8 to not accept targeted hello messages") + tgen.net["rt8"].cmd( + 'vtysh -c "conf t" -c "mpls ldp" -c "address-family ipv4" -c "no discovery targeted-hello accept"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][2]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step2(): + logger.info("Test (step 
2): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][2]["show_ipv6_route.ref"] + ) + + +# +# Step 3 +# +# Action(s): +# -Configure rt8 (rt1's PQ router) to accept targeted hello messages +# +# Expected changes: +# -All rt1 previously uninstalled backup routes should be reinstalled +# +def test_rib_ipv4_step3(): + logger.info("Test (step 3): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Configuring rt8 to accept targeted hello messages") + tgen.net["rt8"].cmd( + 'vtysh -c "conf t" -c "mpls ldp" -c "address-family ipv4" -c "discovery targeted-hello accept"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][3]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step3(): + logger.info("Test (step 3): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][3]["show_ipv6_route.ref"] + ) + + +# +# Step 4 +# +# Action(s): +# -Disable RLFA on rt1's eth-rt2 interface +# +# Expected changes: +# -All non-ECMP routes whose primary nexthop is eth-rt2 should lose their backup nexthops +# +def test_rib_ipv4_step4(): + logger.info("Test (step 4): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Disabling RLFA on rt1's eth-rt2 interface") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "no isis fast-reroute remote-lfa tunnel mpls-ldp"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][4]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step4(): + logger.info("Test (step 4): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][4]["show_ipv6_route.ref"] + ) + + +# +# Step 5 +# +# Action(s): +# -Disable RLFA on rt1's eth-rt3 interface +# +# Expected changes: +# -All non-ECMP routes whose primary nexthop is eth-rt3 should lose their backup nexthops +# +def test_rib_ipv4_step5(): + logger.info("Test (step 5): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Disabling RLFA on rt1's eth-rt3 interface") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt3" -c "no isis fast-reroute remote-lfa tunnel mpls-ldp"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][5]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step5(): + logger.info("Test (step 5): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", 
outputs[rname][5]["show_ipv6_route.ref"] + ) + + +# +# Step 6 +# +# Action(s): +# -Re-enable RLFA on rt1's eth-rt2 and eth-rt3 interfaces +# +# Expected changes: +# -Revert changes from the previous two steps (reinstall all backup routes) +# +def test_rib_ipv4_step6(): + logger.info("Test (step 6): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Re-enabling RLFA on rt1's eth-rt2 and eth-rt3 interfaces") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute remote-lfa tunnel mpls-ldp"' + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt3" -c "isis fast-reroute remote-lfa tunnel mpls-ldp"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][6]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step6(): + logger.info("Test (step 6): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][6]["show_ipv6_route.ref"] + ) + + +# +# Step 7 +# +# Action(s): +# -Configure a PQ node prefix-list filter +# +# Expected changes: +# -All backup routes should be uninstalled +# +def test_rib_ipv4_step7(): + logger.info("Test (step 7): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Configuring a PQ node prefix-list filter") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "fast-reroute remote-lfa prefix-list PLIST"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][7]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step7(): + logger.info("Test (step 7): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][7]["show_ipv6_route.ref"] + ) + + +# +# Step 8 +# +# Action(s): +# -Configure a prefix-list allowing rt8 as a PQ node +# +# Expected changes: +# -All backup routes should be installed again +# +def test_rib_ipv4_step8(): + logger.info("Test (step 8): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Configuring a prefix-list allowing rt8 as a PQ node") + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "ip prefix-list PLIST seq 5 permit 10.0.255.8/32"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][8]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step8(): + logger.info("Test (step 8): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][8]["show_ipv6_route.ref"] + ) + + +# +# Step 9 +# +# Action(s): +# -Change the maximum metric up to the PQ node to 30 on the eth-rt2 interface +# +# Expected changes: +# -All non-ECMP routes whose primary nexthop is eth-rt2 should lose 
their backup nexthops +# +def test_rib_ipv4_step9(): + logger.info("Test (step 9): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info( + "Changing the maximum metric up to the PQ node to 30 on the eth-rt2 interface" + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute remote-lfa maximum-metric 30"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][9]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step9(): + logger.info("Test (step 9): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][9]["show_ipv6_route.ref"] + ) + + +# +# Step 10 +# +# Action(s): +# -Change the maximum metric up to the PQ node to 40 on the eth-rt2 interface +# +# Expected changes: +# -All non-ECMP routes whose primary nexthop is eth-rt2 should recover their backup nexthops +# +def test_rib_ipv4_step10(): + logger.info("Test (step 10): verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info( + "Changing the maximum metric up to the PQ node to 40 on the eth-rt2 interface" + ) + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "interface eth-rt2" -c "isis fast-reroute remote-lfa maximum-metric 40"' + ) + + for rname in ["rt1"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][10]["show_ip_route.ref"] + ) + + +def test_rib_ipv6_step10(): + logger.info("Test (step 10): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show ipv6 route isis json", + outputs[rname][10]["show_ipv6_route.ref"], + ) + + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-sr-te-topo1/dst/zebra.conf b/tests/topotests/isis-sr-te-topo1/dst/zebra.conf new file mode 100644 index 0000000000..e873ac8a5c --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/dst/zebra.conf @@ -0,0 +1,19 @@ +log file zebra.log +! +hostname dst +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 9.9.9.2/32 + ipv6 address 2001:db8:1066::2/128 +! +interface eth-rt6 + ip address 10.0.11.2/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-sr-te-topo1/rt1/bgpd.conf b/tests/topotests/isis-sr-te-topo1/rt1/bgpd.conf new file mode 100644 index 0000000000..efc03701b5 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/bgpd.conf @@ -0,0 +1,16 @@ +log file bgpd.log +! +router bgp 1 + bgp router-id 1.1.1.1 + neighbor 6.6.6.6 remote-as 1 + neighbor 6.6.6.6 update-source lo + ! + address-family ipv4 unicast + redistribute static + neighbor 6.6.6.6 next-hop-self + neighbor 6.6.6.6 route-map SET_SR_POLICY in + exit-address-family +! 
+route-map SET_SR_POLICY permit 10 + set sr-te color 1 +! diff --git a/tests/topotests/isis-sr-te-topo1/rt1/isisd.conf b/tests/topotests/isis-sr-te-topo1/rt1/isisd.conf new file mode 100644 index 0000000000..70ae1b07f5 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/isisd.conf @@ -0,0 +1,30 @@ +password 1 +hostname rt1 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0001.00 + is-type level-1 + topology ipv6-unicast + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 1.1.1.1/32 index 10 + segment-routing prefix 2001:db8:1000::1/128 index 11 +! diff --git a/tests/topotests/isis-sr-te-topo1/rt1/pathd.conf b/tests/topotests/isis-sr-te-topo1/rt1/pathd.conf new file mode 100644 index 0000000000..911971496e --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/pathd.conf @@ -0,0 +1,21 @@ +log file pathd.log +! +hostname rt1 +! +segment-routing + traffic-eng + segment-list default + index 10 mpls label 16050 + index 20 mpls label 16060 + ! + segment-list test + index 10 mpls label 16020 + index 20 mpls label 16040 + index 30 mpls label 16060 + ! + policy color 1 endpoint 6.6.6.6 + name default + binding-sid 1111 + ! + ! +!
\ No newline at end of file diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step1/show_mpls_table_with_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt1/step1/show_mpls_table_with_candidate.ref new file mode 100644 index 0000000000..d4b27d157d --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step1/show_mpls_table_with_candidate.ref @@ -0,0 +1,91 @@ +{ + "1111":{ + "inLabel":1111, + "installed":true, + "nexthops":[ + { + "type":"SR-TE", + "outLabel":16050, + "outLabelStack":[ + 16050, + 16060 + ], + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + } + ] + }, + "16020":{ + "inLabel":16020, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16020, + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + }, + "16030":{ + "inLabel":16030, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16030, + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + } + ] + }, + "16040":{ + "inLabel":16040, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16040, + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + }, + "16050":{ + "inLabel":16050, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16050, + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + } + ] + }, + "16060":{ + "inLabel":16060, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16060, + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + }, + { + "type":"SR (IS-IS)", + "outLabel":16060, + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step1/show_mpls_table_without_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt1/step1/show_mpls_table_without_candidate.ref new file mode 100644 index 0000000000..5fe58d0824 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step1/show_mpls_table_without_candidate.ref @@ -0,0 +1,74 @@ +{ + "16020":{ + "inLabel":16020, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16020, + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + }, + "16030":{ + "inLabel":16030, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16030, + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + } + ] + }, + "16040":{ + "inLabel":16040, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16040, + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + }, + "16050":{ + "inLabel":16050, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16050, + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + } + ] + }, + "16060":{ + "inLabel":16060, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16060, + "distance":150, + "installed":true, + "nexthop":"10.0.1.3" + }, + { + "type":"SR (IS-IS)", + "outLabel":16060, + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step2/show_operational_data.ref b/tests/topotests/isis-sr-te-topo1/rt1/step2/show_operational_data.ref new file mode 100644 index 0000000000..4ef8d946f2 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step2/show_operational_data.ref @@ -0,0 +1,13 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "6.6.6.6", + "is-operational": false + } + ] + } + } +} diff --git 
a/tests/topotests/isis-sr-te-topo1/rt1/step2/show_operational_data_with_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt1/step2/show_operational_data_with_candidate.ref new file mode 100644 index 0000000000..9b28f6a42b --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step2/show_operational_data_with_candidate.ref @@ -0,0 +1,20 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "6.6.6.6", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "discriminator": "*", + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step3/show_operational_data_with_single_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt1/step3/show_operational_data_with_single_candidate.ref new file mode 100644 index 0000000000..9b28f6a42b --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step3/show_operational_data_with_single_candidate.ref @@ -0,0 +1,20 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "6.6.6.6", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "discriminator": "*", + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step3/show_operational_data_with_two_candidates.ref b/tests/topotests/isis-sr-te-topo1/rt1/step3/show_operational_data_with_two_candidates.ref new file mode 100644 index 0000000000..249117198a --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step3/show_operational_data_with_two_candidates.ref @@ -0,0 +1,25 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "6.6.6.6", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "discriminator": "*", + "is-best-candidate-path": false + }, + { + "preference": 200, + "discriminator": "*", + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table.ref b/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table.ref new file mode 100644 index 0000000000..21f71f1254 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table.ref @@ -0,0 +1,20 @@ +{ + "1111":{ + "inLabel":1111, + "installed":true, + "nexthops":[ + { + "type":"SR-TE", + "outLabel":16020, + "outLabelStack":[ + 16020, + 16040, + 16060 + ], + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table_add_segment.ref b/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table_add_segment.ref new file mode 100644 index 0000000000..3635c89efb --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table_add_segment.ref @@ -0,0 +1,21 @@ +{ + "1111":{ + "inLabel":1111, + "installed":true, + "nexthops":[ + { + "type":"SR-TE", + "outLabel":16020, + "outLabelStack":[ + 16020, + 16040, + 16050, + 16060 + ], + "distance":150, + "installed":true, + "nexthop":"10.0.1.2" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table_change_segment.ref b/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table_change_segment.ref new file mode 100644 index 0000000000..5712d210d4 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step4/show_mpls_table_change_segment.ref @@ -0,0 +1,21 @@ +{ + "1111":{ + "inLabel":1111, + "installed":true, + "nexthops":[ + { + "type":"SR-TE", + "outLabel":16020, + "outLabelStack":[ + 16020, + 16040, + 16030, + 16060 + ], + "distance":150, + 
"installed":true, + "nexthop":"10.0.1.2" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step5/show_ip_route_bgp_active_srte.ref b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_ip_route_bgp_active_srte.ref new file mode 100644 index 0000000000..5a76246e50 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_ip_route_bgp_active_srte.ref @@ -0,0 +1,29 @@ +{ + "9.9.9.2\/32":[ + { + "prefix":"9.9.9.2\/32", + "protocol":"bgp", + "installed":true, + "nexthops":[ + { + "ip":"6.6.6.6", + "afi":"ipv4", + "active":true, + "recursive":true, + "srteColor":1 + }, + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true, + "labels":[ + 16050, + 16060 + ] + } + ] + } + ] +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step5/show_ip_route_bgp_inactive_srte.ref b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_ip_route_bgp_inactive_srte.ref new file mode 100644 index 0000000000..09d5958305 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_ip_route_bgp_inactive_srte.ref @@ -0,0 +1,38 @@ +{ + "9.9.9.2\/32":[ + { + "prefix":"9.9.9.2\/32", + "protocol":"bgp", + "installed":true, + "nexthops":[ + { + "ip":"6.6.6.6", + "afi":"ipv4", + "active":true, + "recursive":true, + "srteColor":1 + }, + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true, + "labels":[ + 16060 + ] + }, + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true, + "labels":[ + 16060 + ] + } + ] + } + ] +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step5/show_operational_data_active.ref b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_operational_data_active.ref new file mode 100644 index 0000000000..e26039b835 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_operational_data_active.ref @@ -0,0 +1,20 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "6.6.6.6", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "discriminator": "*", + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/step5/show_operational_data_inactive.ref b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_operational_data_inactive.ref new file mode 100644 index 0000000000..01505c0318 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/step5/show_operational_data_inactive.ref @@ -0,0 +1,20 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "6.6.6.6", + "is-operational": false, + "candidate-path": [ + { + "preference": 100, + "discriminator": "*", + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt1/zebra.conf b/tests/topotests/isis-sr-te-topo1/rt1/zebra.conf new file mode 100644 index 0000000000..9d71d3005f --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt1/zebra.conf @@ -0,0 +1,19 @@ +log file zebra.log +! +hostname rt1 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 1.1.1.1/32 + ipv6 address 2001:db8:1000::1/128 +! +interface eth-sw1 + ip address 10.0.1.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-sr-te-topo1/rt2/isisd.conf b/tests/topotests/isis-sr-te-topo1/rt2/isisd.conf new file mode 100644 index 0000000000..733f26bc62 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt2/isisd.conf @@ -0,0 +1,41 @@ +hostname rt2 +log file isisd.log +! 
+debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 +! +interface eth-rt4-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt4-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0002.00 + is-type level-1 + topology ipv6-unicast + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 2.2.2.2/32 index 20 no-php-flag + segment-routing prefix 2001:db8:1000::2/128 index 21 no-php-flag +! diff --git a/tests/topotests/isis-sr-te-topo1/rt2/zebra.conf b/tests/topotests/isis-sr-te-topo1/rt2/zebra.conf new file mode 100644 index 0000000000..dcb0686dc2 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt2/zebra.conf @@ -0,0 +1,25 @@ +log file zebra.log +! +hostname rt2 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 2.2.2.2/32 + ipv6 address 2001:db8:1000::2/128 +! +interface eth-sw1 + ip address 10.0.1.2/24 +! +interface eth-rt4-1 + ip address 10.0.2.2/24 +! +interface eth-rt4-2 + ip address 10.0.3.2/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-sr-te-topo1/rt3/isisd.conf b/tests/topotests/isis-sr-te-topo1/rt3/isisd.conf new file mode 100644 index 0000000000..2395906cbf --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt3/isisd.conf @@ -0,0 +1,41 @@ +hostname rt3 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-multiplier 3 +! +interface eth-rt5-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt5-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0003.00 + is-type level-1 + topology ipv6-unicast + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 3.3.3.3/32 index 30 no-php-flag + segment-routing prefix 2001:db8:1000::3/128 index 31 no-php-flag +! diff --git a/tests/topotests/isis-sr-te-topo1/rt3/zebra.conf b/tests/topotests/isis-sr-te-topo1/rt3/zebra.conf new file mode 100644 index 0000000000..3254529386 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt3/zebra.conf @@ -0,0 +1,25 @@ +log file zebra.log +! +hostname rt3 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 3.3.3.3/32 + ipv6 address 2001:db8:1000::3/128 +! +interface eth-sw1 + ip address 10.0.1.3/24 +! +interface eth-rt5-1 + ip address 10.0.4.3/24 +! +interface eth-rt5-2 + ip address 10.0.5.3/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-sr-te-topo1/rt4/isisd.conf b/tests/topotests/isis-sr-te-topo1/rt4/isisd.conf new file mode 100644 index 0000000000..07a7867cbb --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt4/isisd.conf @@ -0,0 +1,48 @@ +hostname rt4 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! 
+interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt2-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt2-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0004.00 + is-type level-1 + topology ipv6-unicast + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 4.4.4.4/32 index 40 no-php-flag + segment-routing prefix 2001:db8:1000::4/128 index 41 no-php-flag +! diff --git a/tests/topotests/isis-sr-te-topo1/rt4/zebra.conf b/tests/topotests/isis-sr-te-topo1/rt4/zebra.conf new file mode 100644 index 0000000000..4945897e9d --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt4/zebra.conf @@ -0,0 +1,28 @@ +log file zebra.log +! +hostname rt4 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 4.4.4.4/32 + ipv6 address 2001:db8:1000::4/128 +! +interface eth-rt2-1 + ip address 10.0.2.4/24 +! +interface eth-rt2-2 + ip address 10.0.3.4/24 +! +interface eth-rt5 + ip address 10.0.6.4/24 +! +interface eth-rt6 + ip address 10.0.7.4/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis-sr-te-topo1/rt5/isisd.conf b/tests/topotests/isis-sr-te-topo1/rt5/isisd.conf new file mode 100644 index 0000000000..b0fcdede07 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt5/isisd.conf @@ -0,0 +1,48 @@ +hostname rt5 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt3-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt3-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0005.00 + is-type level-1 + topology ipv6-unicast + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 5.5.5.5/32 index 50 no-php-flag + segment-routing prefix 2001:db8:1000::5/128 index 51 no-php-flag +! diff --git a/tests/topotests/isis-sr-te-topo1/rt5/zebra.conf b/tests/topotests/isis-sr-te-topo1/rt5/zebra.conf new file mode 100644 index 0000000000..4cfea1a59f --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt5/zebra.conf @@ -0,0 +1,28 @@ +log file zebra.log +! +hostname rt5 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 5.5.5.5/32 + ipv6 address 2001:db8:1000::5/128 +! +interface eth-rt3-1 + ip address 10.0.4.5/24 +! +interface eth-rt3-2 + ip address 10.0.5.5/24 +! +interface eth-rt4 + ip address 10.0.6.5/24 +! +interface eth-rt6 + ip address 10.0.8.5/24 +! +ip forwarding +! +line vty +! 
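The rt1 reference files above tie the pathd segment-lists to concrete MPLS entries: candidate path "default" (labels 16050, 16060) appears behind binding-SID 1111 as an SR-TE nexthop carrying exactly that outLabelStack. Below is a minimal offline sketch of that check, assuming only the JSON layout used by the step1 .ref files; the helper name check_binding_sid and the relative path are illustrative and not part of the test suite.

#!/usr/bin/env python
# Offline sanity check for a binding-SID entry in "show mpls table json" output.
# Assumes only the JSON layout used by the step1 .ref files of this topotest.
import json


def check_binding_sid(mpls_table, bsid, expected_stack):
    """Return True if the binding-SID entry is installed as an SR-TE nexthop
    carrying exactly the expected label stack."""
    entry = mpls_table.get(str(bsid))
    if entry is None or not entry.get("installed"):
        return False
    for nexthop in entry.get("nexthops", []):
        if (nexthop.get("type") == "SR-TE"
                and nexthop.get("outLabelStack") == expected_stack):
            return True
    return False


if __name__ == "__main__":
    # rt1: segment-list "default" (16050, 16060) behind binding-SID 1111
    with open("rt1/step1/show_mpls_table_with_candidate.ref") as f:
        table = json.load(f)
    assert check_binding_sid(table, 1111, [16050, 16060])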
diff --git a/tests/topotests/isis-sr-te-topo1/rt6/bgpd.conf b/tests/topotests/isis-sr-te-topo1/rt6/bgpd.conf new file mode 100644 index 0000000000..e72ee52fce --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/bgpd.conf @@ -0,0 +1,12 @@ +log file bgpd.log +! +router bgp 1 + bgp router-id 6.6.6.6 + neighbor 1.1.1.1 remote-as 1 + neighbor 1.1.1.1 update-source lo + ! + address-family ipv4 unicast + redistribute static + neighbor 1.1.1.1 next-hop-self + exit-address-family +! diff --git a/tests/topotests/isis-sr-te-topo1/rt6/isisd.conf b/tests/topotests/isis-sr-te-topo1/rt6/isisd.conf new file mode 100644 index 0000000000..3be24ad24c --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/isisd.conf @@ -0,0 +1,36 @@ +hostname rt6 +log file isisd.log +! +debug isis events +debug isis route-events +debug isis spf-events +debug isis sr-events +debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-multiplier 3 +! +router isis 1 + net 49.0000.0000.0000.0006.00 + is-type level-1 + topology ipv6-unicast + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 6.6.6.6/32 index 60 + segment-routing prefix 2001:db8:1000::6/128 index 61 +! diff --git a/tests/topotests/isis-sr-te-topo1/rt6/pathd.conf b/tests/topotests/isis-sr-te-topo1/rt6/pathd.conf new file mode 100644 index 0000000000..3bada7147c --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/pathd.conf @@ -0,0 +1,21 @@ +log file pathd.log +! +hostname rt6 +! +segment-routing + traffic-eng + segment-list default + index 10 mpls label 16020 + index 20 mpls label 16010 + ! + segment-list test + index 10 mpls label 16050 + index 20 mpls label 16030 + index 30 mpls label 16010 + ! + policy color 1 endpoint 1.1.1.1 + name default + binding-sid 6666 + ! + ! +! 
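rt6's policy above mirrors rt1's from the opposite end of the topology: segment-list "default" (labels 16020, 16010) is attached to policy color 1 endpoint 1.1.1.1 with binding-sid 6666, which is exactly the SR-TE entry the step1 reference below expects. As a rough illustration of how the "index N mpls label L" lines produce an ordered stack, here is a small parser sketch; it is hypothetical and not used by the test itself.

#!/usr/bin/env python
# Illustrative helper (not part of the test suite): read the ordered label
# stacks out of a pathd.conf-style "segment-list" block. The resulting order
# matches the outLabelStack arrays in the step1 .ref files.
import re


def parse_segment_lists(text):
    """Map segment-list name -> MPLS labels sorted by their index."""
    lists = {}
    current = None
    for line in text.splitlines():
        m = re.match(r"\s*segment-list (\S+)", line)
        if m:
            current = m.group(1)
            lists[current] = []
            continue
        m = re.match(r"\s*index (\d+) mpls label (\d+)", line)
        if m and current is not None:
            lists[current].append((int(m.group(1)), int(m.group(2))))
    return {name: [label for _, label in sorted(entries)]
            for name, entries in lists.items()}


if __name__ == "__main__":
    with open("rt6/pathd.conf") as f:
        stacks = parse_segment_lists(f.read())
    # Expected from the config above:
    #   default -> [16020, 16010], test -> [16050, 16030, 16010]
    print(stacks)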
diff --git a/tests/topotests/isis-sr-te-topo1/rt6/step1/show_mpls_table_with_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt6/step1/show_mpls_table_with_candidate.ref new file mode 100644 index 0000000000..2bb000346f --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step1/show_mpls_table_with_candidate.ref @@ -0,0 +1,91 @@ +{ + "6666":{ + "inLabel":6666, + "installed":true, + "nexthops":[ + { + "type":"SR-TE", + "outLabel":16020, + "outLabelStack":[ + 16020, + 16010 + ], + "distance":150, + "installed":true, + "nexthop":"10.0.7.4" + } + ] + }, + "16010": { + "inLabel": 16010, + "installed": true, + "nexthops": [ + { + "distance": 150, + "installed": true, + "nexthop": "10.0.7.4", + "outLabel": 16010, + "type": "SR (IS-IS)" + }, + { + "distance": 150, + "installed": true, + "nexthop": "10.0.8.5", + "outLabel": 16010, + "type": "SR (IS-IS)" + } + ] + }, + "16020":{ + "inLabel":16020, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16020, + "distance":150, + "installed":true, + "nexthop":"10.0.7.4" + } + ] + }, + "16030":{ + "inLabel":16030, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16030, + "distance":150, + "installed":true, + "nexthop":"10.0.8.5" + } + ] + }, + "16040":{ + "inLabel":16040, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16040, + "distance":150, + "installed":true, + "nexthop":"10.0.7.4" + } + ] + }, + "16050":{ + "inLabel":16050, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16050, + "distance":150, + "installed":true, + "nexthop":"10.0.8.5" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt6/step1/show_mpls_table_without_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt6/step1/show_mpls_table_without_candidate.ref new file mode 100644 index 0000000000..348f7761eb --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step1/show_mpls_table_without_candidate.ref @@ -0,0 +1,74 @@ +{ + "16010": { + "inLabel": 16010, + "installed": true, + "nexthops": [ + { + "distance": 150, + "installed": true, + "nexthop": "10.0.7.4", + "outLabel": 16010, + "type": "SR (IS-IS)" + }, + { + "distance": 150, + "installed": true, + "nexthop": "10.0.8.5", + "outLabel": 16010, + "type": "SR (IS-IS)" + } + ] + }, + "16020":{ + "inLabel":16020, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16020, + "distance":150, + "installed":true, + "nexthop":"10.0.7.4" + } + ] + }, + "16030":{ + "inLabel":16030, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16030, + "distance":150, + "installed":true, + "nexthop":"10.0.8.5" + } + ] + }, + "16040":{ + "inLabel":16040, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16040, + "distance":150, + "installed":true, + "nexthop":"10.0.7.4" + } + ] + }, + "16050":{ + "inLabel":16050, + "installed":true, + "nexthops":[ + { + "type":"SR (IS-IS)", + "outLabel":16050, + "distance":150, + "installed":true, + "nexthop":"10.0.8.5" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt6/step2/show_operational_data.ref b/tests/topotests/isis-sr-te-topo1/rt6/step2/show_operational_data.ref new file mode 100644 index 0000000000..241c80bdd7 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step2/show_operational_data.ref @@ -0,0 +1,13 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "1.1.1.1", + "is-operational": false + } + ] + } + } +} diff --git 
a/tests/topotests/isis-sr-te-topo1/rt6/step2/show_operational_data_with_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt6/step2/show_operational_data_with_candidate.ref new file mode 100644 index 0000000000..20ea69e386 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step2/show_operational_data_with_candidate.ref @@ -0,0 +1,19 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "1.1.1.1", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt6/step3/show_operational_data_with_single_candidate.ref b/tests/topotests/isis-sr-te-topo1/rt6/step3/show_operational_data_with_single_candidate.ref new file mode 100644 index 0000000000..20ea69e386 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step3/show_operational_data_with_single_candidate.ref @@ -0,0 +1,19 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "1.1.1.1", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt6/step3/show_operational_data_with_two_candidates.ref b/tests/topotests/isis-sr-te-topo1/rt6/step3/show_operational_data_with_two_candidates.ref new file mode 100644 index 0000000000..10cafe9091 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step3/show_operational_data_with_two_candidates.ref @@ -0,0 +1,23 @@ +{ + "frr-pathd:pathd": { + "srte": { + "policy": [ + { + "color": 1, + "endpoint": "1.1.1.1", + "is-operational": true, + "candidate-path": [ + { + "preference": 100, + "is-best-candidate-path": false + }, + { + "preference": 200, + "is-best-candidate-path": true + } + ] + } + ] + } + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt6/step4/show_mpls_table.ref b/tests/topotests/isis-sr-te-topo1/rt6/step4/show_mpls_table.ref new file mode 100644 index 0000000000..95bf995e2e --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/step4/show_mpls_table.ref @@ -0,0 +1,20 @@ +{ + "6666":{ + "inLabel":6666, + "installed":true, + "nexthops":[ + { + "type":"SR-TE", + "outLabel":16050, + "outLabelStack":[ + 16050, + 16030, + 16010 + ], + "distance":150, + "installed":true, + "nexthop":"10.0.8.5" + } + ] + } +} diff --git a/tests/topotests/isis-sr-te-topo1/rt6/zebra.conf b/tests/topotests/isis-sr-te-topo1/rt6/zebra.conf new file mode 100644 index 0000000000..32c6e6c4e0 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/rt6/zebra.conf @@ -0,0 +1,27 @@ +log file zebra.log +! +hostname rt6 +! +debug zebra kernel +debug zebra packet +debug zebra mpls +! +interface lo + ip address 6.6.6.6/32 + ipv6 address 2001:db8:1000::6/128 +! +interface eth-rt4 + ip address 10.0.7.6/24 +! +interface eth-rt5 + ip address 10.0.8.6/24 +! +interface eth-dst + ip address 10.0.11.1/24 +! +ip forwarding +! +ip route 9.9.9.2/32 10.0.11.2 +! +line vty +! diff --git a/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py b/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py new file mode 100755 index 0000000000..bfd2f92a28 --- /dev/null +++ b/tests/topotests/isis-sr-te-topo1/test_isis_sr_te_topo1.py @@ -0,0 +1,532 @@ +#!/usr/bin/env python + +# +# test_isis_sr_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2019 by +# Network Device Education Foundation, Inc. 
("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_isis_sr_te_topo1.py: + + +---------+ + | | + | RT1 | + | 1.1.1.1 | + | | + +---------+ + |eth-sw1 + | + | + | + +---------+ | +---------+ + | | | | | + | RT2 |eth-sw1 | eth-sw1| RT3 | + | 2.2.2.2 +----------+----------+ 3.3.3.3 | + | | 10.0.1.0/24 | | + +---------+ +---------+ + eth-rt4-1| |eth-rt4-2 eth-rt5-1| |eth-rt5-2 + | | | | + 10.0.2.0/24| |10.0.3.0/24 10.0.4.0/24| |10.0.5.0/24 + | | | | + eth-rt2-1| |eth-rt2-2 eth-rt3-1| |eth-rt3-2 + +---------+ +---------+ + | | | | + | RT4 | 10.0.6.0/24 | RT5 | + | 4.4.4.4 +---------------------+ 5.5.5.5 | + | |eth-rt5 eth-rt4| | + +---------+ +---------+ + eth-rt6| |eth-rt6 + | | + 10.0.7.0/24| |10.0.8.0/24 + | +---------+ | + | | | | + | | RT6 | | + +----------+ 6.6.6.6 +-----------+ + eth-rt4| |eth-rt5 + +---------+ + |eth-dst (.1) + | + |10.0.11.0/24 + | + |eth-rt6 (.2) + +---------+ + | | + | DST | + | 9.9.9.2 | + | | + +---------+ + +""" + +import os +import sys +import pytest +import json +import re +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, '../')) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. 
+from mininet.topo import Topo + +class TemplateTopo(Topo): + "Test topology builder" + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6', 'dst']: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch('s1') + switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1") + switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1") + switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1") + + switch = tgen.add_switch('s2') + switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-1") + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-1") + + switch = tgen.add_switch('s3') + switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-2") + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-2") + + switch = tgen.add_switch('s4') + switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-1") + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-1") + + switch = tgen.add_switch('s5') + switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-2") + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-2") + + switch = tgen.add_switch('s6') + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5") + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4") + + switch = tgen.add_switch('s7') + switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6") + switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4") + + switch = tgen.add_switch('s8') + switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6") + switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5") + + switch = tgen.add_switch('s9') + switch.add_link(tgen.gears['rt6'], nodeif="eth-dst") + switch.add_link(tgen.gears['dst'], nodeif="eth-rt6") + +@pytest.mark.isis +def setup_module(mod): + "Sets up the pytest environment" + + tgen = Topogen(TemplateTopo, mod.__name__) + + frrdir = tgen.config.get(tgen.CONFIG_SECTION, "frrdir") + if not os.path.isfile(os.path.join(frrdir, "pathd")): + pytest.skip("pathd daemon wasn't built") + + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.iteritems(): + router.load_config( + TopoRouter.RD_ZEBRA, + os.path.join(CWD, '{}/zebra.conf'.format(rname)) + ) + router.load_config( + TopoRouter.RD_ISIS, + os.path.join(CWD, '{}/isisd.conf'.format(rname)) + ) + router.load_config( + TopoRouter.RD_PATH, + os.path.join(CWD, '{}/pathd.conf'.format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, + os.path.join(CWD, '{}/bgpd.conf'.format(rname)) + ) + + tgen.start_router() + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + +def setup_testcase(msg): + logger.info(msg) + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + return tgen + +def print_cmd_result(rname, command): + print(get_topogen().gears[rname].vtysh_cmd(command, isjson=False)) + +def compare_json_test(router, command, reference, exact): + output = router.vtysh_cmd(command, isjson=True) + result = topotest.json_cmp(output, reference) + + # Note: topotest.json_cmp() just checks on inclusion of keys. + # For exact matching also compare the other way around. 
+ if not result and exact: + return topotest.json_cmp(reference, output) + else: + return result + +def cmp_json_output(rname, command, reference, exact=False): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = '{}/{}/{}'.format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 60 seconds. + test_func = partial(compare_json_test, + tgen.gears[rname], command, expected, exact) + _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + +def cmp_json_output_exact(rname, command, reference): + return cmp_json_output(rname, command, reference, True) + +def add_candidate_path(rname, endpoint, pref, name, segment_list='default'): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "segment-routing" \ + -c "traffic-eng" \ + -c "policy color 1 endpoint ''' + endpoint + '''" \ + -c "candidate-path preference ''' + str(pref) + ''' name ''' + name + ''' explicit segment-list ''' + segment_list + '''"''') + +def delete_candidate_path(rname, endpoint, pref): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "segment-routing" \ + -c "traffic-eng" \ + -c "policy color 1 endpoint ''' + endpoint + '''" \ + -c "no candidate-path preference ''' + str(pref) + '''"''') + +def add_segment(rname, name, index, label): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "segment-routing" \ + -c "traffic-eng" \ + -c "segment-list ''' + name + '''" \ + -c "index ''' + str(index) + ''' mpls label ''' + str(label) + '''"''') + +def delete_segment(rname, name, index): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "segment-routing" \ + -c "traffic-eng" \ + -c "segment-list ''' + name + '''" \ + -c "no index ''' + str(index) + '''"''') + +def create_sr_policy(rname, endpoint, bsid): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "segment-routing" \ + -c "traffic-eng" \ + -c "policy color 1 endpoint ''' + endpoint + '''" \ + -c "name default" \ + -c "binding-sid ''' + str(bsid) + '''"''') + +def delete_sr_policy(rname, endpoint): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "segment-routing" \ + -c "traffic-eng" \ + -c "no policy color 1 endpoint ''' + endpoint + '''"''') + +def create_prefix_sid(rname, prefix, sid): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "router isis 1" \ + -c "segment-routing prefix ''' + prefix + " index " + str(sid) + '''"''') + +def delete_prefix_sid(rname, prefix): + get_topogen().net[rname].cmd(''' \ + vtysh -c "conf t" \ + -c "router isis 1" \ + -c "no segment-routing prefix "''' + prefix) + +# +# Step 1 +# +# Checking the MPLS table using a single SR Policy and a single Candidate Path +# +def test_srte_init_step1(): + setup_testcase("Test (step 1): wait for IS-IS convergence / label distribution") + + for rname in ['rt1', 'rt6']: + cmp_json_output(rname, + "show mpls table json", + "step1/show_mpls_table_without_candidate.ref") + +def test_srte_add_candidate_check_mpls_table_step1(): + setup_testcase("Test (step 1): check MPLS table regarding the added Candidate Path") + + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + add_candidate_path(rname, endpoint, 100, 'default') + cmp_json_output(rname, + "show mpls table json", + "step1/show_mpls_table_with_candidate.ref") + 
delete_candidate_path(rname, endpoint, 100) + +def test_srte_reinstall_sr_policy_check_mpls_table_step1(): + setup_testcase("Test (step 1): check MPLS table after the SR Policy was removed and reinstalled") + + for rname, endpoint, bsid in [('rt1', '6.6.6.6', 1111), ('rt6', '1.1.1.1', 6666)]: + add_candidate_path(rname, endpoint, 100, 'default') + delete_sr_policy(rname, endpoint) + cmp_json_output(rname, + "show mpls table json", + "step1/show_mpls_table_without_candidate.ref") + create_sr_policy(rname, endpoint, bsid) + add_candidate_path(rname, endpoint, 100, 'default') + cmp_json_output(rname, + "show mpls table json", + "step1/show_mpls_table_with_candidate.ref") + delete_candidate_path(rname, endpoint, 100) + +# +# Step 2 +# +# Checking pathd operational data using a single SR Policy and a single Candidate Path +# +def test_srte_bare_policy_step2(): + setup_testcase("Test (step 2): bare SR Policy should not be operational") + + for rname in ['rt1', 'rt6']: + cmp_json_output_exact(rname, + "show yang operational-data /frr-pathd:pathd pathd", + "step2/show_operational_data.ref") + +def test_srte_add_candidate_check_operational_data_step2(): + setup_testcase("Test (step 2): add single Candidate Path, SR Policy should be operational") + + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + add_candidate_path(rname, endpoint, 100, 'default') + cmp_json_output(rname, + "show yang operational-data /frr-pathd:pathd pathd", + "step2/show_operational_data_with_candidate.ref") + +def test_srte_config_remove_candidate_check_operational_data_step2(): + setup_testcase("Test (step 2): remove single Candidate Path, SR Policy should not be operational anymore") + + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + delete_candidate_path(rname, endpoint, 100) + cmp_json_output_exact(rname, + "show yang operational-data /frr-pathd:pathd pathd", + "step2/show_operational_data.ref") + +# +# Step 3 +# +# Testing the Candidate Path selection +# +def test_srte_add_two_candidates_step3(): + setup_testcase("Test (step 3): second Candidate Path has higher Priority") + + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + for pref, cand_name in [('100', 'first'), ('200', 'second')]: + add_candidate_path(rname, endpoint, pref, cand_name) + cmp_json_output(rname, + "show yang operational-data /frr-pathd:pathd pathd", + "step3/show_operational_data_with_two_candidates.ref") + + # cleanup + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + for pref in ['100', '200']: + delete_candidate_path(rname, endpoint, pref) + +def test_srte_add_two_candidates_with_reverse_priority_step3(): + setup_testcase("Test (step 3): second Candidate Path has lower Priority") + + # Use reversed priorities here + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + for pref, cand_name in [('200', 'first'), ('100', 'second')]: + add_candidate_path(rname, endpoint, pref, cand_name) + cmp_json_output(rname, + "show yang operational-data /frr-pathd:pathd pathd", + "step3/show_operational_data_with_two_candidates.ref") + + # cleanup + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + for pref in ['100', '200']: + delete_candidate_path(rname, endpoint, pref) + +def test_srte_remove_best_candidate_step3(): + setup_testcase("Test (step 3): delete the Candidate Path with higher priority") + + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + for pref, cand_name in [('100', 'first'), ('200', 'second')]: + add_candidate_path(rname, 
endpoint, pref, cand_name) + + # Delete candidate with higher priority + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + delete_candidate_path(rname, endpoint, 200) + + # Candidate with lower priority should get active now + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + cmp_json_output(rname, + "show yang operational-data /frr-pathd:pathd pathd", + "step3/show_operational_data_with_single_candidate.ref") + # cleanup + delete_candidate_path(rname, endpoint, 100) + +# +# Step 4 +# +# Checking MPLS table with a single SR Policy and a Candidate Path with different Segment Lists and other modifications +# +def test_srte_change_segment_list_check_mpls_table_step4(): + setup_testcase("Test (step 4): check MPLS table for changed Segment List") + + for rname, endpoint in [('rt1', '6.6.6.6'), ('rt6', '1.1.1.1')]: + add_candidate_path(rname, endpoint, 100, 'default') + # now change the segment list name + add_candidate_path(rname, endpoint, 100, 'default', 'test') + cmp_json_output(rname, + "show mpls table json", + "step4/show_mpls_table.ref") + delete_candidate_path(rname, endpoint, 100) + +def test_srte_segment_list_add_segment_check_mpls_table_step4(): + setup_testcase("Test (step 4): check MPLS table for added (then changed and finally deleted) segment") + + add_candidate_path('rt1', '6.6.6.6', 100, 'default', 'test') + + # first add a new segment + add_segment('rt1', 'test', 25, 16050) + cmp_json_output('rt1', + "show mpls table json", + "step4/show_mpls_table_add_segment.ref") + + # ... then change it ... + add_segment('rt1', 'test', 25, 16030) + cmp_json_output('rt1', + "show mpls table json", + "step4/show_mpls_table_change_segment.ref") + + # ... and finally delete it + delete_segment('rt1', 'test', 25) + cmp_json_output('rt1', + "show mpls table json", + "step4/show_mpls_table.ref") + delete_candidate_path('rt1', '6.6.6.6', 100) + +# +# Step 5 +# +# Checking the nexthop using a single SR Policy and a Candidate Path with configured route-map +# +def test_srte_route_map_with_sr_policy_check_nextop_step5(): + setup_testcase("Test (step 5): recursive nexthop learned through BGP neighbour should be aligned with SR Policy from route-map") + + # (re-)build the SR Policy two times to ensure that reinstalling still works + for i in [1,2]: + cmp_json_output('rt1', + "show ip route bgp json", + "step5/show_ip_route_bgp_inactive_srte.ref") + + delete_sr_policy('rt1', '6.6.6.6') + cmp_json_output('rt1', + "show ip route bgp json", + "step5/show_ip_route_bgp_inactive_srte.ref") + + create_sr_policy('rt1', '6.6.6.6', 1111) + cmp_json_output('rt1', + "show ip route bgp json", + "step5/show_ip_route_bgp_inactive_srte.ref") + + add_candidate_path('rt1', '6.6.6.6', 100, 'default') + cmp_json_output('rt1', + "show ip route bgp json", + "step5/show_ip_route_bgp_active_srte.ref") + + delete_candidate_path('rt1', '6.6.6.6', 100) + +def test_srte_route_map_with_sr_policy_reinstall_prefix_sid_check_nextop_step5(): + setup_testcase("Test (step 5): remove and re-install prefix SID on fist path element and check SR Policy activity") + + # first add a candidate path so the SR Policy is active + add_candidate_path('rt1', '6.6.6.6', 100, 'default') + cmp_json_output('rt1', + "show yang operational-data /frr-pathd:pathd pathd", + "step5/show_operational_data_active.ref") + + # delete prefix SID from first element of the configured path and check + # if the SR Policy is inactive since the label can't be resolved anymore + delete_prefix_sid('rt5', "5.5.5.5/32") + 
cmp_json_output('rt1', + "show yang operational-data /frr-pathd:pathd pathd", + "step5/show_operational_data_inactive.ref") + cmp_json_output('rt1', + "show ip route bgp json", + "step5/show_ip_route_bgp_inactive_srte.ref") + + # re-create the prefix SID and check if the SR Policy is active + create_prefix_sid('rt5', "5.5.5.5/32", 50) + cmp_json_output('rt1', + "show yang operational-data /frr-pathd:pathd pathd", + "step5/show_operational_data_active.ref") + cmp_json_output('rt1', + "show ip route bgp json", + "step5/show_ip_route_bgp_active_srte.ref") + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip('Memory leak test/report is disabled') + + tgen.report_memory_leaks() + +if __name__ == '__main__': + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py b/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py index 34eb6d90f6..63be3f78aa 100644 --- a/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py +++ b/tests/topotests/isis-sr-topo1/test_isis_sr_topo1.py @@ -134,7 +134,7 @@ class TemplateTopo(Topo): switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6") switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5") - +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(TemplateTopo, mod.__name__) diff --git a/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py b/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py index 514ea53552..83751fabcd 100755 --- a/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py +++ b/tests/topotests/isis-tilfa-topo1/test_isis_tilfa_topo1.py @@ -74,7 +74,7 @@ from functools import partial # Save the Current Working Directory to find configuration files. 
CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, '../')) +sys.path.append(os.path.join(CWD, "../")) # pylint: disable=C0413 # Import topogen and topotest helpers @@ -88,8 +88,10 @@ from mininet.topo import Topo # Global multi-dimensional dictionary containing all expected outputs outputs = {} + class TemplateTopo(Topo): "Test topology builder" + def build(self, *_args, **_opts): "Build function" tgen = get_topogen(self) @@ -97,80 +99,85 @@ class TemplateTopo(Topo): # # Define FRR Routers # - for router in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: tgen.add_router(router) # # Define connections # - switch = tgen.add_switch('s1') - switch.add_link(tgen.gears['rt1'], nodeif="eth-sw1") - switch.add_link(tgen.gears['rt2'], nodeif="eth-sw1") - switch.add_link(tgen.gears['rt3'], nodeif="eth-sw1") + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1") - switch = tgen.add_switch('s2') - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-1") - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-1") + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1") - switch = tgen.add_switch('s3') - switch.add_link(tgen.gears['rt2'], nodeif="eth-rt4-2") - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt2-2") + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2") - switch = tgen.add_switch('s4') - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-1") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-1") + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1") - switch = tgen.add_switch('s5') - switch.add_link(tgen.gears['rt3'], nodeif="eth-rt5-2") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt3-2") + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2") - switch = tgen.add_switch('s6') - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt5") - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt4") + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") - switch = tgen.add_switch('s7') - switch.add_link(tgen.gears['rt4'], nodeif="eth-rt6") - switch.add_link(tgen.gears['rt6'], nodeif="eth-rt4") + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4") - switch = tgen.add_switch('s8') - switch.add_link(tgen.gears['rt5'], nodeif="eth-rt6") - switch.add_link(tgen.gears['rt6'], nodeif="eth-rt5") + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5") # # Populate multi-dimensional dictionary containing all expected outputs # - files = ["show_ip_route.ref", - "show_ipv6_route.ref", - "show_mpls_table.ref", - "show_yang_interface_isis_adjacencies.ref"] - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: + files = [ + "show_ip_route.ref", + "show_ipv6_route.ref", + "show_mpls_table.ref", + "show_yang_interface_isis_adjacencies.ref", + ] + for rname in ["rt1", 
"rt2", "rt3", "rt4", "rt5", "rt6"]: outputs[rname] = {} for step in range(1, 9 + 1): outputs[rname][step] = {} for file in files: if step == 1: # Get snapshots relative to the expected initial network convergence - filename = '{}/{}/step{}/{}'.format(CWD, rname, step, file) + filename = "{}/{}/step{}/{}".format(CWD, rname, step, file) outputs[rname][step][file] = open(filename).read() else: if file == "show_yang_interface_isis_adjacencies.ref": continue # Get diff relative to the previous step - filename = '{}/{}/step{}/{}.diff'.format(CWD, rname, step, file) + filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file) # Create temporary files in order to apply the diff f_in = tempfile.NamedTemporaryFile() f_in.write(outputs[rname][step - 1][file]) f_in.flush() f_out = tempfile.NamedTemporaryFile() - os.system("patch -s -o %s %s %s" %(f_out.name, f_in.name, filename)) + os.system( + "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename) + ) # Store the updated snapshot and remove the temporary files outputs[rname][step][file] = open(f_out.name).read() f_in.close() f_out.close() +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(TemplateTopo, mod.__name__) @@ -181,16 +188,15 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, '{}/zebra.conf'.format(rname)) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) ) router.load_config( - TopoRouter.RD_ISIS, - os.path.join(CWD, '{}/isisd.conf'.format(rname)) + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) ) tgen.start_router() + def teardown_module(mod): "Teardown the pytest environment" tgen = get_topogen() @@ -198,6 +204,7 @@ def teardown_module(mod): # This function tears down the whole topology. tgen.stop_topology() + def router_compare_json_output(rname, command, reference): "Compare router JSON output" @@ -207,12 +214,12 @@ def router_compare_json_output(rname, command, reference): expected = json.loads(reference) # Run test function until we get an result. Wait at most 60 seconds. 
- test_func = partial(topotest.router_json_cmp, - tgen.gears[rname], command, expected) + test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg + # # Step 1 # @@ -226,9 +233,13 @@ def test_isis_adjacencies_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show yang operational-data /frr-interface:lib isisd", - outputs[rname][1]["show_yang_interface_isis_adjacencies.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + outputs[rname][1]["show_yang_interface_isis_adjacencies.ref"], + ) + def test_rib_ipv4_step1(): logger.info("Test (step 1): verify IPv4 RIB") @@ -238,9 +249,11 @@ def test_rib_ipv4_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][1]["show_ip_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][1]["show_ip_route.ref"] + ) + def test_rib_ipv6_step1(): logger.info("Test (step 1): verify IPv6 RIB") @@ -250,9 +263,11 @@ def test_rib_ipv6_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][1]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][1]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step1(): logger.info("Test (step 1): verify MPLS LIB") @@ -262,9 +277,11 @@ def test_mpls_lib_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][1]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][1]["show_mpls_table.ref"] + ) + # # Step 2 @@ -283,12 +300,16 @@ def test_rib_ipv4_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Disabling TI-LFA link protection on rt2\'s eth-sw1 interface') - tgen.net['rt2'].cmd('vtysh -c "conf t" -c "interface eth-sw1" -c "no isis fast-reroute ti-lfa"') + logger.info("Disabling TI-LFA link protection on rt2's eth-sw1 interface") + tgen.net["rt2"].cmd( + 'vtysh -c "conf t" -c "interface eth-sw1" -c "no isis fast-reroute ti-lfa"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][2]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][2]["show_ip_route.ref"]) def test_rib_ipv6_step2(): logger.info("Test (step 2): verify IPv6 RIB") @@ -298,9 +319,11 @@ def test_rib_ipv6_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show 
ipv6 route isis json", - outputs[rname][2]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][2]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step2(): logger.info("Test (step 2): verify MPLS LIB") @@ -310,9 +333,11 @@ def test_mpls_lib_step2(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][2]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][2]["show_mpls_table.ref"] + ) + # # Step 3 @@ -331,12 +356,16 @@ def test_rib_ipv4_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Enabling TI-LFA link protection on rt2\'s eth-sw1 interface') - tgen.net['rt2'].cmd('vtysh -c "conf t" -c "interface eth-sw1" -c "isis fast-reroute ti-lfa"') + logger.info("Enabling TI-LFA link protection on rt2's eth-sw1 interface") + tgen.net["rt2"].cmd( + 'vtysh -c "conf t" -c "interface eth-sw1" -c "isis fast-reroute ti-lfa"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][3]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][3]["show_ip_route.ref"]) def test_rib_ipv6_step3(): logger.info("Test (step 3): verify IPv6 RIB") @@ -346,9 +375,11 @@ def test_rib_ipv6_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][3]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][3]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step3(): logger.info("Test (step 3): verify MPLS LIB") @@ -358,9 +389,11 @@ def test_mpls_lib_step3(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][3]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][3]["show_mpls_table.ref"] + ) + # # Step 4 @@ -384,12 +417,16 @@ def test_rib_ipv4_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Disabling SR on rt4') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing on"') + logger.info("Disabling SR on rt4") + tgen.net["rt4"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing on"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][4]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][4]["show_ip_route.ref"]) def test_rib_ipv6_step4(): logger.info("Test (step 4): verify IPv6 RIB") @@ -399,9 +436,11 @@ def test_rib_ipv6_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - 
router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][4]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][4]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step4(): logger.info("Test (step 4): verify MPLS LIB") @@ -411,9 +450,11 @@ def test_mpls_lib_step4(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][4]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][4]["show_mpls_table.ref"] + ) + # # Step 5 @@ -432,12 +473,14 @@ def test_rib_ipv4_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Enabling SR on rt4') - tgen.net['rt4'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing on"') + logger.info("Enabling SR on rt4") + tgen.net["rt4"].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing on"') + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][5]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][5]["show_ip_route.ref"]) def test_rib_ipv6_step5(): logger.info("Test (step 5): verify IPv6 RIB") @@ -447,9 +490,11 @@ def test_rib_ipv6_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][5]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][5]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step5(): logger.info("Test (step 5): verify MPLS LIB") @@ -459,9 +504,11 @@ def test_mpls_lib_step5(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][5]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][5]["show_mpls_table.ref"] + ) + # # Step 6 @@ -480,12 +527,16 @@ def test_rib_ipv4_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Changing rt5\'s SRGB') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 30000 37999"') + logger.info("Changing rt5's SRGB") + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing global-block 30000 37999"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][6]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][6]["show_ip_route.ref"]) def test_rib_ipv6_step6(): logger.info("Test (step 6): verify IPv6 RIB") @@ -495,9 +546,11 @@ def test_rib_ipv6_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route 
isis json", - outputs[rname][6]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][6]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step6(): logger.info("Test (step 6): verify MPLS LIB") @@ -507,9 +560,11 @@ def test_mpls_lib_step6(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][6]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][6]["show_mpls_table.ref"] + ) + # # Step 7 @@ -529,13 +584,19 @@ def test_rib_ipv4_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Deleting rt5\'s Prefix-SIDs') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 5.5.5.5/32 index 50"') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 2001:db8:1000::5/128 index 51"') + logger.info("Deleting rt5's Prefix-SIDs") + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 5.5.5.5/32 index 50"' + ) + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "no segment-routing prefix 2001:db8:1000::5/128 index 51"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][7]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][7]["show_ip_route.ref"]) def test_rib_ipv6_step7(): logger.info("Test (step 7): verify IPv6 RIB") @@ -545,9 +606,11 @@ def test_rib_ipv6_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][7]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][7]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step7(): logger.info("Test (step 7): verify MPLS LIB") @@ -557,9 +620,11 @@ def test_mpls_lib_step7(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][7]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][7]["show_mpls_table.ref"] + ) + # # Step 8 @@ -578,13 +643,19 @@ def test_rib_ipv4_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Re-adding rt5\'s Prefix-SIDs') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 5.5.5.5/32 index 50"') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::5/128 index 51"') + logger.info("Re-adding rt5's Prefix-SIDs") + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 5.5.5.5/32 index 50"' + ) + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::5/128 index 51"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + 
router_compare_json_output( + rname, "show ip route isis json", outputs[rname][8]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][8]["show_ip_route.ref"]) def test_rib_ipv6_step8(): logger.info("Test (step 8): verify IPv6 RIB") @@ -594,9 +665,11 @@ def test_rib_ipv6_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][8]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][8]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step8(): logger.info("Test (step 8): verify MPLS LIB") @@ -606,9 +679,11 @@ def test_mpls_lib_step8(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][8]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][8]["show_mpls_table.ref"] + ) + # # Step 9 @@ -628,13 +703,19 @@ def test_rib_ipv4_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info('Re-adding rt5\'s Prefix-SIDs') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 5.5.5.5/32 index 500"') - tgen.net['rt5'].cmd('vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::5/128 index 501"') + logger.info("Re-adding rt5's Prefix-SIDs") + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 5.5.5.5/32 index 500"' + ) + tgen.net["rt5"].cmd( + 'vtysh -c "conf t" -c "router isis 1" -c "segment-routing prefix 2001:db8:1000::5/128 index 501"' + ) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", outputs[rname][9]["show_ip_route.ref"] + ) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ip route isis json", - outputs[rname][9]["show_ip_route.ref"]) def test_rib_ipv6_step9(): logger.info("Test (step 9): verify IPv6 RIB") @@ -644,9 +725,11 @@ def test_rib_ipv6_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show ipv6 route isis json", - outputs[rname][9]["show_ipv6_route.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route isis json", outputs[rname][9]["show_ipv6_route.ref"] + ) + def test_mpls_lib_step9(): logger.info("Test (step 9): verify MPLS LIB") @@ -656,19 +739,22 @@ def test_mpls_lib_step9(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - for rname in ['rt1', 'rt2', 'rt3', 'rt4', 'rt5', 'rt6']: - router_compare_json_output(rname, "show mpls table json", - outputs[rname][9]["show_mpls_table.ref"]) + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show mpls table json", outputs[rname][9]["show_mpls_table.ref"] + ) + # Memory leak test template def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() if not tgen.is_memleak_enabled(): - pytest.skip('Memory leak test/report is disabled') + pytest.skip("Memory leak test/report is disabled") tgen.report_memory_leaks() -if __name__ == '__main__': + +if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py b/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py index 12121e4ddf..a79c8a268b 100644 --- a/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py +++ b/tests/topotests/isis-topo1-vrf/test_isis_topo1_vrf.py @@ -82,7 +82,7 @@ class ISISTopo1(Topo): sw.add_link(tgen.gears["r4"]) sw.add_link(tgen.gears["r5"]) - +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(ISISTopo1, mod.__name__) diff --git a/tests/topotests/isis-topo1/test_isis_topo1.py b/tests/topotests/isis-topo1/test_isis_topo1.py index 71005a0362..25116d9452 100644 --- a/tests/topotests/isis-topo1/test_isis_topo1.py +++ b/tests/topotests/isis-topo1/test_isis_topo1.py @@ -84,7 +84,7 @@ class ISISTopo1(Topo): sw.add_link(tgen.gears["r4"]) sw.add_link(tgen.gears["r5"]) - +@pytest.mark.isis def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(ISISTopo1, mod.__name__) diff --git a/tests/topotests/ldp-topo1/test_ldp_topo1.py b/tests/topotests/ldp-topo1/test_ldp_topo1.py index 31adeafbf6..dfe65f010e 100644 --- a/tests/topotests/ldp-topo1/test_ldp_topo1.py +++ b/tests/topotests/ldp-topo1/test_ldp_topo1.py @@ -159,7 +159,7 @@ class NetworkTopo(Topo): ## ##################################################### - +@pytest.mark.ldp def setup_module(module): global topo, net global fatal_error @@ -647,9 +647,11 @@ def test_zebra_ipv4_routingTable(): else: print("r%s ok" % i) - assert failures == 0, ( - "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" - % (i, diff) + assert ( + failures == 0 + ), "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" % ( + i, + diff, ) # Make sure that all daemons are running diff --git a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py index ba94cd47d4..d659acb470 100644 --- a/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py +++ b/tests/topotests/ldp-vpls-topo1/test_ldp_vpls_topo1.py @@ -121,7 +121,8 @@ class TemplateTopo(Topo): switch.add_link(tgen.gears["r2"]) switch.add_link(tgen.gears["r3"]) - +@pytest.mark.ldp +@pytest.mark.ospf def setup_module(mod): "Sets up the pytest environment" tgen = Topogen(TemplateTopo, mod.__name__) diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index ddeaf55b33..22602cb460 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -21,6 +21,7 @@ from copy import deepcopy from time import sleep import traceback +import ipaddr import ipaddress import os import sys @@ -691,8 +692,8 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True): config_data.append("{} activate".format(neigh_cxt)) disable_connected = peer.setdefault("disable_connected_check", False) - keep_alive = peer.setdefault("keepalivetimer", 60) - hold_down = peer.setdefault("holddowntimer", 180) + keep_alive = peer.setdefault("keepalivetimer", 3) + hold_down = peer.setdefault("holddowntimer", 10) password = peer.setdefault("password", None) no_password = peer.setdefault("no_password", None) max_hop_limit = peer.setdefault("ebgp_multihop", 1) @@ -1615,8 +1616,6 @@ def clear_bgp(tgen, addr_type, router, vrf=None): else: run_frr_cmd(rnode, 
"clear bgp *") - sleep(5) - logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) @@ -2115,8 +2114,8 @@ def verify_bgp_attributes( errormsg(str) or True """ - logger.debug("Entering lib API: verify_bgp_attributes()") - for router, rnode in tgen.routers().items(): + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + for router, rnode in tgen.routers().iteritems(): if router != dut: continue @@ -2129,7 +2128,9 @@ def verify_bgp_attributes( dict_to_test = [] tmp_list = [] - if "route_maps" in input_dict.values()[0]: + dict_list = list(input_dict.values())[0] + + if "route_maps" in dict_list: for rmap_router in input_dict.keys(): for rmap, values in input_dict[rmap_router]["route_maps"].items(): if rmap == rmap_name: @@ -2194,7 +2195,7 @@ def verify_bgp_attributes( ) return errormsg - logger.debug("Exiting lib API: verify_bgp_attributes()") + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return True @@ -2514,8 +2515,9 @@ def verify_best_path_as_per_admin_distance( return True -@retry(attempts=6, wait=2, return_is_str=True) -def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None): +@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2) +def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, +aspath=None, multi_nh=None): """ This API is to verify whether bgp rib has any matching route for a nexthop. @@ -2550,6 +2552,7 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None) additional_nexthops_in_required_nhs = [] list1 = [] list2 = [] + found_hops = [] for routerInput in input_dict.keys(): for router, rnode in router_list.items(): if router != dut: @@ -2616,44 +2619,73 @@ def verify_bgp_rib(tgen, addr_type, dut, input_dict, next_hop=None, aspath=None) st_found = True found_routes.append(st_rt) - if next_hop: + if next_hop and multi_nh and st_found: if not isinstance(next_hop, list): next_hop = [next_hop] list1 = next_hop - found_hops = [ - rib_r["ip"] - for rib_r in rib_routes_json["routes"][st_rt][0][ - "nexthops" - ] - ] - list2 = found_hops - - missing_list_of_nexthops = set(list2).difference(list1) - additional_nexthops_in_required_nhs = set( - list1 - ).difference(list2) + for mnh in range( + 0, len(rib_routes_json["routes"][st_rt]) + ): + found_hops.append( + [ + rib_r["ip"] + for rib_r in rib_routes_json["routes"][ + st_rt + ][mnh]["nexthops"] + ] + ) + for mnh in found_hops: + for each_nh_in_multipath in mnh: + list2.append(each_nh_in_multipath) + if found_hops[0]: + missing_list_of_nexthops = set(list2).difference( + list1 + ) + additional_nexthops_in_required_nhs = set( + list1 + ).difference(list2) - if list2: - if additional_nexthops_in_required_nhs: - logger.info( - "Missing nexthop %s for route" - " %s in RIB of router %s\n", - additional_nexthops_in_required_nhs, - st_rt, - dut, - ) - errormsg = ( - "Nexthop {} is Missing for " - "route {} in RIB of router {}\n".format( + if list2: + if additional_nexthops_in_required_nhs: + logger.info( + "Missing nexthop %s for route" + " %s in RIB of router %s\n", additional_nexthops_in_required_nhs, st_rt, dut, ) - ) return errormsg else: nh_found = True + + elif next_hop and multi_nh is None: + if not isinstance(next_hop, list): + next_hop = [next_hop] + list1 = next_hop + found_hops = [rib_r["ip"] for rib_r in + rib_routes_json["routes"][ + st_rt][0]["nexthops"]] + list2 = found_hops + missing_list_of_nexthops = \ + set(list2).difference(list1) + additional_nexthops_in_required_nhs = \ + 
set(list1).difference(list2) + + if list2: + if additional_nexthops_in_required_nhs: + logger.info("Missing nexthop %s for route"\ + " %s in RIB of router %s\n", \ + additional_nexthops_in_required_nhs, \ + st_rt, dut) + errormsg=("Nexthop {} is Missing for "\ + "route {} in RIB of router {}\n".format( + additional_nexthops_in_required_nhs, + st_rt, dut)) + return errormsg + else: + nh_found = True + if aspath: found_paths = rib_routes_json["routes"][st_rt][0][ "path" @@ -3676,7 +3708,6 @@ def verify_attributes_for_evpn_routes( """ API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1" command. - Parameters ---------- * `tgen`: topogen object @@ -3690,7 +3721,6 @@ def verify_attributes_for_evpn_routes( * `ipLen` : IP prefix length * `rd_peer` : Peer name from which RD will be auto-generated * `rt_peer` : Peer name from which RT will be auto-generated - Usage ----- input_dict_1 = { @@ -4067,7 +4097,6 @@ def verify_evpn_routes( """ API to verify evpn routes using "sh bgp l2vpn evpn" command. - Parameters ---------- * `tgen`: topogen object @@ -4078,7 +4107,6 @@ def verify_evpn_routes( * `route_type` : Route type 5 is supported as of now * `EthTag` : Ethernet tag, by-default is 0 * `next_hop` : Prefered nexthop for the evpn routes - Usage ----- input_dict_1 = { @@ -4091,7 +4119,6 @@ def verify_evpn_routes( } } result = verify_evpn_routes(tgen, topo, input_dict) - Returns ------- errormsg(str) or True diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 6c24b6ddbb..3f360ef40a 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -31,7 +31,6 @@ from re import search as re_search from tempfile import mkdtemp import os -import io import sys import traceback import socket @@ -39,6 +38,7 @@ import ipaddress import platform if sys.version_info[0] > 2: + import io import configparser else: import StringIO @@ -151,8 +151,8 @@ class InvalidCLIError(Exception): def run_frr_cmd(rnode, cmd, isjson=False): """ - Execute frr show commands in priviledged mode - * `rnode`: router node on which commands needs to executed + Execute frr show commands in privileged mode + * `rnode`: router node on which command needs to be executed * `cmd`: Command to be executed on frr * `isjson`: If command is to get json data or not :return str: @@ -184,11 +184,11 @@ def apply_raw_config(tgen, input_dict): """ API to configure raw configuration on device. This can be used for any cli - which does has not been implemented in JSON. + which has not been implemented in JSON. Parameters ---------- - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict`: configuration that needs to be applied Usage @@ -232,8 +232,8 @@ def create_common_configuration( frr_json.conf and load to router Parameters ---------- - * `tgen`: tgen onject - * `data`: Congiguration data saved in a list. + * `tgen`: tgen object + * `data`: Configuration data saved in a list. * `router` : router id to be configured. * `config_type` : Syntactic information while writing configuration. Should be one of the value as mentioned in the config_map below. @@ -257,6 +257,8 @@ def create_common_configuration( "bgp": "! BGP Config\n", "vrf": "! VRF Config\n", "ospf": "! OSPF Config\n", + "ospf6": "! OSPF Config\n", + "pim": "! PIM Config\n", } ) @@ -292,8 +294,8 @@ def create_common_configuration( def kill_router_daemons(tgen, router, daemons): """ - Router's current config would be saved to /etc/frr/ for each deamon - and deamon would be killed forcefully using SIGKILL. 
+ Router's current config would be saved to /etc/frr/ for each daemon + and daemon would be killed forcefully using SIGKILL. * `tgen` : topogen object * `router`: Device under test * `daemons`: list of daemons to be killed @@ -389,6 +391,8 @@ def check_router_status(tgen): daemons.append("bgpd") if "zebra" in result: daemons.append("zebra") + if "pimd" in result: + daemons.append("pimd") rnode.startDaemons(daemons) @@ -593,13 +597,13 @@ def load_config_to_router(tgen, routerName, save_bkup=False): def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None): """ - API to get the link local ipv6 address of a perticular interface using + API to get the link local ipv6 address of a particular interface using FRR command 'show interface' - * `tgen`: tgen onject - * `router` : router for which hightest interface should be + * `tgen`: tgen object + * `router` : router for which highest interface should be calculated - * `intf` : interface for which linklocal address needs to be taken + * `intf` : interface for which link-local address needs to be taken * `vrf` : VRF name Usage @@ -688,7 +692,7 @@ def generate_support_bundle(): def start_topology(tgen, daemon=None): """ Starting topology, create tmp files which are loaded to routers - to start deamons and then start routers + to start daemons and then start routers * `tgen` : topogen object """ @@ -696,7 +700,7 @@ def start_topology(tgen, daemon=None): # Starting topology tgen.start_topology() - # Starting deamons + # Starting daemons router_list = tgen.routers() ROUTER_LIST = sorted( @@ -735,28 +739,41 @@ def start_topology(tgen, daemon=None): except IOError as err: logger.error("I/O error({0}): {1}".format(err.errno, err.strerror)) - # Loading empty zebra.conf file to router, to start the zebra deamon + # Loading empty zebra.conf file to router, to start the zebra daemon router.load_config( TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname) ) - # Loading empty bgpd.conf file to router, to start the bgp deamon + # Loading empty bgpd.conf file to router, to start the bgp daemon router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname)) if daemon and "ospfd" in daemon: - # Loading empty ospf.conf file to router, to start the bgp deamon + # Loading empty ospfd.conf file to router, to start the ospf daemon router.load_config( TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname) ) - # Starting routers + + if daemon and "ospf6d" in daemon: + # Loading empty ospf6d.conf file to router, to start the ospf6 daemon + router.load_config( + TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(TMPDIR, rname) + ) + + if daemon and "pimd" in daemon: + # Loading empty pimd.conf file to router, to start the pim daemon + router.load_config( + TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(TMPDIR, rname) + ) + + # Starting routers logger.info("Starting all routers once topology is created") tgen.start_router() def stop_router(tgen, router): """ - Router"s current config would be saved to /etc/frr/ for each deamon - and router and its deamons would be stopped. + Router's current config would be saved to /tmp/topotest/<suite>/<router> for each daemon + and router and its daemons would be stopped.
* `tgen` : topogen object * `router`: Device under test @@ -774,8 +791,8 @@ def stop_router(tgen, router): def start_router(tgen, router): """ - Router will started and config would be loaded from /etc/frr/ for each - deamon + Router will be started and config would be loaded from /tmp/topotest/<suite>/<router> for each + daemon * `tgen` : topogen object * `router`: Device under test @@ -786,8 +803,8 @@ def start_router(tgen, router): try: router_list = tgen.routers() - # Router and its deamons would be started and config would - # be loaded to router for each deamon from /etc/frr + # Router and its daemons would be started and config would - # be loaded to router for each daemon from /etc/frr router_list[router].start() # Waiting for router to come up @@ -835,9 +852,204 @@ def topo_daemons(tgen, topo): if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list: daemon_list.append("ospfd") + if "ospf6" in topo["routers"][rtr] and "ospf6d" not in daemon_list: + daemon_list.append("ospf6d") + + for val in topo["routers"][rtr]["links"].values(): + if "pim" in val and "pimd" not in daemon_list: + daemon_list.append("pimd") + break + return daemon_list +def add_interfaces_to_vlan(tgen, input_dict): + """ + Add interfaces to VLAN; the vlan package needs to be installed on the machine + + * `tgen`: tgen object + * `input_dict` : interfaces to be added to vlans + + input_dict= { + "r1":{ + "vlan":{ + VLAN_1: [{ + intf_r1_s1: { + "ip": "10.1.1.1", + "subnet": "255.255.255.0" + } + }] + } + } + } + + add_interfaces_to_vlan(tgen, input_dict) + + """ + + router_list = tgen.routers() + for dut in input_dict.keys(): + rnode = tgen.routers()[dut] + + if "vlan" in input_dict[dut]: + for vlan, interfaces in input_dict[dut]["vlan"].items(): + for intf_dict in interfaces: + for interface, data in intf_dict.items(): + # Adding interface to VLAN + cmd = "vconfig add {} {}".format(interface, vlan) + logger.info("[DUT: %s]: Running command: %s", dut, cmd) + rnode.run(cmd) + + vlan_intf = "{}.{}".format(interface, vlan) + + ip = data["ip"] + subnet = data["subnet"] + + # Bringing interface up + cmd = "ip link set up {}".format(vlan_intf) + logger.info("[DUT: %s]: Running command: %s", dut, cmd) + rnode.run(cmd) + + # Assigning IP address + cmd = "ifconfig {} {} netmask {}".format(vlan_intf, ip, subnet) + logger.info("[DUT: %s]: Running command: %s", dut, cmd) + rnode.run(cmd) + + +def tcpdump_capture_start( + tgen, + router, + intf, + protocol=None, + grepstr=None, + timeout=0, + options=None, + cap_file=None, + background=True, +): + """ + API to capture network packets using tcpdump. + + Packages used : tcpdump + + Parameters + ---------- + * `tgen`: topogen object. + * `router`: router on which packets have to be captured. + * `intf` : interface for capture. + * `protocol` : protocol for which packet needs to be captured. + * `grepstr` : string to filter out tcpdump output. + * `timeout` : Time for which packet needs to be captured. + * `options` : options for tcpdump, all tcpdump options can be used. + * `cap_file` : filename to store capture dump. + * `background` : Make tcpdump run in the background.
+ + Usage + ----- + tcpdump_result = tcpdump_capture_start(tgen, 'r2', intf, protocol='tcp', timeout=20, + options='-A -vv -x > r2bgp.txt ') + Returns + ------- + 1) True for successful capture + 2) errormsg - when tcpdump fails + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[router] + + if timeout > 0: + cmd = "timeout {}".format(timeout) + else: + cmd = "" + + cmdargs = "{} tcpdump".format(cmd) + + if intf: + cmdargs += " -i {}".format(str(intf)) + if protocol: + cmdargs += " {}".format(str(protocol)) + if options: + cmdargs += " -s 0 {}".format(str(options)) + + if cap_file: + file_name = os.path.join(LOGDIR, tgen.modname, router, cap_file) + cmdargs += " -w {}".format(str(file_name)) + # Remove existing capture file + rnode.run("rm -rf {}".format(file_name)) + + if grepstr: + cmdargs += ' | grep "{}"'.format(str(grepstr)) + + logger.info("Running tcpdump command: [%s]", cmdargs) + if not background: + rnode.run(cmdargs) + else: + rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs)) + + # Check if tcpdump process is running + if background: + result = rnode.run("pgrep tcpdump") + logger.debug("ps -ef | grep tcpdump \n {}".format(result)) + + if not result: + errormsg = "tcpdump is not running {}".format("tcpdump") + return errormsg + else: + logger.info("Packet capture started on %s: interface %s", router, intf) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def tcpdump_capture_stop(tgen, router): + """ + API to stop a packet capture started with tcpdump_capture_start. + + Packages used : tcpdump + + Parameters + ---------- + * `tgen`: topogen object. + * `router`: router on which the packet capture has to be stopped. + + Usage + ----- + tcpdump_capture_stop(tgen, 'r2') + Returns + ------- + 1) True for successful capture stop + 2) errormsg - when tcpdump is not running + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[router] + + # Check if tcpdump process is running + result = rnode.run("ps -ef | grep tcpdump") + logger.debug("ps -ef | grep tcpdump \n {}".format(result)) + + if not re_search(r"{}".format("tcpdump"), result): + errormsg = "tcpdump is not running {}".format("tcpdump") + return errormsg + else: + ppid = tgen.net.nameToNode[rnode.name].pid + rnode.run("set +m; pkill -P %s tcpdump &> /dev/null" % ppid) + logger.info("Stopped tcpdump capture") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + ############################################# # Common APIs, will be used by all protocols ############################################# @@ -1137,11 +1349,11 @@ def generate_ips(network, no_of_ips): """ Returns list of IPs.
based on start_ip and no_of_ips + * `network` : from here the ip will start generating, start_ip will be * `no_of_ips` : these many IPs will be generated """ - ipaddress_list = [] if type(network) is not list: network = [network] @@ -1152,14 +1364,20 @@ def generate_ips(network, no_of_ips): mask = int(start_ipaddr.split("/")[1]) else: logger.debug("start_ipaddr {} must have a / in it".format(start_ipaddr)) - assert(0) + assert 0 addr_type = validate_ip_address(start_ip) if addr_type == "ipv4": - start_ip = ipaddress.IPv4Address(frr_unicode(start_ip)) + if start_ip == "0.0.0.0" and mask == 0 and no_of_ips == 1: + ipaddress_list.append("{}/{}".format(start_ip, mask)) + return ipaddress_list + start_ip = ipaddress.IPv4Address(unicode(start_ip)) step = 2 ** (32 - mask) if addr_type == "ipv6": - start_ip = ipaddress.IPv6Address(frr_unicode(start_ip)) + if start_ip == "0::0" and mask == 0 and no_of_ips == 1: + ipaddress_list.append("{}/{}".format(start_ip, mask)) + return ipaddress_list + start_ip = ipaddress.IPv6Address(unicode(start_ip)) step = 2 ** (128 - mask) next_ip = start_ip @@ -1181,7 +1399,7 @@ def find_interface_with_greater_ip(topo, router, loopback=True, interface=True): it will return highest IP from loopback IPs otherwise from physical interface IPs. * `topo` : json file data - * `router` : router for which hightest interface should be calculated + * `router` : router for which highest interface should be calculated """ link_data = topo["routers"][router]["links"] @@ -1287,7 +1505,6 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict _wait = kwargs.pop("wait", wait) _attempts = kwargs.pop("attempts", attempts) _attempts = int(_attempts) - expected = True if _attempts < 0: raise ValueError("attempts must be 0 or greater") @@ -1297,12 +1514,10 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict _return_is_str = kwargs.pop("return_is_str", return_is_str) _return_is_dict = kwargs.pop("return_is_str", return_is_dict) + _expected = kwargs.setdefault("expected", True) + kwargs.pop("expected") for i in range(1, _attempts + 1): try: - _expected = kwargs.setdefault("expected", True) - if _expected is False: - expected = _expected - kwargs.pop("expected") ret = func(*args, **kwargs) logger.debug("Function returned %s", ret) if _return_is_str and isinstance(ret, bool) and _expected: @@ -1314,11 +1529,11 @@ def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict if _return_is_dict and isinstance(ret, dict): return ret - if _attempts == i and expected: + if _attempts == i: generate_support_bundle() return ret except Exception as err: - if _attempts == i and expected: + if _attempts == i: generate_support_bundle() logger.info("Max number of attempts (%r) reached", _attempts) raise @@ -1360,6 +1575,17 @@ def step(msg, reset=False): _step(msg, reset) +def do_countdown(secs): + """ + Countdown timer display + """ + for i in range(secs, 0, -1): + sys.stdout.write("{} ".format(str(i))) + sys.stdout.flush() + sleep(1) + return + + ############################################# # These APIs, will used by testcase ############################################# @@ -1378,6 +1604,25 @@ def create_interfaces_cfg(tgen, topo, build=False): ------- True or False """ + + def _create_interfaces_ospf_cfg(ospf, c_data, data, ospf_keywords): + interface_data = [] + ip_ospf = "ipv6 ospf6" if ospf == "ospf6" else "ip ospf" + for keyword in ospf_keywords: + if keyword in data[ospf]: + intf_ospf_value = 
c_data["links"][destRouterLink][ospf][keyword] + if "delete" in data and data["delete"]: + interface_data.append( + "no {} {}".format(ip_ospf, keyword.replace("_", "-")) + ) + else: + interface_data.append( + "{} {} {}".format( + ip_ospf, keyword.replace("_", "-"), intf_ospf_value + ) + ) + return interface_data + result = False topo = deepcopy(topo) @@ -1424,66 +1669,26 @@ def create_interfaces_cfg(tgen, topo, build=False): else: interface_data.append("ipv6 address {}\n".format(intf_addr)) + ospf_keywords = [ + "hello_interval", + "dead_interval", + "network", + "priority", + "cost", + ] if "ospf" in data: - ospf_data = data["ospf"] - if "area" in ospf_data: - intf_ospf_area = c_data["links"][destRouterLink]["ospf"]["area"] - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf area") - else: - interface_data.append( - "ip ospf area {}".format(intf_ospf_area) - ) - - if "hello_interval" in ospf_data: - intf_ospf_hello = c_data["links"][destRouterLink]["ospf"][ - "hello_interval" - ] - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf " " hello-interval") - else: - interface_data.append( - "ip ospf " " hello-interval {}".format(intf_ospf_hello) - ) - - if "dead_interval" in ospf_data: - intf_ospf_dead = c_data["links"][destRouterLink]["ospf"][ - "dead_interval" - ] - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf" " dead-interval") - else: - interface_data.append( - "ip ospf " " dead-interval {}".format(intf_ospf_dead) - ) - - if "network" in ospf_data: - intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][ - "network" - ] - if "delete" in data and data["delete"]: - interface_data.append( - "no ip ospf" " network {}".format(intf_ospf_nw) - ) - else: - interface_data.append( - "ip ospf" " network {}".format(intf_ospf_nw) - ) - - if "priority" in ospf_data: - intf_ospf_nw = c_data["links"][destRouterLink]["ospf"][ - "priority" - ] + interface_data += _create_interfaces_ospf_cfg( + "ospf", c_data, data, ospf_keywords + ["area"] + ) + if "ospf6" in data: + interface_data += _create_interfaces_ospf_cfg( + "ospf6", c_data, data, ospf_keywords + ) - if "delete" in data and data["delete"]: - interface_data.append("no ip ospf" " priority") - else: - interface_data.append( - "ip ospf" " priority {}".format(intf_ospf_nw) - ) result = create_common_configuration( tgen, c_router, interface_data, "interface_config", build=build ) + except InvalidCLIError: # Traceback errormsg = traceback.format_exc() @@ -2222,9 +2427,9 @@ def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False): ----- dut = "r3" intf = "r3-r1-eth0" - # Shut down ineterface + # Shut down interface shutdown_bringup_interface(tgen, dut, intf, False) - # Bring up ineterface + # Bring up interface shutdown_bringup_interface(tgen, dut, intf, True) Returns ------- @@ -2233,13 +2438,58 @@ def shutdown_bringup_interface(tgen, dut, intf_name, ifaceaction=False): router_list = tgen.routers() if ifaceaction: - logger.info("Bringing up interface : {}".format(intf_name)) + logger.info("Bringing up interface {} : {}".format(dut, intf_name)) else: - logger.info("Shutting down interface : {}".format(intf_name)) + logger.info("Shutting down interface {} : {}".format(dut, intf_name)) interface_set_status(router_list[dut], intf_name, ifaceaction) +def stop_router(tgen, router): + """ + Router's current config would be saved to /tmp/topotest/<suite>/<router> + for each daemon and router and its daemons would be stopped. 
+ + * `tgen` : topogen object + * `router`: Device under test + """ + + router_list = tgen.routers() + + # Saving router config to /etc/frr, which will be loaded to router + # when it starts + router_list[router].vtysh_cmd("write memory") + + # Stop router + router_list[router].stop() + + +def start_router(tgen, router): + """ + Router will be started and config would be loaded from + /tmp/topotest/<suite>/<router> for each daemon + + * `tgen` : topogen object + * `router`: Device under test + """ + + logger.debug("Entering lib API: start_router") + + try: + router_list = tgen.routers() + + # Router and its daemons would be started and config would + # be loaded to router for each daemon from /etc/frr + router_list[router].start() + + except Exception as e: + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: start_router()") + + def addKernelRoute( tgen, router, intf, group_addr_range, next_hop=None, src=None, del_action=None ): @@ -2250,7 +2500,7 @@ def addKernelRoute( ----------- * `tgen` : Topogen object * `router`: router for which kernal routes needs to be added - * `intf`: interface name, for which kernal routes needs to be added + * `intf`: interface name, for which kernel routes needs to be added * `bindToAddress`: bind to <host>, an interface or multicast address @@ -2313,7 +2563,7 @@ def configure_vxlan(tgen, input_dict): """ Add and configure vxlan - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict` : data for vxlan config Usage: @@ -2414,7 +2664,7 @@ def configure_brctl(tgen, topo, input_dict): """ Add and configure brctl - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict` : data for brctl config Usage: @@ -2508,7 +2758,7 @@ def configure_interface_mac(tgen, input_dict): """ Add and configure brctl - * `tgen`: tgen onject + * `tgen`: tgen object * `input_dict` : data for mac config input_mac= { @@ -2562,7 +2812,7 @@ def verify_rib( tag=None, metric=None, fib=None, - count_only=False + count_only=False, ): """ Data will be read from input_dict or input JSON file, API will generate @@ -2754,8 +3004,10 @@ def verify_rib( "Nexthops are missing for " "route {} in RIB of router {}: " "expected {}, found {}\n".format( - st_rt, dut, len(next_hop), - len(found_hops) + st_rt, + dut, + len(next_hop), + len(found_hops), ) ) return errormsg @@ -2802,7 +3054,11 @@ def verify_rib( errormsg = ( "[DUT: {}]: tag value {}" " is not matched for" - " route {} in RIB \n".format(dut, _tag, st_rt,) + " route {} in RIB \n".format( + dut, + _tag, + st_rt, + ) ) return errormsg @@ -2819,7 +3075,11 @@ def verify_rib( errormsg = ( "[DUT: {}]: metric value " "{} is not matched for " - "route {} in RIB \n".format(dut, metric, st_rt,) + "route {} in RIB \n".format( + dut, + metric, + st_rt, + ) ) return errormsg @@ -2868,7 +3128,9 @@ def verify_rib( for advertise_network_dict in advertise_network: if "vrf" in advertise_network_dict: - cmd = "{} vrf {} json".format(command, advertise_network_dict["vrf"]) + cmd = "{} vrf {} json".format( + command, advertise_network_dict["vrf"] + ) else: cmd = "{} json".format(command) @@ -2947,6 +3209,7 @@ def verify_rib( logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return True + @retry(attempts=6, wait=2, return_is_str=True) def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None): """ @@ -3327,7 +3590,12 @@ def verify_prefix_lists(tgen, input_dict): for addr_type in prefix_lists_addr: if not check_address_types(addr_type): continue - + # show ip 
prefix list + if addr_type == "ipv4": + cmd = "show ip prefix-list" + else: + cmd = "show {} prefix-list".format(addr_type) + show_prefix_list = run_frr_cmd(rnode, cmd) for prefix_list in prefix_lists_addr[addr_type].keys(): if prefix_list in show_prefix_list: errormsg = ( @@ -3550,7 +3818,6 @@ def verify_cli_json(tgen, input_dict): """ API to verify if JSON is available for clis command. - Parameters ---------- * `tgen`: topogen object @@ -3720,7 +3987,6 @@ def verify_vrf_vni(tgen, input_dict): """ API to verify vrf vni details using "show vrf vni json" command. - Parameters ---------- * `tgen`: topogen object @@ -3852,3 +4118,269 @@ def required_linux_kernel_version(required_version): ) return error_msg return True + + +def iperfSendIGMPJoin( + tgen, server, bindToAddress, l4Type="UDP", join_interval=1, inc_step=0, repeat=0 +): + """ + Run iperf to send IGMP join and traffic + + Parameters: + ----------- + * `tgen` : Topogen object + * `l4Type`: string, one of [ TCP, UDP ] + * `server`: iperf server, from where IGMP join would be sent + * `bindToAddress`: bind to <host>, an interface or multicast + address + * `join_interval`: seconds between periodic bandwidth reports + * `inc_step`: increamental steps, by default 0 + * `repeat`: Repetition of group, by default 0 + + returns: + -------- + errormsg or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[server] + + iperfArgs = "iperf -s " + + # UDP/TCP + if l4Type == "UDP": + iperfArgs += "-u " + + iperfCmd = iperfArgs + # Group address range to cover + if bindToAddress: + if type(bindToAddress) is not list: + Address = [] + start = ipaddress.IPv4Address(frr_unicode(bindToAddress)) + + Address = [start] + next_ip = start + + count = 1 + while count < repeat: + next_ip += inc_step + Address.append(next_ip) + count += 1 + bindToAddress = Address + + for bindTo in bindToAddress: + iperfArgs = iperfCmd + iperfArgs += "-B %s " % bindTo + + # Join interval + if join_interval: + iperfArgs += "-i %d " % join_interval + + iperfArgs += " &>/dev/null &" + # Run iperf command to send IGMP join + logger.debug("[DUT: {}]: Running command: [{}]".format(server, iperfArgs)) + output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs)) + + # Check if iperf process is running + if output: + pid = output.split()[1] + rnode.run("touch /var/run/frr/iperf_server.pid") + rnode.run("echo %s >> /var/run/frr/iperf_server.pid" % pid) + else: + errormsg = "IGMP join is not sent for {}. 
Error: {}".format(bindTo, output) + logger.error(output) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def iperfSendTraffic( + tgen, + client, + bindToAddress, + ttl, + time=0, + l4Type="UDP", + inc_step=0, + repeat=0, + mappedAddress=None, +): + """ + Run iperf to send IGMP join and traffic + + Parameters: + ----------- + * `tgen` : Topogen object + * `l4Type`: string, one of [ TCP, UDP ] + * `client`: iperf client, from where iperf traffic would be sent + * `bindToAddress`: bind to <host>, an interface or multicast + address + * `ttl`: time to live + * `time`: time in seconds to transmit for + * `inc_step`: increamental steps, by default 0 + * `repeat`: Repetition of group, by default 0 + * `mappedAddress`: Mapped Interface ip address + + returns: + -------- + errormsg or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[client] + + iperfArgs = "iperf -c " + + iperfCmd = iperfArgs + # Group address range to cover + if bindToAddress: + if type(bindToAddress) is not list: + Address = [] + start = ipaddress.IPv4Address(frr_unicode(bindToAddress)) + + Address = [start] + next_ip = start + + count = 1 + while count < repeat: + next_ip += inc_step + Address.append(next_ip) + count += 1 + bindToAddress = Address + + for bindTo in bindToAddress: + iperfArgs = iperfCmd + iperfArgs += "%s " % bindTo + + # Mapped Interface IP + if mappedAddress: + iperfArgs += "-B %s " % mappedAddress + + # UDP/TCP + if l4Type == "UDP": + iperfArgs += "-u -b 0.012m " + + # TTL + if ttl: + iperfArgs += "-T %d " % ttl + + # Time + if time: + iperfArgs += "-t %d " % time + + iperfArgs += " &>/dev/null &" + + # Run iperf command to send multicast traffic + logger.debug("[DUT: {}]: Running command: [{}]".format(client, iperfArgs)) + output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs)) + + # Check if iperf process is running + if output: + pid = output.split()[1] + rnode.run("touch /var/run/frr/iperf_client.pid") + rnode.run("echo %s >> /var/run/frr/iperf_client.pid" % pid) + else: + errormsg = "Multicast traffic is not sent for {}. 
Error {}".format( + bindTo, output + ) + logger.error(output) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def kill_iperf(tgen, dut=None, action=None): + """ + Killing iperf process if running for any router in topology + Parameters: + ----------- + * `tgen` : Topogen object + * `dut` : Any iperf hostname to send igmp prune + * `action`: to kill igmp join iperf action is remove_join + to kill traffic iperf action is remove_traffic + + Usage + ---- + kill_iperf(tgen, dut ="i6", action="remove_join") + + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + router_list = tgen.routers() + for router, rnode in router_list.items(): + # Run iperf command to send IGMP join + pid_client = rnode.run("cat /var/run/frr/iperf_client.pid") + pid_server = rnode.run("cat /var/run/frr/iperf_server.pid") + if action == "remove_join": + pids = pid_server + elif action == "remove_traffic": + pids = pid_client + else: + pids = "\n".join([pid_client, pid_server]) + for pid in pids.split("\n"): + pid = pid.strip() + if pid.isdigit(): + cmd = "set +m; kill -9 %s &> /dev/null" % pid + logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd)) + rnode.run(cmd) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + +def verify_ip_nht(tgen, input_dict): + """ + Running "show ip nht" command and verifying given nexthop resolution + Parameters + ---------- + * `tgen` : topogen object + * `input_dict`: data to verify nexthop + Usage + ----- + input_dict_4 = { + "r1": { + nh: { + "Address": nh, + "resolvedVia": "connected", + "nexthops": { + "nexthop1": { + "Interface": intf + } + } + } + } + } + result = verify_ip_nht(tgen, input_dict_4) + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: verify_ip_nht()") + + for router in input_dict.keys(): + if router not in tgen.routers(): + continue + + rnode = tgen.routers()[router] + nh_list = input_dict[router] + + if validate_ip_address(nh_list.keys()[0]) is "ipv6": + show_ip_nht = run_frr_cmd(rnode, "show ipv6 nht") + else: + show_ip_nht = run_frr_cmd(rnode, "show ip nht") + + for nh in nh_list: + if nh in show_ip_nht: + logger.info("Nexthop %s is resolved on %s", nh, router) + return True + else: + errormsg = "Nexthop {} is resolved on {}".format(nh, router) + return errormsg + + logger.debug("Exiting lib API: verify_ip_nht()") + return False diff --git a/tests/topotests/lib/lutil.py b/tests/topotests/lib/lutil.py index 9cbea67af1..0b6a946fda 100644 --- a/tests/topotests/lib/lutil.py +++ b/tests/topotests/lib/lutil.py @@ -23,6 +23,7 @@ import time import datetime import json import math +import time from lib.topolog import logger from mininet.net import Mininet @@ -194,8 +195,9 @@ Total %-4d %-4d %d\n\ if op != "wait": self.l_line += 1 self.log( - "(#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:" + "%s (#%d) %s:%s COMMAND:%s:%s:%s:%s:%s:" % ( + time.asctime(), self.l_total + 1, self.l_filename, self.l_line, diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py index 9f3d4841b0..5bc9f14fea 100644 --- a/tests/topotests/lib/ospf.py +++ b/tests/topotests/lib/ospf.py @@ -62,7 +62,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru "r1": { "ospf": { "router_id": "22.22.22.22", - "area": [{ "id":0.0.0.0, "type": "nssa"}] + "area": [{ "id": "0.0.0.0", "type": "nssa"}] } } @@ -94,7 +94,7 @@ def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=Tru 
return result -def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True): +def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True, ospf="ospf"): """ Helper API to create ospf global configuration. @@ -105,6 +105,33 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True * `router` : router to be configured. * `build` : Only for initial setup phase this is set as True. * `load_config` : Loading the config to router this is set as True. + * `ospf` : either 'ospf' or 'ospf6' + + Usage + ----- + input_dict = { + "routers": { + "r1": { + "links": { + "r3": { + "ipv6": "2013:13::1/64", + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf6": { + "router_id": "1.1.1.1", + "neighbors": { + "r3": { + "area": "1.1.1.1" + } + } + } + } + } Returns ------- @@ -115,17 +142,17 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True logger.debug("Entering lib API: __create_ospf_global()") try: - ospf_data = input_dict[router]["ospf"] + ospf_data = input_dict[router][ospf] del_ospf_action = ospf_data.setdefault("delete", False) if del_ospf_action: - config_data = ["no router ospf"] + config_data = ["no router {}".format(ospf)] result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config + tgen, router, config_data, ospf, build, load_config ) return result config_data = [] - cmd = "router ospf" + cmd = "router {}".format(ospf) config_data.append(cmd) @@ -133,9 +160,9 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True router_id = ospf_data.setdefault("router_id", None) del_router_id = ospf_data.setdefault("del_router_id", False) if del_router_id: - config_data.append("no ospf router-id") + config_data.append("no {} router-id".format(ospf)) if router_id: - config_data.append("ospf router-id {}".format(router_id)) + config_data.append("{} router-id {}".format(ospf, router_id)) # redistribute command redistribute_data = ospf_data.setdefault("redistribute", {}) @@ -154,6 +181,7 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) + # area information area_data = ospf_data.setdefault("area", {}) if area_data: @@ -172,9 +200,20 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) - result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config - ) + + # area interface information for ospf6d only + if ospf == "ospf6": + area_iface = ospf_data.setdefault("neighbors", {}) + if area_iface: + for neighbor in area_iface: + if "area" in area_iface[neighbor]: + iface = input_dict[router]["links"][neighbor]["interface"] + cmd = "interface {} area {}".format( + iface, area_iface[neighbor]["area"] + ) + if area_iface[neighbor].setdefault("delete", False): + cmd = "no {}".format(cmd) + config_data.append(cmd) # summary information summary_data = ospf_data.setdefault("summary-address", {}) @@ -200,8 +239,9 @@ def __create_ospf_global(tgen, input_dict, router, build=False, load_config=True if del_action: cmd = "no {}".format(cmd) config_data.append(cmd) + result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config + tgen, router, config_data, ospf, build, load_config ) except InvalidCLIError: @@ -238,7 +278,7 @@ def create_router_ospf6(tgen, topo, 
input_dict=None, build=False, load_config=Tr ------- True or False """ - logger.debug("Entering lib API: create_router_ospf()") + logger.debug("Entering lib API: create_router_ospf6()") result = False if not input_dict: @@ -247,67 +287,15 @@ def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=Tr topo = topo["routers"] input_dict = deepcopy(input_dict) for router in input_dict.keys(): - if "ospf" not in input_dict[router]: - logger.debug("Router %s: 'ospf' not present in input_dict", router) + if "ospf6" not in input_dict[router]: + logger.debug("Router %s: 'ospf6' not present in input_dict", router) continue - result = __create_ospf_global(tgen, input_dict, router, build, load_config) - - logger.debug("Exiting lib API: create_router_ospf()") - return result - - -def __create_ospf6_global(tgen, input_dict, router, build=False, load_config=True): - """ - Helper API to create ospf global configuration. - - Parameters - ---------- - * `tgen` : Topogen object - * `input_dict` : Input dict data, required when configuring from testcase - * `router` : router id to be configured. - * `build` : Only for initial setup phase this is set as True. - - Returns - ------- - True or False - """ - - result = False - logger.debug("Entering lib API: __create_ospf_global()") - try: - - ospf_data = input_dict[router]["ospf6"] - del_ospf_action = ospf_data.setdefault("delete", False) - if del_ospf_action: - config_data = ["no ipv6 router ospf"] - result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config - ) - return result - - config_data = [] - cmd = "router ospf" - - config_data.append(cmd) - - router_id = ospf_data.setdefault("router_id", None) - del_router_id = ospf_data.setdefault("del_router_id", False) - if del_router_id: - config_data.append("no ospf router-id") - if router_id: - config_data.append("ospf router-id {}".format(router_id)) - - result = create_common_configuration( - tgen, router, config_data, "ospf", build, load_config + result = __create_ospf_global( + tgen, input_dict, router, build, load_config, "ospf6" ) - except InvalidCLIError: - # Traceback - errormsg = traceback.format_exc() - logger.error(errormsg) - return errormsg - logger.debug("Exiting lib API: create_ospf_global()") + logger.debug("Exiting lib API: create_router_ospf6()") return result @@ -330,7 +318,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= "links": { "r2": { "ospf": { - "authentication": 'message-digest', + "authentication": "message-digest", "authentication-key": "ospf", "message-digest-key": "10" } @@ -379,6 +367,7 @@ def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config= if data_ospf_area: cmd = "ip ospf area {}".format(data_ospf_area) config_data.append(cmd) + # interface ospf auth if data_ospf_auth: if data_ospf_auth == "null": @@ -464,6 +453,32 @@ def clear_ospf(tgen, router): logger.debug("Exiting lib API: clear_ospf()") +def redistribute_ospf(tgen, topo, dut, route_type, **kwargs): + """ + Redstribution of routes inside ospf. + + Parameters + ---------- + * `tgen`: Topogen object + * `topo` : json file data + * `dut`: device under test + * `route_type`: "static" or "connected" or .... 
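As a sketch only, this is roughly the dict the wrapper assembles before calling create_router_ospf, using the values from the Usage example below:

    redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4")
    # builds approximately:
    # {"r0": {"ospf": {"redistribute": [
    #     {"redist_type": "static", "route_map": "rmap_ipv4"}]}}}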
+ * `kwargs`: pass extra information (see below) + + Usage + ----- + redistribute_ospf(tgen, topo, "r0", "static", delete=True) + redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") + """ + + ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": route_type}]}}} + for k, v in kwargs.items(): + ospf_red[dut]["ospf"]["redistribute"][0][k] = v + + result = create_router_ospf(tgen, topo, ospf_red) + assert result is True, "Testcase : Failed \n Error: {}".format(result) + + ################################ # Verification procs ################################ @@ -525,7 +540,7 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): logger.info("Verifying OSPF neighborship on router %s:", router) show_ospf_json = run_frr_cmd( - rnode, "show ip ospf neighbor all json", isjson=True + rnode, "show ip ospf neighbor all json", isjson=True ) # Verifying output dictionary show_ospf_json is empty or not @@ -658,6 +673,70 @@ def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False): return result +################################ +# Verification procs +################################ +@retry(attempts=40, wait=2, return_is_str=True) +def verify_ospf6_neighbor(tgen, topo): + """ + This API is to verify ospf neighborship by running + show ip ospf neighbour command, + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + + Usage + ----- + Check FULL neighbors. + verify_ospf_neighbor(tgen, topo) + + result = verify_ospf_neighbor(tgen, topo) + + Returns + ------- + True or False (Error Message) + """ + + logger.debug("Entering lib API: verify_ospf6_neighbor()") + result = False + for router, rnode in tgen.routers().items(): + if "ospf6" not in topo["routers"][router]: + continue + + logger.info("Verifying OSPF6 neighborship on router %s:", router) + show_ospf_json = run_frr_cmd( + rnode, "show ipv6 ospf6 neighbor json", isjson=True + ) + + if not show_ospf_json: + return "OSPF6 is not running" + + ospf_nbr_list = topo["routers"][router]["ospf6"]["neighbors"] + no_of_peer = 0 + for ospf_nbr in ospf_nbr_list: + ospf_nbr_rid = topo["routers"][ospf_nbr]["ospf6"]["router_id"] + for neighbor in show_ospf_json["neighbors"]: + if neighbor["neighborId"] == ospf_nbr_rid: + nh_state = neighbor["state"] + break + else: + return "[DUT: {}] OSPF6 peer {} missing".format(router, data_rid) + + if nh_state == "Full": + no_of_peer += 1 + + if no_of_peer == len(ospf_nbr_list): + logger.info("[DUT: {}] OSPF6 is Converged".format(router)) + result = True + else: + return "[DUT: {}] OSPF6 is not Converged".format(router) + + logger.debug("Exiting API: verify_ospf6_neighbor()") + return result + + @retry(attempts=21, wait=2, return_is_str=True) def verify_ospf_rib( tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None @@ -847,19 +926,23 @@ def verify_ospf_rib( if "routeType" not in ospf_rib_json[st_rt]: errormsg = ( "[DUT: {}]: routeType missing" - "for route {} in OSPF RIB \n".format(dut, st_rt) + " for route {} in OSPF RIB \n".format( + dut, st_rt + ) ) return errormsg elif _rtype != ospf_rib_json[st_rt]["routeType"]: errormsg = ( "[DUT: {}]: routeType mismatch" - "for route {} in OSPF RIB \n".format(dut, st_rt) + " for route {} in OSPF RIB \n".format( + dut, st_rt + ) ) return errormsg else: logger.info( - "DUT: {}]: Found routeType {}" - "for route {}".format(dut, _rtype, st_rt) + "[DUT: {}]: Found routeType {}" + " for route {}".format(dut, _rtype, st_rt) ) if tag: if "tag" not in ospf_rib_json[st_rt]: @@ -874,7 +957,11 
@@ def verify_ospf_rib( errormsg = ( "[DUT: {}]: tag value {}" " is not matched for" - " route {} in RIB \n".format(dut, _tag, st_rt,) + " route {} in RIB \n".format( + dut, + _tag, + st_rt, + ) ) return errormsg @@ -891,7 +978,11 @@ def verify_ospf_rib( errormsg = ( "[DUT: {}]: metric value " "{} is not matched for " - "route {} in RIB \n".format(dut, metric, st_rt,) + "route {} in RIB \n".format( + dut, + metric, + st_rt, + ) ) return errormsg diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py new file mode 100644 index 0000000000..294f60bf68 --- /dev/null +++ b/tests/topotests/lib/pim.py @@ -0,0 +1,3450 @@ +# Copyright (c) 2019 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. + +import sys +import os +import re +import datetime +import traceback +import pytest +from time import sleep +from copy import deepcopy +from lib.topolog import logger + +# Import common_config to use commomnly used APIs +from lib.common_config import ( + create_common_configuration, + InvalidCLIError, + retry, + run_frr_cmd, +) + +#### +CWD = os.path.dirname(os.path.realpath(__file__)) + + +def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True): + """ + API to configure pim on router + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from + testcase + * `build` : Only for initial setup phase this is set as True. + + Usage + ----- + input_dict = { + "r1": { + "pim": { + "disable" : ["l1-i1-eth1"], + "rp": [{ + "rp_addr" : "1.0.3.17". + "keep-alive-timer": "100" + "group_addr_range": ["224.1.1.0/24", "225.1.1.0/24"] + "prefix-list": "pf_list_1" + "delete": True + }] + } + } + } + + + Returns + ------- + True or False + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + if not input_dict: + input_dict = deepcopy(topo) + else: + topo = topo["routers"] + input_dict = deepcopy(input_dict) + for router in input_dict.keys(): + result = _enable_disable_pim(tgen, topo, input_dict, router, build) + + if "pim" not in input_dict[router]: + logger.debug("Router %s: 'pim' is not present in " "input_dict", router) + continue + + if result is True: + if "rp" not in input_dict[router]["pim"]: + continue + + result = _create_pim_config( + tgen, topo, input_dict, router, build, load_config + ) + if result is not True: + return False + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def _create_pim_config(tgen, topo, input_dict, router, build=False, load_config=False): + """ + Helper API to create pim configuration. 
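A rough sketch of the vtysh lines this helper ends up generating for the RP entry shown in the create_pim_config docstring above (the addresses, timer and prefix-list name are the illustrative values from that example):

    ip pim rp keep-alive-timer 100
    ip pim rp 1.0.3.17 224.1.1.0/24
    ip pim rp 1.0.3.17 225.1.1.0/24
    ip pim rp 1.0.3.17 prefix-list pf_list_1

Each line gains a leading "no " when the entry carries "delete": True.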
+ + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `router` : router id to be configured. + * `build` : Only for initial setup phase this is set as True. + + Returns + ------- + True or False + """ + + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + try: + + pim_data = input_dict[router]["pim"] + + for dut in tgen.routers(): + if "pim" not in input_dict[router]: + continue + + for destLink, data in topo[dut]["links"].items(): + if "pim" not in data: + continue + + if "rp" in pim_data: + config_data = [] + rp_data = pim_data["rp"] + + for rp_dict in deepcopy(rp_data): + # ip address of RP + if "rp_addr" not in rp_dict and build: + logger.error( + "Router %s: 'ip address of RP' not " + "present in input_dict/JSON", + router, + ) + + return False + rp_addr = rp_dict.setdefault("rp_addr", None) + + # Keep alive Timer + keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None) + + # Group Address range to cover + if "group_addr_range" not in rp_dict and build: + logger.error( + "Router %s:'Group Address range to cover'" + " not present in input_dict/JSON", + router, + ) + + return False + group_addr_range = rp_dict.setdefault("group_addr_range", None) + + # Group prefix-list filter + prefix_list = rp_dict.setdefault("prefix_list", None) + + # Delete rp config + del_action = rp_dict.setdefault("delete", False) + + if keep_alive_timer: + cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer) + config_data.append(cmd) + + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if rp_addr: + if group_addr_range: + if type(group_addr_range) is not list: + group_addr_range = [group_addr_range] + + for grp_addr in group_addr_range: + cmd = "ip pim rp {} {}".format(rp_addr, grp_addr) + config_data.append(cmd) + + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if prefix_list: + cmd = "ip pim rp {} prefix-list {}".format( + rp_addr, prefix_list + ) + config_data.append(cmd) + + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + result = create_common_configuration( + tgen, dut, config_data, "pim", build, load_config + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def create_igmp_config(tgen, topo, input_dict=None, build=False): + """ + API to configure igmp on router + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from + testcase + * `build` : Only for initial setup phase this is set as True. 
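As a rough illustration, for an interface r1-r0-eth0 enabling IGMP version 2 with a query-interval of 100 (values assumed for the example), the helper would emit approximately:

    interface r1-r0-eth0
    ip igmp
    ip igmp version 2
    ip igmp query-interval 100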
+ + Usage + ----- + input_dict = { + "r1": { + "igmp": { + "interfaces": { + "r1-r0-eth0" :{ + "igmp":{ + "version": "2", + "delete": True + "query": { + "query-interval" : 100, + "query-max-response-time": 200 + } + } + } + } + } + } + } + + Returns + ------- + True or False + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + result = False + if not input_dict: + input_dict = deepcopy(topo) + else: + topo = topo["routers"] + input_dict = deepcopy(input_dict) + for router in input_dict.keys(): + if "igmp" not in input_dict[router]: + logger.debug("Router %s: 'igmp' is not present in " "input_dict", router) + continue + + igmp_data = input_dict[router]["igmp"] + + if "interfaces" in igmp_data: + config_data = [] + intf_data = igmp_data["interfaces"] + + for intf_name in intf_data.keys(): + cmd = "interface {}".format(intf_name) + config_data.append(cmd) + protocol = "igmp" + del_action = intf_data[intf_name]["igmp"].setdefault("delete", False) + cmd = "ip igmp" + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + del_attr = intf_data[intf_name]["igmp"].setdefault("delete_attr", False) + for attribute, data in intf_data[intf_name]["igmp"].items(): + if attribute == "version": + cmd = "ip {} {} {}".format(protocol, attribute, data) + if del_action: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if attribute == "join": + for group in data: + cmd = "ip {} {} {}".format(protocol, attribute, group) + if del_attr: + cmd = "no {}".format(cmd) + config_data.append(cmd) + + if attribute == "query": + for query, value in data.items(): + if query != "delete": + cmd = "ip {} {} {}".format(protocol, query, value) + + if "delete" in intf_data[intf_name][protocol]["query"]: + cmd = "no {}".format(cmd) + + config_data.append(cmd) + try: + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=build + ) + except InvalidCLIError: + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def _enable_disable_pim(tgen, topo, input_dict, router, build=False): + """ + Helper API to enable or disable pim on interfaces + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `router` : router id to be configured. + * `build` : Only for initial setup phase this is set as True. 
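Roughly, for each link with "pim": "enable" this helper emits "interface <name>" followed by "ip pim", while interfaces listed under pim["disable"] get "no ip pim" instead; the interface names below are illustrative only:

    interface r1-r2-eth0
    ip pim
    interface l1-i1-eth1
    no ip pim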
+ + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + try: + config_data = [] + + enable_flag = True + # Disable pim on interface + if "pim" in input_dict[router]: + if "disable" in input_dict[router]["pim"]: + enable_flag = False + interfaces = input_dict[router]["pim"]["disable"] + + if type(interfaces) is not list: + interfaces = [interfaces] + + for interface in interfaces: + cmd = "interface {}".format(interface) + config_data.append(cmd) + config_data.append("no ip pim") + + # Enable pim on interface + if enable_flag: + for destRouterLink, data in sorted(topo[router]["links"].items()): + if "pim" in data and data["pim"] == "enable": + + # Loopback interfaces + if "type" in data and data["type"] == "loopback": + interface_name = destRouterLink + else: + interface_name = data["interface"] + + cmd = "interface {}".format(interface_name) + config_data.append(cmd) + config_data.append("ip pim") + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=build + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping): + """ + Add physical interfaces tp RP for all the RPs + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `interface` : RP interface + * `rp` : rp for given topology + * `rp_mapping` : dictionary of all groups and RPs + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + + for group, rp_list in rp_mapping.items(): + for _rp in rp_list: + config_data.append("interface {}".format(interface)) + config_data.append("ip address {}".format(_rp)) + config_data.append("ip pim") + + result = create_common_configuration( + tgen, rp, config_data, "interface_config" + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def find_rp_details(tgen, topo): + """ + Find who is RP in topology and returns list of RPs + + Parameters: + ----------- + * `tgen` : Topogen object + * `topo` : json file data + + returns: + -------- + errormsg or True + """ + + rp_details = {} + + router_list = tgen.routers() + topo_data = topo["routers"] + + for router in router_list.keys(): + + if "pim" not in topo_data[router]: + continue + + pim_data = topo_data[router]["pim"] + if "rp" in pim_data: + rp_data = pim_data["rp"] + for rp_dict in rp_data: + # ip address of RP + rp_addr = rp_dict["rp_addr"] + + for link, data in topo["routers"][router]["links"].items(): + if data["ipv4"].split("/")[0] == rp_addr: + rp_details[router] = rp_addr + + return rp_details + + +def configure_pim_force_expire(tgen, topo, input_dict, build=False): + """ + Helper API to create pim configuration. + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `input_dict` : Input dict data, required when configuring from testcase + * `build` : Only for initial setup phase this is set as True. 
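For the source/group pair used in the Usage example below, the generated line would be approximately:

    ip pim force-expire source 10.0.10.1 group 255.1.1.1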
+ + Usage + ----- + input_dict ={ + "l1": { + "pim": { + "force_expire":{ + "10.0.10.1": ["255.1.1.1"] + } + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + + Returns + ------- + True or False + """ + + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + try: + + for dut in input_dict.keys(): + if "pim" not in input_dict[dut]: + continue + + pim_data = input_dict[dut]["pim"] + + if "force_expire" in pim_data: + config_data = [] + force_expire_data = pim_data["force_expire"] + + for source, groups in force_expire_data.items(): + if type(groups) is not list: + groups = [groups] + + for group in groups: + cmd = "ip pim force-expire source {} group {}".format( + source, group + ) + config_data.append(cmd) + + result = create_common_configuration( + tgen, dut, config_data, "pim", build=build + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +############################################# +# Verification APIs +############################################# +@retry(attempts=6, wait=2, return_is_str=True) +def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None): + """ + Verify all PIM neighbors are up and running, config is verified + using "show ip pim neighbor" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : dut info + * `iface` : link for which PIM nbr need to check + * `nbr_ip` : neighbor ip of interface + + Usage + ----- + result = verify_pim_neighbors(tgen, topo, dut, iface=ens192, nbr_ip=20.1.1.2) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if dut is not None and dut != router: + continue + + rnode = tgen.routers()[router] + show_ip_pim_neighbor_json = rnode.vtysh_cmd( + "show ip pim neighbor json", isjson=True + ) + + for destLink, data in topo["routers"][router]["links"].items(): + if iface is not None and iface != data["interface"]: + continue + + if "type" in data and data["type"] == "loopback": + continue + + if "pim" not in data: + continue + + if "pim" in data and data["pim"] == "enable": + local_interface = data["interface"] + + if "-" in destLink: + # Spliting and storing destRouterLink data in tempList + tempList = destLink.split("-") + + # destRouter + destLink = tempList.pop(0) + + # Current Router Link + tempList.insert(0, router) + curRouter = "-".join(tempList) + else: + curRouter = router + if destLink not in topo["routers"]: + continue + data = topo["routers"][destLink]["links"][curRouter] + if "type" in data and data["type"] == "loopback": + continue + + if "pim" not in data: + continue + + logger.info("[DUT: %s]: Verifying PIM neighbor status:", router) + + if "pim" in data and data["pim"] == "enable": + pim_nh_intf_ip = data["ipv4"].split("/")[0] + + # Verifying PIM neighbor + if local_interface in show_ip_pim_neighbor_json: + if show_ip_pim_neighbor_json[local_interface]: + if ( + show_ip_pim_neighbor_json[local_interface][pim_nh_intf_ip][ + "neighbor" + ] + != pim_nh_intf_ip + ): + errormsg = ( + "[DUT %s]: Local interface: %s, PIM" + " neighbor check failed " + "Expected neighbor: %s, Found neighbor:" + " %s" + % ( + router, + local_interface, + pim_nh_intf_ip, + show_ip_pim_neighbor_json[local_interface][ + 
pim_nh_intf_ip + ]["neighbor"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: Local interface: %s, Found" + " expected PIM neighbor %s", + router, + local_interface, + pim_nh_intf_ip, + ) + else: + errormsg = ( + "[DUT %s]: Local interface: %s, and" + "interface ip: %s is not found in " + "PIM neighbor " % (router, local_interface, pim_nh_intf_ip) + ) + return errormsg + else: + errormsg = ( + "[DUT %s]: Local interface: %s, is not " + "present in PIM neighbor " % (router, local_interface) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_igmp_groups(tgen, dut, interface, group_addresses): + """ + Verify IGMP groups are received from an intended interface + by running "show ip igmp groups" command + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `interface`: interface, from which IGMP groups would be received + * `group_addresses`: IGMP group address + + Usage + ----- + dut = "r1" + interface = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_igmp_groups(tgen, dut, interface, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying IGMP groups received:", dut) + show_ip_igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + if interface in show_ip_igmp_json: + show_ip_igmp_json = show_ip_igmp_json[interface]["groups"] + else: + errormsg = ( + "[DUT %s]: Verifying IGMP group received" + " from interface %s [FAILED]!! " % (dut, interface) + ) + return errormsg + + found = False + for grp_addr in group_addresses: + for index in show_ip_igmp_json: + if index["group"] == grp_addr: + found = True + break + if found is not True: + errormsg = ( + "[DUT %s]: Verifying IGMP group received" + " from interface %s [FAILED]!! " + " Expected not found: %s" % (dut, interface, grp_addr) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying IGMP group %s received " + "from interface %s [PASSED]!! 
", + dut, + grp_addr, + interface, + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_upstream_iif( + tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1 +): + """ + Verify upstream inbound interface is updated correctly + by running "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `src_address`: source address + * `group_addresses`: IGMP group address + * `joinState`: upstream join state + * `refCount`: refCount value + + Usage + ----- + dut = "r1" + iif = "r1-r0-eth0" + src_address = "*" + group_address = "225.1.1.1" + result = verify_upstream_iif(tgen, dut, iif, src_address, group_address, + state, refCount) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info( + "[DUT: %s]: Verifying upstream Inbound Interface" " for IGMP groups received:", + dut, + ) + show_ip_pim_upstream_json = run_frr_cmd( + rnode, "show ip pim upstream json", isjson=True + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + if type(iif) is not list: + iif = [iif] + + for grp_addr in group_addresses: + # Verify group address + if grp_addr not in show_ip_pim_upstream_json: + errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % ( + dut, + grp_addr, + ) + return errormsg + group_addr_json = show_ip_pim_upstream_json[grp_addr] + + # Verify source address + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + # Verify Inbound Interface + found = False + for in_interface in iif: + if group_addr_json[src_address]["inboundInterface"] == in_interface: + if refCount > 0: + logger.info( + "[DUT %s]: Verifying refCount " + "for (%s,%s) [PASSED]!! " + " Found Expected: %s", + dut, + src_address, + grp_addr, + group_addr_json[src_address]["refCount"], + ) + found = True + if found: + if joinState is None: + if group_addr_json[src_address]["joinState"] != "Joined": + errormsg = ( + "[DUT %s]: Verifying iif " + "(Inbound Interface) for (%s,%s) and" + " joinState :%s [FAILED]!! " + " Expected: %s, Found: %s" + % ( + dut, + src_address, + grp_addr, + group_addr_json[src_address]["joinState"], + in_interface, + group_addr_json[src_address]["inboundInterface"], + ) + ) + return errormsg + + elif group_addr_json[src_address]["joinState"] != joinState: + errormsg = ( + "[DUT %s]: Verifying iif " + "(Inbound Interface) for (%s,%s) and" + " joinState :%s [FAILED]!! " + " Expected: %s, Found: %s" + % ( + dut, + src_address, + grp_addr, + group_addr_json[src_address]["joinState"], + in_interface, + group_addr_json[src_address]["inboundInterface"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying iif(Inbound Interface)" + " for (%s,%s) and joinState is %s [PASSED]!! " + " Found Expected: (%s)", + dut, + src_address, + grp_addr, + group_addr_json[src_address]["joinState"], + group_addr_json[src_address]["inboundInterface"], + ) + if not found: + errormsg = ( + "[DUT %s]: Verifying iif " + "(Inbound Interface) for (%s, %s) " + "[FAILED]!! 
" + " Expected: %s, Found: %s" + % ( + dut, + src_address, + grp_addr, + in_interface, + group_addr_json[src_address]["inboundInterface"], + ) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=6, wait=2, return_is_str=True) +def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses): + """ + Verify join state is updated correctly and join timer is + running with the help of "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `src_address`: source address + * `group_addresses`: IGMP group address + + Usage + ----- + dut = "r1" + iif = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_join_state_and_timer(tgen, dut, iif, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + errormsg = "" + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info( + "[DUT: %s]: Verifying Join state and Join Timer" " for IGMP groups received:", + dut, + ) + show_ip_pim_upstream_json = run_frr_cmd( + rnode, "show ip pim upstream json", isjson=True + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + # Verify group address + if grp_addr not in show_ip_pim_upstream_json: + errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % ( + dut, + grp_addr, + ) + return errormsg + + group_addr_json = show_ip_pim_upstream_json[grp_addr] + + # Verify source address + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + # Verify join state + joinState = group_addr_json[src_address]["joinState"] + if joinState != "Joined": + error = ( + "[DUT %s]: Verifying join state for" + " (%s,%s) [FAILED]!! " + " Expected: %s, Found: %s" + % (dut, src_address, grp_addr, "Joined", joinState) + ) + errormsg = errormsg + "\n" + str(error) + else: + logger.info( + "[DUT %s]: Verifying join state for" + " (%s,%s) [PASSED]!! " + " Found Expected: %s", + dut, + src_address, + grp_addr, + joinState, + ) + + # Verify join timer + joinTimer = group_addr_json[src_address]["joinTimer"] + if not re.match(r"(\d{2}):(\d{2}):(\d{2})", joinTimer): + error = ( + "[DUT %s]: Verifying join timer for" + " (%s,%s) [FAILED]!! " + " Expected: %s, Found: %s", + dut, + src_address, + grp_addr, + "join timer should be running", + joinTimer, + ) + errormsg = errormsg + "\n" + str(error) + else: + logger.info( + "[DUT %s]: Verifying join timer is running" + " for (%s,%s) [PASSED]!! 
" + " Found Expected: %s", + dut, + src_address, + grp_addr, + joinTimer, + ) + + if errormsg != "": + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=41, wait=2, return_is_dict=True) +def verify_ip_mroutes( + tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0 +): + """ + Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes + by running "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `src_address`: source address + * `group_addresses`: IGMP group address + * `iif`: Incoming interface + * `oil`: Outgoing interface + * `return_uptime`: If True, return uptime dict, default is False + * `mwait`: Wait time, default is 0 + + + Usage + ----- + dut = "r1" + group_address = "225.1.1.1" + result = verify_ip_mroutes(tgen, dut, src_address, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + if return_uptime: + logger.info("Sleeping for %s sec..", mwait) + sleep(mwait) + + logger.info("[DUT: %s]: Verifying ip mroutes", dut) + show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + if return_uptime: + uptime_dict = {} + + if bool(show_ip_mroute_json) == False: + error_msg = "[DUT %s]: mroutes are not present or flushed out !!" % (dut) + return error_msg + + if not isinstance(group_addresses, list): + group_addresses = [group_addresses] + + if not isinstance(iif, list) and iif is not "none": + iif = [iif] + + if not isinstance(oil, list) and oil is not "none": + oil = [oil] + + for grp_addr in group_addresses: + if grp_addr not in show_ip_mroute_json: + errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % ( + dut, + src_address, + grp_addr, + ) + return errormsg + else: + if return_uptime: + uptime_dict[grp_addr] = {} + + group_addr_json = show_ip_mroute_json[grp_addr] + + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % ( + dut, + src_address, + grp_addr, + ) + return errormsg + else: + if return_uptime: + uptime_dict[grp_addr][src_address] = {} + + mroutes = group_addr_json[src_address] + + if mroutes["installed"] != 0: + logger.info( + "[DUT %s]: mroute (%s,%s) is installed", dut, src_address, grp_addr + ) + + if "oil" not in mroutes: + if oil == "none" and mroutes["iif"] in iif: + logger.info( + "[DUT %s]: Verifying (%s, %s) mroute," + " [PASSED]!! Found Expected: " + "(iif: %s, oil: %s, installed: (%s,%s))", + dut, + src_address, + grp_addr, + mroutes["iif"], + oil, + src_address, + grp_addr, + ) + else: + errormsg = ( + "[DUT %s]: Verifying (%s, %s) mroute," + " [FAILED]!! " + "Expected: (oil: %s, installed:" + " (%s,%s)) Found: ( oil: none, " + "installed: (%s,%s))" + % ( + dut, + src_address, + grp_addr, + oil, + src_address, + grp_addr, + src_address, + grp_addr, + ) + ) + + return errormsg + + else: + found = False + for route, data in mroutes["oil"].items(): + if route in oil and route != "pimreg": + if ( + data["source"] == src_address + and data["group"] == grp_addr + and data["inboundInterface"] in iif + and data["outboundInterface"] in oil + ): + if return_uptime: + + uptime_dict[grp_addr][src_address] = data["upTime"] + + logger.info( + "[DUT %s]: Verifying (%s, %s)" + " mroute, [PASSED]!! 
" + "Found Expected: " + "(iif: %s, oil: %s, installed:" + " (%s,%s)", + dut, + src_address, + grp_addr, + data["inboundInterface"], + data["outboundInterface"], + data["source"], + data["group"], + ) + found = True + break + else: + continue + + if not found: + errormsg = ( + "[DUT %s]: Verifying (%s, %s)" + " mroute [FAILED]!! " + "Expected in: (iif: %s, oil: %s," + " installed: (%s,%s)) Found: " + "(iif: %s, oil: %s, " + "installed: (%s,%s))" + % ( + dut, + src_address, + grp_addr, + iif, + oil, + src_address, + grp_addr, + data["inboundInterface"], + data["outboundInterface"], + data["source"], + data["group"], + ) + ) + return errormsg + + else: + errormsg = "[DUT %s]: mroute (%s,%s) is not installed" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True if return_uptime == False else uptime_dict + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_rp_info( + tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None +): + """ + Verify pim rp info by running "show ip pim rp-info" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo`: JSON file handler + * `dut`: device under test + * `group_addresses`: IGMP group address + * `oif`: outbound interface name + * `rp`: RP address + * `source`: Source of RP + * `iamrp`: User defined RP + + Usage + ----- + dut = "r1" + result = verify_pim_rp_info(tgen, topo, dut, group_address, + rp=rp, source="BSR") + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying ip rp info", dut) + show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + if type(oif) is not list: + oif = [oif] + + for grp_addr in group_addresses: + if rp is None: + rp_details = find_rp_details(tgen, topo) + + if dut in rp_details: + iamRP = True + else: + iamRP = False + else: + show_ip_route_json = run_frr_cmd( + rnode, "show ip route connected json", isjson=True + ) + for _rp in show_ip_route_json.keys(): + if rp == _rp.split("/")[0]: + iamRP = True + break + else: + iamRP = False + + if rp not in show_ip_rp_info_json: + errormsg = "[DUT %s]: Verifying rp-info" "for rp_address %s [FAILED]!! " % ( + dut, + rp, + ) + return errormsg + else: + group_addr_json = show_ip_rp_info_json[rp] + + for rp_json in group_addr_json: + if oif is not None: + found = False + if rp_json["outboundInterface"] not in oif: + errormsg = ( + "[DUT %s]: Verifying OIF " + "for group %s and RP %s [FAILED]!! " + "Expected interfaces: (%s)," + " Found: (%s)" + % (dut, grp_addr, rp, oif, rp_json["outboundInterface"]) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying OIF " + "for group %s and RP %s [PASSED]!! " + "Found Expected: (%s)" + % (dut, grp_addr, rp, rp_json["outboundInterface"]) + ) + + if source is not None: + if rp_json["source"] != source: + errormsg = ( + "[DUT %s]: Verifying SOURCE " + "for group %s and RP %s [FAILED]!! " + "Expected: (%s)," + " Found: (%s)" % (dut, grp_addr, rp, source, rp_json["source"]) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying SOURCE " + "for group %s and RP %s [PASSED]!! 
" + "Found Expected: (%s)" % (dut, grp_addr, rp, rp_json["source"]) + ) + + if rp_json["group"] == grp_addr and iamrp is not None: + if iamRP: + if rp_json["iAmRP"]: + logger.info( + "[DUT %s]: Verifying group " + "and iAmRP [PASSED]!!" + " Found Expected: (%s, %s:%s)", + dut, + grp_addr, + "iAmRP", + rp_json["iAmRP"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying group" + "%s and iAmRP [FAILED]!! " + "Expected: (iAmRP: %s)," + " Found: (iAmRP: %s)" + % (dut, grp_addr, "true", rp_json["iAmRP"]) + ) + return errormsg + + if not iamRP: + if rp_json["iAmRP"] == False: + logger.info( + "[DUT %s]: Verifying group " + "and iAmNotRP [PASSED]!!" + " Found Expected: (%s, %s:%s)", + dut, + grp_addr, + "iAmRP", + rp_json["iAmRP"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying group" + "%s and iAmRP [FAILED]!! " + "Expected: (iAmRP: %s)," + " Found: (iAmRP: %s)" + % (dut, grp_addr, "false", rp_json["iAmRP"]) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_state( + tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None +): + """ + Verify pim state by running "show ip pim state" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `oil`: outbound interface + * `group_addresses`: IGMP group address + * `src_address`: source address, default = None + * installed_fl` : Installed flag + + Usage + ----- + dut = "r1" + iif = "r1-r3-eth1" + oil = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_pim_state(tgen, dut, iif, oil, group_address) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying pim state", dut) + show_pim_state_json = run_frr_cmd(rnode, "show ip pim state json", isjson=True) + + if installed_fl is None: + installed_fl = 1 + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + if src_address is None: + src_address = "*" + pim_state_json = show_pim_state_json[grp_addr][src_address] + else: + pim_state_json = show_pim_state_json[grp_addr][src_address] + + if pim_state_json["Installed"] == installed_fl: + logger.info( + "[DUT %s]: group %s is installed flag: %s", + dut, + grp_addr, + pim_state_json["Installed"], + ) + for interface, data in pim_state_json[iif].items(): + if interface != oil: + continue + + # Verify iif, oil and installed state + if ( + data["group"] == grp_addr + and data["installed"] == installed_fl + and data["inboundInterface"] == iif + and data["outboundInterface"] == oil + ): + logger.info( + "[DUT %s]: Verifying pim state for group" + " %s [PASSED]!! Found Expected: " + "(iif: %s, oil: %s, installed: %s) ", + dut, + grp_addr, + data["inboundInterface"], + data["outboundInterface"], + data["installed"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying pim state for group" + " %s, [FAILED]!! 
Expected: " + "(iif: %s, oil: %s, installed: %s) ", + "Found: (iif: %s, oil: %s, installed: %s)" + % ( + dut, + grp_addr, + iif, + oil, + "1", + data["inboundInterface"], + data["outboundInterface"], + data["installed"], + ), + ) + return errormsg + else: + errormsg = "[DUT %s]: %s install flag value not as expected" % ( + dut, + grp_addr, + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def verify_pim_interface_traffic(tgen, input_dict): + """ + Verify ip pim interface traffice by running + "show ip pim interface traffic" cli + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict(dict)`: defines DUT, what and from which interfaces + traffic needs to be verified + Usage + ----- + input_dict = { + "r1": { + "r1-r0-eth0": { + "helloRx": 0, + "helloTx": 1, + "joinRx": 0, + "joinTx": 0 + } + } + } + + result = verify_pim_interface_traffic(tgen, input_dict) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + output_dict = {} + for dut in input_dict.keys(): + if dut not in tgen.routers(): + continue + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying pim interface traffic", dut) + show_pim_intf_traffic_json = run_frr_cmd( + rnode, "show ip pim interface traffic json", isjson=True + ) + + output_dict[dut] = {} + for intf, data in input_dict[dut].items(): + interface_json = show_pim_intf_traffic_json[intf] + for state in data: + + # Verify Tx/Rx + if state in interface_json: + output_dict[dut][state] = interface_json[state] + else: + errormsg = ( + "[DUT %s]: %s is not present" + "for interface %s [FAILED]!! " % (dut, state, intf) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return output_dict + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None): + """ + Verify all PIM interface are up and running, config is verified + using "show ip pim interface" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : device under test + * `interface` : interface name + * `interface_ip` : interface ip address + + Usage + ----- + result = verify_pim_interfacetgen, topo, dut, interface=ens192, interface_ip=20.1.1.1) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if router != dut: + continue + + logger.info("[DUT: %s]: Verifying PIM interface status:", dut) + + rnode = tgen.routers()[dut] + show_ip_pim_interface_json = rnode.\ + vtysh_cmd("show ip pim interface json", isjson=True) + + logger.info("show_ip_pim_interface_json: \n %s", + show_ip_pim_interface_json) + + if interface_ip: + if interface in show_ip_pim_interface_json: + pim_intf_json = show_ip_pim_interface_json[interface] + if pim_intf_json["address"] != interface_ip: + errormsg = ("[DUT %s]: PIM interface " + "ip is not correct " + "[FAILED]!! Expected : %s, Found : %s" + %(dut, pim_intf_json["address"],interface_ip)) + return errormsg + else: + logger.info("[DUT %s]: PIM interface " + "ip is correct " + "[Passed]!! 
Expected : %s, Found : %s" + %(dut, pim_intf_json["address"],interface_ip)) + return True + else: + for destLink, data in topo["routers"][dut]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + pim_interface = data["interface"] + pim_intf_ip = data["ipv4"].split("/")[0] + + if pim_interface in show_ip_pim_interface_json: + pim_intf_json = show_ip_pim_interface_json\ + [pim_interface] + + # Verifying PIM interface + if pim_intf_json["address"] != pim_intf_ip and \ + pim_intf_json["state"] != "up": + errormsg = ("[DUT %s]: PIM interface: %s " + "PIM interface ip: %s, status check " + "[FAILED]!! Expected : %s, Found : %s" + %(dut, pim_interface, pim_intf_ip, + pim_interface, pim_intf_json["state"])) + return errormsg + + logger.info("[DUT %s]: PIM interface: %s, " + "interface ip: %s, status: %s" + " [PASSED]!!", + dut, pim_interface, pim_intf_ip, + pim_intf_json["state"]) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def clear_ip_pim_interface_traffic(tgen, topo): + """ + Clear ip pim interface traffice by running + "clear ip pim interface traffic" cli + + Parameters + ---------- + * `tgen`: topogen object + Usage + ----- + + result = clear_ip_pim_interface_traffic(tgen, topo) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for dut in tgen.routers(): + if "pim" not in topo["routers"][dut]: + continue + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Clearing pim interface traffic", dut) + result = run_frr_cmd(rnode, "clear ip pim interface traffic") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +def clear_ip_pim_interfaces(tgen, dut): + """ + Clear ip pim interface by running + "clear ip pim interfaces" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: Device Under Test + Usage + ----- + + result = clear_ip_pim_interfaces(tgen, dut) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + nh_before_clear = {} + nh_after_clear = {} + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verify pim neighbor before pim" " neighbor clear", dut) + # To add uptime initially + sleep(10) + run_json_before = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True) + + for key, value in run_json_before.items(): + if bool(value): + for _key, _value in value.items(): + nh_before_clear[key] = _value["upTime"] + + # Clearing PIM neighbors + logger.info("[DUT: %s]: Clearing pim interfaces", dut) + run_frr_cmd(rnode, "clear ip pim interfaces") + + logger.info("[DUT: %s]: Verify pim neighbor after pim" " neighbor clear", dut) + + found = False + + # Waiting for maximum 60 sec + fail_intf = [] + for retry in range(1, 13): + logger.info("[DUT: %s]: Waiting for 5 sec for PIM neighbors" " to come up", dut) + sleep(5) + run_json_after = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True) + found = True + for pim_intf in nh_before_clear.keys(): + if pim_intf not in run_json_after or not run_json_after[pim_intf]: + found = False + fail_intf.append(pim_intf) + + if found is True: + break + else: + errormsg = ( + "[DUT: %s]: pim neighborship is not formed for %s" + "after clear_ip_pim_interfaces %s [FAILED!!]", + dut, + fail_intf, + ) + return errormsg + + for key, value in run_json_after.items(): + if bool(value): + for 
_key, _value in value.items(): + nh_after_clear[key] = _value["upTime"] + + # Verify uptime for neighbors + for pim_intf in nh_before_clear.keys(): + d1 = datetime.datetime.strptime(nh_before_clear[pim_intf], "%H:%M:%S") + d2 = datetime.datetime.strptime(nh_after_clear[pim_intf], "%H:%M:%S") + if d2 >= d1: + errormsg = ( + "[DUT: %s]: PIM neighborship is not cleared for", + " interface %s [FAILED!!]", + dut, + pim_intf, + ) + + logger.info("[DUT: %s]: PIM neighborship is cleared [PASSED!!]") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +def clear_ip_igmp_interfaces(tgen, dut): + """ + Clear ip igmp interfaces by running + "clear ip igmp interfaces" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + + Usage + ----- + dut = "r1" + result = clear_ip_igmp_interfaces(tgen, dut) + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + group_before_clear = {} + group_after_clear = {} + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: IGMP group uptime before clear" " igmp groups:", dut) + igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True) + + total_groups_before_clear = igmp_json["totalGroups"] + + for key, value in igmp_json.items(): + if type(value) is not dict: + continue + + groups = value["groups"] + group = groups[0]["group"] + uptime = groups[0]["uptime"] + group_before_clear[group] = uptime + + logger.info("[DUT: %s]: Clearing ip igmp interfaces", dut) + result = run_frr_cmd(rnode, "clear ip igmp interfaces") + + # Waiting for maximum 60 sec + for retry in range(1, 13): + logger.info( + "[DUT: %s]: Waiting for 5 sec for igmp interfaces" " to come up", dut + ) + sleep(5) + igmp_json = run_frr_cmd(rnode, "show ip igmp groups json", isjson=True) + + total_groups_after_clear = igmp_json["totalGroups"] + + if total_groups_before_clear == total_groups_after_clear: + break + + for key, value in igmp_json.items(): + if type(value) is not dict: + continue + + groups = value["groups"] + group = groups[0]["group"] + uptime = groups[0]["uptime"] + group_after_clear[group] = uptime + + # Verify uptime for groups + for group in group_before_clear.keys(): + d1 = datetime.datetime.strptime(group_before_clear[group], "%H:%M:%S") + d2 = datetime.datetime.strptime(group_after_clear[group], "%H:%M:%S") + if d2 >= d1: + errormsg = ("[DUT: %s]: IGMP group is not cleared", " [FAILED!!]", dut) + + logger.info("[DUT: %s]: IGMP group is cleared [PASSED!!]") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +@retry(attempts=10, wait=2, return_is_str=True) +def clear_ip_mroute_verify(tgen, dut): + """ + Clear ip mroute by running "clear ip mroute" cli and verify + mroutes are up again after mroute clear + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: Device Under Test + Usage + ----- + + result = clear_ip_mroute_verify(tgen, dut) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + mroute_before_clear = {} + mroute_after_clear = {} + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: IP mroutes uptime before clear", dut) + mroute_json_1 = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + for group in mroute_json_1.keys(): + mroute_before_clear[group] = {} + for key in mroute_json_1[group].keys(): + for _key, _value in 
mroute_json_1[group][key]["oil"].items(): + if _key != "pimreg": + mroute_before_clear[group][key] = _value["upTime"] + + logger.info("[DUT: %s]: Clearing ip mroute", dut) + result = run_frr_cmd(rnode, "clear ip mroute") + + # RFC 3376: 8.2. Query Interval - Default: 125 seconds + # So waiting for maximum 130 sec to get the igmp report + for retry in range(1, 26): + logger.info("[DUT: %s]: Waiting for 2 sec for mroutes" " to come up", dut) + sleep(5) + keys_json1 = mroute_json_1.keys() + mroute_json_2 = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + if bool(mroute_json_2): + keys_json2 = mroute_json_2.keys() + + for group in mroute_json_2.keys(): + flag = False + for key in mroute_json_2[group].keys(): + if "oil" not in mroute_json_2[group]: + continue + + for _key, _value in mroute_json_2[group][key]["oil"].items(): + if _key != "pimreg" and keys_json1 == keys_json2: + break + flag = True + if flag: + break + else: + continue + + for group in mroute_json_2.keys(): + mroute_after_clear[group] = {} + for key in mroute_json_2[group].keys(): + for _key, _value in mroute_json_2[group][key]["oil"].items(): + if _key != "pimreg": + mroute_after_clear[group][key] = _value["upTime"] + + # Verify uptime for mroute + for group in mroute_before_clear.keys(): + for source in mroute_before_clear[group].keys(): + if set(mroute_before_clear[group]) != set(mroute_after_clear[group]): + errormsg = ( + "[DUT: %s]: mroute (%s, %s) has not come" + " up after mroute clear [FAILED!!]" % (dut, source, group) + ) + return errormsg + + d1 = datetime.datetime.strptime( + mroute_before_clear[group][source], "%H:%M:%S" + ) + d2 = datetime.datetime.strptime( + mroute_after_clear[group][source], "%H:%M:%S" + ) + if d2 >= d1: + errormsg = "[DUT: %s]: IP mroute is not cleared" " [FAILED!!]" % (dut) + + logger.info("[DUT: %s]: IP mroute is cleared [PASSED!!]", dut) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return True + + +def clear_ip_mroute(tgen, dut=None): + """ + Clear ip mroute by running "clear ip mroute" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test, default None + + Usage + ----- + clear_ip_mroute(tgen, dut) + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + router_list = tgen.routers() + for router, rnode in router_list.items(): + if dut is not None and router != dut: + continue + + logger.debug("[DUT: %s]: Clearing ip mroute", router) + rnode.vtysh_cmd("clear ip mroute") + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + +def reconfig_interfaces(tgen, topo, senderRouter, receiverRouter, packet=None): + """ + Configure interface ip for sender and receiver routers + as per bsr packet + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `senderRouter` : Sender router + * `receiverRouter` : Receiver router + * `packet` : BSR packet in raw format + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + + src_ip = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["src_ip"] + dest_ip = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["dest_ip"] + + for destLink, data in topo["routers"][senderRouter]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + sender_interface = data["interface"] + sender_interface_ip = 
data["ipv4"] + + config_data.append("interface {}".format(sender_interface)) + config_data.append("no ip address {}".format(sender_interface_ip)) + config_data.append("ip address {}".format(src_ip)) + + result = create_common_configuration( + tgen, senderRouter, config_data, "interface_config" + ) + if result is not True: + return False + + config_data = [] + links = topo["routers"][destLink]["links"] + pim_neighbor = {key: links[key] for key in [senderRouter]} + + data = pim_neighbor[senderRouter] + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + receiver_interface = data["interface"] + receiver_interface_ip = data["ipv4"] + + config_data.append("interface {}".format(receiver_interface)) + config_data.append("no ip address {}".format(receiver_interface_ip)) + config_data.append("ip address {}".format(dest_ip)) + + result = create_common_configuration( + tgen, receiverRouter, config_data, "interface_config" + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: reconfig_interfaces()") + return result + + +def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping): + """ + Add physical interfaces tp RP for all the RPs + + Parameters + ---------- + * `tgen` : Topogen object + * `topo` : json file data + * `interface` : RP interface + * `rp` : rp for given topology + * `rp_mapping` : dictionary of all groups and RPs + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + + for group, rp_list in rp_mapping.items(): + for _rp in rp_list: + config_data.append("interface {}".format(interface)) + config_data.append("ip address {}".format(_rp)) + config_data.append("ip pim") + + result = create_common_configuration( + tgen, rp, config_data, "interface_config" + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: add_rp_interfaces_and_pim_config()") + return result + + +def scapy_send_bsr_raw_packet( + tgen, topo, senderRouter, receiverRouter, packet=None, interval=1, count=1 +): + """ + Using scapy Raw() method to send BSR raw packet from one FRR + to other + + Parameters: + ----------- + * `tgen` : Topogen object + * `topo` : json file data + * `senderRouter` : Sender router + * `receiverRouter` : Receiver router + * `packet` : BSR packet in raw format + * `interval` : Interval between the packets + * `count` : Number of packets to be sent + + returns: + -------- + errormsg or True + """ + + global CWD + result = "" + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + rnode = tgen.routers()[senderRouter] + + for destLink, data in topo["routers"][senderRouter]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" in data and data["pim"] == "enable": + sender_interface = data["interface"] + + packet = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["data"] + + if interval > 1 or count > 1: + cmd = ( + "nohup /usr/bin/python {}/send_bsr_packet.py '{}' '{}' " + "--interval={} --count={} &".format( + CWD, packet, sender_interface, interval, count + ) + ) + else: + cmd = ( + "/usr/bin/python {}/send_bsr_packet.py '{}' '{}' " + "--interval={} 
--count={}".format( + CWD, packet, sender_interface, interval, count + ) + ) + + logger.info("Scapy cmd: \n %s", cmd) + result = rnode.run(cmd) + + if result == "": + return result + + logger.debug("Exiting lib API: scapy_send_bsr_raw_packet") + return True + + +def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None): + """ + Find which RP is having lowest prioriy and returns rp IP + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `bsr`: BSR address + * 'grp': Group Address + + Usage + ----- + dut = "r1" + result = verify_pim_rp_info(tgen, dut, bsr) + + Returns: + dictionary: group and RP, which has to be installed as per + lowest priority or highest priority + """ + + rp_details = {} + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Fetching rp details from bsrp-info", dut) + bsrp_json = run_frr_cmd(rnode, "show ip pim bsrp-info json", isjson=True) + + if grp not in bsrp_json: + return {} + + for group, rp_data in bsrp_json.items(): + if group == "BSR Address" and bsrp_json["BSR Address"] == bsr: + continue + + if group != grp: + continue + + rp_priority = {} + rp_hash = {} + + for rp, value in rp_data.items(): + if rp == "Pending RP count": + continue + rp_priority[value["Rp Address"]] = value["Rp Priority"] + rp_hash[value["Rp Address"]] = value["Hash Val"] + + priority_dict = dict(zip(rp_priority.values(), rp_priority.keys())) + hash_dict = dict(zip(rp_hash.values(), rp_hash.keys())) + + # RP with lowest priority + if len(priority_dict) != 1: + rp_p, lowest_priority = sorted(rp_priority.items(), key=lambda x: x[1])[0] + rp_details[group] = rp_p + + # RP with highest hash value + if len(priority_dict) == 1: + rp_h, highest_hash = sorted(rp_hash.items(), key=lambda x: x[1])[-1] + rp_details[group] = rp_h + + # RP with highest IP address + if len(priority_dict) == 1 and len(hash_dict) == 1: + rp_details[group] = sorted(rp_priority.keys())[-1] + + return rp_details + + +@retry(attempts=6, wait=2, return_is_str=True) +def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None): + """ + Verify pim rp info by running "show ip pim rp-info" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo`: JSON file handler + * `dut`: device under test + * `grp_addr`: IGMP group address + * 'rp_source': source from which rp installed + * 'rpadd': rp address + + Usage + ----- + dut = "r1" + group_address = "225.1.1.1" + rp_source = "BSR" + result = verify_pim_rp_and_source(tgen, topo, dut, group_address, rp_source) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying ip rp info", dut) + show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True) + + if rpadd != None: + rp_json = show_ip_rp_info_json[rpadd] + if rp_json[0]["group"] == grp_addr: + if rp_json[0]["source"] == rp_source: + logger.info( + "[DUT %s]: Verifying Group and rp_source [PASSED]" + "Found Expected: %s, %s" + % (dut, rp_json[0]["group"], rp_json[0]["source"]) + ) + return True + else: + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected (%s, %s) " + "Found (%s, %s)" + % ( + dut, + grp_addr, + rp_source, + rp_json[0]["group"], + rp_json[0]["source"], + ) + ) + return errormsg + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected: %s, %s but not found" % (dut, grp_addr, rp_source) + ) + return 
errormsg + + for rp in show_ip_rp_info_json: + rp_json = show_ip_rp_info_json[rp] + logger.info("%s", rp_json) + if rp_json[0]["group"] == grp_addr: + if rp_json[0]["source"] == rp_source: + logger.info( + "[DUT %s]: Verifying Group and rp_source [PASSED]" + "Found Expected: %s, %s" + % (dut, rp_json[0]["group"], rp_json[0]["source"]) + ) + return True + else: + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected (%s, %s) " + "Found (%s, %s)" + % ( + dut, + grp_addr, + rp_source, + rp_json[0]["group"], + rp_json[0]["source"], + ) + ) + return errormsg + + errormsg = ( + "[DUT %s]: Verifying Group and rp_source [FAILED]" + "Expected: %s, %s but not found" % (dut, grp_addr, rp_source) + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + + return errormsg + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_bsr(tgen, topo, dut, bsr_ip): + """ + Verify all PIM interface are up and running, config is verified + using "show ip pim interface" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : device under test + * 'bsr' : bsr ip to be verified + + Usage + ----- + result = verify_pim_bsr(tgen, topo, dut, bsr_ip) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if router != dut: + continue + + logger.info("[DUT: %s]: Verifying PIM bsr status:", dut) + + rnode = tgen.routers()[dut] + pim_bsr_json = rnode.vtysh_cmd("show ip pim bsr json", isjson=True) + + logger.info("show_ip_pim_bsr_json: \n %s", pim_bsr_json) + + # Verifying PIM bsr + if pim_bsr_json["bsr"] != bsr_ip: + errormsg = ( + "[DUT %s]:" + "bsr status: not found" + "[FAILED]!! 
Expected : %s, Found : %s" + % (dut, bsr_ip, pim_bsr_json["bsr"]) + ) + return errormsg + + logger.info( + "[DUT %s]:" " bsr status: found, Address :%s" " [PASSED]!!", + dut, + pim_bsr_json["bsr"], + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None): + """ + Verify IP PIM upstream rpf, config is verified + using "show ip pim neighbor" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : devuce under test + * `interface` : upstream interface + * `group_addresses` : list of group address for which upstream info + needs to be checked + * `rp` : RP address + + Usage + ----- + result = verify_ip_pim_upstream_rpf(gen, topo, dut, interface, + group_addresses, rp=None) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if "pim" in topo["routers"][dut]: + + logger.info("[DUT: %s]: Verifying ip pim upstream rpf:", dut) + + rnode = tgen.routers()[dut] + show_ip_pim_upstream_rpf_json = rnode.vtysh_cmd( + "show ip pim upstream-rpf json", isjson=True + ) + + logger.info( + "show_ip_pim_upstream_rpf_json: \n %s", show_ip_pim_upstream_rpf_json + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + for destLink, data in topo["routers"][dut]["links"].items(): + if "type" in data and data["type"] == "loopback": + continue + + if "pim" not in topo["routers"][destLink]: + continue + + # Verify RP info + if rp is None: + rp_details = find_rp_details(tgen, topo) + else: + rp_details = {dut: ip} + rp_details[dut] = rp + + if dut in rp_details: + pim_nh_intf_ip = topo["routers"][dut]["links"]["lo"]["ipv4"].split( + "/" + )[0] + else: + if destLink not in interface: + continue + + links = topo["routers"][destLink]["links"] + pim_neighbor = {key: links[key] for key in [dut]} + + data = pim_neighbor[dut] + if "pim" in data and data["pim"] == "enable": + pim_nh_intf_ip = data["ipv4"].split("/")[0] + + upstream_rpf_json = show_ip_pim_upstream_rpf_json[grp_addr]["*"] + + # Verifying ip pim upstream rpf + if ( + upstream_rpf_json["rpfInterface"] == interface + and upstream_rpf_json["ribNexthop"] != pim_nh_intf_ip + ): + errormsg = ( + "[DUT %s]: Verifying group: %s, " + "rpf interface: %s, " + " rib Nexthop check [FAILED]!!" + "Expected: %s, Found: %s" + % ( + dut, + grp_addr, + interface, + pim_nh_intf_ip, + upstream_rpf_json["ribNexthop"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying group: %s," + " rpf interface: %s, " + " rib Nexthop: %s [PASSED]!!", + dut, + grp_addr, + interface, + pim_nh_intf_ip, + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +def enable_disable_pim_unicast_bsm(tgen, router, intf, enable=True): + """ + Helper API to enable or disable pim bsm on interfaces + + Parameters + ---------- + * `tgen` : Topogen object + * `router` : router id to be configured. 
+ * `intf` : Interface to be configured + * `enable` : this flag denotes if config should be enabled or disabled + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + cmd = "interface {}".format(intf) + config_data.append(cmd) + + if enable == True: + config_data.append("ip pim unicast-bsm") + else: + config_data.append("no ip pim unicast-bsm") + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=False + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +def enable_disable_pim_bsm(tgen, router, intf, enable=True): + """ + Helper API to enable or disable pim bsm on interfaces + + Parameters + ---------- + * `tgen` : Topogen object + * `router` : router id to be configured. + * `intf` : Interface to be configured + * `enable` : this flag denotes if config should be enabled or disabled + + Returns + ------- + True or False + """ + result = False + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + try: + config_data = [] + cmd = "interface {}".format(intf) + config_data.append(cmd) + + if enable is True: + config_data.append("ip pim bsm") + else: + config_data.append("no ip pim bsm") + + result = create_common_configuration( + tgen, router, config_data, "interface_config", build=False + ) + if result is not True: + return False + + except InvalidCLIError: + # Traceback + errormsg = traceback.format_exc() + logger.error(errormsg) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return result + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None): + """ + Verify ip pim join by running "show ip pim join" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo`: JSON file handler + * `dut`: device under test + * `interface`: interface name, from which PIM join would come + * `group_addresses`: IGMP group address + * `src_address`: Source address + + Usage + ----- + dut = "r1" + interface = "r1-r0-eth0" + group_address = "225.1.1.1" + result = verify_ip_pim_join(tgen, dut, star, group_address, interface) + + Returns + ------- + errormsg(str) or True + """ + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying pim join", dut) + show_pim_join_json = run_frr_cmd(rnode, "show ip pim join json", isjson=True) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + # Verify if IGMP is enabled in DUT + if "igmp" not in topo["routers"][dut]: + pim_join = True + else: + pim_join = False + + interface_json = show_pim_join_json[interface] + + grp_addr = grp_addr.split("/")[0] + for source, data in interface_json[grp_addr].items(): + + # Verify pim join + if pim_join: + if data["group"] == grp_addr and data["channelJoinName"] == "JOIN": + logger.info( + "[DUT %s]: Verifying pim join for group: %s" + "[PASSED]!! Found Expected: (%s)", + dut, + grp_addr, + data["channelJoinName"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying pim join for group: %s" + "[FAILED]!! 
Expected: (%s) " + "Found: (%s)" % (dut, grp_addr, "JOIN", data["channelJoinName"]) + ) + return errormsg + + if not pim_join: + if data["group"] == grp_addr and data["channelJoinName"] == "NOINFO": + logger.info( + "[DUT %s]: Verifying pim join for group: %s" + "[PASSED]!! Found Expected: (%s)", + dut, + grp_addr, + data["channelJoinName"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying pim join for group: %s" + "[FAILED]!! Expected: (%s) " + "Found: (%s)" + % (dut, grp_addr, "NOINFO", data["channelJoinName"]) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=31, wait=2, return_is_dict=True) +def verify_igmp_config(tgen, input_dict, stats_return=False): + """ + Verify igmp interface details, verifying following configs: + timerQueryInterval + timerQueryResponseIntervalMsec + lastMemberQueryCount + timerLastMemberQueryMsec + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict` : Input dict data, required to verify + timer + * `stats_return`: If user wants API to return statistics + + Usage + ----- + input_dict ={ + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "query": { + "query-interval" : 200, + "query-max-response-time" : 100 + }, + "statistics": { + "queryV2" : 2, + "reportV2" : 1 + } + } + } + } + } + } + } + result = verify_igmp_config(tgen, input_dict, stats_return) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for dut in input_dict.keys(): + rnode = tgen.routers()[dut] + + for interface, data in input_dict[dut]["igmp"]["interfaces"].items(): + + statistics = False + report = False + if "statistics" in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"]: + statistics = True + cmd = "show ip igmp statistics" + else: + cmd = "show ip igmp" + + logger.info( + "[DUT: %s]: Verifying IGMP interface %s detail:", dut, interface + ) + + if statistics: + if ( + "report" + in input_dict[dut]["igmp"]["interfaces"][interface]["igmp"][ + "statistics" + ] + ): + report = True + + if statistics and report: + show_ip_igmp_intf_json = run_frr_cmd( + rnode, "{} json".format(cmd, interface), isjson=True + ) + intf_detail_json = show_ip_igmp_intf_json["global"] + else: + show_ip_igmp_intf_json = run_frr_cmd( + rnode, "{} interface {} json".format(cmd, interface), isjson=True + ) + + if not report: + if interface not in show_ip_igmp_intf_json: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " is not present in CLI output " + "[FAILED]!! " % (dut, interface) + ) + return errormsg + + else: + intf_detail_json = show_ip_igmp_intf_json[interface] + + if stats_return: + igmp_stats = {} + + if "statistics" in data["igmp"]: + if stats_return: + igmp_stats["statistics"] = {} + for query, value in data["igmp"]["statistics"].items(): + if query == "queryV2": + # Verifying IGMP interface queryV2 statistics + if stats_return: + igmp_stats["statistics"][query] = intf_detail_json[ + "queryV2" + ] + + else: + if intf_detail_json["queryV2"] != value: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " queryV2 statistics verification " + "[FAILED]!! 
Expected : %s," + " Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["queryV2"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "queryV2 statistics is %s", + dut, + interface, + value, + ) + + if query == "reportV2": + # Verifying IGMP interface timerV2 statistics + if stats_return: + igmp_stats["statistics"][query] = intf_detail_json[ + "reportV2" + ] + + else: + if intf_detail_json["reportV2"] <= value: + errormsg = ( + "[DUT %s]: IGMP reportV2 " + "statistics verification " + "[FAILED]!! Expected : %s " + "or more, Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["reportV2"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP reportV2 " "statistics is %s", + dut, + intf_detail_json["reportV2"], + ) + + if "query" in data["igmp"]: + for query, value in data["igmp"]["query"].items(): + if query == "query-interval": + # Verifying IGMP interface query interval timer + if intf_detail_json["timerQueryInterval"] != value: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " query-interval verification " + "[FAILED]!! Expected : %s," + " Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["timerQueryInterval"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " "query-interval is %s", + dut, + interface, + value, + ) + + if query == "query-max-response-time": + # Verifying IGMP interface query max response timer + if ( + intf_detail_json["timerQueryResponseIntervalMsec"] + != value * 100 + ): + errormsg = ( + "[DUT %s]: IGMP interface: %s " + "query-max-response-time " + "verification [FAILED]!!" + " Expected : %s, Found : %s" + % ( + dut, + interface, + value * 1000, + intf_detail_json["timerQueryResponseIntervalMsec"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "query-max-response-time is %s ms", + dut, + interface, + value * 100, + ) + + if query == "last-member-query-count": + # Verifying IGMP interface last member query count + if intf_detail_json["lastMemberQueryCount"] != value: + errormsg = ( + "[DUT %s]: IGMP interface: %s " + "last-member-query-count " + "verification [FAILED]!!" + " Expected : %s, Found : %s" + % ( + dut, + interface, + value, + intf_detail_json["lastMemberQueryCount"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "last-member-query-count is %s ms", + dut, + interface, + value * 1000, + ) + + if query == "last-member-query-interval": + # Verifying IGMP interface last member query interval + if ( + intf_detail_json["timerLastMemberQueryMsec"] + != value * 100 * intf_detail_json["lastMemberQueryCount"] + ): + errormsg = ( + "[DUT %s]: IGMP interface: %s " + "last-member-query-interval " + "verification [FAILED]!!" + " Expected : %s, Found : %s" + % ( + dut, + interface, + value * 1000, + intf_detail_json["timerLastMemberQueryMsec"], + ) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " + "last-member-query-interval is %s ms", + dut, + interface, + value * intf_detail_json["lastMemberQueryCount"] * 100, + ) + + if "version" in data["igmp"]: + # Verifying IGMP interface state is up + if intf_detail_json["state"] != "up": + errormsg = ( + "[DUT %s]: IGMP interface: %s " + " state: %s verification " + "[FAILED]!!" 
% (dut, interface, intf_detail_json["state"]) + ) + return errormsg + + logger.info( + "[DUT %s]: IGMP interface: %s " "state: %s", + dut, + interface, + intf_detail_json["state"], + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True if stats_return == False else igmp_stats + + +@retry(attempts=31, wait=2, return_is_str=True) +def verify_pim_config(tgen, input_dict): + """ + Verify pim interface details, verifying following configs: + drPriority + helloPeriod + helloReceived + helloSend + drAddress + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict` : Input dict data, required to verify + timer + + Usage + ----- + input_dict ={ + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "pim": { + "drPriority" : 10, + "helloPeriod" : 5 + } + } + } + } + } + } + } + result = verify_pim_config(tgen, input_dict) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for dut in input_dict.keys(): + rnode = tgen.routers()[dut] + + for interface, data in input_dict[dut]["pim"]["interfaces"].items(): + + logger.info("[DUT: %s]: Verifying PIM interface %s detail:", dut, interface) + + show_ip_igmp_intf_json = run_frr_cmd( + rnode, "show ip pim interface {} json".format(interface), isjson=True + ) + + if interface not in show_ip_igmp_intf_json: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " is not present in CLI output " + "[FAILED]!! " % (dut, interface) + ) + return errormsg + + intf_detail_json = show_ip_igmp_intf_json[interface] + + for config, value in data.items(): + if config == "helloPeriod": + # Verifying PIM interface helloPeriod + if intf_detail_json["helloPeriod"] != value: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " helloPeriod verification " + "[FAILED]!! Expected : %s," + " Found : %s" + % (dut, interface, value, intf_detail_json["helloPeriod"]) + ) + return errormsg + + logger.info( + "[DUT %s]: PIM interface: %s " "helloPeriod is %s", + dut, + interface, + value, + ) + + if config == "drPriority": + # Verifying PIM interface drPriority + if intf_detail_json["drPriority"] != value: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " drPriority verification " + "[FAILED]!! Expected : %s," + " Found : %s" + % (dut, interface, value, intf_detail_json["drPriority"]) + ) + return errormsg + + logger.info( + "[DUT %s]: PIM interface: %s " "drPriority is %s", + dut, + interface, + value, + ) + + if config == "drAddress": + # Verifying PIM interface drAddress + if intf_detail_json["drAddress"] != value: + errormsg = ( + "[DUT %s]: PIM interface: %s " + " drAddress verification " + "[FAILED]!! 
Expected : %s," + " Found : %s" + % (dut, interface, value, intf_detail_json["drAddress"]) + ) + return errormsg + + logger.info( + "[DUT %s]: PIM interface: %s " "drAddress is %s", + dut, + interface, + value, + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=21, wait=2, return_is_dict=True) +def verify_multicast_traffic(tgen, input_dict, return_traffic=False): + """ + Verify multicast traffic by running + "show multicast traffic count json" cli + + Parameters + ---------- + * `tgen`: topogen object + * `input_dict(dict)`: defines DUT, what and for which interfaces + traffic needs to be verified + * `return_traffic`: returns traffic stats + Usage + ----- + input_dict = { + "r1": { + "traffic_received": ["r1-r0-eth0"], + "traffic_sent": ["r1-r0-eth0"] + } + } + + result = verify_multicast_traffic(tgen, input_dict) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + traffic_dict = {} + for dut in input_dict.keys(): + if dut not in tgen.routers(): + continue + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying multicast " "traffic", dut) + + show_multicast_traffic_json = run_frr_cmd( + rnode, "show ip multicast count json", isjson=True + ) + + for traffic_type, interfaces in input_dict[dut].items(): + traffic_dict[traffic_type] = {} + if traffic_type == "traffic_received": + for interface in interfaces: + traffic_dict[traffic_type][interface] = {} + interface_json = show_multicast_traffic_json[interface] + + if interface_json["pktsIn"] == 0 and interface_json["bytesIn"] == 0: + errormsg = ( + "[DUT %s]: Multicast traffic is " + "not received on interface %s " + "PktsIn: %s, BytesIn: %s " + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsIn"], + interface_json["bytesIn"], + ) + ) + return errormsg + + elif ( + interface_json["pktsIn"] != 0 and interface_json["bytesIn"] != 0 + ): + + traffic_dict[traffic_type][interface][ + "pktsIn" + ] = interface_json["pktsIn"] + traffic_dict[traffic_type][interface][ + "bytesIn" + ] = interface_json["bytesIn"] + + logger.info( + "[DUT %s]: Multicast traffic is " + "received on interface %s " + "PktsIn: %s, BytesIn: %s " + "[PASSED]!!" + % ( + dut, + interface, + interface_json["pktsIn"], + interface_json["bytesIn"], + ) + ) + + else: + errormsg = ( + "[DUT %s]: Multicast traffic interface %s:" + " Miss-match in " + "PktsIn: %s, BytesIn: %s" + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsIn"], + interface_json["bytesIn"], + ) + ) + return errormsg + + if traffic_type == "traffic_sent": + traffic_dict[traffic_type] = {} + for interface in interfaces: + traffic_dict[traffic_type][interface] = {} + interface_json = show_multicast_traffic_json[interface] + + if ( + interface_json["pktsOut"] == 0 + and interface_json["bytesOut"] == 0 + ): + errormsg = ( + "[DUT %s]: Multicast traffic is " + "not received on interface %s " + "PktsIn: %s, BytesIn: %s" + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsOut"], + interface_json["bytesOut"], + ) + ) + return errormsg + + elif ( + interface_json["pktsOut"] != 0 + and interface_json["bytesOut"] != 0 + ): + + traffic_dict[traffic_type][interface][ + "pktsOut" + ] = interface_json["pktsOut"] + traffic_dict[traffic_type][interface][ + "bytesOut" + ] = interface_json["bytesOut"] + + logger.info( + "[DUT %s]: Multicast traffic is " + "received on interface %s " + "PktsOut: %s, BytesOut: %s " + "[PASSED]!!" 
+ % ( + dut, + interface, + interface_json["pktsOut"], + interface_json["bytesOut"], + ) + ) + else: + errormsg = ( + "[DUT %s]: Multicast traffic interface %s:" + " Miss-match in " + "PktsOut: %s, BytesOut: %s " + "[FAILED]!!" + % ( + dut, + interface, + interface_json["pktsOut"], + interface_json["bytesOut"], + ) + ) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True if return_traffic == False else traffic_dict + + +def get_refCount_for_mroute(tgen, dut, iif, src_address, group_addresses): + """ + Verify upstream inbound interface is updated correctly + by running "show ip pim upstream" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `iif`: inbound interface + * `src_address`: source address + * `group_addresses`: IGMP group address + + Usage + ----- + dut = "r1" + iif = "r1-r0-eth0" + src_address = "*" + group_address = "225.1.1.1" + result = get_refCount_for_mroute(tgen, dut, iif, src_address, + group_address) + + Returns + ------- + refCount(int) + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + refCount = 0 + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying refCount for mroutes: ", dut) + show_ip_pim_upstream_json = run_frr_cmd( + rnode, "show ip pim upstream json", isjson=True + ) + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + # Verify group address + if grp_addr not in show_ip_pim_upstream_json: + errormsg = "[DUT %s]: Verifying upstream" " for group %s [FAILED]!!" % ( + dut, + grp_addr, + ) + return errormsg + group_addr_json = show_ip_pim_upstream_json[grp_addr] + + # Verify source address + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying upstream" " for (%s,%s) [FAILED]!!" % ( + dut, + src_address, + grp_addr, + ) + return errormsg + + # Verify Inbound Interface + if group_addr_json[src_address]["inboundInterface"] == iif: + refCount = group_addr_json[src_address]["refCount"] + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return refCount + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag): + """ + Verify flag state for mroutes and make sure (*, G)/(S, G) are having + coorect flags by running "show ip mroute" cli + + Parameters + ---------- + * `tgen`: topogen object + * `dut`: device under test + * `src_address`: source address + * `group_addresses`: IGMP group address + * `flag`: flag state, needs to be verified + + Usage + ----- + dut = "r1" + flag = "SC" + group_address = "225.1.1.1" + result = verify_multicast_flag_state(tgen, dut, src_address, + group_address, flag) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + if dut not in tgen.routers(): + return False + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: Verifying flag state for mroutes", dut) + show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True) + + if bool(show_ip_mroute_json) == False: + error_msg = "[DUT %s]: mroutes are not present or flushed out !!" 
% (dut) + return error_msg + + if type(group_addresses) is not list: + group_addresses = [group_addresses] + + for grp_addr in group_addresses: + if grp_addr not in show_ip_mroute_json: + errormsg = ( + "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! ", + dut, + src_address, + grp_addr, + ) + return errormsg + else: + group_addr_json = show_ip_mroute_json[grp_addr] + + if src_address not in group_addr_json: + errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % ( + dut, + src_address, + grp_addr, + ) + return errormsg + else: + mroutes = group_addr_json[src_address] + + if mroutes["installed"] != 0: + logger.info( + "[DUT %s]: mroute (%s,%s) is installed", dut, src_address, grp_addr + ) + + if mroutes["flags"] != flag: + errormsg = ( + "[DUT %s]: Verifying flag for (%s, %s) " + "mroute [FAILED]!! " + "Expected: %s Found: %s" + % (dut, src_address, grp_addr, flag, mroutes["flags"]) + ) + return errormsg + + logger.info( + "[DUT %s]: Verifying flag for (%s, %s)" + " mroute, [PASSED]!! " + "Found Expected: %s", + dut, + src_address, + grp_addr, + mroutes["flags"], + ) + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True + + +@retry(attempts=21, wait=2, return_is_str=True) +def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip): + """ + Verify all IGMP interface are up and running, config is verified + using "show ip igmp interface" cli + + Parameters + ---------- + * `tgen`: topogen object + * `topo` : json file data + * `dut` : device under test + * `igmp_iface` : interface name + * `interface_ip` : interface ip address + + Usage + ----- + result = verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip) + + Returns + ------- + errormsg(str) or True + """ + + logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name)) + + for router in tgen.routers(): + if router != dut: + continue + + logger.info("[DUT: %s]: Verifying PIM interface status:", + dut) + + rnode = tgen.routers()[dut] + show_ip_igmp_interface_json = \ + run_frr_cmd(rnode, "show ip igmp interface json", isjson=True) + + if igmp_iface in show_ip_igmp_interface_json: + igmp_intf_json = show_ip_igmp_interface_json[igmp_iface] + # Verifying igmp interface + if igmp_intf_json["address"] != interface_ip: + errormsg = ("[DUT %s]: igmp interface ip is not correct " + "[FAILED]!! Expected : %s, Found : %s" + %(dut, igmp_intf_json["address"], interface_ip)) + return errormsg + + logger.info("[DUT %s]: igmp interface: %s, " + "interface ip: %s" + " [PASSED]!!", + dut, igmp_iface, interface_ip) + else: + errormsg = ("[DUT %s]: igmp interface: %s " + "igmp interface ip: %s, is not present " + %(dut, igmp_iface, interface_ip)) + return errormsg + + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) + return True diff --git a/tests/topotests/lib/send_bsr_packet.py b/tests/topotests/lib/send_bsr_packet.py new file mode 100755 index 0000000000..c226899324 --- /dev/null +++ b/tests/topotests/lib/send_bsr_packet.py @@ -0,0 +1,58 @@ +# +# Copyright (c) 2019 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. 
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import sys +import argparse +from scapy.all import Raw, sendp +import binascii + + +def send_packet(packet, iface, interval, count): + """ + Read BSR packet in Raw format and send it to specified interface + + Parameter: + --------- + * `packet` : BSR packet in raw format + * `interface` : Interface from which packet would be send + * `interval` : Interval between the packets + * `count` : Number of packets to be sent + """ + + data = binascii.a2b_hex(packet) + p = Raw(load=data) + p.show() + sendp(p, inter=int(interval), iface=iface, count=int(count)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Send BSR Raw packet") + parser.add_argument("packet", help="Packet in raw format") + parser.add_argument("iface", help="Packet send to this ineterface") + parser.add_argument("--interval", help="Interval between packets", default=0) + parser.add_argument( + "--count", help="Number of times packet is sent repetitively", default=0 + ) + args = parser.parse_args() + + if not args.packet or not args.iface: + sys.exit(1) + + send_packet(args.packet, args.iface, args.interval, args.count) diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py new file mode 100644 index 0000000000..5112500e0b --- /dev/null +++ b/tests/topotests/lib/snmptest.py @@ -0,0 +1,152 @@ +# +# topogen.py +# Library of helper functions for NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +SNMP library to test snmp walks and gets + +Basic usage instructions: + +* define an SnmpTester class giving a router, address, community and version +* use test_oid or test_walk to check values in MIBS +* see tests/topotest/simple-snmp-test/test_simple_snmp.py for example +""" + +from topolog import logger + + +class SnmpTester(object): + "A helper class for testing SNMP" + + def __init__(self, router, iface, community, version): + self.community = community + self.version = version + self.router = router + self.iface = iface + logger.info( + "created SNMP tester: SNMPv{0} community:{1}".format( + self.version, self.community + ) + ) + + def _snmp_config(self): + """ + Helper function to build a string with SNMP + configuration for commands. 
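        For example (illustrative values, not taken from this change): with
        version "2c", community "public" and the configured address "1.1.1.1",
        this returns "-v 2c -c public 1.1.1.1", so self.get(".1.3.6.1.2.1.1.1.0")
        ends up running "snmpget -v 2c -c public 1.1.1.1 .1.3.6.1.2.1.1.1.0".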
+ """ + return "-v {0} -c {1} {2}".format(self.version, self.community, self.iface) + + @staticmethod + def _get_snmp_value(snmp_output): + tokens = snmp_output.strip().split() + + num_value_tokens = len(tokens) - 3 + + # this copes with the emptys string return + if num_value_tokens == 0: + return tokens[2] + + if num_value_tokens > 1: + output = "" + index = 3 + while index < len(tokens) - 1: + output += "{} ".format(tokens[index]) + index += 1 + output += "{}".format(tokens[index]) + return output + # third token is the value of the object + return tokens[3] + + @staticmethod + def _get_snmp_oid(snmp_output): + tokens = snmp_output.strip().split() + + # third token onwards is the value of the object + return tokens[0].split(".", 1)[1] + + @staticmethod + def _get_snmp_oid(snmp_output): + tokens = snmp_output.strip().split() + +# if len(tokens) > 5: +# return None + + + # third token is the value of the object + return tokens[0].split('.',1)[1] + + def _parse_multiline(self, snmp_output): + results = snmp_output.strip().split("\r\n") + + out_dict = {} + out_list = [] + for response in results: + out_dict[self._get_snmp_oid(response)] = self._get_snmp_value(response) + out_list.append(self._get_snmp_value(response)) + + return out_dict, out_list + + def get(self, oid): + cmd = "snmpget {0} {1}".format(self._snmp_config(), oid) + + result = self.router.cmd(cmd) + if "not found" in result: + return None + return self._get_snmp_value(result) + + def get_next(self, oid): + cmd = "snmpgetnext {0} {1}".format(self._snmp_config(), oid) + + result = self.router.cmd(cmd) + print("get_next: {}".format(result)) + if "not found" in result: + return None + return self._get_snmp_value(result) + + def walk(self, oid): + cmd = "snmpwalk {0} {1}".format(self._snmp_config(), oid) + + result = self.router.cmd(cmd) + return self._parse_multiline(result) + + def test_oid(self, oid, value): + print("oid: {}".format(self.get_next(oid))) + return self.get_next(oid) == value + + def test_oid_walk(self, oid, values, oids=None): + results_dict, results_list = self.walk(oid) + print("test_oid_walk: {} {}".format(oid, results_dict)) + if oids is not None: + index = 0 + for oid in oids: + # avoid key error for missing keys + if not oid in results_dict.keys(): + print("FAIL: missing oid key {}".format(oid)) + return False + if results_dict[oid] != values[index]: + print("FAIL{} {} |{}| == |{}|".format(oid, index, results_dict[oid], values[index])) + return False + index += 1 + return True + + # Return true if 'values' is a subset of 'results_list' + print("test {} == {}".format(results_list[: len(values)], values)) + return results_list[: len(values)] == values diff --git a/tests/topotests/lib/test/test_json.py b/tests/topotests/lib/test/test_json.py index b85e193d3b..7b3c8593cc 100755 --- a/tests/topotests/lib/test/test_json.py +++ b/tests/topotests/lib/test/test_json.py @@ -107,16 +107,25 @@ def test_json_intersect_multilevel_true(): dcomplete = { "i1": "item1", "i2": "item2", - "i3": {"i100": "item100",}, + "i3": { + "i100": "item100", + }, "i4": { - "i41": {"i411": "item411",}, - "i42": {"i421": "item421", "i422": "item422",}, + "i41": { + "i411": "item411", + }, + "i42": { + "i421": "item421", + "i422": "item422", + }, }, } dsub1 = { "i1": "item1", - "i3": {"i100": "item100",}, + "i3": { + "i100": "item100", + }, "i10": None, } dsub2 = { @@ -126,10 +135,36 @@ def test_json_intersect_multilevel_true(): } dsub3 = { "i2": "item2", - "i4": {"i41": {"i411": "item411",}, "i42": {"i422": "item422", "i450": None,}}, + 
"i4": { + "i41": { + "i411": "item411", + }, + "i42": { + "i422": "item422", + "i450": None, + }, + }, + } + dsub4 = { + "i2": "item2", + "i4": { + "i41": {}, + "i42": { + "i450": None, + }, + }, + } + dsub5 = { + "i2": "item2", + "i3": { + "i100": "item100", + }, + "i4": { + "i42": { + "i450": None, + } + }, } - dsub4 = {"i2": "item2", "i4": {"i41": {}, "i42": {"i450": None,}}} - dsub5 = {"i2": "item2", "i3": {"i100": "item100",}, "i4": {"i42": {"i450": None,}}} assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -144,17 +179,26 @@ def test_json_intersect_multilevel_false(): dcomplete = { "i1": "item1", "i2": "item2", - "i3": {"i100": "item100",}, + "i3": { + "i100": "item100", + }, "i4": { - "i41": {"i411": "item411",}, - "i42": {"i421": "item421", "i422": "item422",}, + "i41": { + "i411": "item411", + }, + "i42": { + "i421": "item421", + "i422": "item422", + }, }, } # Incorrect sub-level value dsub1 = { "i1": "item1", - "i3": {"i100": "item00",}, + "i3": { + "i100": "item00", + }, "i10": None, } # Inexistent sub-level @@ -166,14 +210,41 @@ def test_json_intersect_multilevel_false(): # Inexistent sub-level value dsub3 = { "i1": "item1", - "i3": {"i100": None,}, + "i3": { + "i100": None, + }, } # Inexistent sub-sub-level value - dsub4 = {"i4": {"i41": {"i412": "item412",}, "i42": {"i421": "item421",}}} + dsub4 = { + "i4": { + "i41": { + "i412": "item412", + }, + "i42": { + "i421": "item421", + }, + } + } # Invalid sub-sub-level value - dsub5 = {"i4": {"i41": {"i411": "item411",}, "i42": {"i421": "item420000",}}} + dsub5 = { + "i4": { + "i41": { + "i411": "item411", + }, + "i42": { + "i421": "item420000", + }, + } + } # sub-sub-level should be value - dsub6 = {"i4": {"i41": {"i411": "item411",}, "i42": "foobar",}} + dsub6 = { + "i4": { + "i41": { + "i411": "item411", + }, + "i42": "foobar", + } + } assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None @@ -187,7 +258,15 @@ def test_json_with_list_sucess(): "Test successful json comparisons that have lists." dcomplete = { - "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "list": [ + { + "i1": "item 1", + "i2": "item 2", + }, + { + "i10": "item 10", + }, + ], "i100": "item 100", } @@ -197,12 +276,19 @@ def test_json_with_list_sucess(): } # Test list correct list items dsub2 = { - "list": [{"i1": "item 1",},], + "list": [ + { + "i1": "item 1", + }, + ], "i100": "item 100", } # Test list correct list size dsub3 = { - "list": [{}, {},], + "list": [ + {}, + {}, + ], } assert json_cmp(dcomplete, dsub1) is None @@ -214,7 +300,15 @@ def test_json_with_list_failure(): "Test failed json comparisons that have lists." dcomplete = { - "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},], + "list": [ + { + "i1": "item 1", + "i2": "item 2", + }, + { + "i10": "item 10", + }, + ], "i100": "item 100", } @@ -224,12 +318,20 @@ def test_json_with_list_failure(): } # Test list incorrect list items dsub2 = { - "list": [{"i1": "item 2",},], + "list": [ + { + "i1": "item 2", + }, + ], "i100": "item 100", } # Test list correct list size dsub3 = { - "list": [{}, {}, {},], + "list": [ + {}, + {}, + {}, + ], } assert json_cmp(dcomplete, dsub1) is not None @@ -241,20 +343,52 @@ def test_json_list_start_success(): "Test JSON encoded data that starts with a list that should succeed." 
dcomplete = [ - {"id": 100, "value": "abc",}, - {"id": 200, "value": "abcd",}, - {"id": 300, "value": "abcde",}, + { + "id": 100, + "value": "abc", + }, + { + "id": 200, + "value": "abcd", + }, + { + "id": 300, + "value": "abcde", + }, ] - dsub1 = [{"id": 100, "value": "abc",}] + dsub1 = [ + { + "id": 100, + "value": "abc", + } + ] - dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abcd",}] + dsub2 = [ + { + "id": 100, + "value": "abc", + }, + { + "id": 200, + "value": "abcd", + }, + ] - dsub3 = [{"id": 300, "value": "abcde",}] + dsub3 = [ + { + "id": 300, + "value": "abcde", + } + ] dsub4 = [] - dsub5 = [{"id": 100,}] + dsub5 = [ + { + "id": 100, + } + ] assert json_cmp(dcomplete, dsub1) is None assert json_cmp(dcomplete, dsub2) is None @@ -272,13 +406,44 @@ def test_json_list_start_failure(): {"id": 300, "value": "abcde"}, ] - dsub1 = [{"id": 100, "value": "abcd",}] + dsub1 = [ + { + "id": 100, + "value": "abcd", + } + ] - dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abc",}] + dsub2 = [ + { + "id": 100, + "value": "abc", + }, + { + "id": 200, + "value": "abc", + }, + ] - dsub3 = [{"id": 100, "value": "abc",}, {"id": 350, "value": "abcde",}] + dsub3 = [ + { + "id": 100, + "value": "abc", + }, + { + "id": 350, + "value": "abcde", + }, + ] - dsub4 = [{"value": "abcx",}, {"id": 300, "value": "abcde",}] + dsub4 = [ + { + "value": "abcx", + }, + { + "id": 300, + "value": "abcde", + }, + ] assert json_cmp(dcomplete, dsub1) is not None assert json_cmp(dcomplete, dsub2) is not None diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index eaf7f90479..f958cc11d3 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -336,7 +336,9 @@ class Topogen(object): for gear in self.gears.values(): errors += gear.stop() if len(errors) > 0: - logger.error("Errors found post shutdown - details follow: {}".format(errors)) + logger.error( + "Errors found post shutdown - details follow: {}".format(errors) + ) self.net.stop() @@ -552,6 +554,8 @@ class TopoRouter(TopoGear): RD_SHARP = 14 RD_BABEL = 15 RD_PBRD = 16 + RD_PATH = 17 + RD_SNMP = 18 RD = { RD_ZEBRA: "zebra", RD_RIP: "ripd", @@ -569,6 +573,8 @@ class TopoRouter(TopoGear): RD_SHARP: "sharpd", RD_BABEL: "babeld", RD_PBRD: "pbrd", + RD_PATH: "pathd", + RD_SNMP: "snmpd", } def __init__(self, tgen, cls, name, **params): @@ -653,7 +659,7 @@ class TopoRouter(TopoGear): Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP, TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6, TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP, - TopoRouter.RD_PIM, TopoRouter.RD_PBR. + TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP. 
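        Illustrative call (the config path is a placeholder, only RD_SNMP is
        taken from this change set):
        router.load_config(TopoRouter.RD_SNMP, os.path.join(CWD, "r1/snmpd.conf"))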
""" daemonstr = self.RD.get(daemon) self.logger.info('loading "{}" configuration: {}'.format(daemonstr, source)) diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py index f2fafa5e2a..fcc6c19868 100644 --- a/tests/topotests/lib/topojson.py +++ b/tests/topotests/lib/topojson.py @@ -43,8 +43,9 @@ from lib.common_config import ( create_vrf_cfg, ) +from lib.pim import create_pim_config, create_igmp_config from lib.bgp import create_router_bgp -from lib.ospf import create_router_ospf +from lib.ospf import create_router_ospf, create_router_ospf6 ROUTER_LIST = [] @@ -68,20 +69,18 @@ def build_topo_from_json(tgen, topo): topo["switches"].keys(), key=lambda x: int(re_search("\d+", x).group(0)) ) - listRouters = ROUTER_LIST[:] - listSwitches = SWITCH_LIST[:] + listRouters = sorted(ROUTER_LIST[:]) + listSwitches = sorted(SWITCH_LIST[:]) listAllRouters = deepcopy(listRouters) dictSwitches = {} for routerN in ROUTER_LIST: logger.info("Topo: Add router {}".format(routerN)) tgen.add_router(routerN) - listRouters.append(routerN) for switchN in SWITCH_LIST: logger.info("Topo: Add switch {}".format(switchN)) dictSwitches[switchN] = tgen.add_switch(switchN) - listSwitches.append(switchN) if "ipv4base" in topo: ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"]) @@ -96,33 +95,25 @@ def build_topo_from_json(tgen, topo): for router in listRouters: topo["routers"][router]["nextIfname"] = 0 + router_count = 0 while listRouters != []: curRouter = listRouters.pop(0) # Physical Interfaces if "links" in topo["routers"][curRouter]: - - def link_sort(x): - if x == "lo": - return 0 - elif "link" in x: - return int(x.split("-link")[1]) - else: - return int(re_search("\d+", x).group(0)) - for destRouterLink, data in sorted( - topo["routers"][curRouter]["links"].items(), - key=lambda x: link_sort(x[0]), + topo["routers"][curRouter]["links"].iteritems() ): currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink] # Loopback interfaces if "type" in data and data["type"] == "loopback": + router_count += 1 if ( "ipv4" in currRouter_lo_json and currRouter_lo_json["ipv4"] == "auto" ): currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format( topo["lo_prefix"]["ipv4"], - number_to_row(curRouter), + router_count, number_to_column(curRouter), topo["lo_prefix"]["v4mask"], ) @@ -132,7 +123,7 @@ def build_topo_from_json(tgen, topo): ): currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format( topo["lo_prefix"]["ipv6"], - number_to_row(curRouter), + router_count, number_to_column(curRouter), topo["lo_prefix"]["v6mask"], ) @@ -167,6 +158,14 @@ def build_topo_from_json(tgen, topo): destRouter, curRouter, topo["routers"][destRouter]["nextIfname"] ) + # add link interface + destRouter_link_json["peer-interface"] = "{}-{}-eth{}".format( + curRouter, destRouter, topo["routers"][curRouter]["nextIfname"] + ) + currRouter_link_json["peer-interface"] = "{}-{}-eth{}".format( + destRouter, curRouter, topo["routers"][destRouter]["nextIfname"] + ) + topo["routers"][curRouter]["nextIfname"] += 1 topo["routers"][destRouter]["nextIfname"] += 1 @@ -311,8 +310,11 @@ def build_config_from_json(tgen, topo, save_bkup=True): ("prefix_lists", create_prefix_lists), ("bgp_community_list", create_bgp_community_lists), ("route_maps", create_route_maps), + ("pim", create_pim_config), + ("igmp", create_igmp_config), ("bgp", create_router_bgp), ("ospf", create_router_ospf), + ("ospf6", create_router_ospf6), ] ) diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 20d60ebbef..1e6ef1b2b3 100644 --- 
a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -609,8 +609,10 @@ def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None): ifacename, str_ifaceaction ) else: - cmd = 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( - ifacename, vrf_name, str_ifaceaction + cmd = ( + 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format( + ifacename, vrf_name, str_ifaceaction + ) ) node.run(cmd) @@ -924,40 +926,44 @@ def checkAddressSanitizerError(output, router, component, logdir=""): ) if addressSanitizerLog: # Find Calling Test. Could be multiple steps back - testframe=sys._current_frames().values()[0] - level=0 + testframe = sys._current_frames().values()[0] + level = 0 while level < 10: - test=os.path.splitext(os.path.basename(testframe.f_globals["__file__"]))[0] + test = os.path.splitext( + os.path.basename(testframe.f_globals["__file__"]) + )[0] if (test != "topotest") and (test != "topogen"): # Found the calling test - callingTest=os.path.basename(testframe.f_globals["__file__"]) + callingTest = os.path.basename(testframe.f_globals["__file__"]) break - level=level+1 - testframe=testframe.f_back - if (level >= 10): + level = level + 1 + testframe = testframe.f_back + if level >= 10: # somehow couldn't find the test script. - callingTest="unknownTest" + callingTest = "unknownTest" # # Now finding Calling Procedure - level=0 + level = 0 while level < 20: - callingProc=sys._getframe(level).f_code.co_name - if ((callingProc != "processAddressSanitizerError") and - (callingProc != "checkAddressSanitizerError") and - (callingProc != "checkRouterCores") and - (callingProc != "stopRouter") and - (callingProc != "__stop_internal") and - (callingProc != "stop") and - (callingProc != "stop_topology") and - (callingProc != "checkRouterRunning") and - (callingProc != "check_router_running") and - (callingProc != "routers_have_failure")): + callingProc = sys._getframe(level).f_code.co_name + if ( + (callingProc != "processAddressSanitizerError") + and (callingProc != "checkAddressSanitizerError") + and (callingProc != "checkRouterCores") + and (callingProc != "stopRouter") + and (callingProc != "__stop_internal") + and (callingProc != "stop") + and (callingProc != "stop_topology") + and (callingProc != "checkRouterRunning") + and (callingProc != "check_router_running") + and (callingProc != "routers_have_failure") + ): # Found the calling test break - level=level+1 - if (level >= 20): + level = level + 1 + if level >= 20: # something wrong - couldn't found the calling test function - callingProc="unknownProc" + callingProc = "unknownProc" with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile: sys.stderr.write( "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" @@ -979,7 +985,6 @@ def checkAddressSanitizerError(output, router, component, logdir=""): addrSanFile.write("\n---------------\n") return - addressSanitizerError = re.search( "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output ) @@ -989,16 +994,20 @@ def checkAddressSanitizerError(output, router, component, logdir=""): # No Address Sanitizer Error in Output. 
Now check for AddressSanitizer daemon file if logdir: - filepattern=logdir+"/"+router+"/"+component+".asan.*" - logger.debug("Log check for %s on %s, pattern %s\n" % (component, router, filepattern)) + filepattern = logdir + "/" + router + "/" + component + ".asan.*" + logger.debug( + "Log check for %s on %s, pattern %s\n" % (component, router, filepattern) + ) for file in glob.glob(filepattern): with open(file, "r") as asanErrorFile: - asanError=asanErrorFile.read() + asanError = asanErrorFile.read() addressSanitizerError = re.search( "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError - ) + ) if addressSanitizerError: - processAddressSanitizerError(addressSanitizerError, asanError, router, component) + processAddressSanitizerError( + addressSanitizerError, asanError, router, component + ) return True return False @@ -1065,7 +1074,7 @@ class Router(Node): if self.logdir is None: cur_test = os.environ["PYTEST_CURRENT_TEST"] self.logdir = "/tmp/topotests/" + cur_test[ - cur_test.find("/")+1 : cur_test.find(".py") + cur_test.find("/") + 1 : cur_test.find(".py") ].replace("/", ".") # If the logdir is not created, then create it and set the @@ -1073,7 +1082,7 @@ class Router(Node): if not os.path.isdir(self.logdir): os.system("mkdir -p " + self.logdir + "/" + name) os.system("chmod -R go+rw /tmp/topotests") - # Erase logs of previous run + # Erase logs of previous run os.system("rm -rf " + self.logdir + "/" + name) self.daemondir = None @@ -1096,6 +1105,8 @@ class Router(Node): "sharpd": 0, "babeld": 0, "pbrd": 0, + "pathd": 0, + "snmpd": 0, } self.daemons_options = {"zebra": ""} self.reportCores = True @@ -1279,6 +1290,8 @@ class Router(Node): % (self.routertype, self.routertype, self.routertype, daemon) ) self.waitOutput() + if (daemon == "snmpd") and (self.routertype == "frr"): + self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf') if (daemon == "zebra") and (self.daemons["staticd"] == 0): # Add staticd with zebra - if it exists staticd_path = os.path.join(self.daemondir, "staticd") @@ -1435,6 +1448,20 @@ class Router(Node): while "staticd" in daemons_list: daemons_list.remove("staticd") + if "snmpd" in daemons_list: + snmpd_path = "/usr/sbin/snmpd" + snmpd_option = self.daemons_options["snmpd"] + self.cmd( + "{0} {1} -C -c /etc/frr/snmpd.conf -p /var/run/{2}/snmpd.pid -x /etc/frr/agentx > snmpd.out 2> snmpd.err".format( + snmpd_path, snmpd_option, self.routertype + ) + ) + logger.info("{}: {} snmpd started".format(self, self.routertype)) + + # Remove `snmpd` so we don't attempt to start it again. + while "snmpd" in daemons_list: + daemons_list.remove("snmpd") + # Fix Link-Local Addresses # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. 
Fix this self.cmd( @@ -1598,6 +1625,8 @@ class Router(Node): return "%s: vtysh killed by AddressSanitizer" % (self.name) for daemon in self.daemons: + if daemon == "snmpd": + continue if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning): sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon)) if daemon == "staticd": diff --git a/tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json b/tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json new file mode 100644 index 0000000000..14cb0bee1d --- /dev/null +++ b/tests/topotests/multicast-pim-bsm-topo1/mcast_pim_bsmp_01.json @@ -0,0 +1,238 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "b1": { + "links": { + "f1": {"ipv4": "auto", "pim": "enable"}, + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"} + }, + "bsm": { + "bsr_packets": { + "packet1" : { + "data": "01005e00000d005056961165080045c000aa5af500000167372a46000001e000000d2400f5ce165b000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e1010101010100000100050606050096000001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "225.1.1.1/32": ["5.6.6.5/32"], + "225.200.100.100/32": ["210.210.210.210/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + + }, + "Desc" : "Packet with 3 group range - rp prio different" + }, + "packet2" : { + "data": "01005e00000d005056961165080045c0009420f400000167714146000001e000000d24000b3b164a000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e20101010101000001000909090900000000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"] + }, + "Desc" : "Packet 1 with hold time 0 for 226.1.1.1/32" + }, + "packet3" : { + "data": "01005e00000d005056961165080045c000944d0000000167453546000001e000000d2400e52b17c3000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "BSR Prio - TC 4" + }, + "packet4" : { + "data": 
"01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "225.1.1.1/32": ["9.9.9.9/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "TC - 5" + }, + "packet5" : { + "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "TC - 5, 225.1.1.1 with hold time 0" + }, + "packet6" : { + "data": "01005e00000d005056961165080045c0008a795e0000016718e146000001e000000d24006cc509d5000001004600000101000018e10101000707000001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "TC - 6,High prio rp removed on 225.1.1.0/24" + }, + "packet7" : { + "data": "01005e00000d005056961165080045c0007e6ebb00000167239046000001e000000d2400090810b3000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a00966400", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"] + }, + "Desc" : "TC - 8, rps with same priority" + }, + + "packet8" : { + "data": "01005e00000d005056b76687080045c000383cdf0000016755b246000001e000000d24008ad51a9f000001004600000101000020e1c86464010100000100d2d2d2d200960000", + "group": "225.200.100.100/32", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.200.100.100/32": ["210.210.210.210/32"] + }, + "Desc" : "TC - 30, grp add with all octet" + }, + + "packet9" : { + "data": "01005e00000d005056b76687080045c000387b8600000167170b46000001e000000d2400c6282245000001000101020701000020e1c86464010100000100d2d2d2d200960000", + "group": "225.200.100.100/32", + "candidate_rp": "210.210.210.210/32", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": 
"1.1.2.7/32", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.200.100.100/32": ["210.210.210.210/32"] + }, + "Desc" : "TC -29, BSM with preferred ip" + } + + } + } + }, + + "b2": { + "links": { + "f1": {"ipv4": "auto", "pim": "enable"}, + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"} + }, + "bsm": { + "bsr_packets": { + "packet1" : { + "data": "01005e00000d005056b70489080045c0003865db0000016731b641000001e000000d2400659c0c6f000001004100000101000018e10101000101000001002121212100960000", + "src_ip": "65.0.0.1/24", + "dest_ip": "65.0.0.2/24", + "bsr": "65.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["33.33.33.33/32"], + "225.200.100.100/32": ["210.210.210.210/32"] + } + }, + "packet2" : { + "data": "01005e00000d005056b70489080045c00038663000000167316141000001e000000d24006dce0433000a01004100000101000018e10101000101000001002121212100960000", + "src_ip": "65.0.0.1/24", + "dest_ip": "65.0.0.2/24", + "bsr": "65.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["33.33.33.33/32"] + } + }, + + "packet3" : { + "data": "01005e00000d005056b76687080045c00038f5c800000167a1c841000001e000000d2400c6621a10000001000a02010101000020e1c86464010100000100d2d2d2d200960000", + "src_ip": "65.0.0.1/24", + "dest_ip": "65.0.0.2/24", + "bsr": "10.2.1.1/32", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.200.100.100/32": ["210.210.210.210/32"] + } + } + + } + } + }, + + "f1": { + "links": { + "b1": {"ipv4": "auto", "pim": "enable"}, + "b2": {"ipv4": "auto", "pim": "enable"}, + "i1": {"ipv4": "auto", "pim": "enable"}, + "s1": {"ipv4": "auto", "pim": "enable"} + } + }, + "i1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"} + } + }, + "l1": { + "links": { + "i1": {"ipv4": "auto", "pim": "enable"}, + "r1": {"ipv4": "auto", "pim": "enable"} + }, + "igmp": { + "interfaces": { + "l1-r1-eth1" :{ + "igmp":{ + "version": "2" + } + } + } + } + }, + "s1": { + "links": { + "f1": {"ipv4": "auto", "pim": "enable"} + } + }, + "r1": { + "links": { + "l1": {"ipv4": "auto", "pim": "disable"} + } + } + } +} diff --git a/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py b/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py new file mode 100644 index 0000000000..c670c82d21 --- /dev/null +++ b/tests/topotests/multicast-pim-bsm-topo1/test_mcast_pim_bsmp_01.py @@ -0,0 +1,1653 @@ +#!/usr/bin/env python +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +Following tests are covered to test PIM BSM processing basic functionality: + +Test steps +- Create topology (setup module) +- Bring up topology + +Tests covered in this suite +1. Verify FRR router select higher IP BSR , when 2 BSR present in the network +2. Verify BSR and RP updated correctly after configuring as black hole address +3.1 Verify when new router added to the topology, FRR node will send + unicast BSM to new router +3.2 Verify if no forwarding bit is set , FRR is not forwarding the + BSM to other PIM nbrs +3.3 Verify multicast BSM is sent to new router when unicast BSM is disabled +4.1 Verfiy BSM arrived on non bsm capable interface is dropped and + not processed +4.2 Verify group to RP info updated correctly in FRR node, after shut and + no-shut of BSM enable interfaces +5. Verify static RP is preferred over BSR +6.1 Verify adding/deleting the group to rp mapping and RP priority + multiple times +6.2 Verify RP and (*,G) detail after PIM process restart on FRR node +7.1 Verify BSM timeout on FRR1 +7.2 Verify RP state in FRR1 after Bootstrap timer expiry +8.1 Verify upstream interfaces(IIF) and join state are updated properly + after BSM received for FRR +8.2 Verify IIF and OIL in "show ip pim state" updated properly after + BSM received +""" + +import os +import sys +import json +import time +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + step, + addKernelRoute, + create_static_routes, + iperfSendIGMPJoin, + stop_router, + start_router, + shutdown_bringup_interface, + kill_router_daemons, + start_router_daemons, + reset_config_on_routers, + do_countdown, + apply_raw_config, + kill_iperf, + run_frr_cmd, + required_linux_kernel_version, + topo_daemons, +) + +from lib.pim import ( + create_pim_config, + add_rp_interfaces_and_pim_config, + reconfig_interfaces, + scapy_send_bsr_raw_packet, + find_rp_from_bsrp_info, + verify_pim_grp_rp_source, + verify_pim_bsr, + verify_ip_mroutes, + verify_join_state_and_timer, + verify_pim_state, + verify_upstream_iif, + verify_igmp_groups, + verify_ip_pim_upstream_rpf, + enable_disable_pim_unicast_bsm, + enable_disable_pim_bsm, + clear_ip_mroute, + clear_ip_pim_interface_traffic, + verify_pim_interface_traffic, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/mcast_pim_bsmp_01.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +TOPOLOGY = """ + + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + +""" +# Global variables +NEXT_HOP1 = "70.0.0.1" +NEXT_HOP2 = "65.0.0.1" +BSR_IP_1 = "1.1.2.7" +BSR_IP_2 = "10.2.1.1" +BSR1_ADDR = "1.1.2.7/32" +BSR2_ADDR = "10.2.1.1/32" + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + 
"""Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("4.15") + if result is not True: + pytest.skip("Kernel requirements are not met") + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + logger.info("Master Topology: \n {}".format(TOPOLOGY)) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Local APIs +# +##################################################### + + +def clear_bsrp_data(tgen, topo): + + """ + clear bsm databas after test" + Parameters + ---------- + * `tgen`: topogen object + + Usage + ----- + result = clear_bsrp_data(tgen, topo) + Returns + ------- + errormsg(str) or True + """ + + for dut in tgen.routers(): + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: clear_bsrp_data") + + run_frr_cmd(rnode, "clear ip pim bsr-data") + + return True + + +def verify_state_incremented(state_before, state_after): + """ + API to compare interface traffic state incrementing + + Parameters + ---------- + * `state_before` : State dictionary for any particular instance + * `state_after` : State dictionary for any particular instance + """ + + for router, state_data in state_before.items(): + for state, value in state_data.items(): + if state_before[router][state] >= state_after[router][state]: + errormsg = ( + "[DUT: %s]: state %s value has not" + " incremented, Initial value: %s, " + "Current value: %s [FAILED!!]" + % ( + router, + state, + state_before[router][state], + state_after[router][state], + ) + ) + return errormsg + + logger.info( + "[DUT: %s]: State %s value is " + "incremented, Initial value: %s, Current value: %s" + " [PASSED!!]", + router, + state, + state_before[router][state], + state_after[router][state], + ) + + return True + + +def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr, packet): + """ + API to do required configuration to send and receive BSR packet + """ + + # Re-configure interfaces as per BSR packet + result = reconfig_interfaces(tgen, topo, bsr, fhr, packet) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Create static routes + if "bsr" in 
topo["routers"][bsr]["bsm"]["bsr_packets"][packet]: + bsr_route = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["bsr"] + next_hop = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["src_ip"].split( + "/" + )[0] + next_hop_rp = topo["routers"][fhr]["links"][rp]["ipv4"].split("/")[0] + next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0] + + # Add static routes + input_dict = { + fhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop}]}, + rp: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_rp}]}, + lhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_lhr}]}, + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add kernal route for source + group = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["pkt_dst"] + bsr_interface = topo["routers"][bsr]["links"][fhr]["interface"] + result = addKernelRoute(tgen, bsr, bsr_interface, group) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # RP Mapping + rp_mapping = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["rp_mapping"] + + # Add interfaces in RP for all the RPs + result = add_rp_interfaces_and_pim_config(tgen, topo, "lo", rp, rp_mapping) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add kernal routes to sender and receiver + for group, rp_list in rp_mapping.items(): + mask = group.split("/")[1] + if int(mask) == 32: + group = group.split("/")[0] + + # Add kernal routes for sender + s_interface = topo["routers"][sender]["links"][fhr]["interface"] + result = addKernelRoute(tgen, sender, s_interface, group) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add kernal routes for receiver + r_interface = topo["routers"][receiver]["links"][lhr]["interface"] + result = addKernelRoute(tgen, receiver, r_interface, group) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add static routes for RPs in FHR and LHR + next_hop_fhr = topo["routers"][rp]["links"][fhr]["ipv4"].split("/")[0] + next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0] + input_dict = { + fhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_fhr}]}, + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + input_dict = { + lhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_lhr}]}, + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + return True + + +##################################################### +# +# Testcases +# +##################################################### + + +def test_BSR_higher_prefer_ip_p0(request): + """ + Verify FRR router select higher IP BSR , when 2 BSR present in the network + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + step("pre-configure BSM packet") + step("Configure cisco-1 as BSR1 1.1.2.7") + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + step("Configure cisco-1 as BSR1 10.2.1.1") + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + step("configuring loopback address of b1 and b2 as BSR") + intf_lo_addr_b1 = topo["routers"]["b1"]["links"]["lo"]["ipv4"] + intf_lo_addr_b2 = topo["routers"]["b2"]["links"]["lo"]["ipv4"] + + raw_config = { + "b1": { + "raw_config": [ + "interface lo", + "no ip address {}".format(intf_lo_addr_b1), + "ip address {}".format(BSR1_ADDR), + "ip pim", + ] + }, + "b2": { + "raw_config": [ + "interface lo", + "no ip address {}".format(intf_lo_addr_b2), + "ip address {}".format(BSR2_ADDR), + "ip pim", + ] + }, + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + GROUP_ADDRESS = "225.200.100.100" + step("configuring static routes for both the BSR") + + next_hop_rp = topo["routers"]["f1"]["links"]["i1"]["ipv4"].split("/")[0] + next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0] + + input_dict = { + "f1": { + "static_routes": [ + {"network": BSR1_ADDR, "next_hop": NEXT_HOP1}, + {"network": BSR2_ADDR, "next_hop": NEXT_HOP2}, + ] + }, + "i1": { + "static_routes": [ + {"network": BSR1_ADDR, "next_hop": next_hop_rp}, + {"network": BSR2_ADDR, "next_hop": next_hop_rp}, + ] + }, + "l1": { + "static_routes": [ + {"network": BSR1_ADDR, "next_hop": next_hop_lhr}, + {"network": BSR2_ADDR, "next_hop": next_hop_lhr}, + ] + }, + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + do_countdown(5) + + dut = "l1" + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_1) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"] + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("Send BSR packet from b2 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet3") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + step("Verify if b2 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_2) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + 
step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_2, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("Shut higher prefer BSR2 link f1 to b2") + + f1_b2_eth1 = topo["routers"]["f1"]["links"]["b2"]["interface"] + shutdown_bringup_interface(tgen, "f1", "f1-b2-eth1", False) + + step("clearing bsr to timeout old BSR") + clear_bsrp_data(tgen, topo) + + step("Send BSR packet from b1 and b2 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet3") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("sleeping for 3 sec to leran new packet") + do_countdown(3) + step("verify BSR1 is become prefered RP") + dut = "l1" + + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_1) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("NoShut higher prefer BSR2 link f1 to b2") + step("sleeping for 3 min to leran new packet") + do_countdown(3) + f1_b2_eth1 = topo["routers"]["f1"]["links"]["b2"]["interface"] + shutdown_bringup_interface(tgen, "f1", "f1-b2-eth1", True) + step("verify BSR2 is become prefered RP") + dut = "l1" + + step("Send BSR packet from b1 and b2 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet3") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify if b2 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_2) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_2, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("Clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_BSR_CRP_with_blackhole_address_p1(request): + """ + Verify BSR and RP updated correctly after configuring as black hole address + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + 
clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + step("pre-configure BSM packet") + step("Configure cisco-1 as BSR1 1.1.2.7") + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("configuring loopback address of b1 and b2 as BSR") + intf_lo_addr_b1 = topo["routers"]["b1"]["links"]["lo"]["ipv4"] + + raw_config = { + "b1": { + "raw_config": [ + "interface lo", + "no ip address {}".format(intf_lo_addr_b1), + "ip address {}".format(BSR1_ADDR), + "ip pim", + ] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + GROUP_ADDRESS = "225.200.100.100" + step("configuring static routes for both the BSR") + + next_hop_rp = topo["routers"]["f1"]["links"]["i1"]["ipv4"].split("/")[0] + next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0] + + input_dict = { + "f1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": NEXT_HOP1}]}, + "i1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": next_hop_rp}]}, + "l1": {"static_routes": [{"network": BSR1_ADDR, "next_hop": next_hop_lhr}]}, + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Use scapy to send pre-defined packet from senser to receiver + + group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"] + CRP = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["candidate_rp"] + step("waiting for BSR to timeout before configuring blackhole route") + clear_bsrp_data(tgen, topo) + + step("Configure black-hole address for BSR and candidate RP") + input_dict = { + "f1": { + "static_routes": [{"network": [BSR1_ADDR, CRP], "next_hop": "blackhole"}] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + intf_f1_i1 = topo["routers"]["f1"]["links"]["i1"]["interface"] + step("Verify bsm transit count is not increamented" "show ip pim interface traffic") + state_dict = {"f1": {intf_f1_i1: ["bsmTx"]}} + + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("Sending BSR after Configure black hole address for BSR and candidate RP") + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify if b1 chosen as BSR in l1") + result = verify_pim_bsr(tgen, topo, "l1", BSR_IP_1, expected=False) + assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is not 
True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + step("Remove black-hole address for BSR and candidate RP") + input_dict = { + "f1": { + "static_routes": [ + {"network": [BSR1_ADDR, CRP], "next_hop": "blackhole", "delete": True} + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Sending BSR after removing black-hole address for BSR and candidate RP") + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", BSR_IP_1) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"] + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, BSR_IP_1, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_new_router_fwd_p0(request): + """ + 1. Verify when new router added to the topology, FRR node will send + unicast BSM to new router + 2. Verify if no forwarding bit is set , FRR is not forwarding the + BSM to other PIM nbrs + 3. Verify multicast BSM is sent to new router when unicast BSM is disabled + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify bsr state in i1 + step("Verify if b1 chosen as BSR in i1") + result = verify_pim_bsr(tgen, topo, "i1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify ip mroute + iif = "l1-i1-eth0" + src_addr = "*" + oil = "l1-r1-eth1" + + step("Verify mroute populated on l1") + result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Reload i1 and l1 + step("Reloading i1 and l1. Stop both. bring up l1 and then i1") + stop_router(tgen, "i1") + start_router(tgen, "i1") + stop_router(tgen, "l1") + start_router(tgen, "l1") + + # Verify bsr state in i1 + step("Verify BSR in i1 after restart while no new bsm sent from b1") + result = verify_pim_bsr(tgen, topo, "i1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify bsr state in l1 + step("Verify no BSR in l1 as i1 would not forward the no-forward bsm") + result = verify_pim_bsr(tgen, topo, "l1", bsr_ip, expected=False) + assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # unconfigure unicast bsm on f1-i1-eth2 + step("unconfigure unicast bsm on f1-i1-eth2, will forward with only mcast") + enable_disable_pim_unicast_bsm(tgen, "f1", "f1-i1-eth2", enable=False) + + # Reboot i1 to check if still bsm received with multicast address + step("Reboot i1 to check if still bsm received with multicast address") + stop_router(tgen, "i1") + start_router(tgen, "i1") + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify again if BSR is installed from bsm forwarded by f1 + step("Verify again if BSR is installed from bsm forwarded by f1") + result = verify_pim_bsr(tgen, topo, "i1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Use scapy to send pre-defined packet from senser to receiver + step("Send another BSM packet from b1 which will reach l1(LHR)") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + do_countdown(5) + + # Verify ip mroute populated again + step("Verify 
mroute again on l1 (lhr)") + result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_int_bsm_config_p1(request): + """ + 1. Verfiy BSM arrived on non bsm capable interface is dropped and + not processed + 2. Verify group to RP info updated correctly in FRR node, after shut and + no-shut of BSM enable interfaces + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSM packet from b1") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify bsr state in i1 + step("Verify if b1 is chosen as BSR in i1") + result = verify_pim_bsr(tgen, topo, "i1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # check if mroute installed + step("check if mroute installed in i1") + iif = "lo" + src_addr = "*" + oil = "i1-l1-eth1" + + result = verify_ip_mroutes(tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # wait till bsm rp age out + step("wait till bsm rp age out") + clear_bsrp_data(tgen, topo) + + # check if mroute uninstalled because of rp age out + step("check if mroute uninstalled because of rp age out in i1") + result = verify_ip_mroutes( + tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil, expected=False + ) + assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # unconfigure bsm processing on f1 on f1-i1-eth2 + step("unconfigure bsm processing on f1 in f1-i1-eth2, will drop bsm") + result = enable_disable_pim_bsm(tgen, "f1", "f1-i1-eth2", enable=False) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSM packet from b1") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 chosen as BSR in f1") + result = 
verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify bsr state in i1 + step("Verify if b1 is not chosen as BSR in i1") + result = verify_pim_bsr(tgen, topo, "i1", bsr_ip, expected=False) + assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # check if mroute still not installed because of rp not available + step("check if mroute still not installed because of rp not available") + result = verify_ip_mroutes( + tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil, expected=False + ) + assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # configure bsm processing on i1 on f1-i1-eth2 + step("configure bsm processing on f1 in f1-i1-eth2, will accept bsm") + result = enable_disable_pim_bsm(tgen, "f1", "f1-i1-eth2", enable=True) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSM packet again from b1") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify again if BSR is installed from bsm forwarded by f1 + step("Verify again if BSR is installed from bsm forwarded by f1") + result = verify_pim_bsr(tgen, topo, "i1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # verify ip mroute populated + step("Verify ip mroute") + result = verify_ip_mroutes(tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Shut/No shut the bsm rpf interface and check mroute on lhr(l1) + step("Shut/No shut the bsm rpf interface and check mroute on lhr(l1)") + intf = "l1-i1-eth0" + shutdown_bringup_interface(tgen, "l1", intf, False) + shutdown_bringup_interface(tgen, "l1", intf, True) + + iif = "l1-i1-eth0" + oil = "l1-r1-eth1" + + result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_static_rp_override_p1(request): + """ + Verify static RP is preferred over BSR + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + # Use scapy to send pre-defined packet from senser to receiver + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check igmp groups + step("Verify IGMP groups in LHR") + dut = "l1" + intf = "l1-r1-eth1" + result = verify_igmp_groups(tgen, dut, intf, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + group = "225.1.1.1/32" + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify that BS RP in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + iif = "l1-i1-eth0" + # Verify upstream rpf for 225.1.1.1 is chosen as rp1 + step("Verify upstream rpf for 225.1.1.1 is chosen as bsrp") + result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Configure a static rp for the group 225.1.1.1/32 + step("Configure a static rp 33.33.33.33 for the group 225.1.1.1/32 in l1") + input_dict = { + "l1": { + "pim": { + "rp": [ + {"rp_addr": "33.33.33.33", "group_addr_range": ["225.1.1.1/32"],} + ] + } + } + } + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Verify that static rp is configured over bsrp + static_rp = "33.33.33.33" + step("Verify that Static RP in LHR in l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "Static", static_rp) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify if upstream also reflects the static rp + step("Verify upstream rpf for 225.1.1.1 is chosen as static in l1") + result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, static_rp) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # delete static rp for the group 225.1.1.1/32 + step("Delete static rp 33.33.33.33 for the group 225.1.1.1/32 in l1") + input_dict = { + "l1": { + "pim": { + "rp": [ + { + "rp_addr": "33.33.33.33", + "group_addr_range": ["225.1.1.1/32"], + "delete": True, + } + ] + } + } + } + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n 
Error: {}".format(tc_name, result) + + # Verify if bsrp is installed back for the group 225.1.1.1/32 + step("Verify that BS RP in installed in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify upstream rpf for 225.1.1.1 is chosen as bsrp + step("Verify upstream rpf for 225.1.1.1 is chosen as bsrp in l1") + result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_bsmp_stress_add_del_restart_p2(request): + """ + 1. Verify adding/deleting the group to rp mapping and RP priority + multiple times + 2. Verify RP and (*,G) detail after PIM process restart on FRR node + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 is chosen as bsr in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + group = "225.1.1.0/24" + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp1 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp1 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify RP in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp1[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSR packet from b1 after deleting high prio rp for 225.1.1.0/24 + step("Send BSM from b1 to FHR deleting high prio rp for 225.1.1.0/24") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet6") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp2 = 
find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp2 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + logger.info("RP old: %s RP2 new: %s", rp1[group], rp2[group]) + + # Verify is the rp is different now + assert rp1[group] != rp2[group], "Testcase {} :Failed \n Error {}".format( + tc_name, result + ) + + rp_add1 = rp1[group] + rp_add2 = rp2[group] + + # Verify if that rp is installed + step("Verify new RP in LHR installed") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("Change rp priority in the bsm and send multiple times") + + for i in range(4): + # Send BSR pkt from b1 after putting back high prio rp for 225.1.1.0/24 + step("Send BSM from b1 to FHR put back high prio rp for 225.1.1.0/24") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR") + rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp2 is not {}, "Testcase {} :Failed \n Error : RP not Found".format( + tc_name + ) + + # Verify is the rp is different now + step("Verify now old RP is elected again") + assert ( + rp_add1 == rp2[group] + ), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format( + tc_name, rp_add1, + ) + + # Verify if that rp is installed + step("Verify old RP in LHR installed") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSR packet from b1 after deleting high prio rp for 225.1.1.0/24 + step("Send BSM from b1 to FHR deleting high prio rp for 225.1.1.0/24") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet6") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify if that rp is installed + step("Verify new RP(rp2) in LHR installed") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Restart pimd + step("Restarting pimd in LHR") + kill_router_daemons(tgen, "l1", ["pimd"]) + start_router_daemons(tgen, "l1", ["pimd"]) + logger.info("Restarting done") + + # Verify if that rp is installed + step("Verify old RP in LHR installed") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send IGMP join to LHR + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + do_countdown(5) + + # VErify mroute created after pimd restart + step("VErify mroute created after pimd restart") + iif = "l1-i1-eth0" + src_addr = "*" + oil = "l1-r1-eth1" + result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_BSM_timeout_p0(request): + """ + Verify BSM timeout on FRR1 + Verify RP state in FRR1 after Bootstrap timer expiry + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + 
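+    # Illustrative note (assuming RFC 5059 default timers, not stated in this
+    # patch): the "130 secs" BSR timeout referenced later in this test follows
+    # from the bootstrap timer arithmetic
+    #     BS_Timeout = 2 * BS_Period + 10 = 2 * 60 + 10 = 130 seconds,
+    # i.e. a BSR that stops originating BSMs should age out after roughly 130 s.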
tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + + # Use scapy to send pre-defined packet from senser to receiver + step("send BSR packet from b1") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Send IGMP join for group 225.1.1.1 from receiver + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify bsr state in FHR f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify ip mroute in LHR + step(" Verify ip mroute in LHR l1") + dut = "l1" + iif = "l1-i1-eth0" + src_addr = "*" + oil = "l1-r1-eth1" + result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify join state and join timer + step("Verify join state and join timer in lhr l1") + result = verify_join_state_and_timer(tgen, dut, iif, src_addr, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify upstream IIF interface + step("Verify upstream IIF interface in LHR l1") + result = verify_upstream_iif(tgen, dut, iif, src_addr, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify RP mapping + dut = "l1" + group = "225.1.1.1/32" + step("Verify RP mapping in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp != {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + logger.info("Waiting for 130 secs to check BSR timeout") + clear_bsrp_data(tgen, topo) + + # Verify if bsr has aged out + step("Verify if bsr has aged out in f1") + no_bsr_ip = "0.0.0.0" + result = verify_pim_bsr(tgen, topo, "f1", no_bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = verify_pim_grp_rp_source( + tgen, topo, "f1", group, rp_source="BSR", expected=False + ) + + assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify RP mapping removed after hold timer expires + group = "225.1.1.1/32" + step("Verify RP mapping removed after hold timer expires in l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp == {}, "Testcase {} :Failed \n Error : RP found when not expected".format( + tc_name + ) + + # Verify iif is unknown after RP timeout + step("Verify iif is unknown after RP timeout in l1") + iif = "Unknown" + result = verify_upstream_iif( + tgen, dut, iif, src_addr, GROUP_ADDRESS, joinState="NotJoined" + ) + 
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify join state and join timer + step("Verify join state and join timer in l1") + iif = "l1-i1-eth0" + result = verify_join_state_and_timer( + tgen, dut, iif, src_addr, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify ip mroute is not installed + step("Verify mroute not installed in l1") + result = verify_ip_mroutes( + tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, expected=False + ) + assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_iif_join_state_p0(request): + """ + 1. Verify upstream interfaces(IIF) and join state are updated properly + after BSM received for FRR + 2. Verify IIF and OIL in "show ip pim state" updated properly after + BSM received + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check igmp groups + step("Verify IGMP groups in LHR l1") + dut = "l1" + intf = "l1-r1-eth1" + result = verify_igmp_groups(tgen, dut, intf, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + group = "225.1.1.1/32" + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify RP in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify join state and join timer + step("Verify join state and join timer l1") + iif = "l1-i1-eth0" + src_addr = "*" + result = verify_join_state_and_timer(tgen, dut, iif, src_addr, GROUP_ADDRESS) + assert result 
is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify upstream IIF interface + step("Verify upstream IIF interface l1") + result = verify_upstream_iif(tgen, dut, iif, src_addr, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify IIF/OIL in pim state + oil = "l1-r1-eth1" + result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify ip mroute + src_addr = "*" + step("Verify ip mroute in l1") + result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Make RP unreachanble in LHR + step("Make RP unreachanble in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + next_hop_lhr = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0] + + rp_ip = rp[group] + "/32" + input_dict = { + "l1": { + "static_routes": [ + {"network": rp_ip, "next_hop": next_hop_lhr, "delete": True} + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP unreachable + step("Check RP unreachability") + iif = "Unknown" + result = verify_upstream_iif( + tgen, dut, iif, src_addr, GROUP_ADDRESS, joinState="NotJoined" + ) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify that it is not installed + step("Verify that it is not installed") + iif = "<iif?>" + result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS, installed_fl=0) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify mroute not installed + step("Verify mroute not installed") + result = verify_ip_mroutes( + tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, expected=False + ) + assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Add back route for RP to make it reachable + step("Add back route for RP to make it reachable") + input_dict = { + "l1": {"static_routes": [{"network": rp_ip, "next_hop": next_hop_lhr,}]} + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify that (*,G) installed in mroute again + iif = "l1-i1-eth0" + result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json b/tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json new file mode 100644 index 0000000000..14cb0bee1d --- /dev/null +++ b/tests/topotests/multicast-pim-bsm-topo2/mcast_pim_bsmp_02.json @@ -0,0 +1,238 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "b1": { + "links": { + "f1": {"ipv4": "auto", "pim": "enable"}, + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"} + }, + "bsm": { + "bsr_packets": { + "packet1" : { + "data": 
"01005e00000d005056961165080045c000aa5af500000167372a46000001e000000d2400f5ce165b000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e1010101010100000100050606050096000001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "225.1.1.1/32": ["5.6.6.5/32"], + "225.200.100.100/32": ["210.210.210.210/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + + }, + "Desc" : "Packet with 3 group range - rp prio different" + }, + "packet2" : { + "data": "01005e00000d005056961165080045c0009420f400000167714146000001e000000d24000b3b164a000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e20101010101000001000909090900000000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"] + }, + "Desc" : "Packet 1 with hold time 0 for 226.1.1.1/32" + }, + "packet3" : { + "data": "01005e00000d005056961165080045c000944d0000000167453546000001e000000d2400e52b17c3000001004600000101000018e1010100080800000100090a090a0096650001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "BSR Prio - TC 4" + }, + "packet4" : { + "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "225.1.1.1/32": ["9.9.9.9/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "TC - 5" + }, + "packet5" : { + "data": "01005e00000d005056961165080045c000aa3d1c00000167550346000001e000000d24000d671c52000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a0096640001000020e1010101010100000100090909090000000001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + 
"9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "TC - 5, 225.1.1.1 with hold time 0" + }, + "packet6" : { + "data": "01005e00000d005056961165080045c0008a795e0000016718e146000001e000000d24006cc509d5000001004600000101000018e10101000707000001000909090a0096660001000708090a00966700010007070907009668000100070702070096690001000705020700966a0001000702020700966b0001000202020200966c0001000020e20101010101000001000909090900960000", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"], + "226.1.1.1/32": ["9.9.9.9/32"] + }, + "Desc" : "TC - 6,High prio rp removed on 225.1.1.0/24" + }, + "packet7" : { + "data": "01005e00000d005056961165080045c0007e6ebb00000167239046000001e000000d2400090810b3000001004600000101000018e1010100080800000100020202020096640001000909090a0096640001000707020700966400010007020207009664000100070709070096640001000708090a00966400010007050207009664000100090a090a00966400", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["9.10.9.10/32", "7.8.9.10/32", + "9.9.9.10/32", "7.7.9.7/32", + "7.7.2.7/32", "7.5.2.7/32", + "7.2.2.7/32", "2.2.2.2/32"] + }, + "Desc" : "TC - 8, rps with same priority" + }, + + "packet8" : { + "data": "01005e00000d005056b76687080045c000383cdf0000016755b246000001e000000d24008ad51a9f000001004600000101000020e1c86464010100000100d2d2d2d200960000", + "group": "225.200.100.100/32", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "70.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.200.100.100/32": ["210.210.210.210/32"] + }, + "Desc" : "TC - 30, grp add with all octet" + }, + + "packet9" : { + "data": "01005e00000d005056b76687080045c000387b8600000167170b46000001e000000d2400c6282245000001000101020701000020e1c86464010100000100d2d2d2d200960000", + "group": "225.200.100.100/32", + "candidate_rp": "210.210.210.210/32", + "src_ip": "70.0.0.1/24", + "dest_ip": "70.0.0.2/24", + "bsr": "1.1.2.7/32", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.200.100.100/32": ["210.210.210.210/32"] + }, + "Desc" : "TC -29, BSM with preferred ip" + } + + } + } + }, + + "b2": { + "links": { + "f1": {"ipv4": "auto", "pim": "enable"}, + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"} + }, + "bsm": { + "bsr_packets": { + "packet1" : { + "data": "01005e00000d005056b70489080045c0003865db0000016731b641000001e000000d2400659c0c6f000001004100000101000018e10101000101000001002121212100960000", + "src_ip": "65.0.0.1/24", + "dest_ip": "65.0.0.2/24", + "bsr": "65.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["33.33.33.33/32"], + "225.200.100.100/32": ["210.210.210.210/32"] + } + }, + "packet2" : { + "data": "01005e00000d005056b70489080045c00038663000000167316141000001e000000d24006dce0433000a01004100000101000018e10101000101000001002121212100960000", + "src_ip": "65.0.0.1/24", + "dest_ip": "65.0.0.2/24", + "bsr": "65.0.0.1/24", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.1.1.0/24": ["33.33.33.33/32"] + } + }, + + "packet3" : { + "data": "01005e00000d005056b76687080045c00038f5c800000167a1c841000001e000000d2400c6621a10000001000a02010101000020e1c86464010100000100d2d2d2d200960000", + "src_ip": "65.0.0.1/24", + "dest_ip": "65.0.0.2/24", + "bsr": 
"10.2.1.1/32", + "pkt_dst": "224.0.0.13", + "rp_mapping" : { + "225.200.100.100/32": ["210.210.210.210/32"] + } + } + + } + } + }, + + "f1": { + "links": { + "b1": {"ipv4": "auto", "pim": "enable"}, + "b2": {"ipv4": "auto", "pim": "enable"}, + "i1": {"ipv4": "auto", "pim": "enable"}, + "s1": {"ipv4": "auto", "pim": "enable"} + } + }, + "i1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"} + } + }, + "l1": { + "links": { + "i1": {"ipv4": "auto", "pim": "enable"}, + "r1": {"ipv4": "auto", "pim": "enable"} + }, + "igmp": { + "interfaces": { + "l1-r1-eth1" :{ + "igmp":{ + "version": "2" + } + } + } + } + }, + "s1": { + "links": { + "f1": {"ipv4": "auto", "pim": "enable"} + } + }, + "r1": { + "links": { + "l1": {"ipv4": "auto", "pim": "disable"} + } + } + } +} diff --git a/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py b/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py new file mode 100644 index 0000000000..459afb5a02 --- /dev/null +++ b/tests/topotests/multicast-pim-bsm-topo2/test_mcast_pim_bsmp_02.py @@ -0,0 +1,1115 @@ +#!/usr/bin/env python +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test PIM BSM processing basic functionality: + +Test steps +- Create topology (setup module) +- Bring up topology + +Tests covered in this suite +1. Verify (*,G) mroute detail on FRR router after BSM rp installed +2. Verify group to RP updated correctly on FRR router, when BSR advertising + the overlapping group address +3. Verify group to RP info is updated correctly, when BSR advertising the + same RP with different priority +4. Verify group to RP mapping in FRR node when 2 BSR are present in the network + and both are having same BSR priority +5. Verify RP is selected based on hash function, when BSR advertising the group + to RP mapping with same priority +6. Verify fragmentation of bootstrap message +7. Verify when candidate RP advertised with 32 mask length + and contain all the contacts +""" + +import os +import sys +import json +import time +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. 
+ +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + step, + addKernelRoute, + create_static_routes, + iperfSendIGMPJoin, + stop_router, + start_router, + shutdown_bringup_interface, + kill_router_daemons, + start_router_daemons, + reset_config_on_routers, + do_countdown, + apply_raw_config, + kill_iperf, + run_frr_cmd, + required_linux_kernel_version, + topo_daemons, +) + +from lib.pim import ( + create_pim_config, + add_rp_interfaces_and_pim_config, + reconfig_interfaces, + scapy_send_bsr_raw_packet, + find_rp_from_bsrp_info, + verify_pim_grp_rp_source, + verify_pim_bsr, + verify_ip_mroutes, + verify_join_state_and_timer, + verify_pim_state, + verify_upstream_iif, + verify_igmp_groups, + verify_ip_pim_upstream_rpf, + enable_disable_pim_unicast_bsm, + enable_disable_pim_bsm, + clear_ip_mroute, + clear_ip_pim_interface_traffic, + verify_pim_interface_traffic, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/mcast_pim_bsmp_02.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +TOPOLOGY = """ + + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + +""" +# Global variables +NEXT_HOP1 = "70.0.0.1" +NEXT_HOP2 = "65.0.0.1" +BSR_IP_1 = "1.1.2.7" +BSR_IP_2 = "10.2.1.1" +BSR1_ADDR = "1.1.2.7/32" +BSR2_ADDR = "10.2.1.1/32" + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("4.15") + if result is not True: + pytest.skip("Kernel requirements are not met") + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + logger.info("Master Topology: \n {}".format(TOPOLOGY)) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Local APIs +# +##################################################### + + +def clear_bsrp_data(tgen, topo): + + """ + clear bsm databas after test" + Parameters + ---------- + * `tgen`: topogen object + + Usage + ----- + result = clear_bsrp_data(tgen, topo) + Returns + ------- + errormsg(str) or True + """ + + for dut in tgen.routers(): + + rnode = tgen.routers()[dut] + + logger.info("[DUT: %s]: clear_bsrp_data") + + run_frr_cmd(rnode, "clear ip pim bsr-data") + + return True + + +def pre_config_to_bsm(tgen, topo, tc_name, bsr, sender, receiver, fhr, rp, lhr, packet): + """ + API to do required configuration to send and receive BSR packet + """ + + # Re-configure interfaces as per BSR packet + result = reconfig_interfaces(tgen, topo, bsr, fhr, packet) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Create static routes + if "bsr" in topo["routers"][bsr]["bsm"]["bsr_packets"][packet]: + bsr_route = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["bsr"] + next_hop = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["src_ip"].split( + "/" + )[0] + next_hop_rp = topo["routers"][fhr]["links"][rp]["ipv4"].split("/")[0] + next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0] + + # Add static routes + input_dict = { + fhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop}]}, + rp: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_rp}]}, + lhr: {"static_routes": [{"network": bsr_route, "next_hop": next_hop_lhr}]}, + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add kernal route for source + group = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["pkt_dst"] + bsr_interface = topo["routers"][bsr]["links"][fhr]["interface"] + result = addKernelRoute(tgen, bsr, bsr_interface, group) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # RP Mapping + rp_mapping = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["rp_mapping"] + + # Add interfaces in RP for all the RPs + result = add_rp_interfaces_and_pim_config(tgen, topo, "lo", rp, rp_mapping) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add kernal routes to sender and receiver + for group, rp_list in rp_mapping.items(): + mask = group.split("/")[1] + if int(mask) == 32: + group = group.split("/")[0] + + # Add kernal routes for sender + s_interface = topo["routers"][sender]["links"][fhr]["interface"] + result = addKernelRoute(tgen, sender, s_interface, group) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add kernal routes for receiver + r_interface = topo["routers"][receiver]["links"][lhr]["interface"] + result = addKernelRoute(tgen, receiver, r_interface, group) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Add static routes 
for RPs in FHR and LHR + next_hop_fhr = topo["routers"][rp]["links"][fhr]["ipv4"].split("/")[0] + next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0] + input_dict = { + fhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_fhr}]}, + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + input_dict = { + lhr: {"static_routes": [{"network": rp_list, "next_hop": next_hop_lhr}]}, + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + return True + + +##################################################### +# +# Testcases +# +##################################################### + + +def test_starg_mroute_p0(request): + """ + 1. Verify (*,G) mroute detail on FRR router after BSM rp installed + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "226.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check igmp groups + step("Verify IGMP groups in LHR l1") + dut = "l1" + intf = "l1-r1-eth1" + result = verify_igmp_groups(tgen, dut, intf, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + group = "226.1.1.1/32" + src_addr = "*" + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR in l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify RP in LHR in l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify join state and join timer + step("Verify join state and join timer in l1") + iif = "l1-i1-eth0" + result = verify_join_state_and_timer(tgen, dut, iif, src_addr, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify upstream IIF 
interface + step("Verify upstream IIF interface in l1") + result = verify_upstream_iif(tgen, dut, iif, src_addr, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify IIF/OIL in pim state + oil = "l1-r1-eth1" + result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify ip mroute + step("Verify ip mroute in l1") + src_addr = "*" + result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Remove the group rp mapping and send bsm + step("Remove the grp-rp mapping by sending bsm with hold time 0 for grp-rp") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP unreachable + step("Check RP unreachability in l1") + iif = "Unknown" + result = verify_upstream_iif( + tgen, dut, iif, src_addr, GROUP_ADDRESS, joinState="NotJoined" + ) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify that it is not installed + step("Verify that iif is not installed in l1") + iif = "<iif?>" + result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS, installed_fl=0) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify mroute not installed + step("Verify mroute not installed in l1") + result = verify_ip_mroutes( + tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, wait=20, expected=False + ) + assert result is not True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSM again to configure rp + step("Add back RP by sending BSM from b1") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify that (*,G) installed in mroute again + iif = "l1-i1-eth0" + result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_overlapping_group_p0(request): + """ + Verify group to RP updated correctly on FRR router, when BSR advertising + the overlapping group address + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet4") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 is chosen as bsr in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + group1 = "225.1.1.1/32" + # Find the elected rp from bsrp-info fro group 225.1.1.1/32 + step("Find the elected rp from bsrp-info in LHR for 225.1.1.1/32") + rp1 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group1) + assert rp1 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + group2 = "225.1.1.0/24" + # Find the elected rp from bsrp-info fro group 225.1.1.0/24 + step("Find the elected rp from bsrp-info in LHR for 225.1.1.0/24") + rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group2) + assert rp2 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + iif = "l1-i1-eth0" + # Verify upstream rpf for 225.1.1.1 is chosen as rp1 + step("Verify upstream rpf for 225.1.1.1 is chosen as rp1 in l1") + result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSR packet from b1 with rp for 225.1.1.1/32 removed + step("Send BSR packet from b1 with rp for 225.1.1.1/32 removed") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet5") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify upstream rpf for 225.1.1.1 is chosen as rp1 + step("Verify upstream rpf for 225.1.1.1 is chosen as rp2 in l1") + result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp2) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify IIF/OIL in pim state + step("Verify iif is installed after rp change in l1") + oil = "l1-r1-eth1" + result = verify_pim_state(tgen, dut, iif, oil, GROUP_ADDRESS) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_RP_priority_p0(request): + """ + Verify group to RP info is updated correctly, when BSR advertising the + same RP with different priority + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + 
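+    # Common per-test cleanup: stop any running iperf senders, flush multicast
+    # routes, restore the baseline router configuration and clear the PIM
+    # interface traffic counters so this test starts from a known state.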
kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 is chosen as bsr in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + group = "225.1.1.0/24" + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp1 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp1 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify RP in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp1[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSR packet from b1 after deleting high prio rp for 225.1.1.0/24 + step("Send BSM from b1 to FHR deleting high prio rp for 225.1.1.0/24") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet6") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp2 is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + logger.info("RP old: {} RP2 new: {} ".format(rp1[group], rp2[group])) + + # Verify is the rp is different now + assert rp1[group] != rp2[group], "Testcase {} :Failed \n Error {}".format( + tc_name, result + ) + + rp_add1 = rp1[group] + rp_add2 = rp2[group] + + # Verify if that rp is installed + step("Verify new RP in LHR installed") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add2) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSR packet from b1 after putting back high prio rp for 225.1.1.0/24 + step("Send BSM from b1 to FHR put back old high prio rp for 225.1.1.0/24") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR") + rp2 = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp2 is not {}, "Testcase {} :Failed \n Error : RP not Found".format(tc_name) + + # Verify is the rp is different now + step("Verify now old RP is 
elected again") + assert ( + rp_add1 == rp2[group] + ), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format( + tc_name, rp_add1, + ) + + # Verify if that rp is installed + step("Verify new RP in LHR installed") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp_add1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_BSR_election_p0(request): + """ + Verify group to RP mapping in FRR node when 2 BSR are present in the network + and both are having same BSR priority + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + step("Send BSR packet from b1 to FHR") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet3") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip1 = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[ + 0 + ] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 is chosen as bsr in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip1) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + dut = "l1" + group = "225.1.1.0/24" + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR in l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip1, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify RP in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Send BSR packet from b2 with same priority + step("Send BSR packet from b2 to FHR with same priority") + result = scapy_send_bsr_raw_packet(tgen, topo, "b2", "f1", "packet1") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip2 = topo["routers"]["b2"]["bsm"]["bsr_packets"]["packet2"]["bsr"].split("/")[ + 0 + ] + time.sleep(1) + + logger.info("BSR b1:" + bsr_ip1 + " BSR b2:" + bsr_ip2) + # Verify bsr state in FHR + step("Verify if b2 is not chosen as bsr in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip2, expected=False) + assert result is not True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # 
Verify if b1 is still chosen as bsr + step("Verify if b1 is still chosen as bsr in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip1) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify if that rp is installed + step("Verify that same RP in istalled in LHR l1") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_RP_hash_p0(request): + """ + Verify RP is selected based on hash function, when BSR advertising the group + to RP mapping with same priority + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + # Use scapy to send pre-defined packet from senser to receiver + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet7") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + dut = "l1" + + # Verify bsr state in FHR + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + group = "225.1.1.0/24" + + # Find the elected rp from bsrp-info + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify if RP with highest hash value is chosen + step("Verify if RP(2.2.2.2) with highest hash value is chosen in l1") + if rp[group] == "2.2.2.2": + result = True + else: + result = "rp expected: 2.2.2.2 got:" + rp[group] + + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Check RP detail in LHR + step("Verify RP in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_BSM_fragmentation_p1(request): + """ + Verify fragmentation of bootstrap message + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + 
f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + reset_config_on_routers(tgen) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = pre_config_to_bsm( + tgen, topo, tc_name, "b2", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + GROUP_ADDRESS = "225.1.1.1" + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0] + + step("Send BSM and verify if all routers have same bsrp before fragment") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet1") + # Verify bsr state in FHR + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + fhr_node = tgen.routers()["f1"] + inter_node = tgen.routers()["i1"] + lhr_node = tgen.routers()["l1"] + + # Verify if bsrp list is same across f1, i1 and l1 + step("Verify if bsrp list is same across f1, i1 and l1") + bsrp_f1 = fhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True) + logger.info("show_ip_pim_bsrp_info_json f1: \n %s", bsrp_f1) + bsrp_i1 = inter_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True) + logger.info("show_ip_pim_bsrp_info_json i1: \n %s", bsrp_i1) + bsrp_l1 = lhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True) + logger.info("show_ip_pim_bsrp_info_json l1: \n %s", bsrp_l1) + + if bsrp_f1 == bsrp_l1: + result = True + else: + result = "bsrp info in f1 is not same in l1" + + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # set mtu of fhr(f1) to i1 interface to 100 so that bsm fragments + step("set mtu of fhr(f1) to i1 interface to 100 so that bsm fragments") + fhr_node.run("ifconfig f1-i1-eth2 mtu 100") + inter_node.run("ifconfig i1-f1-eth0 mtu 100") + + # Use scapy to send pre-defined packet from senser to receiver + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + # Verify bsr state in FHR + step("Verify if b1 chosen as BSR") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + # Verify if bsrp list is same across f1, i1 and l1 + step("Verify if bsrp list is same across f1, i1 and l1 after fragmentation") + bsrp_f1 = fhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True) + logger.info("show_ip_pim_bsrp_info_json f1: \n %s", bsrp_f1) + bsrp_i1 = inter_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True) + logger.info("show_ip_pim_bsrp_info_json i1: \n %s", bsrp_i1) + bsrp_l1 = lhr_node.vtysh_cmd("show ip pim bsrp-info json", isjson=True) + logger.info("show_ip_pim_bsrp_info_json l1: \n %s", bsrp_l1) + + if bsrp_f1 == bsrp_l1: + result = True + else: + result = "bsrp info in f1 is not same in l1" 
+ + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +def test_RP_with_all_ip_octet_p1(request): + """ + Verify when candidate RP advertised with 32 mask length + and contain all the contacts + + Topology used: + b1_____ + | + | + s1-----f1-----i1-----l1----r1 + | + ______| + b2 + + b1 - BSR 1 + b2 - BSR 2 + s1 - Source + f1 - FHR + i1 - Intermediate Router (also RP) + r1 - Receiver + + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step("pre-configure BSM packet") + result = pre_config_to_bsm( + tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1" + ) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Send the IGMP group (225.100.100.100) from receiver connected to FRR") + GROUP_ADDRESS = "225.200.100.100" + + # Use scapy to send pre-defined packet from senser to receiver + step("Configure cisco-1 as BSR1") + result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet8") + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet8"]["bsr"].split("/")[0] + time.sleep(1) + + result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + dut = "l1" + step( + "Groups are shown with candidate RP with correct mask length 'show ip pim bsrp-info'" + ) + step("Verify if b1 chosen as BSR in f1") + result = verify_pim_bsr(tgen, topo, "f1", bsr_ip) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + group = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet9"]["group"] + step("Find the elected rp from bsrp-info in LHR l1") + rp = find_rp_from_bsrp_info(tgen, dut, bsr_ip, group) + assert rp is not {}, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP in LHR") + result = verify_pim_grp_rp_source(tgen, topo, dut, group, "BSR", rp[group]) + assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) + + step("clear BSM database before moving to next case") + clear_bsrp_data(tgen, topo) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json b/tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json new file mode 100644 index 0000000000..71454c2ab2 --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo1/multicast_pim_sm_topo1.json @@ -0,0 +1,140 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "l1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "i1": {"ipv4": "auto", "pim": "enable"}, + "i6": {"ipv4": "auto", "pim": "enable"}, + "i7": {"ipv4": "auto", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"} + }, + "igmp": { + "interfaces": { + "l1-i1-eth1" :{ + "igmp":{ + 
"version": "2" + } + } + } + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.7.0/24", "10.0.6.0/24", "10.0.9.0/24"], + "next_hop": "10.0.12.2" + }, + { + "network": ["1.0.1.2/32", "1.0.3.5/32", "10.0.1.0/24", "1.0.2.2/32", "10.0.4.0/24", "10.0.3.0/24"], + "next_hop": "10.0.2.1" + }] + }, + "r2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i3": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["10.0.5.0/24", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24"], + "next_hop": "10.0.7.1" + }, + { + "network": ["1.0.1.2/32", "10.0.8.0/24", "10.0.10.0/24", "10.0.4.0/24", "10.0.0.0/24", "10.0.11.0/24", "10.0.1.0/24", "10.0.2.0/24"], + "next_hop": "10.0.12.1" + }] + }, + "f1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "i2": {"ipv4": "auto", "pim": "enable"}, + "i8": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.12.0/24", "10.0.11.0/24"], + "next_hop": "10.0.7.2" + }, + { + "network": ["1.0.2.2/32", "10.0.1.0/24", "10.0.0.0/24", "10.0.4.0/24", "1.0.1.2/32"], + "next_hop": "10.0.3.1" + }] + }, + "c1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "i4": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.6.0/24", "10.0.3.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.12.0/24", "10.0.10.0/24", "10.0.11.0/24"], + "next_hop": "10.0.2.2" + }, + { + "network": ["10.0.5.0/24", "10.0.7.0/24", "1.0.3.5/32", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24"], + "next_hop": "10.0.0.2" + }] + }, + "c2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i5": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.7.0/24", "10.0.10.0/24", "10.0.11.0/24"], + "next_hop": "10.0.3.2" + }, + { + "network": ["1.0.1.2/32", "10.0.4.0/24", "10.0.2.0/24"], + "next_hop": "10.0.0.1" + }] + }, + "i1": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i2": { + "links": { + "f1": {"ipv4": "auto"} + } + }, + "i3": { + "links": { + "r2": {"ipv4": "auto"} + } + }, + "i4": { + "links": { + "c1": {"ipv4": "auto"} + } + }, + "i5": { + "links": { + "c2": {"ipv4": "auto"} + } + }, + "i6": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i7": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i8": { + "links": { + "f1": {"ipv4": "auto"} + } + } + } +} diff --git a/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py b/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py new file mode 100755 index 0000000000..ac675c5c2f --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo1/test_multicast_pim_sm_topo1.py @@ -0,0 +1,1698 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. 
+# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: +1. TC_1_1: Verify Multicast data traffic with static RP, (*,g) and + (s,g) OIL updated correctly +2. TC_1_2: Verify Multicast data traffic with static RP, (*,g) and + (s,g) OIL updated correctly +3. TC_4: Verify removing the RP should not impact the multicast + data traffic +4. TC_5: Verify (*,G) and (S,G) entry populated again after clear the + PIM nbr and mroute from FRR node +5. TC_9: Verify (s,g) timeout from FHR and RP when same receive + exist in LHR , FHR and RP +6. TC_19: Verify mroute detail when same receiver joining 5 + different sources +7. TC_16: Verify (*,G) and (S,G) populated correctly + when FRR is the transit router +8. TC_23: Verify (S,G) should not create if RP is not reachable +9. TC_24: Verify modification of IGMP query timer should get update + accordingly +10. TC_25: Verify modification of IGMP max query response timer + should get update accordingly +""" + +import os +import sys +import json +import time +import datetime +from time import sleep +import pytest + +pytestmark = pytest.mark.pimd + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. 
+ +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + step, + iperfSendIGMPJoin, + addKernelRoute, + reset_config_on_routers, + iperfSendTraffic, + kill_iperf, + shutdown_bringup_interface, + kill_router_daemons, + start_router, + start_router_daemons, + stop_router, + required_linux_kernel_version, + topo_daemons, +) +from lib.pim import ( + create_pim_config, + create_igmp_config, + verify_igmp_groups, + verify_ip_mroutes, + verify_pim_interface_traffic, + verify_upstream_iif, + verify_pim_neighbors, + verify_pim_state, + verify_ip_pim_join, + clear_ip_mroute, + clear_ip_pim_interface_traffic, + verify_igmp_config, + clear_ip_mroute_verify +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/multicast_pim_sm_topo1.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +TOPOLOGY = """ + + + i4-----c1-------------c2---i5 + | | + | | + i1-----l1------r2-----f1---i2 + | | | | + | | | | + i7 i6 i3 i8 + + Description: + i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP + join and traffic + l1 - LHR + f1 - FHR + r2 - FRR router + c1 - FRR router + c2 - FRR router +""" + +# Global variables +GROUP_RANGE = "225.0.0.0/8" +IGMP_JOIN = "225.1.1.1" +GROUP_RANGE_1 = [ + "225.1.1.1/32", + "225.1.1.2/32", + "225.1.1.3/32", + "225.1.1.4/32", + "225.1.1.5/32", +] +IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"] +GROUP_RANGE_2 = [ + "226.1.1.1/32", + "226.1.1.2/32", + "226.1.1.3/32", + "226.1.1.4/32", + "226.1.1.5/32", +] +IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"] + +GROUP_RANGE_3 = [ + "227.1.1.1/32", + "227.1.1.2/32", + "227.1.1.3/32", + "227.1.1.4/32", + "227.1.1.5/32", +] +IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"] + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("4.19") + if result is not True: + pytest.skip("Kernel requirements are not met") + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + logger.info("Master Topology: \n {}".format(TOPOLOGY)) + + logger.info("Running setup_module to create topology") + + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. 
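    # Editorial sketch (hypothetical, not in the original suite): topo_daemons()
    # decides which FRR daemons each router starts for this suite, and logging
    # the computed list makes "daemon not running" failures easier to read in
    # the per-run logfile.
    logger.info("daemons selected for this suite: %s", daemons)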
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Testcases +# +##################################################### + + +def config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False +): + """ + API to do pre-configuration to send IGMP join and multicast + traffic + + parameters: + ----------- + * `tgen`: topogen object + * `topo`: input json data + * `tc_name`: caller test case name + * `iperf`: router running iperf + * `iperf_intf`: interface name router running iperf + * `GROUP_RANGE`: group range + * `join`: IGMP join, default False + * `traffic`: multicast traffic, default False + """ + + if join: + # Add route to kernal + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + if traffic: + # Add route to kernal + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + router_list = tgen.routers() + for router in router_list.keys(): + if router == iperf: + continue + + rnode = router_list[router] + rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter") + + return True + + +def verify_state_incremented(state_before, state_after): + """ + API to compare interface traffic state incrementing + + Parameters + ---------- + * `state_before` : State dictionary for any particular instance + * `state_after` : State dictionary for any particular instance + """ + + for router, state_data in state_before.items(): + for state, value in state_data.items(): + if state_before[router][state] >= state_after[router][state]: + errormsg = ( + "[DUT: %s]: state %s value has not" + " incremented, Initial value: %s, " + "Current value: %s [FAILED!!]" + % ( + router, + state, + state_before[router][state], + state_after[router][state], + ) + ) + return errormsg + + logger.info( + "[DUT: %s]: State %s value is " + "incremented, Initial value: %s, Current value: %s" + " [PASSED!!]", + router, + state, + state_before[router][state], + state_after[router][state], + ) + + return True + + +def test_multicast_data_traffic_static_RP_send_join_then_traffic_p0(request): + """ + TC_1_1: Verify Multicast data traffic with static RP, (*,g) and + (s,g) OIL updated correctly + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
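    # Editorial sketch (hypothetical, not in the original testcase): r2's
    # loopback is the static RP for 225.0.0.0/8 here, and resolving it up front
    # makes later create_pim_config()/verify_ip_mroutes() failures easier to
    # correlate with the JSON topology.
    rp_addr_r2 = topo["routers"]["r2"]["links"]["lo"]["ipv4"].split("/")[0]
    logger.info("static RP used by this testcase for %s: %s", GROUP_RANGE, rp_addr_r2)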
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)") + intf_i1_l1 = topo["routers"]["i1"]["links"]["l1"]["interface"] + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", intf_i1_l1, GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("joinRx value before join sent") + intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"] + state_dict = {"r2": {intf_r2_l1: ["joinRx"]}} + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase {} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send the IGMP join first and then start the traffic") + + step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1 receiver") + intf_i2_f1 = topo["routers"]["i2"]["links"]["f1"]["interface"] + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", intf_i2_f1, GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip mroute' showing correct RPF and OIF" + " interface for (*,G) and (S,G) entries on all the nodes" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"] + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"] + intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"] + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": intf_l1_r2, "oil": intf_l1_i1}, + {"dut": "l1", "src_address": source, "iif": intf_l1_r2, "oil": intf_l1_i1}, + {"dut": "r2", "src_address": "*", "iif": "lo", "oil": intf_r2_l1}, + {"dut": "r2", "src_address": source, "iif": intf_r2_f1, "oil": intf_r2_l1}, + {"dut": "f1", "src_address": source, "iif": intf_f1_i2, "oil": intf_f1_r2}, + ] + + for data in input_dict: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes" + ) + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("joinRx value after join sent") + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase {} : 
Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step( + "l1 sent PIM (*,G) join to r2 verify using" + "'show ip pim interface traffic' on RP connected interface" + ) + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("l1 sent PIM (S,G) join to f1 , verify using 'show ip pim join'") + dut = "f1" + interface = intf_f1_r2 + result = verify_ip_pim_join(tgen, topo, dut, interface, IGMP_JOIN) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request): + """ + TC_1_2: Verify Multicast data traffic with static RP, (*,g) and + (s,g) OIL updated correctly + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Start traffic first and then send the IGMP join") + + step("Send multicast traffic from FRR3 to 225.1.1.1 receiver") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("joinRx value before join sent") + state_dict = {"r2": {"r2-l1-eth2": ["joinRx"]}} + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase {} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip mroute' showing correct RPF and OIF" + " interface for (*,G) and (S,G) entries on all the nodes" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "r2", "src_address": "*", "iif": "lo", "oil": "r2-l1-eth2"}, + {"dut": "r2", "src_address": source, "iif": "r2-f1-eth0", "oil": "r2-l1-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], 
data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes" + ) + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("joinRx value after join sent") + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase {} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step( + "l1 sent PIM (*,G) join to r2 verify using" + "'show ip pim interface traffic' on RP connected interface" + ) + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("l1 sent PIM (S,G) join to f1 , verify using 'show ip pim join'") + dut = "f1" + interface = "f1-r2-eth3" + result = verify_ip_pim_join(tgen, topo, dut, interface, IGMP_JOIN) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_clear_pim_neighbors_and_mroute_p0(request): + """ + TC_5: Verify (*,G) and (S,G) entry populated again after clear the + PIM nbr and mroute from FRR node + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP on c1 for group (225.1.1.1-5)") + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_1, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join 225.1.1.1 " + "to 225.1.1.5 from different interfaces" + ) + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3, wait for SPT switchover") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Clear the mroute on l1, wait for 5 sec") + result = clear_ip_mroute_verify(tgen, "l1") + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "After clear ip mroute (*,g) entries are re-populated again" + " with same OIL and IIF, verify using 'show ip mroute' and " + " 'show ip pim upstream' " + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": 
"l1-i1-eth1"} + ] + + for data in input_dict: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes" + ) + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN + ) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_mroute_when_same_receiver_in_FHR_LHR_and_RP_p0(request): + """ + TC_9: Verify (s,g) timeout from FHR and RP when same receive + exist in LHR , FHR and RP + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1) to R1") + + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}, + "r2": {"igmp": {"interfaces": {"r2-i3-eth1": {"igmp": {"version": "2"}}}}}, + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0", "i3": "i3-r2-eth0"} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from R3 to 225.1.1.1 receiver") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("IGMP is received on FRR1 , FRR2 , FRR3, using " "'show ip igmp groups'") + igmp_groups = {"l1": "l1-i1-eth1", "r2": "r2-i3-eth1", "f1": "f1-i8-eth2"} + for dut, interface in igmp_groups.items(): + result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("(*,G) present on all the node with correct OIL" " using 'show ip mroute'") + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "r2", "src_address": "*", "iif": "lo", "oil": 
"r2-i3-eth1"}, + {"dut": "r2", "src_address": source, "iif": "r2-f1-eth0", "oil": "r2-i3-eth1"}, + {"dut": "f1", "src_address": "*", "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_mroute_when_same_receiver_joining_5_diff_sources_p0(request): + """ + TC_19: Verify mroute detail when same receiver joining 5 + different sources + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) and (232.1.1.1-5)" " in c1") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": _GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Configure IGMP interface on FRR1 and FRR3 and send IGMP join" + "for group (226.1.1.1-5, 232.1.1.1-5)" + ) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i8", "i8-f1-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i8", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Send multicast traffic from all the sources to all the " + "receivers (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_traffic = { + "i6": "i6-l1-eth0", + "i7": "i7-l1-eth0", + "i3": "i3-r2-eth0", + "i4": "i4-c1-eth0", + "i5": "i5-c2-eth0", + } + + for src, src_intf in input_traffic.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify (*,G) are created on FRR1 and FRR3 node " " 'show ip mroute' ") + + source_i7 = topo["routers"]["i7"]["links"]["l1"]["ipv4"].split("/")[0] + source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + source_i5 = 
topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0] + source_i3 = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + { + "dut": "l1", + "src_address": source_i5, + "iif": "l1-c1-eth0", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i3, + "iif": "l1-r2-eth4", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i6, + "iif": "l1-i6-eth2", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i7, + "iif": "l1-i7-eth3", + "oil": "l1-i1-eth1", + }, + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + { + "dut": "f1", + "src_address": source_i5, + "iif": "f1-c2-eth0", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i3, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i6, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i7, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop the source one by one on FRR1") + input_intf = {"i6": "i6-l1-eth0", "i7": "i7-l1-eth0"} + for dut, intf in input_intf.items(): + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "After removing the source verify traffic is stopped" + " immediately and (S,G) got timeout in sometime" + ) + + logger.info("After shut, waiting for SG timeout") + + input_dict = [ + { + "dut": "l1", + "src_address": source_i6, + "iif": "l1-i6-eth2", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i7, + "iif": "l1-i7-eth3", + "oil": "l1-i1-eth1", + }, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + step( + "Source which is stopped got removed , other source" + " after still present verify using 'show ip mroute' " + ) + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + { + "dut": "l1", + "src_address": source_i5, + "iif": "l1-c1-eth0", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i3, + "iif": "l1-r2-eth4", + "oil": "l1-i1-eth1", + }, + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + { + "dut": "f1", + "src_address": source_i5, + "iif": "f1-c2-eth0", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i3, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Start all the source again for all the receivers") + input_intf = {"i6": "i6-l1-eth0", "i7": "i7-l1-eth0"} + for dut, intf in input_intf.items(): + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "After starting source all the mroute entries got populated, " + "no duplicate entries present in mroute verify 'show ip 
mroute'" + ) + + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + { + "dut": "l1", + "src_address": source_i5, + "iif": "l1-c1-eth0", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i3, + "iif": "l1-r2-eth4", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i6, + "iif": "l1-i6-eth2", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i7, + "iif": "l1-i7-eth3", + "oil": "l1-i1-eth1", + }, + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + { + "dut": "f1", + "src_address": source_i5, + "iif": "f1-c2-eth0", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i3, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i6, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i7, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_mroute_when_frr_is_transit_router_p2(request): + """ + TC_16: Verify (*,G) and (S,G) populated correctly + when FRR is the transit router + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) in c2") + input_dict = { + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_1, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-5) to FRR1") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1-5 receivers") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + # Stop r2 router to make r2 router disabled from topology + input_intf = {"l1": "l1-r2-eth4", "f1": "f1-r2-eth3"} + for dut, intf in input_intf.items(): + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "FRR4 has (S,G) and (*,G) ,created where incoming interface" + " toward FRR3 and OIL toward R2, verify using 'show ip mroute'" + " 'show ip pim state' " + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "c2", "src_address": "*", "iif": "lo", "oil": 
"c2-c1-eth0"}, + {"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "c2-c1-eth0"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop multicast traffic from FRR3") + dut = "i2" + intf = "i2-f1-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + logger.info("Waiting for 20 sec to get traffic to be stopped..") + sleep(20) + + step("top IGMP receiver from FRR1") + dut = "i1" + intf = "i1-l1-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + logger.info("Waiting for 20 sec to get mroutes to be flused out..") + sleep(20) + + step( + "After stopping receiver (*,G) also got timeout from transit" + " router 'show ip mroute'" + ) + + result = verify_ip_mroutes( + tgen, "c1", "*", IGMP_JOIN, "c1-c2-eth1", "c1-l1-eth0", expected=False + ) + assert result is not True, "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + write_test_footer(tc_name) + + +def test_verify_mroute_when_RP_unreachable_p1(request): + """ + TC_23: Verify (S,G) should not create if RP is not reachable + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure RP on FRR2 (loopback interface) for " "the group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)") + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1 receiver") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure one IGMP interface on FRR3 node and send IGMP" " join (225.1.1.1)") + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i8", "i8-f1-eth0", GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i8", IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: 
Failed Error: {}".format(tc_name, result) + + # Verify mroutes are present in FRR3(f1) + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the RP connected interface from f1 ( r2 to f1) link") + dut = "f1" + intf = "f1-r2-eth3" + shutdown_bringup_interface(tgen, dut, intf, False) + + logger.info("Waiting for 20 sec to get mroutes to be flushed out..") + sleep(20) + + step("Clear the mroute on f1") + clear_ip_mroute(tgen, "f1") + + step( + "After Shut the RP interface and clear the mroute verify all " + "(*,G) and (S,G) got timeout from FRR3 node , verify using " + " 'show ip mroute' " + ) + + result = verify_ip_mroutes( + tgen, "f1", "*", IGMP_JOIN, "f1-r2-eth3", "f1-i8-eth2", expected=False + ) + assert result is not True, "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + step("IGMP groups are present verify using 'show ip igmp group'") + dut = "l1" + interface = "l1-i1-eth1" + result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_modify_igmp_query_timer_p0(request): + """ + TC_24: + Verify modification of IGMP query timer should get update + accordingly + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
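    # Editorial sketch (hypothetical helper, not part of the original testcase):
    # the three query-interval blocks below differ only in the timer value, so
    # the config/verify pair can be expressed once and reused, e.g.
    # _set_and_verify_query_interval(100).  The dict structure mirrors the
    # input_dict_1/2/3 used further down.
    def _set_and_verify_query_interval(interval):
        cfg = {
            "l1": {
                "igmp": {
                    "interfaces": {
                        "l1-i1-eth1": {
                            "igmp": {"query": {"query-interval": interval}}
                        }
                    }
                }
            }
        }
        assert create_igmp_config(tgen, topo, cfg) is True
        assert verify_igmp_config(tgen, cfg) is True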
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1 receiver") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip mroute' showing correct RPF and OIF" + " interface for (*,G) and (S,G) entries on all the nodes" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_4 = [ + {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, + ] + for data in input_dict_4: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes" + ) + for data in input_dict_4: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Modify IGMP query interval default to other timer on FRR1" "3 times") + input_dict_1 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": {"igmp": {"query": {"query-interval": 100}}} + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": {"igmp": {"query": {"query-interval": 200}}} + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": {"igmp": {"query": {"query-interval": 300}}} + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is 
True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_modify_igmp_max_query_response_timer_p0(request): + """ + TC_25: + Verify modification of IGMP max query response timer + should get update accordingly + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure IGMP query response time to 10 sec on FRR1") + input_dict_1 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "query": {"query-max-response-time": 10}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1 receiver") + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip mroute' showing correct RPF and OIF" + " interface for (*,G) and (S,G) entries on all the nodes" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_5 = [ + {"dut": "l1", "src_address": "*", "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, + ] + for data in input_dict_5: + result = verify_ip_mroutes( + tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify 'show ip pim upstream' showing correct OIL and IIF" " on all the nodes" + ) + for data in input_dict_5: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Delete the PIM 
and IGMP on FRR1") + input_dict_1 = {"l1": {"pim": {"disable": ["l1-i1-eth1"]}}} + result = create_pim_config(tgen, topo, input_dict_1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "delete": True, + "query": {"query-max-response-time": 10, "delete": True}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure PIM on FRR") + result = create_pim_config(tgen, topo["routers"]) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure max query response timer 100sec on FRR1") + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "query": {"query-max-response-time": 100}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Remove and add max query response timer cli with different" + "timer 5 times on FRR1 Enable IGMP and IGMP version 2 on FRR1" + " on FRR1" + ) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "query": {"query-max-response-time": 110}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "query": {"query-max-response-time": 120}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "query": {"query-max-response-time": 140}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "version": "2", + "query": {"query-max-response-time": 150}, + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP and IGMP version 2 on FRR1 on FRR1") + + input_dict_4 = { + "l1": {"igmp": {"interfaces": {"l1-i1-eth1": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict_4) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if 
tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json b/tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json new file mode 100644 index 0000000000..71454c2ab2 --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo2/multicast_pim_sm_topo2.json @@ -0,0 +1,140 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "l1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "i1": {"ipv4": "auto", "pim": "enable"}, + "i6": {"ipv4": "auto", "pim": "enable"}, + "i7": {"ipv4": "auto", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"} + }, + "igmp": { + "interfaces": { + "l1-i1-eth1" :{ + "igmp":{ + "version": "2" + } + } + } + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.7.0/24", "10.0.6.0/24", "10.0.9.0/24"], + "next_hop": "10.0.12.2" + }, + { + "network": ["1.0.1.2/32", "1.0.3.5/32", "10.0.1.0/24", "1.0.2.2/32", "10.0.4.0/24", "10.0.3.0/24"], + "next_hop": "10.0.2.1" + }] + }, + "r2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i3": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["10.0.5.0/24", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24"], + "next_hop": "10.0.7.1" + }, + { + "network": ["1.0.1.2/32", "10.0.8.0/24", "10.0.10.0/24", "10.0.4.0/24", "10.0.0.0/24", "10.0.11.0/24", "10.0.1.0/24", "10.0.2.0/24"], + "next_hop": "10.0.12.1" + }] + }, + "f1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "i2": {"ipv4": "auto", "pim": "enable"}, + "i8": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.12.0/24", "10.0.11.0/24"], + "next_hop": "10.0.7.2" + }, + { + "network": ["1.0.2.2/32", "10.0.1.0/24", "10.0.0.0/24", "10.0.4.0/24", "1.0.1.2/32"], + "next_hop": "10.0.3.1" + }] + }, + "c1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "i4": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.6.0/24", "10.0.3.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.12.0/24", "10.0.10.0/24", "10.0.11.0/24"], + "next_hop": "10.0.2.2" + }, + { + "network": ["10.0.5.0/24", "10.0.7.0/24", "1.0.3.5/32", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24"], + "next_hop": "10.0.0.2" + }] + }, + "c2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i5": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.7.0/24", "10.0.10.0/24", "10.0.11.0/24"], + "next_hop": "10.0.3.2" + }, + { + "network": ["1.0.1.2/32", "10.0.4.0/24", "10.0.2.0/24"], + "next_hop": "10.0.0.1" + }] + 
}, + "i1": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i2": { + "links": { + "f1": {"ipv4": "auto"} + } + }, + "i3": { + "links": { + "r2": {"ipv4": "auto"} + } + }, + "i4": { + "links": { + "c1": {"ipv4": "auto"} + } + }, + "i5": { + "links": { + "c2": {"ipv4": "auto"} + } + }, + "i6": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i7": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i8": { + "links": { + "f1": {"ipv4": "auto"} + } + } + } +} diff --git a/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py b/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py new file mode 100755 index 0000000000..a9d914da57 --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo2/test_multicast_pim_sm_topo2.py @@ -0,0 +1,1947 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: +1. TC_17: Verify (*,G) and (S,G) present and multicast traffic resume, + after restart of PIMd daemon +2. TC_18: Verify (*,G) and (S,G) present and multicast traffic resume after + FRR service stop and start +3. TC_10: Verify SPT switchover working when RPT and SPT path is + different +4. TC_15: Verify (S,G) and (*,G) mroute after shut / no shut of upstream + interfaces +5. TC_7: Verify mroute detail when receiver is present + outside of FRR +6. TC_8: Verify mroute when FRR is acting as FHR and LHR +7. TC_20: Verify mroute detail when 5 different receiver joining + same source +8. TC_22: Verify OIL and IIF detail updated in (S,G) mroute after shut + and no shut of the source interface +""" + +import os +import sys +import json +import time +import datetime +from time import sleep +import pytest + +pytestmark = pytest.mark.pimd + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. 
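# Editorial sketch (hypothetical, not in the original file): this suite ships
# multicast_pim_sm_topo2.json, which is byte-identical to the topo1 JSON (both
# diff headers show blob 71454c2ab2); a cheap existence check here turns a
# misplaced file into an immediate, readable failure instead of a later
# json.load() assert.
_topo_json_path = os.path.join(CWD, "multicast_pim_sm_topo2.json")
assert os.path.exists(_topo_json_path), "missing topology file: %s" % _topo_json_path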
+ +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + step, + iperfSendIGMPJoin, + addKernelRoute, + reset_config_on_routers, + iperfSendTraffic, + kill_iperf, + shutdown_bringup_interface, + kill_router_daemons, + start_router, + start_router_daemons, + stop_router, + required_linux_kernel_version, + topo_daemons, +) +from lib.pim import ( + create_pim_config, + create_igmp_config, + verify_igmp_groups, + verify_ip_mroutes, + verify_pim_interface_traffic, + verify_upstream_iif, + verify_pim_neighbors, + verify_pim_state, + verify_ip_pim_join, + clear_ip_mroute, + clear_ip_pim_interface_traffic, + verify_igmp_config, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/multicast_pim_sm_topo2.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +TOPOLOGY = """ + + + i4-----c1-------------c2---i5 + | | + | | + i1-----l1------r2-----f1---i2 + | | | | + | | | | + i7 i6 i3 i8 + + Description: + i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP + join and traffic + l1 - LHR + f1 - FHR + r2 - FRR router + c1 - FRR router + c2 - FRR router +""" + +# Global variables +GROUP_RANGE = "225.0.0.0/8" +IGMP_JOIN = "225.1.1.1" +GROUP_RANGE_1 = [ + "225.1.1.1/32", + "225.1.1.2/32", + "225.1.1.3/32", + "225.1.1.4/32", + "225.1.1.5/32", +] +IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"] +GROUP_RANGE_2 = [ + "226.1.1.1/32", + "226.1.1.2/32", + "226.1.1.3/32", + "226.1.1.4/32", + "226.1.1.5/32", +] +IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"] + +GROUP_RANGE_3 = [ + "227.1.1.1/32", + "227.1.1.2/32", + "227.1.1.3/32", + "227.1.1.4/32", + "227.1.1.5/32", +] +IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"] + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("4.19") + if result is not True: + pytest.skip("Kernel requirements are not met") + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + logger.info("Master Topology: \n {}".format(TOPOLOGY)) + + logger.info("Running setup_module to create topology") + + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop topology and remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Testcases +# +##################################################### + + +def config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False +): + """ + API to do pre-configuration to send IGMP join and multicast + traffic + + parameters: + ----------- + * `tgen`: topogen object + * `topo`: input json data + * `tc_name`: caller test case name + * `iperf`: router running iperf + * `iperf_intf`: interface name on the router running iperf + * `GROUP_RANGE`: group range + * `join`: IGMP join, default False + * `traffic`: multicast traffic, default False + """ + + if join: + # Add route to kernel + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + if traffic: + # Add route to kernel + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + router_list = tgen.routers() + for router in router_list.keys(): + if router == iperf: + continue + + rnode = router_list[router] + rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter") + + return True + + +def verify_state_incremented(state_before, state_after): + """ + API to verify that interface traffic counters have incremented + + Parameters + ---------- + * `state_before` : State dictionary for any particular instance + * `state_after` : State dictionary for any particular instance + """ + + for router, state_data in state_before.items(): + for state, value in state_data.items(): + if state_before[router][state] >= state_after[router][state]: + errormsg = ( + "[DUT: %s]: state %s value has not" + " incremented, Initial value: %s, " + "Current value: %s [FAILED!!]" + % ( + router, + state, + state_before[router][state], + state_after[router][state], + ) + ) + return errormsg + + logger.info( + "[DUT: %s]: State %s value is " + "incremented, Initial value: %s, Current value: %s" + " [PASSED!!]", + router, + state, + state_before[router][state], + state_after[router][state], + ) + + return True + + +def test_verify_mroute_and_traffic_when_pimd_restarted_p2(request): + """ + TC_17: Verify (*,G) and (S,G) present and multicast traffic resume, + after restart of PIMd daemon + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) in c1") + step("Configure static RP for (232.1.1.1-5) in c2") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_2, + } + ] + } + }, + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_3, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join " + "(226.1.1.1-5) and (232.1.1.1-5)" + ) + step( + "Configure IGMP interface on FRR3 and send IGMP join" + " for group (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Connect one source to c2 and send multicast traffic all" + " the receivers (226.1.1.1-5, 232.1.1.1-5)" + ) + step( + "Send multicast traffic from FRR3 to all the receivers " + "(226.1.1.1-5, 232.1.1.1-5)" + ) + + input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + # Verifying mroutes before PIMd restart, fetching uptime + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Restart Pimd process on FRR3 node") + kill_router_daemons(tgen, "f1", ["pimd"]) + start_router_daemons(tgen, "f1", ["pimd"]) + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "After restart of PIMd verify 
pim nbr is up, IGMP groups" + " received, and (*,G) (S,G) entries populated again," + " Verify using 'show ip pim neighbor', 'show ip igmp groups'" + " 'show ip mroute'" + ) + + result = verify_pim_neighbors(tgen, topo) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dut = "f1" + interface = "f1-i8-eth2" + result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop the traffic and restart PIMd immediately on FRR3 node") + dut = "i2" + intf = "i2-f1-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + kill_router_daemons(tgen, "f1", ["pimd"]) + start_router_daemons(tgen, "f1", ["pimd"]) + + step( + "After the PIM process comes up, none of the (S,G) mroutes should" + " be present on FRR3, verify using 'show ip mroute' " + ) + + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + input_dict = [ + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "none"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + write_test_footer(tc_name) + + +def test_verify_mroute_and_traffic_when_frr_restarted_p2(request): + """ + TC_18: Verify (*,G) and (S,G) present and multicast traffic resume after + FRR service stop and start + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) in c1") + step("Configure static RP for (232.1.1.1-5) in c2") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_2, + } + ] + } + }, + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_3, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join " + "(226.1.1.1-5) and (232.1.1.1-5)" + ) + step( + "Configure IGMP interface on FRR3 and send IGMP join" + " for group (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Connect one source to c2 and send multicast traffic all" + " the receivers (226.1.1.1-5, 232.1.1.1-5)" + ) + step( + "Send multicast traffic from FRR3 to all the receivers " + "(226.1.1.1-5, 232.1.1.1-5)" + ) + + input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verifying mroutes before FRR restart, fetching uptime") + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop and Start the FRR services on FRR3 node") + stop_router(tgen, "f1") + start_router(tgen, "f1") + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "After stop and start of FRR service verify pim nbr 
is up " + "IGMP groups received , and (*,G) (S,G) entries populated again" + " Verify using 'show ip pim neighbor' , 'show ip igmp groups'" + " 'show ip mroute'" + ) + + result = verify_pim_neighbors(tgen, topo) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dut = "f1" + interface = "f1-i8-eth2" + result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop the traffic and stop and start the FRR services on" " FRR3 node") + shutdown_bringup_interface(tgen, "i2", "i2-f1-eth0", False) + + stop_router(tgen, "f1") + start_router(tgen, "f1") + + step( + "After stop and start of FRR services , all the none of (S,G)" + " mroute should present on FRR3 node verify using " + "'show ip mroute'" + ) + + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + input_dict = [ + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "none"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + write_test_footer(tc_name) + + +def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request): + """ + TC_10: Verify SPT switchover working when RPT and SPT path is + different + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) and " "(232.1.1.1-5) in c2") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": _GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join " + "(226.1.1.1-5) and (232.1.1.1-5)" + ) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to '226.1.1.1-5'" ", '232.1.1.1-5' receiver") + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("registerRx and registerStopTx value before traffic sent") + state_dict = {"c2": {"c2-f1-eth1": ["registerRx", "registerStopTx"]}} + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase {} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verify in FRR3 sending initial packet to RP using" + " 'show ip mroute' and mroute OIL is towards RP." 
+ ) + + result = verify_ip_mroutes( + tgen, + "f1", + "10.0.5.2", + _IGMP_JOIN_RANGE, + "f1-i2-eth1", + ["f1-c2-eth0", "f1-r2-eth3"], + ) + assert result is True, "Testcase {} : " "Failed Error: {}".format(tc_name, result) + + result = verify_ip_mroutes( + tgen, "f1", "10.0.5.2", _IGMP_JOIN_RANGE, "f1-i2-eth1", "f1-r2-eth3" + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + " After spt switchover traffic is flowing between" + " (LHR(FRR1)-FHR(FRR3)) and (S,G) OIL is updated toward FRR1" + " 'show ip mroute' and 'show ip pim upstream'" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop the traffic to all the receivers") + + kill_iperf(tgen, "i2", "remove_traffic") + + step( + "Null register packet being send periodically from FRR3 to RP, " + "verify using show ip mroute on RP, have (S, G) entries null OIL" + " 'show ip mroute' and verify show ip pim interface traffic" + "(In RP Register msg should be received and Register stop should" + " be transmitted)" + ) + input_dict = [ + {"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "none"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"] + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("registerRx and registerStopTx value after traffic sent") + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase {} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_mroute_after_shut_noshut_of_upstream_interface_p1(request): + """ + TC_15: Verify (S,G) and (*,G) mroute after shut / no shut of upstream + interfaces + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) in c1") + step("Configure static RP for (232.1.1.1-5) in c2") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_2, + } + ] + } + }, + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_3, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join " + "(226.1.1.1-5) and (232.1.1.1-5)" + ) + step( + "Configure IGMP interface on FRR3 and send IGMP join" + " for group (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Connect one source to c2 and send multicast traffic all" + " the receivers (226.1.1.1-5, 232.1.1.1-5)" + ) + step( + "Send multicast traffic from FRR3 to all the receivers " + "(226.1.1.1-5, 232.1.1.1-5)" + ) + + input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "FRR3 (S,G) has one OIL for local receiver one toward c2" + " verify 'show ip mroute' and 'show ip pim upstream'" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_2 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut and No shut interface connected from FHR (FRR3)" " to c2") + dut = "f1" + intf = "f1-c2-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + shutdown_bringup_interface(tgen, dut, intf, True) + + step("Shut and No shut interface connected from LHR (FRR1)" " to c1") + dut = "l1" + intf = "l1-c1-eth0" + 
shutdown_bringup_interface(tgen, dut, intf, False) + shutdown_bringup_interface(tgen, dut, intf, True) + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut and No shut FRR1 and FRR3 interface") + shutdown_bringup_interface(tgen, "l1", "l1-r2-eth4", False) + shutdown_bringup_interface(tgen, dut, intf, True) + + shutdown_bringup_interface(tgen, "f1", "f1-r2-eth3", False) + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "After shut/no shut of interface , verify traffic resume to all" + "the receivers (S,G) OIL update for all the receivers" + ) + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Shut FRR1, FRR3 interface , clear mroute in FRR1" + " and No shut FRR1, FRR3 interface " + ) + dut = "l1" + intf = "l1-r2-eth4" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "f1" + intf = "f1-r2-eth3" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "l1" + intf = "l1-r2-eth4" + shutdown_bringup_interface(tgen, dut, intf, True) + + dut = "f1" + intf = "f1-r2-eth3" + shutdown_bringup_interface(tgen, dut, intf, True) + + clear_ip_mroute(tgen, "l1") + clear_ip_mroute(tgen, "l1") + + step( + "After no shut, verify traffic resume to all the receivers" + " (S,G) OIL update for all the receivers" + ) + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Shut and no shut upstream interface from FRR1 to FRR2 and " + "cisco immediate after mroute/upstream got cleared" + ) + + dut = "l1" + intf_l1_r2 = "l1-r2-eth4" + shutdown_bringup_interface(tgen, dut, intf_l1_r2, False) + + intf_l1_c1 = "l1-c1-eth0" + shutdown_bringup_interface(tgen, dut, intf_l1_c1, False) + + done_flag = False + for retry in range(1, 11): + result = verify_upstream_iif(tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2, + expected=False) + if result is not True: + done_flag = True + else: + continue + + if done_flag: + logger.info("Expected Behavior: {}".format(result)) + break + + assert done_flag is True, ( + "Testcase {} : Failed Error: \n " + "mroutes are still present, after waiting for 10 mins".format(tc_name) + ) + + step("No shut the Source interface just after the upstream is expired" " from FRR1") + shutdown_bringup_interface(tgen, dut, intf_l1_r2, True) + shutdown_bringup_interface(tgen, dut, intf_l1_c1, True) + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Stop the traffic to all the receivers") + kill_iperf(tgen) + + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n mroutes are " + "still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + write_test_footer(tc_name) + + 
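The shut/no-shut testcase above waits for (S,G) state to age out by polling verify_upstream_iif in a bounded done_flag loop rather than sleeping for a fixed time, which keeps a stuck router from hanging the whole suite while still giving slow expiry timers a chance to fire. A minimal standalone sketch of that polling idiom follows; poll_until and its attempts/interval arguments are illustrative placeholders, not helpers provided by lib/common_config or lib/pim, and the commented usage only mirrors the loops in the patch.

import time


def poll_until(check_fn, attempts=10, interval=30):
    """Call check_fn() up to `attempts` times, sleeping `interval` seconds
    between tries; return True as soon as it reports the expected state."""
    for _ in range(attempts):
        if check_fn():
            return True
        time.sleep(interval)
    return False


# Hypothetical usage mirroring the done_flag loops above: stop polling once
# verify_upstream_iif(..., expected=False) stops returning True, which is how
# the testcases detect that the (S,G) upstream entry on l1 has expired.
# expired = poll_until(
#     lambda: verify_upstream_iif(
#         tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2, expected=False
#     ) is not True
# )
# assert expired, "upstream entry for {} did not expire".format(source)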
+def test_verify_mroute_when_receiver_is_outside_frr_p0(request): + """ + TC_7: Verify mroute detail when receiver is present + outside of FRR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP on c1 for group range " "(226.1.1.1-5) and (232.1.1.1-5)") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": _GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join" + " (226.1.1.1-5) and (232.1.1.1-5)" + ) + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Send multicast traffic from FRR3 to all the receivers " + "(226.1.1.1-5) and (232.1.1.1-5)" + ) + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Configure one more receiver in c2 enable IGMP and send" + " join (226.1.1.1-5) and (232.1.1.1-5)" + ) + input_dict = { + "c2": {"igmp": {"interfaces": {"c2-i5-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i5", "i5-c2-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i5", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("FRR1 has 10 (*.G) and 10 (S,G) verify using 'show ip mroute count'") + step( + "All the receiver are receiving traffic on FRR1 and (S,G) OIL is toward" + "receivers, verify using 'show ip mroute' 'show ip pim upstream'" + ) + step( + "All the receiver are receiving traffic on c2 and (S,G) OIL is " + "toward receivers, verify using 'show ip mroute' 'show ip pim upstream'" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "c2", "src_address": "*", "iif": "c2-c1-eth0", "oil": "c2-i5-eth2"}, + {"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "c2-i5-eth2"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + 
data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "FRR3 has (S,G) OIL created toward c1/c2 receiver and FRR1 receiver" + "'show ip pim state'" + ) + input_dict = [ + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-c2-eth0"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, + ] + for data in input_dict: + result = verify_pim_state( + tgen, + data["dut"], + data["iif"], + data["oil"], + _IGMP_JOIN_RANGE, + data["src_address"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_mroute_when_FRR_is_FHR_and_LHR_p0(request): + """ + TC_8: Verify mroute when FRR is acting as FHR and LHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for group range (226.1.1.1-5) and " "(232.1.1.1-5) on c1") + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": _GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Enable IGMP on FRR1 interface and send IGMP join (226.1.1.1-5)" + " and (232.1.1.1-5)" + ) + step( + "Configure receiver on FRR3 with igmp and pim enabled and " + "send IGMP join (226.1.1.1-5) and (232.1.1.1-5)" + ) + step( + "Send multicast traffic from FRR3 to all the receivers " + "(226.1.1.1-5) and (232.1.1.1-5)" + ) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send IGMP join (226.1.1.1-5, 232.1.1.1-5) to LHR(l1)") + result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to '226.1.1.1-5'" ", '232.1.1.1-5' receiver") + result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Configure receiver in f1 enable IGMP and send" + " join (226.1.1.1-5) and (232.1.1.1-5)" + ) + + step("Configure one IGMP interface on f1 node and send IGMP" " join (225.1.1.1)") + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: 
{}".format(tc_name, result) + + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, "i8", "i8-f1-eth0", _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, "i8", _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + step( + "l1 and f1 has 10 IGMP groups (226.1.1.1-5, 232.1.1.1-5)," + " verify using 'show ip igmp groups'" + ) + dut = "l1" + interface = "l1-i1-eth1" + result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dut = "f1" + interface = "f1-i8-eth2" + result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "l1 , f1 has 10 (*,G) and 10 (S,G) for groups " + "(226.1.1.1-5, 232.1.1.1-5), verify using " + " 'show ip mroute'" + ) + + source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + {"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"}, + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Join timer is running in FHR and LHR , verify using" " 'show ip pim state'") + + for data in input_dict: + result = verify_pim_state( + tgen, + data["dut"], + data["iif"], + data["oil"], + _IGMP_JOIN_RANGE, + data["src_address"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + # Stop the multicast traffic + step("Stop the traffic to all the receivers") + kill_iperf(tgen) + + step( + "After traffic stopped , verify (*,G) entries are not flushed" + " out from FRR1 node verify using 'show ip mroute' " + ) + + input_dict = [ + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + ] + + done_flag = False + for retry in range(1, 11): + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + + if result is True: + done_flag = True + else: + continue + + if done_flag: + break + + assert done_flag is True, ( + "Testcase {} : Failed Error: \n " + "mroutes are still present, after waiting for 10 mins".format(tc_name) + ) + + step( + "After traffic stopped , verify (S,G) entries are flushed out" + " from FRR1 node verify using 'show ip mroute' " + ) + + input_dict = [ + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "f1", "src_address": source, "iif": "i2-f1-eth0", "oil": "f1-r2-eth3"}, + ] + + done_flag = False + for retry in range(1, 11): + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False + ) + if result is not True: + done_flag = True + else: + continue + + if done_flag: + logger.info("Expected Behavior: {}".format(result)) + break + + assert 
done_flag is True, ( + "Testcase {} : Failed Error: \n " + "mroutes are still present, after waiting for 10 mins".format(tc_name) + ) + + write_test_footer(tc_name) + + +def test_verify_mroute_when_5_different_receiver_joining_same_sources_p0(request): + """ + TC_20: Verify mroute detail when 5 different receiver joining + same source + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) in c1") + step("Configure static RP for (232.1.1.1-5) in c2") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_2, + } + ] + } + }, + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_3, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Configure 2 IGMP interface on FRR1 and send IGMP join" + "for group (226.1.1.1-5, 232.1.1.1-5) from both the interface" + ) + step( + "Configure 2 IGMP interface on FRR3 and send IGMP join for" + " group (226.1.1.1-5, 232.1.1.1-5) from both the interface" + ) + step( + "Configure 1 IGMP interface on c2 and send IGMP join for" + "group (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_dict = { + "f1": { + "igmp": { + "interfaces": { + "f1-i8-eth2": {"igmp": {"version": "2"}}, + "f1-i2-eth1": {"igmp": {"version": "2"}}, + } + } + }, + "l1": {"igmp": {"interfaces": {"l1-i6-eth2": {"igmp": {"version": "2"}}}}}, + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = { + "i1": "i1-l1-eth0", + "i6": "i6-l1-eth0", + "i8": "i8-f1-eth0", + "i2": "i2-f1-eth0", + } + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + step("Configure one source in FRR2 , one in c1") + step( + "Send multicast traffic from both the sources to all the" + "receivers (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_src = {"i3": "i3-r2-eth0"} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + step( + "After all the IGMP groups received with correct port using" + " 'show ip igmp groups' in FRR1, FRR3, c2" + ) + dut = "l1" + interface = "l1-i6-eth2" + result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE) + assert result is True, "Testcase {} : 
Failed Error: {}".format(tc_name, result) + + dut = "f1" + interface = "f1-i8-eth2" + result = verify_igmp_groups(tgen, dut, interface, _IGMP_JOIN_RANGE) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "(*,G) entries got created with upstream interface RP connected" + " port using 'show ip pim upstream' in FRR1, FRR3, c2" + ) + step( + "(S,G) entries created for all the receiver after starting the" + " source , traffic is reaching to all the receiver , verify OIL" + " of (S,G) is receiver port using 'show ip mroute' in FRR1, " + "FRR3 c2" + ) + + source = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0] + input_dict_all = [ + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}, + {"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"}, + ] + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the receiver interface one by one on FRR1 node") + shutdown_bringup_interface(tgen, "l1", "l1-i1-eth1", False) + shutdown_bringup_interface(tgen, "l1", "l1-i6-eth2", False) + + step( + "After shut the receiver port verify traffic is stopped immediately" + " and (S,G) got timeout immediately in FRR1, FRR3, c2" + ) + input_dict = [ + {"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_2, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + logger.info("Expected Behavior: {}".format(result)) + + step( + "No traffic impact observed on other receivers verify using" + " 'show ip mroute' " + ) + input_dict = [ + {"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"} + ] + for data in input_dict: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("No shut the receiver interface one by one on FRR1 node") + shutdown_bringup_interface(tgen, "l1", "l1-i1-eth1", True) + shutdown_bringup_interface(tgen, "l1", "l1-i6-eth2", True) + + step( + "After no shut of receivers all the mroute entries got populated" + ", no duplicate entries present in mroute" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_oil_iif_for_mroute_after_shut_noshut_source_interface_p1(request): + """ + TC_22: Verify OIL and IIF detail updated in (S,G) mroute after shut + and no shut of the source interface + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + 
clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure static RP for (226.1.1.1-5) in c1") + step("Configure static RP for (232.1.1.1-5) in c2") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_dict = { + "c1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_2, + } + ] + } + }, + "c2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["c2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_3, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Configure IGMP interface on FRR1 and FRR3 and send IGMP join" + " for group (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_dict = { + "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure 1 source in FRR1 , 1 in FRR3") + step( + "Send multicast traffic from both the sources to all the " + "receivers (226.1.1.1-5, 232.1.1.1-5)" + ) + + input_src = {"i6": "i6-l1-eth0", "i2": "i2-f1-eth0"} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "*,G) is created and (S,G) created on FRR1 and FRR3 for both" + " the source verify using 'show ip mroute' and " + " 'show ip pim upstream' to check the upstream interface" + " details" + ) + + source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}, + { + "dut": "l1", + "src_address": source_i2, + "iif": "l1-r2-eth4", + "oil": "l1-i1-eth1", + }, + { + "dut": "l1", + "src_address": source_i6, + "iif": "l1-i6-eth2", + "oil": "l1-i1-eth1", + }, + {"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}, + { + "dut": "f1", + "src_address": source_i2, + "iif": "f1-i2-eth1", + "oil": "f1-i8-eth2", + }, + { + "dut": "f1", + "src_address": source_i6, + "iif": "f1-r2-eth3", + "oil": "f1-i8-eth2", + }, + ] + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, 
data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the source interface one by one on FRR1") + shutdown_bringup_interface(tgen, "f1", "f1-i2-eth1", False) + + step( + "After shut of ource interface from FRR3 verify all the (S,G) " + "entries flushed out from FRR3 node 'show ip pim upstream' " + " 'show ip mroute' " + ) + + result = verify_ip_mroutes( + tgen, + "f1", + source_i2, + _IGMP_JOIN_RANGE, + "f1-i2-eth1", + "f1-i8-eth2", + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behavior: {}".format(result)) + + result = verify_upstream_iif( + tgen, "f1", "Unknown", "10.0.5.2", _IGMP_JOIN_RANGE, joinState="NotJoined" + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json new file mode 100644 index 0000000000..f582f4929d --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo3.json @@ -0,0 +1,140 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "l1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "i1": {"ipv4": "auto", "pim": "enable"}, + "i6": {"ipv4": "auto", "pim": "enable"}, + "i7": {"ipv4": "auto", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"} + }, + "igmp": { + "interfaces": { + "l1-i1-eth1" :{ + "igmp":{ + "version": "2" + } + } + } + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.9.0/24", "1.0.3.5/32"], + "next_hop": "10.0.12.2" + }, + { + "network": ["1.0.1.2/32", "1.0.3.5/32", "10.0.1.0/24", "1.0.2.2/32", "10.0.4.0/24"], + "next_hop": "10.0.2.1" + }] + }, + "r2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i3": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["10.0.5.0/24", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "1.0.3.5/32"], + "next_hop": "10.0.7.1" + }, + { + "network": ["1.0.1.2/32", "10.0.8.0/24", "10.0.10.0/24", "10.0.4.0/24", "10.0.11.0/24", "10.0.1.0/24"], + "next_hop": "10.0.12.1" + }] + }, + "f1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "i2": {"ipv4": "auto", "pim": "enable"}, + "i8": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.11.0/24", "10.0.12.0/24"], + "next_hop": "10.0.7.2" + }, + { + "network": ["1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24", "1.0.1.2/32"], + "next_hop": "10.0.3.1" + }] + }, + "c1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "i4": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", 
"10.0.6.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.11.0/24"], + "next_hop": "10.0.2.2" + }, + { + "network": ["10.0.5.0/24", "10.0.7.0/24", "1.0.3.5/32", "10.0.6.0/24", "1.0.2.2/32", "10.0.1.0/24", "10.0.4.0/24"], + "next_hop": "10.0.0.2" + }] + }, + "c2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i5": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.5.17/32", "10.0.5.0/24", "10.0.6.0/24", "10.0.7.0/24", "10.0.8.0/24", "10.0.9.0/24", "10.0.10.0/24", "10.0.11.0/24"], + "next_hop": "10.0.3.2" + }, + { + "network": ["1.0.1.2/32", "10.0.4.0/24"], + "next_hop": "10.0.0.1" + }] + }, + "i1": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i2": { + "links": { + "f1": {"ipv4": "auto"} + } + }, + "i3": { + "links": { + "r2": {"ipv4": "auto"} + } + }, + "i4": { + "links": { + "c1": {"ipv4": "auto"} + } + }, + "i5": { + "links": { + "c2": {"ipv4": "auto"} + } + }, + "i6": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i7": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i8": { + "links": { + "f1": {"ipv4": "auto"} + } + } + } +} diff --git a/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json new file mode 100644 index 0000000000..4635dac7d2 --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo3/multicast_pim_sm_topo4.json @@ -0,0 +1,137 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "link_local": "disable"}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "l1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "i1": {"ipv4": "auto", "pim": "enable"}, + "i6": {"ipv4": "auto", "pim": "enable"}, + "i7": {"ipv4": "auto", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"} + }, + "igmp": { + "interfaces": { + "l1-i1-eth1" :{ + "igmp":{ + "version": "2" + } + } + } + }, + "static_routes": [{ + "network": ["10.0.4.0/24", "10.0.3.1/24"], + "next_hop": "10.0.12.2" + }, + { + "network": ["10.0.1.2/24"], + "next_hop": "10.0.2.1" + }] + + }, + + "r2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i3": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["10.0.4.0/24","10.0.3.1/24"], + "next_hop": "10.0.7.1" + }, + { + "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"], + "next_hop": "10.0.12.1" + }] + }, + "f1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "i2": {"ipv4": "auto", "pim": "enable"}, + "i8": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["10.0.4.0/24","10.0.3.1/24"], + "next_hop": "10.0.3.1" + }, + { + "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"], + "next_hop": "10.0.7.2" + }] + }, + "c1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "c2": {"ipv4": "auto", "pim": "enable"}, + "l1": {"ipv4": "auto", "pim": "enable"}, + "i4": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [{ + "network": ["1.0.4.11/32","10.0.4.2/24", "10.0.3.1/24"], + "next_hop": "10.0.2.2" + }] + + + }, + "c2": { + "links": { + "lo": {"ipv4": "auto", "type": 
"loopback", "pim": "enable"}, + "c1": {"ipv4": "auto", "pim": "enable"}, + "f1": {"ipv4": "auto", "pim": "enable"}, + "i5": {"ipv4": "auto", "pim": "enable"} + }, + "static_routes": [ + { + "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"], + "next_hop": "10.0.3.2" + }] + }, + "i1": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i2": { + "links": { + "f1": {"ipv4": "auto"} + } + }, + "i3": { + "links": { + "r2": {"ipv4": "auto"} + } + }, + "i4": { + "links": { + "c1": {"ipv4": "auto"} + } + }, + "i5": { + "links": { + "c2": {"ipv4": "auto"} + } + }, + "i6": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i7": { + "links": { + "l1": {"ipv4": "auto"} + } + }, + "i8": { + "links": { + "f1": {"ipv4": "auto"} + } + } + } +} diff --git a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py new file mode 100755 index 0000000000..fdceb77fd1 --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo3.py @@ -0,0 +1,4609 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: + +1. verify oil when join prune sent scenario_1 p0 +2. verify oil when join prune sent scenario_2 p0 +3. shut noshut source interface when upstream cleared from LHR p0( +4. shut noshut receiver interface when upstream cleared from LHR p0( +5. verify igmp clis p0 +6. verify igmp cli generate query once p0 +7. verify remove add igmp config to receiver interface p0 +8. verify remove add igmp commands when pim configured p0 +9. verify remove add pim commands when igmp configured p0 +10. pim dr priority p0 +11. pim hello timer p0 +12. Verify mroute after removing RP sending IGMP prune p2 +13. Verify prune is sent to LHR and FHR when PIM nbr went down +14. Verify mroute flag in LHR and FHR node +15. Verify IGMP prune processed correctly when same join received from IGMP and PIM +16. Verify multicast traffic flowing fine, when LHR connected to RP +17. Verify multicast traffic is flowing fine when FHR is connected to RP +""" + +import os +import re +import sys +import json +import time +import datetime +import pytest + +pytestmark = pytest.mark.pimd + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. 
+ +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + step, + iperfSendIGMPJoin, + addKernelRoute, + reset_config_on_routers, + iperfSendTraffic, + kill_iperf, + shutdown_bringup_interface, + kill_router_daemons, + start_router, + start_router_daemons, + stop_router, + apply_raw_config, + add_interfaces_to_vlan, + tcpdump_capture_start, + tcpdump_capture_stop, + LOGDIR, + check_router_status, + required_linux_kernel_version, + topo_daemons, +) +from lib.pim import ( + create_pim_config, + create_igmp_config, + verify_igmp_groups, + verify_ip_mroutes, + clear_ip_mroute_verify, + clear_ip_mroute, + clear_ip_pim_interface_traffic, + verify_igmp_config, + verify_pim_neighbors, + verify_pim_config, + verify_pim_interface, + verify_upstream_iif, + verify_multicast_traffic, + verify_pim_rp_info, + get_refCount_for_mroute, + verify_multicast_flag_state, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/multicast_pim_sm_topo3.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +TOPOLOGY = """ + + i4-----c1-------------c2---i5 + | | + | | + i1-----l1------r2-----f1---i2 + | | | | + | | | | + i7 i6 i3 i8 + + Description: + i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP + join and traffic + l1 - LHR + f1 - FHR + r2 - FRR router + c1 - FRR router + c2 - FRR router +""" + +# Global variables +VLAN_1 = 2501 +GROUP_RANGE = "225.0.0.0/8" +IGMP_GROUP = "225.1.1.1/32" +IGMP_JOIN = "225.1.1.1" +VLAN_INTF_ADRESS_1 = "10.0.8.3/24" +GROUP_RANGE_1 = [ + "225.1.1.1/32", + "225.1.1.2/32", + "225.1.1.3/32", + "225.1.1.4/32", + "225.1.1.5/32", +] +IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"] +GROUP_RANGE_2 = [ + "226.1.1.1/32", + "226.1.1.2/32", + "226.1.1.3/32", + "226.1.1.4/32", + "226.1.1.5/32", +] +IGMP_JOIN_RANGE_2 = ["226.1.1.1", "226.1.1.2", "226.1.1.3", "226.1.1.4", "226.1.1.5"] +GROUP_RANGE_3 = [ + "227.1.1.1/32", + "227.1.1.2/32", + "227.1.1.3/32", + "227.1.1.4/32", + "227.1.1.5/32", +] +IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"] + +SAME_VLAN_IP_1 = {"ip": "10.1.1.1", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_2 = {"ip": "10.1.1.2", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_3 = {"ip": "10.1.1.3", "subnet": "255.255.255.0", "cidr": "24"} +SAME_VLAN_IP_4 = {"ip": "10.1.1.4", "subnet": "255.255.255.0", "cidr": "24"} +TCPDUMP_FILE = "{}/{}".format(LOGDIR, "v2query.txt") + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + # Required linux kernel version for this suite to run. 
+ result = required_linux_kernel_version("4.19") + if result is not True: + pytest.skip("Kernel requirements are not met") + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + logger.info("Master Topology: \n {}".format(TOPOLOGY)) + + logger.info("Running setup_module to create topology") + + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Testcases +# +##################################################### + + +def config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False +): + """ + API to do pre-configuration to send IGMP join and multicast + traffic + + parameters: + ----------- + * `tgen`: topogen object + * `topo`: input json data + * `tc_name`: caller test case name + * `iperf`: router running iperf + * `iperf_intf`: interface name router running iperf + * `GROUP_RANGE`: group range + * `join`: IGMP join, default False + * `traffic`: multicast traffic, default False + """ + + if join: + # Add route to kernal + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + if traffic: + # Add route to kernal + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + router_list = tgen.routers() + for router in router_list.keys(): + if router == iperf: + continue + + rnode = router_list[router] + rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter") + + for router in topo["routers"].keys(): + if "static_routes" in topo["routers"][router]: + static_routes = topo["routers"][router]["static_routes"] + for static_route in static_routes: + network = static_route["network"] + next_hop = static_route["next_hop"] + if type(network) is not list: + network = [network] + for net in network: + addKernelRoute(tgen, router, iperf_intf, net, next_hop) + return True + + +def verify_mroute_repopulated(uptime_before, uptime_after): + """ + API to compare uptime for mroutes + + Parameters + ---------- + * `uptime_before` : Uptime dictionary for any particular instance + * `uptime_after` : Uptime dictionary for any particular instance + """ + + for group in uptime_before.keys(): + for source in uptime_before[group].keys(): + if set(uptime_before[group]) != set(uptime_after[group]): + errormsg = ( + "mroute (%s, %s) has not come" + " up after mroute clear [FAILED!!]" % (source, group) + ) + return errormsg + + d1 = 
datetime.datetime.strptime(uptime_before[group][source], "%H:%M:%S") + d2 = datetime.datetime.strptime(uptime_after[group][source], "%H:%M:%S") + if d2 >= d1: + errormsg = "mroute (%s, %s) is not " "repopulated [FAILED!!]" % ( + source, + group, + ) + return errormsg + + logger.info("mroute (%s, %s) is " "repopulated [PASSED!!]", source, group) + + return True + + +def verify_state_incremented(state_before, state_after): + """ + API to compare interface traffic state incrementing + + Parameters + ---------- + * `state_before` : State dictionary for any particular instance + * `state_after` : State dictionary for any particular instance + """ + + for router, state_data in state_before.items(): + for state, value in state_data.items(): + if state_before[router][state] >= state_after[router][state]: + errormsg = ( + "[DUT: %s]: state %s value has not" + " incremented, Initial value: %s, " + "Current value: %s [FAILED!!]" + % ( + router, + state, + state_before[router][state], + state_after[router][state], + ) + ) + return errormsg + + logger.info( + "[DUT: %s]: State %s value is " + "incremented, Initial value: %s, Current value: %s" + " [PASSED!!]", + router, + state, + state_before[router][state], + state_after[router][state], + ) + + return True + + +def find_v2_query_msg_in_tcpdump(tgen, router, message, count, cap_file): + """ + Find v2 query messages in tcpdump file + + Parameters + ---------- + * `tgen` : Topology handler + * `router` : Device under test + * `cap_file` : tcp dump file name + + """ + + filepath = os.path.join(LOGDIR, tgen.modname, router, cap_file) + with open(filepath) as f: + if len(re.findall("{}".format(message), f.read())) < count: + errormsg = "[DUT: %s]: Verify Message: %s in tcpdump" " [FAILED!!]" % ( + router, + message, + ) + return errormsg + + logger.info( + "[DUT: %s]: Found message: %s in tcpdump " " count: %s [PASSED!!]", + router, + message, + count, + ) + return True + + +def find_tos_in_tcpdump(tgen, router, message, cap_file): + """ + Find v2 query messages in tcpdump file + + Parameters + ---------- + * `tgen` : Topology handler + * `router` : Device under test + * `cap_file` : tcp dump file name + + """ + + filepath = os.path.join(LOGDIR, tgen.modname, router, cap_file) + with open(filepath) as f: + + if len(re.findall(message, f.read())) < 1: + errormsg = "[DUT: %s]: Verify Message: %s in tcpdump" " [FAILED!!]" % ( + router, + message, + ) + return errormsg + + logger.info( + "[DUT: %s]: Found message: %s in tcpdump " "[PASSED!!]", router, message + ) + return True + + +def test_verify_oil_when_join_prune_sent_scenario_1_p1(request): + """ + TC_21_1: + Verify OIL detail updated in (S,G) and (*,G) mroute when IGMP + join/prune is sent + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (226.1.1.1-5)" + ) + step( + "Enable IGMP of FRR3 interface and send IGMP joins " + " from FRR3 node for group range (226.1.1.1-5)" + ) + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_dict = { + "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = { + "i1": topo["routers"]["i1"]["links"]["l1"]["interface"], + "i8": topo["routers"]["i8"]["links"]["f1"]["interface"], + } + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (226.1.1.1-5) in R2") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Configure one source on FRR3 for all the groups and send" " multicast traffic" + ) + + input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "r2", + "src_address": source_i2, + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + 
result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " "FRR1 node") + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False) + + step( + "After receiving the IGMP prune from FRR1 , verify traffic " + "immediately stopped for this receiver 'show ip multicast'" + ) + + input_traffic = {"l1": {"traffic_sent": [intf_l1_i1]}} + result = verify_multicast_traffic(tgen, input_traffic, expected=False) + assert result is not True, ( + "Testcase {} : Failed \n " + " Traffic is not stopped yet \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step( + "IGMP groups are remove from FRR1 node 'show ip igmp groups'" + " FRR3 IGMP still present" + ) + + dut = "l1" + result = verify_igmp_groups( + tgen, dut, intf_l1_i1, IGMP_JOIN_RANGE_1, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "IGMP groups are not deleted \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + dut = "f1" + result = verify_igmp_groups(tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "(*,G) and (S,G) OIL got removed immediately after receiving" + " prune 'show ip pim state' and 'show ip mroute' on FRR1 node," + " no impact on FRR3 receiver" + ) + + input_dict_l1 = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "mroutes are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + for data in input_dict_l1: + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "upstream entries are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + input_dict_f1 = [ + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_f1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_f1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is 
True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " " FRR3 node") + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + shutdown_bringup_interface(tgen, "f1", intf_f1_i8, False) + + step( + "After receiving the IGMP prune from FRR3s , verify traffic " + "immediately stopped for this receiver 'show ip multicast'" + ) + + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + result = verify_multicast_traffic(tgen, input_traffic, expected=False) + assert result is not True, ( + "Testcase {} : Failed \n " + " Traffic is not stopped yet \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step( + "IGMP groups are remove from FRR1 node 'show ip igmp groups'" + " FRR3 IGMP still present" + ) + + dut = "f1" + result = verify_igmp_groups( + tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "IGMP groups are not deleted \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step( + "(*,G) and (S,G) OIL got prune state (none) from all the nodes" + "FRR1, FRR3 verify using 'show ip mroute'" + ) + + input_dict_l1 = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "mroutes are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + for data in input_dict_l1: + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "upstream entries are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + shutdown_bringup_interface(tgen, "f1", intf_f1_i8, True) + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True) + + for data in input_dict_l1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_oil_when_join_prune_sent_scenario_2_p1(request): + """ + TC_21_2: Verify OIL detail updated in (S,G) and (*,G) mroute when IGMP + join/prune is sent + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Removing FRR3 to simulate topo " "FHR(FRR1)---LHR(FRR2)") + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"] + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_r2, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (226.1.1.1-5)" + ) + step( + "Enable IGMP of FRR3 interface and send IGMP joins " + " from FRR3 node for group range (226.1.1.1-5)" + ) + + intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"] + input_dict = { + "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = { + "i1": topo["routers"]["i1"]["links"]["l1"]["interface"], + "i3": topo["routers"]["i3"]["links"]["r2"]["interface"], + } + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (226.1.1.1-5) in R2") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["i3"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " "FRR3(r2) node") + + intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_i3, False) + + step( + "After sending IGMP prune from FRR3(r2) node verify (*,G) OIL " + "immediately removed for local receiver mroute should have " + " PIM protocol , IGMP should be removed verify using " + "'show ip mroute' no impact seen on FRR1(l1) (*,G)" + ) + + input_dict_r2 = [ + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": 
topo["routers"]["r2"]["links"]["i3"]["interface"], + } + ] + + for data in input_dict_r2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "mroutes are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + input_dict_l1_r2 = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_l1_r2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send the IGMP prune from ixia to (226.1.1.1-5) receiver on " "FRR1(l1) node") + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False) + + step( + "After sending IGMP prune from FRR1 node verify (*,G) OIL" + "got removed immediately from FRR1 node" + ) + + input_dict_l1 = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + } + ] + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "mroutes are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("After prune is sent verify upstream got removed in FRR1 node") + + for data in input_dict_l1: + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "upstream entries are still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + write_test_footer(tc_name) + + +def test_shut_noshut_source_interface_when_upstream_cleared_from_LHR_p1(request): + """ + TC_26: Verify shut/no shut of source interface after upstream got cleared + from LHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP on R2 (loopback interface) for " "the group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_1, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1-225.1.1.10" " receiver") + + input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "'show ip mroute' showing correct RPF and OIF interface for (*,G)" + " and (S,G) entries on all the nodes" + ) + + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "r2", + "src_address": source_i2, + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + step( + "'show ip pim upstream' and 'show ip pim upstream-rpf' showing" + " correct OIL and IIF on all the nodes" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the source interface from FRR3") + intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"] + shutdown_bringup_interface(tgen, "f1", intf_f1_i2, False) + + step( + "After shut of source interface verify (S,G) mroutes are cleared" + " from 
all the nodes" + ) + + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + result = verify_ip_mroutes( + tgen, "f1", source_i2, IGMP_JOIN_RANGE_1, intf_f1_i2, intf_f1_r2, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n mroutes are" + " still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behavior: {}".format(result)) + + step( + "After waiting for (S,G) timeout from FRR1 for same" + " source verify that (S,G) is flushed from FRR1 node" + " 'show ip pim upstream' 'show ip mroute' " + ) + + done_flag = False + for retry in range(1, 11): + result = verify_upstream_iif( + tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False + ) + if result is not True: + done_flag = True + else: + continue + if done_flag: + logger.info("Expected Behavior: {}".format(result)) + break + + assert done_flag is True, ( + "Testcase {} : Failed Error: \n " + "mroutes are still present, after waiting for 10 mins".format(tc_name) + ) + + step("No shut the Source interface just after the upstream is expired" " from FRR1") + shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True) + + step( + "After no shut of source interface , verify all the (S,G) is " + " populated again on 'show ip mroute' 'show ip pim upstream' " + " with proper OIL and IIF detail" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("shut and no shut the source interface immediately") + shutdown_bringup_interface(tgen, "f1", intf_f1_i2, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True) + + step( + "All the mroutes got updated with proper OIL after no shut of" + "interface verify using 'show ip mroute'" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_shut_noshut_receiver_interface_when_upstream_cleared_from_LHR_p1(request): + """ + TC_27: Verify shut/no shut of receiver interface after upstream got + cleared from LHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP on R2 (loopback interface) for " "the group range 225.0.0.0/8") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE_1, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1-225.1.1.10" " receiver") + + input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "'show ip mroute' showing correct RPF and OIF interface for (*,G)" + " and (S,G) entries on all the nodes" + ) + + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "r2", + "src_address": source_i2, + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "'show ip pim upstream' and 'show ip pim upstream-rpf' showing" + " correct OIL and IIF on all the nodes" + ) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the source interface FRR1") + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"] + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + 
shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False) + + step( + "After waiting for (S,G) timeout from FRR1 for same" + " source verify that (S,G) is flushed from FRR1 node" + " 'show ip pim upstream' 'show ip mroute' " + ) + + done_flag = False + for retry in range(1, 11): + result = verify_upstream_iif( + tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False + ) + if result is not True: + done_flag = True + else: + continue + if done_flag: + logger.info("Expected Behavior: {}".format(result)) + break + + assert done_flag is True, ( + "Testcase {} : Failed Error: \n " + "mroutes are still present, after waiting for 10 mins".format(tc_name) + ) + + step("No shut the Source interface just after the upstream is expired" " from FRR1") + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True) + + step( + "After no shut of source interface , verify all the (S,G) is " + " populated again on 'show ip mroute' 'show ip pim upstream' " + " with proper OIL and IIF detail" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("shut and no shut the source interface immediately") + shutdown_bringup_interface(tgen, "f1", intf_f1_i2, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True) + + step( + "After no shut of receiver interface , verify all the (S,G) is " + "populated again on 'show ip mroute' 'show ip pim upstream' " + "with proper OIL and IIF detail" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_remove_add_igmp_config_to_receiver_interface_p0(request): + """ + TC_33: Verify removing and adding IGMP config from the receiver interface + """ + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable PIM on all routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure source on FRR3 and start the traffic for" " (225.1.1.1-225.1.1.10)") + + input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure source on FRR1 and start the traffic for" " (225.1.1.1-225.1.1.10)") + + input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i6, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + 
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Remove igmp 'no ip igmp' and 'no ip igmp version 2' from" + " receiver interface of FRR1" + ) + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}} + } + } + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("IGMP join removed from FRR1 , verify using " "'show ip igmp groups json'") + + dut = "l1" + interface = topo["routers"]["l1"]["links"]["i1"]["interface"] + result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False) + assert result is not True, ( + "Testcase {} : Failed \n Groups are not" + " present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + intf_f1_i2 = topo["routers"]["f1"]["links"]["i2"]["interface"] + input_traffic = { + "l1": {"traffic_received": [intf_l1_r2], "traffic_sent": [intf_l1_i1]}, + "f1": {"traffic_sent": [intf_f1_r2], "traffic_received": [intf_f1_i2]}, + } + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Configure igmp 'ip igmp' and 'ip igmp version 2' from " + "receiver interface of FRR1" + ) + + input_dict_2 = { + "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "After adding IGMP on receiver interface verify (S,G) and (*,G)" + " entries got populated and traffic is resumed on FRR1 and FRR3 node" + ) + + step( + "Verify OIL/IIF and drJoinDesired using 'show ip mroute , and traffic" + " using show ip pim upstream and show ip multicast'" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Remove igmp 'no ip igmp' and 'no ip igmp version 2' from" + " receiver interface of FRR1" + ) + + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}} + } + } + } + + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("IGMP join removed from FRR1 , verify using " "'show ip igmp groups json'") + + dut = "l1" + interface = topo["routers"]["l1"]["links"]["i1"]["interface"] + result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN_RANGE_1, expected=False) + assert 
result is not True, ( + "Testcase {} : Failed \n Groups are not" + " present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Configure igmp 'ip igmp' and 'ip igmp version 2' from " + "receiver interface of FRR1" + ) + + input_dict_2 = { + "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "After adding IGMP on receiver interface verify (S,G) and (*,G)" + " entries got populated and traffic is resumed on FRR1 and FRR3 node" + ) + + step( + "Verify OIL/IIF and drJoinDesired using 'show ip mroute , and traffic" + " using show ip pim upstream and show ip multicast'" + ) + + input_dict_l1_f1 = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i6, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in input_dict_l1_f1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_l1_f1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Remove ip igmp and send igmp prune from FRR1 interface") + + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}} + } + } + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + step( + "Verification: After removing igmp 'no ip igmp' and " + " sending prune verify mroute and upstream got removed" + " from FRR1 verify using 'show ip mroute' and " + "'show ip pim upstream'" + ) + + dut = "l1" + iif = topo["routers"]["l1"]["links"]["i6"]["interface"] + oil = topo["routers"]["l1"]["links"]["i1"]["interface"] + source = source_i6 + result = verify_ip_mroutes( + tgen, dut, source, IGMP_JOIN_RANGE_1, iif, oil, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n routes are still" + " present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + write_test_footer(tc_name) + + +def test_verify_remove_add_igmp_commands_when_pim_configured_p0(request): + """ + TC_34: Verify removing and adding IGMP 
commands when PIM is already + configured + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Enable PIM on all routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure source on FRR3 and start the traffic for" " (225.1.1.1-225.1.1.10)") + + input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure source on FRR1 and start the traffic for" " (225.1.1.1-225.1.1.10)") + + input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i6, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in 
input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verification: After configuring IGMP related config , " + "verify config is present in the interface " + "'show ip igmp interface ensxx json'" + ) + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + input_dict_1 = { + "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}} + } + + result = verify_igmp_config(tgen, input_dict_1) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Remove igmp 'no ip igmp' and 'no ip igmp version 2' from" + " receiver interface of FRR1" + ) + + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": {intf_l1_i1: {"igmp": {"version": "2", "delete": True,}}} + } + } + } + + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Verification: After removing the config CLI got removed " + "'show ip igmp interface ensxx json'" + ) + + result = verify_igmp_config(tgen, input_dict_1, expected=False) + assert result is not True, ( + "Testcase {} : Failed \n " + "IGMP interface is not removed \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure 'ip igmp last-member-query-count 10' on FRR1" " receiver interface") + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": {"igmp": {"query": {"last-member-query-count": 5}}} + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Remove 'ip igmp last-member-query-count 10' on FRR1" " receiver interface") + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "query": {"last-member-query-count": "", "delete": True} + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": {"igmp": {"query": {"last-member-query-count": 2}}} + } + } + } + } + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step( + "Configure 'ip igmp last-member-query-interval 20' on FRR1" + " receiver interface" + ) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": {"query": {"last-member-query-interval": 20}} + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = 
verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Remove 'ip igmp last-member-query-count 10' on FRR1" " receiver interface") + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": { + "query": {"last-member-query-interval": "", "delete": True} + } + } + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_dict_3 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": { + "igmp": {"query": {"last-member-query-interval": 10}} + } + } + } + } + } + result = verify_igmp_config(tgen, input_dict_3) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_remove_add_pim_commands_when_igmp_configured_p1(request): + """ + TC_35: Verify removing and adding PIM commands when IGMP is already + configured + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 'ip pim' on receiver interface on FRR1") + step("Enable PIM on all routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim' on receiver interface on FRR1") + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + input_dict_1 = {"l1": {"pim": {"disable": intf_l1_i1}}} + result = create_pim_config(tgen, topo, input_dict_1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure 'ip pim bsm' on receiver interface on FRR1") + + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "ip pim bsm"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim bsm' on receiver interface on FRR1") + + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "no ip pim bsm"]} + } + result = 
apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure 'ip pim drpriority' on receiver interface on FRR1") + + raw_config = { + "l1": { + "raw_config": ["interface {}".format(intf_l1_i1), "ip pim drpriority 10"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verification: After configuring PIM related config, " + "verify config is present in the interface " + "'show ip pim interface ensxx json'" + ) + + input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"drPriority": 10}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim drpriority' on receiver interface on FRR1") + + raw_config = { + "l1": { + "raw_config": ["interface {}".format(intf_l1_i1), "no ip pim drpriority 10"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verification: After removing the config CLI got removed " + "'show ip pim interface ensxx json'" + ) + + input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"drPriority": 1}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure 'ip pim hello' on receiver interface on FRR1") + + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "ip pim hello 50"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verification: After configuring PIM related config, " + "verify config is present in the interface " + "'show ip pim interface ensxx json'" + ) + + input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"helloPeriod": 50}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim hello' on receiver interface on FRR1") + + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "no ip pim hello"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Verification: After removing the config CLI got removed " + "'show ip pim interface ensxx json'" + ) + + input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_i1: {"helloPeriod": 30}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure 'ip pim unicast-bsm' on receiver interface on FRR1") + + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_i1), "ip pim unicast-bsm"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim hello' on 
receiver interface on FRR1") + + raw_config = { + "l1": { + "raw_config": ["interface {}".format(intf_l1_i1), "no ip pim unicast-bsm"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_pim_dr_priority_p0(request): + """ + TC_36: Verify highest DR priority become the PIM DR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 'ip pim' on receiver interface on FRR1") + step("Enable PIM on all routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], 
data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure 'ip pim drpriority 10' on receiver interface on FRR1(LHR)") + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + raw_config = { + "l1": { + "raw_config": ["interface {}".format(intf_l1_r2), "ip pim drpriority 10"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "DR config is successful on FRR1 node , verify using " + " 'show ip pim interface json'" + ) + + input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_r2: {"drPriority": 10}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure 'ip pim drpriority 20' on receiver interface on FRR3(FHR)") + + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + raw_config = { + "f1": { + "raw_config": ["interface {}".format(intf_f1_r2), "ip pim drpriority 20"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "DR config is successful on FRR3 node , verify using " + " 'show ip pim interface json'" + ) + + input_dict_dr = {"f1": {"pim": {"interfaces": {intf_f1_r2: {"drPriority": 20}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "PIM is enable on FRR1, FRR2 interface and neighbor is up, " + " verify using 'show ip pim interface'" + ) + + result = verify_pim_interface(tgen, topo, "l1") + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_pim_interface(tgen, topo, "f1") + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Highet IP become PIM DR , verify using " + "'show ip pim interface json' and 'show ip pim neighbor'" + ) + step("Highest priority become PIM DR") + + dr_address = topo["routers"]["l1"]["links"]["r2"]["ipv4"].split("/")[0] + input_dict_dr = { + "l1": {"pim": {"interfaces": {intf_l1_r2: {"drAddress": dr_address}}}} + } + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dr_address = topo["routers"]["f1"]["links"]["r2"]["ipv4"].split("/")[0] + input_dict_dr = { + "f1": {"pim": {"interfaces": {intf_f1_r2: {"drAddress": dr_address}}}} + } + result = verify_pim_config(tgen, 
input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim drpriority' on receiver interface on FRR1") + + raw_config = { + "l1": { + "raw_config": ["interface {}".format(intf_l1_r2), "no ip pim drpriority 10"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove 'no ip pim drpriority' on receiver interface on FRR3") + + raw_config = { + "f1": { + "raw_config": ["interface {}".format(intf_f1_r2), "no ip pim drpriority 20"] + } + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "After removing drpriority , config got removed from both the " + "nodes and highest IP become PIM DR" + ) + + input_dict_dr = {"l1": {"pim": {"interfaces": {intf_l1_r2: {"drPriority": 1}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + input_dict_dr = {"f1": {"pim": {"interfaces": {intf_f1_r2: {"drPriority": 1}}}}} + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dr_address = topo["routers"]["r2"]["links"]["l1"]["ipv4"].split("/")[0] + input_dict_dr = { + "l1": {"pim": {"interfaces": {intf_l1_r2: {"drAddress": dr_address}}}} + } + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dr_address = topo["routers"]["r2"]["links"]["f1"]["ipv4"].split("/")[0] + input_dict_dr = { + "f1": {"pim": {"interfaces": {intf_f1_r2: {"drAddress": dr_address}}}} + } + result = verify_pim_config(tgen, input_dict_dr) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_pim_hello_timer_p1(request): + """ + TC_37: Verify PIM hello is sent on configured timer + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 'ip pim' on receiver interface on FRR1") + step("Enable PIM on all routers") + step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)") + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Configure PIM hello interval timer 100 on FRR1 node (FRR1-FRR2 link)") + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_r2), "ip pim hello 100"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "PIM hello interval is configured on interface verify using " + "'show ip pim interface'" + ) + + input_dict_hello = { + "l1": {"pim": {"interfaces": {intf_l1_r2: {"helloPeriod": 100}}}} + } + result = verify_pim_config(tgen, input_dict_hello) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Modify hello timer to 180 and then 50sec") + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_r2), "ip pim hello 180"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "PIM hello interval is configured on interface verify using " + "'show ip pim interface'" + ) + + input_dict_hello = { + "l1": {"pim": {"interfaces": {intf_l1_r2: {"helloPeriod": 180}}}} + } + result = verify_pim_config(tgen, input_dict_hello) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + raw_config = { + "l1": {"raw_config": ["interface {}".format(intf_l1_r2), "ip pim hello 50"]} + } + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "PIM hello interval is configured on interface verify using " + "'show ip pim interface'" + ) + + input_dict_hello = { + "l1": {"pim": {"interfaces": {intf_l1_r2: {"helloPeriod": 50}}}} + } + result = verify_pim_config(tgen, input_dict_hello) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify that no core is observed") + if tgen.routers_have_failure(): + assert False, "Testcase {}: Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_mroute_after_removing_RP_sending_IGMP_prune_p2(request): + """ + TC_39 Verify mroute after removing the RP and sending IGMP prune + """ + + tgen = get_topogen() + tc_name = request.node.name + 
write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step( + "Remove cisco connected link to simulate topo " + "LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))" + ) + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (225.1.1.1-5)" + ) + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_dict = { + "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (225.1.1.1-5) as R2") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Send traffic from FHR to all the groups ( 225.1.1.1 to 225.1.1.5) and send" + " multicast traffic" + ) + + input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + + input_dict_all = [ + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + }, + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in 
input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Remove the RP config for both the range from all the nodes") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + input_dict_starg = [ + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + } + ] + + input_dict_sg = [ + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + ] + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "mroute still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send prune from receiver-1 (using ctrl+c) on iperf interface") + kill_iperf(tgen) + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + traffic_before = verify_multicast_traffic( + tgen, input_traffic, return_traffic=True, expected=False + ) + assert isinstance(traffic_before, dict), ( + "Testcase {} : Failed \n traffic_before is not dictionary \n " + "Error: {}".format(tc_name, result) + ) + + step("IGMP groups are remove from FRR1 node 'show ip igmp groups'") + + dut = "f1" + result = verify_igmp_groups( + tgen, dut, intf_f1_i8, IGMP_JOIN_RANGE_1, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "IGMP groups still present still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step( + "After receiving the IGMP prune from FRR1 , verify traffic " + "immediately stopped for this receiver 'show ip multicast'" + ) + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + traffic_after = verify_multicast_traffic( + tgen, input_traffic, return_traffic=True, expected=False + ) + assert isinstance(traffic_after, dict), ( + "Testcase {} : Failed \n traffic_after is not dictionary \n " + "Error: {}".format(tc_name, result) + ) + + result = verify_state_incremented(traffic_before, traffic_after) + assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result) + logger.info("Expected Behaviour: {}".format(result)) + + step("Configure static RP for (225.1.1.1-5) as R2 loopback interface") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": 
topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send IGMP joins again from LHR,check IGMP joins and starg received") + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send traffic from FHR and verify mroute upstream") + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_prune_sent_to_LHR_and_FHR_when_PIMnbr_down_p2(request): + """ + TC_38 Verify prune is sent to LHR and FHR when PIM nbr went down + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step( + "Remove cisco connected link to simulate topo " + "LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))" + ) + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (225.1.1.1-5)" + ) + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_dict = { + "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (225.1.1.1-5) as R2") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Send traffic from FHR to all the groups ( 225.1.1.1 to 225.1.1.5) and send" + " multicast traffic" + ) + + input_src = { + "i6": topo["routers"]["i6"]["links"]["l1"]["interface"], + "i2": topo["routers"]["i2"]["links"]["f1"]["interface"], + } + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + source_i1 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + + input_dict_all = [ + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + }, + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i1, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: 
{}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + step("Verify mcast traffic received") + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the link from LHR to RP from RP node") + + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_f1, False) + + step("Verify RP info after Shut the link from LHR to RP from RP node") + dut = "f1" + rp_address = "1.0.5.17" + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + input_dict_starg = [ + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + } + ] + + input_dict_sg_i2 = [ + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + ] + + input_dict_sg_i1 = [ + { + "dut": "f1", + "src_address": source_i1, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + } + ] + + input_dict_sg_i2_l1 = [ + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + } + ] + + step("Verify mroute after Shut the link from LHR to RP from RP node") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "mroute still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + for data in input_dict_sg_i1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify upstream after Shut the link from LHR to RP from RP node") + + for data in input_dict_starg: + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "upstream still present \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + for data in input_dict_sg_i1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("No shut the link from LHR to RP from RP node") + + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_f1, True) + + 
step("Verify RP info after No shut the link from LHR to RP from RP node") + dut = "f1" + rp_address = "1.0.5.17" + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "RP iif is not updated \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("Verify mroute after No shut the link from LHR to RP from RP node") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify upstrem after No shut the link from LHR to RP from RP node") + + for data in input_dict_starg: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify mcast traffic received after noshut LHR to RP from RP node") + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the link from FHR to RP from RP node") + + intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_l1, False) + + kill_iperf(tgen, dut="i2", action="remove_traffic") + + step("Verify RP info after Shut the link from FHR to RP from RP node") + dut = "l1" + rp_address = "1.0.5.17" + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify mroute after Shut the link from FHR to RP from RP node") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify upstream after Shut the link from FHR to RP from RP node") + + for data 
in input_dict_starg: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2_l1: + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + step(" No shut the link from FHR to RP from RP node") + + intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_l1, True) + + step("Verify RP info after Noshut the link from FHR to RP from RP node") + + dut = "l1" + rp_address = "1.0.5.17" + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "RP iif is not updated \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("Verify mroute after Noshut the link from FHR to RP from RP node") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify mroute after Noshut the link from FHR to RP from RP node") + + for data in input_dict_starg: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify mcast traffic received after noshut FHR to RP from RP node") + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the link from FHR to RP from FHR node") + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_r2, False) + + step("Verify PIM Nbrs after Shut the link from FHR to RP from FHR node") + + kill_iperf(tgen, dut="i6", action="remove_traffic") + + 
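+    # Shutting the RP-side interface towards l1 removes l1's path to the RP, so
+    # the checks below expect l1's RP info to fall back to an "Unknown" RPF
+    # interface, while the (*,G) entry on f1 and the (S,G) entry for the locally
+    # attached source (source_i1, learnt on f1's i2 link) stay intact.  Only the
+    # upstream entry on l1 for source_i2 (the i6 stream whose traffic was just
+    # stopped above) is expected to disappear, hence the expected=False check on
+    # input_dict_sg_i2_l1 further down.
+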
step("Verify RP info after Shut the link from FHR to RP from FHR node") + dut = "l1" + rp_address = "1.0.5.17" + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify mroute after Shut the link from FHR to RP from FHR node") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify upstream after Shut the link from FHR to RP from FHR node") + for data in input_dict_starg: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2_l1: + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + step(" No shut the link from FHR to RP from FHR node") + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_r2, True) + + step("Verify RP info after No Shut the link from FHR to RP from FHR node") + dut = "l1" + rp_address = "1.0.5.17" + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n " + "RP iif is not updated \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("Verify mroute after No Shut the link from FHR to RP from FHR node") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify upstream after No Shut the link from FHR to RP from FHR node") + + for data in input_dict_starg: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], 
IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_sg_i2: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify mcast traffic received after noshut FHR to RP from FHR node") + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}} + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_mroute_flags_p1(request): + """ + TC_47 Verify mroute flag in LHR and FHR node + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step( + "Remove cisco connected link to simulate topo " + "LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))" + ) + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (225.1.1.1-5)" + ) + + intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"] + input_dict = { + "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (225.1.1.1-5) as R2") + + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Send traffic from FHR to all the groups ( 225.1.1.1 to 225.1.1.5) and send" + " multicast traffic" + ) + + input_src = { + "i6": topo["routers"]["i6"]["links"]["l1"]["interface"], + "i2": topo["routers"]["i2"]["links"]["f1"]["interface"], + } + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + 
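+    # The block below mirrors the earlier testcases: source_i2 holds the address
+    # of host i6 (the source behind l1/FHR) and source_i1 the address of host i2
+    # (the source directly attached to f1), and the usual mroute + upstream loop
+    # is then run over input_dict_all before the SC/ST flag checks.
+    #
+    # The nested helper below is only an illustrative sketch of how that repeated
+    # loop could be factored; it reuses the enclosing tgen/tc_name and the same
+    # verify_ip_mroutes()/verify_upstream_iif() calls seen throughout this file,
+    # and is not invoked by the test as written.
+    def _check_mroutes_and_upstream(mroute_list, group_range):
+        for entry in mroute_list:
+            res = verify_ip_mroutes(
+                tgen,
+                entry["dut"],
+                entry["src_address"],
+                group_range,
+                entry["iif"],
+                entry["oil"],
+            )
+            assert res is True, "Testcase {} : Failed Error: {}".format(tc_name, res)
+            res = verify_upstream_iif(
+                tgen, entry["dut"], entry["iif"], entry["src_address"], group_range
+            )
+            assert res is True, "Testcase {} : Failed Error: {}".format(tc_name, res)
+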
source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0] + source_i1 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0] + + input_dict_all = [ + { + "dut": "l1", + "src_address": source_i2, + "iif": topo["routers"]["l1"]["links"]["i6"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + }, + { + "dut": "f1", + "src_address": "*", + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i1, + "iif": topo["routers"]["f1"]["links"]["i2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + { + "dut": "f1", + "src_address": source_i2, + "iif": topo["routers"]["f1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["f1"]["links"]["i8"]["interface"], + }, + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + dut = "f1" + step("verify flag for (*,G) on f1") + src_address = "*" + flag = "SC" + result = verify_multicast_flag_state( + tgen, dut, src_address, IGMP_JOIN_RANGE_1, flag + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("verify flag for (S,G) on f1 for Remote spurce ") + src_address = source_i2 + flag = "ST" + result = verify_multicast_flag_state( + tgen, dut, src_address, IGMP_JOIN_RANGE_1, flag + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_multicast_traffic_when_LHR_connected_to_RP_p1(request): + """ + TC_11: Verify multicast traffic flowing fine, when LHR connected to RP + Topology used: + FHR(FRR3(l1))---LHR(FRR1(r2)----RP(FRR2(f1)) + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step( + "Remove FRR3 to cisco connected link to simulate topo " + "FHR(FRR3(l1))---LHR(FRR1(r2)----RP(FRR2(f1))" + ) + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False) + + step("Disable IGMP config from l1") + input_dict_2 = { + "l1": { + "igmp": { + "interfaces": { + "l1-i1-eth1": {"igmp": {"version": "2", "delete": True,}} + } + } + } + } + result = create_igmp_config(tgen, topo, input_dict_2) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers") + step( + "Enable IGMP on FRR1(r2) interface and send IGMP join (226.1.1.1-5)" + " and (232.1.1.1-5)" + ) + + intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"] + input_dict = { + "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_join = {"i3": topo["routers"]["i3"]["links"]["r2"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in (f1)") + + input_dict = { + "f1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["f1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": _GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3 to 225.1.1.1-225.1.1.10" " receiver") + + input_src = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "'show ip mroute' showing correct RPF and OIF interface for (*,G)" + " and (S,G) entries on all the nodes" + ) + + source_i1 = topo["routers"]["i1"]["links"]["l1"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": source_i1, + "iif": topo["routers"]["l1"]["links"]["i1"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["i3"]["interface"], + }, + { + "dut": "r2", + "src_address": source_i1, + "iif": topo["routers"]["r2"]["links"]["l1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["i3"]["interface"], + }, + ] + + for data in input_dict_all: + result = 
verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Multicast traffic is flowing for all the groups verify" + "using 'show ip multicast'" + ) + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"] + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"] + intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"] + intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"] + input_traffic = { + "l1": {"traffic_received": [intf_l1_i1]}, + "r2": {"traffic_received": [intf_r2_l1], "traffic_sent": [intf_r2_i3]}, + } + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut and No shut the receiver port") + + intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_i3, False) + + step( + "Verification: After Shut of receiver port, Verify (*,G) and " + "(S,G) got removed from LHR node (FRR1) using 'show ip mroute'" + ) + + input_dict_r2 = [ + { + "dut": "r2", + "src_address": "*", + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["i3"]["interface"], + }, + { + "dut": "r2", + "src_address": source_i1, + "iif": topo["routers"]["r2"]["links"]["l1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["i3"]["interface"], + }, + ] + + for data in input_dict_r2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n" + " Expected Behaviour: mroutes are cleared \n Error: {}".format( + tc_name, result + ) + ) + logger.info("Expected Behaviour: {}".format(result)) + + shutdown_bringup_interface(tgen, "r2", intf_r2_i3, True) + + step( + "Verification: After No shut of receiver port , Verify (*,G)" + " and (S,G) got populated on LHR node (FRR1) using " + "'show ip mroute' 'show ip pim upstream'" + ) + + for data in input_dict_r2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_r2: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Multicast traffic is resumed for all the groups verify " + "using 'show ip multicast'" + ) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut and No shut the source port") + + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False) + + step( + "Verification: After Shut of source port, Verify (*,G) and " + "(S,G) got removed from LHR node (FRR1) using 'show ip mroute'" + ) + + input_dict_l1 = [ + { + "dut": "l1", + "src_address": source_i1, + "iif": 
topo["routers"]["l1"]["links"]["i1"]["interface"], + "oil": topo["routers"]["l1"]["links"]["r2"]["interface"], + } + ] + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n" + "mroutes are cleared \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True) + + step( + "Verification: After No shut of source port , Verify (*,G)" + " and (S,G) got populated on LHR node (FRR1) using " + "'show ip mroute' 'show ip pim upstream'" + ) + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_l1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Multicast traffic is resumed for all the groups verify " + "using 'show ip multicast'" + ) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut and No shut of LHR to cisco port from LHR side") + + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_f1, False) + + step( + "Verification: After Shut of source port, Verify (S,G) got " + "removed from LHR and FHR using 'show ip mroute'" + ) + + input_dict_r2_f1 = [ + { + "dut": "r2", + "src_address": "*", + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["i3"]["interface"], + }, + { + "dut": "f1", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in input_dict_r2_f1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n" + " mroutes are cleared \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + shutdown_bringup_interface(tgen, "r2", intf_r2_f1, True) + + step( + "Verification: After No shut of source port , Verify (*,G)" + " and (S,G) got populated on LHR node (FRR1) using " + "'show ip mroute' 'show ip pim upstream'" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Multicast traffic is resumed for all the groups verify " + "using 'show ip multicast'" + ) + + input_traffic_r2 = { + "r2": {"traffic_received": [intf_r2_l1], "traffic_sent": [intf_r2_i3]} + } + result = verify_multicast_traffic(tgen, input_traffic_r2) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut and no shut of FHR to LHR port from FHR side") + + intf_l1_r2 = 
topo["routers"]["l1"]["links"]["r2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_r2, False) + + step( + "Verification: After Shut of LHR to FHR port, Verify (S,G)" + "got removed from LHR 'show ip mroute'" + ) + + dut = "r2" + src_address = "*" + iif = topo["routers"]["r2"]["links"]["f1"]["interface"] + oil = topo["routers"]["r2"]["links"]["i3"]["interface"] + + result = verify_ip_mroutes(tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + src_address = source_i1 + iif = topo["routers"]["r2"]["links"]["l1"]["interface"] + oil = topo["routers"]["r2"]["links"]["i3"]["interface"] + + result = verify_ip_mroutes( + tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil, expected=False + ) + assert result is not True, ( + "Testcase {} : Failed \n" + " mroutes are cleared \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + shutdown_bringup_interface(tgen, "l1", intf_l1_r2, True) + + step( + "Verification: After No shut of source port , Verify (*,G)" + " and (S,G) got populated on LHR node (FRR1) using " + "'show ip mroute' 'show ip pim upstream'" + ) + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "Multicast traffic is resumed for all the groups verify " + "using 'show ip multicast'" + ) + + result = verify_multicast_traffic(tgen, input_traffic_r2) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_verify_multicast_traffic_when_FHR_connected_to_RP_p1(request): + """ + TC_12: Verify multicast traffic is flowing fine when FHR is connected to RP + Topology used: + LHR(FRR1)---FHR(FRR3)----RP(FRR2) + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + check_router_status(tgen) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step( + "Remove FRR3 to FRR2 connected link to simulate topo " + "FHR(FRR3)---LHR(FRR1)----RP(FFR2)" + ) + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["interface"] + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + shutdown_bringup_interface(tgen, "f1", intf_f1_c2, False) + + step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers") + step("Enable IGMP on FRR1(l1) interface and send IGMP join " " and (225.1.1.1-5)") + + _GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3 + _IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3 + + input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure RP for (225.1.1.1-5) in (f1)") + + input_dict = { + "f1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["f1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": _GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send multicast traffic from FRR3(r2) to 225.1.1.1-225.1.1.10" " receiver") + + input_src = {"i3": topo["routers"]["i3"]["links"]["r2"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step( + "'show ip mroute' showing correct RPF and OIF interface for (*,G)" + " and (S,G) entries on all the nodes" + ) + + source_i3 = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0] + input_dict_all = [ + { + "dut": "l1", + "src_address": "*", + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "l1", + "src_address": source_i3, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + }, + { + "dut": "r2", + "src_address": "*", + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "r2", + "src_address": source_i3, + "iif": topo["routers"]["r2"]["links"]["i3"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + ] + + for data in input_dict_all: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_all: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["interface"] + intf_f1_r2 
= topo["routers"]["f1"]["links"]["r2"]["interface"] + intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"] + input_traffic = { + "l1": {"traffic_received": [intf_l1_r2], "traffic_sent": [intf_l1_i1]} + } + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the receiver(l1) port in 1 min interval") + + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, False) + + step( + "Verification: After Shut of receiver port, Verify (*,G) and " + "(S,G) got removed from LHR node (FRR1) using 'show ip mroute'" + ) + + input_dict_l1 = [ + { + "dut": "l1", + "src_address": source_i3, + "iif": topo["routers"]["l1"]["links"]["r2"]["interface"], + "oil": topo["routers"]["l1"]["links"]["i1"]["interface"], + } + ] + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n" + " mroutes are cleared \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("No shut the receiver(l1) port in 1 min interval") + + shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True) + + step( + "Verification: After No shut of receiver port , Verify (*,G)" + " and (S,G) got populated on LHR node (FRR1) using " + "'show ip mroute' 'show ip pim upstream'" + ) + + for data in input_dict_l1: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_l1: + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut the source(r2) port in 1 min interval") + + intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_i3, False) + + step( + "Verification: After Shut of source port, Verify (S,G) got " + "removed from FHR using 'show ip mroute'" + ) + + input_dict_r2 = [ + { + "dut": "r2", + "src_address": source_i3, + "iif": topo["routers"]["r2"]["links"]["i3"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + } + ] + + for data in input_dict_r2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n" + " mroutes are cleared \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + step("No shut the source(r2) port in 1 min interval") + + shutdown_bringup_interface(tgen, "r2", intf_r2_i3, True) + + step( + "Verification: After No shut of source port , Verify (*,G)" + " and (S,G) got populated on LHR and FHR using " + "'show ip mroute' 'show ip pim upstream'" + ) + + for data in input_dict_r2: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_r2: + result = verify_upstream_iif( + tgen, data["dut"], 
data["iif"], data["src_address"], _IGMP_JOIN_RANGE + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_multicast_traffic(tgen, input_traffic) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Shut FHR to RP port from FHR side") + + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["interface"] + shutdown_bringup_interface(tgen, "r2", intf_r2_f1, False) + + step( + "Verification: After Shut of FHR to cisco port, Verify (*,G) " + "got removed from FHR and cisco node using 'show ip mroute'" + ) + + input_dict_all_star = [ + { + "dut": "r2", + "src_address": "*", + "iif": topo["routers"]["r2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["r2"]["links"]["l1"]["interface"], + }, + { + "dut": "f1", + "src_address": "*", + "iif": "lo", + "oil": topo["routers"]["f1"]["links"]["r2"]["interface"], + }, + ] + + for data in input_dict_all_star: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + _IGMP_JOIN_RANGE, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, ( + "Testcase {} : Failed \n" + " mroutes are cleared \n Error: {}".format(tc_name, result) + ) + logger.info("Expected Behaviour: {}".format(result)) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py new file mode 100755 index 0000000000..e8579e2a1e --- /dev/null +++ b/tests/topotests/multicast-pim-sm-topo3/test_multicast_pim_sm_topo4.py @@ -0,0 +1,1122 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Following tests are covered to test multicast pim sm: + +Test steps +- Create topology (setup module) +- Bring up topology + +Following tests are covered: + +1. TC:48 Verify mroute after configuring black-hole route for RP and source +2. TC:49 Verify mroute when RP is reachable using default route +3. TC:50 Verify mroute when LHR,FHR,RP and transit routers reachable + using default routes +4. TC:52 Verify PIM nbr after changing interface ip +5. TC:53 Verify IGMP interface updated with correct detail after changing interface config +6. TC:54 Verify received and transmit hello stats are getting cleared after PIM nbr reset + + +""" + +import os +import re +import sys +import json +import time +import datetime +from time import sleep +import pytest + +pytestmark = pytest.mark.pimd + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + step, + iperfSendIGMPJoin, + addKernelRoute, + reset_config_on_routers, + iperfSendTraffic, + kill_iperf, + shutdown_bringup_interface, + start_router, + stop_router, + apply_raw_config, + create_static_routes, + required_linux_kernel_version, + topo_daemons, +) +from lib.pim import ( + create_pim_config, + create_igmp_config, + verify_igmp_groups, + verify_ip_mroutes, + clear_ip_pim_interface_traffic, + verify_igmp_config, + verify_pim_neighbors, + verify_pim_config, + verify_pim_interface, + verify_upstream_iif, + clear_ip_mroute, + verify_multicast_traffic, + verify_pim_rp_info, + verify_pim_interface_traffic, + verify_igmp_interface, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/multicast_pim_sm_topo4.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +TOPOLOGY = """ + + + i4-----c1-------------c2---i5 + | | + | | + i1-----l1------r2-----f1---i2 + | | | | + | | | | + i7 i6 i3 i8 + + Description: + i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP + join and traffic + l1 - LHR + f1 - FHR + r2 - FRR router + c1 - FRR router + c2 - FRR router +""" + +# Global variables + +GROUP_RANGE = "224.0.0.0/4" +IGMP_GROUP = "225.1.1.1/32" +IGMP_JOIN = "225.1.1.1" +GROUP_RANGE_1 = [ + "225.1.1.1/32", + "225.1.1.2/32", + "225.1.1.3/32", + "225.1.1.4/32", + "225.1.1.5/32", +] +IGMP_JOIN_RANGE_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"] +NEW_ADDRESS_1 = "192.168.20.1" +NEW_ADDRESS_2 = "192.168.20.2" +NEW_ADDRESS_1_SUBNET = "192.168.20.1/24" +NEW_ADDRESS_2_SUBNET = "192.168.20.2/24" + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("4.19") + if result is not True: + pytest.skip("Kernel requirements are not met") + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + logger.info("Master Topology: \n {}".format(TOPOLOGY)) + + logger.info("Running setup_module to create topology") + + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. 
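# build_topo_from_json()/build_config_from_json() consume the JSON loaded at
# the top of this file.  multicast_pim_sm_topo4.json itself is not shown in
# this hunk, but its shape follows the same convention as the
# multicast_pim_static_rp.json file added later in this diff -- roughly
# (sketch only, entries illustrative):
#
#   {
#     "ipv4base": "10.0.0.0",
#     "ipv4mask": 24,
#     "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24},
#     "lo_prefix": {"ipv4": "1.0.", "v4mask": 32},
#     "routers": {
#       "l1": {"links": {"r2": {"ipv4": "auto", "pim": "enable"},
#                        "i1": {"ipv4": "auto"}}},
#       ...
#     }
#   }
#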
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Testcases +# +##################################################### + + +def config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False +): + """ + API to do pre-configuration to send IGMP join and multicast + traffic + + parameters: + ----------- + * `tgen`: topogen object + * `topo`: input json data + * `tc_name`: caller test case name + * `iperf`: router running iperf + * `iperf_intf`: interface name router running iperf + * `GROUP_RANGE`: group range + * `join`: IGMP join, default False + * `traffic`: multicast traffic, default False + """ + + if join: + # Add route to kernal + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + if traffic: + # Add route to kernal + result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + router_list = tgen.routers() + for router in router_list.keys(): + if router == iperf: + continue + + rnode = router_list[router] + rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter") + + for router in topo["routers"].keys(): + if "static_routes" in topo["routers"][router]: + static_routes = topo["routers"][router]["static_routes"] + for static_route in static_routes: + network = static_route["network"] + next_hop = static_route["next_hop"] + if type(network) is not list: + network = [network] + + return True + + +def verify_state_incremented(state_before, state_after): + """ + API to compare interface traffic state incrementing + + Parameters + ---------- + * `state_before` : State dictionary for any particular instance + * `state_after` : State dictionary for any particular instance + """ + + for router, state_data in state_before.items(): + for state, value in state_data.items(): + if state_before[router][state] >= state_after[router][state]: + errormsg = ( + "[DUT: %s]: state %s value has not" + " incremented, Initial value: %s, " + "Current value: %s [FAILED!!]" + % ( + router, + state, + state_before[router][state], + state_after[router][state], + ) + ) + return errormsg + + logger.info( + "[DUT: %s]: State %s value is " + "incremented, Initial value: %s, Current value: %s" + " [PASSED!!]", + router, + state, + state_before[router][state], + state_after[router][state], + ) + + return True + + +def test_mroute_when_RP_reachable_default_route_p2(request): + """ + TC_49 Verify mroute when and source RP is reachable using default route + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
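# verify_state_incremented() defined above is used further down in this file
# (TC_54) to prove that hello counters were reset across a neighbor flap; a
# minimal usage sketch with the same per-interface counter selection used
# there (the interface name is hypothetical):
#
#   state = {"c1": {"c1-l1-eth0": ["helloTx", "helloRx"]}}
#   before = verify_pim_interface_traffic(tgen, state)
#   # ... event under test ...
#   after = verify_pim_interface_traffic(tgen, state)
#   result = verify_state_incremented(before, after)
#   # True when every sampled counter grew, otherwise an error string
#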
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step( + "Remove c1-c2 connected link to simulate topo " + "c1(FHR)---l1(RP)----r2---f1-----c2(LHR)" + ) + + intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"] + intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"] + shutdown_bringup_interface(tgen, "c1", intf_c1_c2, False) + shutdown_bringup_interface(tgen, "c2", intf_c2_c1, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (225.1.1.1-5)" + ) + + intf_c2_i5 = topo["routers"]["c2"]["links"]["i5"]["interface"] + input_dict = { + "c2": {"igmp": {"interfaces": {intf_c2_i5: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (225.1.1.1-5) as R2") + + input_dict = { + "l1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["l1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send traffic from C1 to all the groups ( 225.1.1.1 to 225.1.1.5)") + + input_src = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i4 = topo["routers"]["i4"]["links"]["c1"]["ipv4"].split("/")[0] + + input_dict_starg = [ + { + "dut": "c2", + "src_address": "*", + "iif": topo["routers"]["c2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["c2"]["links"]["i5"]["interface"], + } + ] + + input_dict_sg = [ + { + "dut": "c2", + "src_address": source_i4, + "iif": topo["routers"]["c2"]["links"]["f1"]["interface"], + "oil": topo["routers"]["c2"]["links"]["i5"]["interface"], + } + ] + + step("Verify mroutes and iff upstream") + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, 
data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Delete static routes on c2") + input_dict = { + "c2": { + "static_routes": [ + { + "network": ["1.0.4.11/32", "10.0.2.1/24", "10.0.1.2/24"], + "next_hop": "10.0.3.2", + "delete": True, + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP info unknown after removing static route from c2 ") + dut = "c2" + rp_address = topo["routers"]["l1"]["links"]["lo"]["ipv4"].split("/")[0] + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify mroute not present after Delete of static routes on c1") + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + step("Configure default routes on c2") + + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["ipv4"].split("/")[0] + + input_dict = { + "c2": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_f1_c2}]} + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("applying ip nht config on c2") + + raw_config = {"c2": {"raw_config": ["ip nht resolve-via-default"]}} + + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify RP info is NOT unknown after removing static route from c2 ") + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify (s,g) populated after adding default route ") + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify (*,g) populated after adding default route ") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + 
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_mroute_with_RP_default_route_all_nodes_p2(request): + """ + TC_50 Verify mroute when LHR,FHR,RP and transit routers reachable + using default routes + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step( + "Remove c1-c2 connected link to simulate topo " + "c1(LHR)---l1(RP)----r2---f1-----c2(FHR)" + ) + + intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"] + intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"] + shutdown_bringup_interface(tgen, "c1", intf_c1_c2, False) + shutdown_bringup_interface(tgen, "c2", intf_c2_c1, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (225.1.1.1-5)" + ) + + intf_c1_i4 = topo["routers"]["c1"]["links"]["i4"]["interface"] + input_dict = { + "c1": {"igmp": {"interfaces": {intf_c1_i4: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (225.1.1.1-5) as R2") + + input_dict = { + "l1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["l1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send traffic from C2 to all the groups ( 225.1.1.1 to 225.1.1.5)") + + input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0] + + input_dict_starg = [ + { + "dut": "c1", + "src_address": "*", + "iif": topo["routers"]["c1"]["links"]["l1"]["interface"], + "oil": topo["routers"]["c1"]["links"]["i4"]["interface"], + } + ] + + input_dict_sg = [ + { + "dut": "c1", + "src_address": source_i5, + "iif": topo["routers"]["c1"]["links"]["l1"]["interface"], + "oil": topo["routers"]["c1"]["links"]["i4"]["interface"], + } + ] + + 
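# Later in this test the specific static routes toward the RP are withdrawn on
# all nodes and replaced by a default route plus next-hop tracking via
# default.  The per-router FRR configuration being exercised amounts to
# (next-hop address illustrative):
#
#   ip route 0.0.0.0/0 10.0.3.2
#   ip nht resolve-via-default
#
# Without "ip nht resolve-via-default", zebra's next-hop tracking does not
# normally resolve pimd's RP/source lookups through a default route, so the
# test applies the knob together with the default routes before expecting RP
# info and the mroutes to come back.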
step("Verify mroutes and iff upstream") + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Delete static routes RP on all the nodes") + input_dict = { + "c2": { + "static_routes": [ + {"network": ["1.0.4.11/32"], "next_hop": "10.0.3.2", "delete": True} + ] + }, + "c1": { + "static_routes": [ + {"network": ["1.0.4.11/32"], "next_hop": "10.0.2.2", "delete": True} + ] + }, + "r2": { + "static_routes": [ + {"network": ["1.0.4.11/32"], "next_hop": "10.0.12.1", "delete": True} + ] + }, + "f1": { + "static_routes": [ + {"network": ["1.0.4.11/32"], "next_hop": "10.0.7.2", "delete": True} + ] + }, + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("Verify RP info unknown after removing static route from c2 ") + dut = "c2" + rp_address = topo["routers"]["l1"]["links"]["lo"]["ipv4"].split("/")[0] + SOURCE = "Static" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + result = verify_upstream_iif( + tgen, + data["dut"], + data["iif"], + data["src_address"], + IGMP_JOIN_RANGE_1, + expected=False, + ) + assert result is not True, "Testcase {} : Failed Error: {}".format( + tc_name, result + ) + + step("Configure default routes on all the nodes") + + intf_f1_c2 = topo["routers"]["f1"]["links"]["c2"]["ipv4"].split("/")[0] + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["ipv4"].split("/")[0] + intf_l1_r2 = topo["routers"]["l1"]["links"]["r2"]["ipv4"].split("/")[0] + intf_r2_f1 = topo["routers"]["r2"]["links"]["f1"]["ipv4"].split("/")[0] + + input_dict = { + "c1": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_l1_c1}]}, + "c2": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_f1_c2}]}, + "r2": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_l1_r2}]}, + "f1": {"static_routes": [{"network": "0.0.0.0/0", "next_hop": intf_r2_f1}]}, + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result) + + step("applying ip nht config on c2") + + raw_config = { + "c1": {"raw_config": ["ip nht resolve-via-default"]}, + "c2": {"raw_config": ["ip nht resolve-via-default"]}, + "r2": {"raw_config": ["ip nht resolve-via-default"]}, + "f1": {"raw_config": ["ip nht resolve-via-default"]}, + "l1": 
{"raw_config": ["ip nht resolve-via-default"]}, + } + + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify RP info Not unknown after removing static route from c2 ") + dut = "c2" + step("Verify RP info is NOT unknown after removing static route from c2 ") + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_1, "Unknown", rp_address, SOURCE, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify (s,g) populated after adding default route ") + + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Verify (*,g) populated after adding default route ") + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + result = verify_upstream_iif( + tgen, data["dut"], data["iif"], data["src_address"], IGMP_JOIN_RANGE_1 + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_PIM_hello_tx_rx_p1(request): + """ + TC_54 Verify received and transmit hello stats + are getting cleared after PIM nbr reset + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Creating configuration from JSON + kill_iperf(tgen) + clear_ip_mroute(tgen) + reset_config_on_routers(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + step( + "Remove c1-c2 connected link to simulate topo " + "c1(LHR)---l1(RP)----r2---f1-----c2(FHR)" + ) + + intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"] + intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"] + shutdown_bringup_interface(tgen, "c1", intf_c1_c2, False) + shutdown_bringup_interface(tgen, "c2", intf_c2_c1, False) + + step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3") + step( + "Enable IGMP of FRR1 interface and send IGMP joins " + " from FRR1 node for group range (225.1.1.1-5)" + ) + + intf_c1_i4 = topo["routers"]["c1"]["links"]["i4"]["interface"] + input_dict = { + "c1": {"igmp": {"interfaces": {intf_c1_i4: {"igmp": {"version": "2"}}}}} + } + result = create_igmp_config(tgen, topo, input_dict) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]} + + for recvr, recvr_intf in input_join.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + step("Configure static RP for (225.1.1.1-5) as R2") + + input_dict = { + "l1": { + "pim": { + "rp": [ + { + "rp_addr": topo["routers"]["l1"]["links"]["lo"]["ipv4"].split( + "/" + )[0], + "group_addr_range": GROUP_RANGE, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("Send Mcast traffic from C2 to all the groups ( 225.1.1.1 to 225.1.1.5)") + + input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]} + + for src, src_intf in input_src.items(): + result = config_to_send_igmp_join_and_traffic( + tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True + ) + assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) + + result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0] + + input_dict_starg = [ + { + "dut": "c1", + "src_address": "*", + "iif": topo["routers"]["c1"]["links"]["l1"]["interface"], + "oil": topo["routers"]["c1"]["links"]["i4"]["interface"], + } + ] + + input_dict_sg = [ + { + "dut": "c1", + "src_address": source_i5, + "iif": topo["routers"]["c1"]["links"]["l1"]["interface"], + "oil": topo["routers"]["c1"]["links"]["i4"]["interface"], + } + ] + + step("(*,G) and (S,G) created on f1 and node verify using 'show ip mroute'") + for data in input_dict_sg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + for data in input_dict_starg: + result = verify_ip_mroutes( + tgen, + data["dut"], + data["src_address"], + IGMP_JOIN_RANGE_1, + data["iif"], + data["oil"], + ) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"] + intf_c1_l1 = topo["routers"]["c1"]["links"]["l1"]["interface"] + + step("verify before stats on C1") + state_dict = {"c1": 
{intf_c1_l1: ["helloTx", "helloRx"],}} + + c1_state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + c1_state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("Flap PIM nbr while doing interface c1-l1 interface shut from f1 side") + shutdown_bringup_interface(tgen, "c1", intf_c1_l1, False) + + step( + "After shut of local interface from c1 , verify rx/tx hello counters are cleared on c1 side" + "verify using 'show ip pim interface traffic'" + ) + shutdown_bringup_interface(tgen, "c1", intf_c1_l1, True) + + step("verify stats after on c1") + c1_state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + c1_state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("verify stats not increamented on c1") + result = verify_state_incremented(c1_state_before, c1_state_after) + assert ( + result is not True + ), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result) + + step("verify before stats on l1") + l1_state_dict = {"l1": {intf_l1_c1: ["helloTx", "helloRx"],}} + + l1_state_before = verify_pim_interface_traffic(tgen, l1_state_dict) + assert isinstance( + l1_state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("Flap PIM nbr while doing interface r2-c1 shut from r2 side") + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False) + + step( + "After shut the interface from r2 side , verify r2 side rx and tx of hello" + "counters are resetted show ip pim interface traffic" + ) + shutdown_bringup_interface(tgen, "l1", intf_l1_c1, True) + + step("verify stats after on l1") + l1_state_after = verify_pim_interface_traffic(tgen, l1_state_dict) + assert isinstance( + l1_state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("verify stats not increamented on l1") + result = verify_state_incremented(l1_state_before, l1_state_after) + assert ( + result is not True + ), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result) + + step("Reinit the dict") + c1_state_before = {} + l1_state_before = {} + c1_state_after = {} + l1_state_after = {} + + step("verify before stats on C1") + state_dict = {"c1": {intf_c1_l1: ["helloTx", "helloRx"],}} + + c1_state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + c1_state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("Flap c1-r2 pim nbr while changing ip address from c1 side") + c1_l1_ip_subnet = topo["routers"]["c1"]["links"]["l1"]["ipv4"] + + raw_config = { + "c1": { + "raw_config": [ + "interface {}".format(intf_c1_l1), + "no ip address {}".format(c1_l1_ip_subnet), + "ip address {}".format(NEW_ADDRESS_2_SUBNET), + ] + } + } + + result = apply_raw_config(tgen, raw_config) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + step("verify stats after on c1") + c1_state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + c1_state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("verify stats not increamented on c1") + result = verify_state_incremented(c1_state_before, c1_state_after) + assert ( + result is not True + ), "Testcase{} : Failed 
Error: {}" "stats incremented".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/multicast-pim-static-rp-topo1/__init__.py b/tests/topotests/multicast-pim-static-rp-topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/multicast-pim-static-rp-topo1/__init__.py diff --git a/tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json b/tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json new file mode 100644 index 0000000000..6d6c047b00 --- /dev/null +++ b/tests/topotests/multicast-pim-static-rp-topo1/multicast_pim_static_rp.json @@ -0,0 +1,93 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24}, + "lo_prefix": {"ipv4": "1.0.", "v4mask": 32}, + "routers": { + "r0": {"links": {"r1": {"ipv4": "auto"}}}, + "r1": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r0": {"ipv4": "auto", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "r3": {"ipv4": "auto", "pim": "enable"}, + "r4": {"ipv4": "auto", "pim": "enable"} + }, + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}] + }, + "igmp": {"interfaces": {"r1-r0-eth0": {"igmp": {"version": "2"}}}}, + "static_routes": [ + {"network": "10.0.4.0/24", "next_hop": "10.0.2.2"}, + {"network": "10.0.5.0/24", "next_hop": "10.0.2.2"}, + {"network": "10.0.6.0/24", "next_hop": "10.0.2.2", "admin_distance": 1}, + {"network": "10.0.6.0/24", "next_hop": "10.0.1.2", "admin_distance": 2}, + {"network": "1.0.2.17/32", "next_hop": "10.0.1.2", "admin_distance": 1}, + {"network": "1.0.2.17/32", "next_hop": "10.0.2.2", "admin_distance": 2}, + {"network": "1.0.3.17/32", "next_hop": "10.0.2.2"}, + {"network": "1.0.4.17/32", "next_hop": "10.0.3.2"} + ] + }, + "r2": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r1": {"ipv4": "auto", "pim": "enable"}, + "r3": {"ipv4": "auto", "pim": "enable"} + }, + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}] + }, + "static_routes": [ + {"network": "10.0.0.0/24", "next_hop": "10.0.1.1"}, + {"network": "10.0.2.0/24", "next_hop": "10.0.1.1"}, + {"network": "10.0.3.0/24", "next_hop": "10.0.1.1"}, + {"network": "10.0.5.0/24", "next_hop": "10.0.4.2"}, + {"network": "10.0.6.0/24", "next_hop": "10.0.4.2"}, + {"network": "1.0.1.17/32", "next_hop": "10.0.1.1"}, + {"network": "1.0.3.17/32", "next_hop": "10.0.4.2"}, + {"network": "1.0.4.17/32", "next_hop": "10.0.1.1"} + ] + }, + "r3": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r1": {"ipv4": "auto", "pim": "enable"}, + "r2": {"ipv4": "auto", "pim": "enable"}, + "r4": {"ipv4": "auto", "pim": "enable"}, + "r5": {"ipv4": "auto", "pim": "enable"} + }, + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}] + }, + "static_routes": [ + {"network": "10.0.0.0/24", "next_hop": "10.0.2.1"}, + {"network": "10.0.1.0/24", "next_hop": "10.0.2.1"}, + {"network": "10.0.3.0/24", "next_hop": "10.0.2.1"}, + {"network": "1.0.1.17/32", "next_hop": "10.0.2.1"}, + {"network": "1.0.2.17/32", "next_hop": "10.0.4.1"}, + {"network": "1.0.4.17/32", "next_hop": "10.0.5.2"} + ] + }, + "r4": { + "links": { + "lo": {"ipv4": "auto", "type": "loopback", "pim": "enable"}, + "r1": {"ipv4": "auto", "pim": "enable"}, + "r3": {"ipv4": "auto", "pim": "enable"} + }, + "pim": { + 
"rp": [{"rp_addr": "1.0.2.17", "group_addr_range": ["224.0.0.0/4"]}] + }, + "static_routes": [ + {"network": "10.0.0.0/24", "next_hop": "10.0.3.1"}, + {"network": "10.0.1.0/24", "next_hop": "10.0.3.1"}, + {"network": "10.0.2.0/24", "next_hop": "10.0.3.1"}, + {"network": "10.0.4.0/24", "next_hop": "10.0.5.1"}, + {"network": "10.0.6.0/24", "next_hop": "10.0.5.1"}, + {"network": "1.0.1.17/32", "next_hop": "10.0.3.1"}, + {"network": "1.0.2.17/32", "next_hop": "10.0.3.1"}, + {"network": "1.0.3.17/32", "next_hop": "10.0.5.1"} + ] + }, + "r5": {"links": {"r3": {"ipv4": "auto"}}} + } +} diff --git a/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py b/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py new file mode 100755 index 0000000000..8dfdd50527 --- /dev/null +++ b/tests/topotests/multicast-pim-static-rp-topo1/test_multicast_pim_static_rp.py @@ -0,0 +1,3810 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2019 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# + +""" +Following tests are covered to test Multicast basic functionality: + +Topology: + + _______r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + | | + |_____________| + r4 + +Test steps +- Create topology (setup module) +- Bring up topology + +TC_1 : Verify upstream interfaces(IIF) and join state are updated properly + after adding and deleting the static RP +TC_2 : Verify IIF and OIL in "show ip pim state" updated properly after + adding and deleting the static RP +TC_3: (*, G) Mroute entry are cleared when static RP gets deleted +TC_4: Verify (*,G) prune is send towards the RP after deleting the static RP +TC_5: Verify OIF entry for RP is cleared when RP becomes unreachable +TC_6: Verify IIF and OIL in "show ip pim state" updated properly when RP + becomes unreachable +TC_7 : Verify upstream interfaces(IIF) and join state are updated properly + after adding and deleting the static RP +TC_8: Verify (*,G) prune is send towards the RP when RP becomes unreachable +TC_9 : Verify RP configured after IGMP join received, PIM join towards RP is + sent immediately +TC_10 : Verify RP becomes reachable after IGMP join received, PIM join + towards RP is sent immediately +TC_11 : Verify PIM join send towards the higher preferred RP +TC_12 : Verify PIM prune send towards the lower preferred RP +TC_13 : Verify RPF interface is updated in mroute (kernel) when higher + preferred overlapping RP configured +TC_14 : Verify IIF and OIL in "show ip pim state" updated properly when higher + preferred overlapping RP configured +TC_15 : Verify upstream interfaces(IIF) and join state are updated when higher + preferred overlapping RP is configured +TC_16 : Verify join is send to lower preferred RP, when higher preferred RP + gets deleted +TC_17 : Verify prune is send to higher preferred RP when higher preferred RP + gets deleted +TC_18 : Verify RPF interface updated in mroute when higher preferred RP gets + deleted +TC_19 : Verify IIF and OIL in "show ip pim state" updated when higher + preferred overlapping RP is deleted +TC_20 : Verfiy PIM upstream IIF updated when higher preferred overlapping RP + deleted +TC_21_1 : Verify OIF and RFP for (*,G) and (S,G) when static RP configure in + LHR router +TC_21_2 : Verify OIF and RFP for (*,G) and (S,G) when static RP configure in + LHR router +TC_22_1 : Verify OIF and RPF for (*,G) and (S,G) when static RP configure in + FHR router +TC_22_2 : Verify OIF and RPF for (*,G) and (S,G) when static RP configure in + FHR router +TC_23 : Verify (*,G) and (S,G) populated correctly when RPT and SPT path are + different +TC_24 : Verify (*,G) and (S,G) populated correctly when SPT and RPT share the + same path +TC_25 : Verify (*,G) and (S,G) populated correctly after clearing the PIM , + IGMP and mroutes joins +TC_26 : Restart the PIMd process and verify PIM joins , and mroutes entries +TC_27 : Configure multiple groups (10 grps) with same RP address +TC_28 : Configure multiple groups (10 grps) with different RP address +TC_29 : Verify IIF and OIL in updated in mroute when upstream interface + configure as RP +TC_30 : Verify IIF and OIL change to other path after shut the primary path +TC_31 : Verify RP info and (*,G) mroute after deleting the RP and shut / no + shut the RPF interface. 
+TC_32 : Verify RP info and (*,G) mroute after deleting the RP and shut / no + shut the RPF inteface +""" + +import os +import sys +import json +import time +import pytest +from time import sleep +import datetime + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# Required to instantiate the topology builder class. + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from mininet.topo import Topo + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + step, + iperfSendIGMPJoin, + iperfSendTraffic, + addKernelRoute, + shutdown_bringup_interface, + kill_router_daemons, + start_router_daemons, + create_static_routes, + kill_iperf, + topo_daemons, +) +from lib.pim import ( + create_pim_config, + verify_igmp_groups, + verify_upstream_iif, + verify_join_state_and_timer, + verify_ip_mroutes, + verify_pim_neighbors, + verify_pim_interface_traffic, + verify_pim_rp_info, + verify_pim_state, + clear_ip_pim_interface_traffic, + clear_ip_igmp_interfaces, + clear_ip_pim_interfaces, + clear_ip_mroute, + clear_ip_mroute_verify, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology and configuration creation +jsonFile = "{}/multicast_pim_static_rp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + logger.info("Could not read file:", jsonFile) + +# Global variables +GROUP_RANGE_ALL = "224.0.0.0/4" +GROUP_RANGE = "225.1.1.1/32" +GROUP_RANGE_LIST_1 = [ + "225.1.1.1/32", + "225.1.1.2/32", + "225.1.1.3/32", + "225.1.1.4/32", + "225.1.1.5/32", +] +GROUP_RANGE_LIST_2 = [ + "225.1.1.6/32", + "225.1.1.7/32", + "225.1.1.8/32", + "225.1.1.9/32", + "225.1.1.10/32", +] +GROUP_ADDRESS = "225.1.1.1" +GROUP_ADDRESS_LIST_1 = ["225.1.1.1", "225.1.1.2", "225.1.1.3", "225.1.1.4", "225.1.1.5"] +GROUP_ADDRESS_LIST_2 = [ + "225.1.1.6", + "225.1.1.7", + "225.1.1.8", + "225.1.1.9", + "225.1.1.10", +] +STAR = "*" +SOURCE_ADDRESS = "10.0.6.2" +SOURCE = "Static" + + +class CreateTopo(Topo): + """ + Test BasicTopo - topology 1 + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + topology = """ + + _______r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + | | + |_____________| + r4 + + """ + logger.info("Master Topology: \n {}".format(topology)) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Verify PIM neighbors + result = verify_pim_neighbors(tgen, topo) + assert result is True, "setup_module :Failed \n Error:" " {}".format(result) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Testcases +# +##################################################### + +def config_to_send_igmp_join_and_traffic(tgen, tc_name): + """ + API to do pre-configuration to send IGMP join and multicast + traffic + + parameters: + ----------- + * `tgen`: topogen object + * `tc_name`: caller test case name + """ + + step("r0: Add route to kernal") + result = addKernelRoute(tgen, "r0", "r0-r1-eth0", GROUP_RANGE_ALL) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Add route to kernal") + result = addKernelRoute(tgen, "r5", "r5-r3-eth0", GROUP_RANGE_ALL) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + rnode = tgen.routers()["r1"] + rnode.run("ip route add 10.0.6.0/24 via 10.0.2.2") + rnode = tgen.routers()["r2"] + rnode.run("ip route add 10.0.6.0/24 via 10.0.4.2") + rnode = tgen.routers()["r4"] + rnode.run("ip route add 10.0.6.0/24 via 10.0.5.1") + + router_list = tgen.routers() + for router in router_list.keys(): + rnode = router_list[router] + rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter") + + return True + + +def verify_mroute_repopulated(uptime_before, uptime_after): + """ + API to compare uptime for mroutes + + Parameters + ---------- + * `uptime_before` : Uptime dictionary for any particular instance + * `uptime_after` : Uptime dictionary for any particular instance + """ + + for group in uptime_before.keys(): + for source in uptime_before[group].keys(): + if set(uptime_before[group]) != set(uptime_after[group]): + errormsg = ( + "mroute (%s, %s) has not come" + " up after mroute clear [FAILED!!]" % (source, group) + ) + return errormsg + + d1 = datetime.datetime.strptime(uptime_before[group][source], "%H:%M:%S") + d2 = datetime.datetime.strptime(uptime_after[group][source], "%H:%M:%S") + if d2 >= d1: + errormsg = "mroute (%s, %s) is not " "repopulated [FAILED!!]" % ( + source, + group, + ) + return errormsg + + logger.info("mroute (%s, %s) is " "repopulated [PASSED!!]", source, group) + + return True + + +def verify_state_incremented(state_before, state_after): + """ + API to compare interface traffic state incrementing + + Parameters + ---------- + * `state_before` : State dictionary for any particular instance + * `state_after` : State dictionary for any particular instance + """ + + for router, state_data in state_before.items(): + for state, value in state_data.items(): + if state_before[router][state] >= state_after[router][state]: + errormsg = ( + "[DUT: %s]: state %s value has not" + " incremented, Initial value: %s, " + "Current value: %s [FAILED!!]" + % ( + router, + state, + state_before[router][state], + state_after[router][state], + ) + ) + return errormsg + + logger.info( + "[DUT: %s]: State %s value is " + "incremented, Initial value: %s, Current value: 
%s" + " [PASSED!!]", + router, + state, + state_before[router][state], + state_after[router][state], + ) + + return True + + +def test_add_delete_static_RP_p0(request): + """ + TC_1_P0 : Verify upstream interfaces(IIF) and join state are updated + properly after adding and deleting the static RP + TC_2_P0 : Verify IIF and OIL in "show ip pim state" updated properly + after adding and deleting the static RP + TC_3_P0: (*, G) Mroute entry are cleared when static RP gets deleted + TC_4_P0: Verify (*,G) prune is send towards the RP after deleting the + static RP + + Topology used: + r0------r1-----r2 + iperf DUT RP + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1") + step("Configure r2 loopback interface as RP") + step("Enable PIM between r1 and r3") + + step("r1: Verify show ip igmp group without any IGMP join") + dut = "r1" + interface = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, interface, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify show ip pim interface traffic without any IGMP join") + state_dict = {"r1": {"r1-r2-eth1": ["pruneTx"]}} + + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("r0 : Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify RP info") + dut = "r1" + iif = "r1-r2-eth1" + rp_address = "1.0.2.17" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + step("r1: Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify ip pim join") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + step("r1: Delete RP configuration") + + # Delete RP configuration + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + 
+ step("r1: Verify RP info") + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify PIM state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify show ip pim interface traffic without any IGMP join") + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_SPT_RPT_path_same_p1(request): + """ + TC_24_P1 : Verify (*,G) and (S,G) populated correctly when SPT and RPT + share the same path + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1 r3-----r5 + + r1 : LHR + r2 : RP + r3 : FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + dut = "r1" + intf = "r1-r3-eth2" + shutdown_bringup_interface(tgen, dut, intf, False) + intf = "r1-r4-eth3" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "r3" + intf = "r3-r1-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + intf = "r3-r4-eth2" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1") + step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send multicast traffic from R3") + + step("r2: Verify RP info") + dut = "r2" + rp_address = "1.0.2.17" + iif = "lo" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + 
result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream IIF interface") + iif = "r2-r3-eth1" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r2-eth1" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_not_reachable_static_RP_p0(request): + """ + TC_5_P0: Verify OIF entry for RP is cleared when RP becomes unreachable + TC_6_P0: Verify IIF and OIL in "show ip pim state" updated properly when + RP becomes unreachable + TC_7_P0 : Verify upstream interfaces(IIF) and join state are updated + properly after adding and deleting the static RP + TC_8_P0: Verify (*,G) prune is send towards the RP when RP becomes + unreachable + + Topology used: + r0------r1-----r2 + iperf DUT RP + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + dut = "r1" + intf = "r1-r3-eth2" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "r1" + intf = "r1-r4-eth3" + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "r1: (*,G) prune is not sent towards the RP interface, verify using" + "show ip pim interface traffic" + ) + state_dict = {"r1": {"r1-r2-eth1": ["pruneTx"]}} + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1") + step("Configure r2 loopback interface as RP") + step("Enable PIM between r1 and r2") + + step("r0 : Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify rp info") + dut = "r1" + iif = "r1-r2-eth1" + rp_address = "1.0.2.17" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify PIM state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 :Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Make RP un-reachable") + dut = "r1" + input_dict = { + dut: { + "static_routes": [ + {"network": "1.0.2.17/32", "next_hop": "10.0.1.2", "delete": True} + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("r1: Check RP detail using show ip pim rp-info OIF should be unknown") + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "r1 : OIL should be same and IIF should be cleared on R1 verify" + "using show ip pim state" + ) + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: upstream IIF should be unknown , verify using show ip pim" 
"upstream") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step( + "r1: join state should not be joined and join timer should stop," + "verify using show ip pim upstream" + ) + result = verify_join_state_and_timer( + tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step( + "r1: (*,G) prune is sent towards the RP interface, verify using" + "show ip pim interface traffic" + ) + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + step("r1: (*, G) cleared from mroute table using show ip mroute") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Expected behavior: {}".format(result)) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_add_RP_after_join_received_p1(request): + """ + TC_9_P1 : Verify RP configured after IGMP join received, PIM join towards + RP is sent immediately + + Topology used: + r0------r1-----r2 + iperf DUT RP + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on R1 interface") + step("Configure r2 loopback interface as RP") + step("Enable PIM between r1 and r2") + step("Delete RP configuration from r1") + + step("r1: Delete RP configuration") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify rp-info") + dut = "r1" + rp_address = "1.0.2.17" + iif = "r1-r2-eth1" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("joinTx value before join sent") + state_dict = {"r1": {"r1-r2-eth1": ["joinTx"]}} + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("r0 : Send IGMP join (225.1.1.1) to r1, when rp is not configured" "in r1") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: IGMP group is received on R1 verify 
using show ip igmp groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify upstream join state and join timer") + + result = verify_join_state_and_timer( + tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify PIM state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Configure static RP") + input_dict = { + "r1": { + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify rp-info") + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify PIM state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + logger.info("Expected behavior: {}".format(result)) + + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_reachable_static_RP_after_join_p0(request): + """ + TC_10_P0 : Verify RP becomes reachable after IGMP join received, PIM join + towards RP is sent immediately + + Topology used: + r0------r1-----r3 + iperf DUT RP + """ + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1") + step("Configure r2 loopback interface as RP") + step("Enable PIM between r1 and r2") + + step("r1 : Verify pim interface traffic") + state_dict = {"r1": {"r1-r2-eth1": ["joinTx"]}} + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("r1: Make RP un-reachable") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + intf = "r1-r3-eth2" + shutdown_bringup_interface(tgen, dut, intf, False) + intf = "r1-r4-eth3" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: Verify rp-info") + rp_address = "1.0.2.17" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_ADDRESS, "Unknown", rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Send IGMP join for 225.1.1.1") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify upstream IIF interface") + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1 : Verify upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1 : Verify PIM state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1 : Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r1: Make RP reachable") + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, True) + intf = "r1-r3-eth2" + shutdown_bringup_interface(tgen, dut, intf, True) + intf = "r1-r4-eth3" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1 : Verify rp-info") + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + 
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify PIM state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + logger.info("Expected behavior: {}".format(result)) + + step("r1 : Verify pim interface traffic") + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_send_join_on_higher_preffered_rp_p1(request): + """ + TC_11_P1 : Verify PIM join send towards the higher preferred RP + TC_12_P1 : Verify PIM prune send towards the lower preferred RP + TC_13_P1 : Verify RPF interface is updated in mroute (kernel) when higher + preferred overlapping RP configured + TC_14_P1 : Verify IIF and OIL in "show ip pim state" updated properly when + higher preferred overlapping RP configured + TC_15_P1 : Verify upstream interfaces(IIF) and join state are updated when + higher preferred overlapping RP is configured + TC_16_P1 : Verify join is send to lower preferred RP, when higher + preferred RP gets deleted + TC_17_P1 : Verify prune is send to higher preferred RP when higher + preferred RP gets deleted + TC_18_P1 : Verify RPF interface updated in mroute when higher preferred RP + gets deleted + TC_19_P1 : Verify IIF and OIL in "show ip pim state" updated when higher + preferred overlapping RP is deleted + TC_20_P1 : Verfiy PIM upstream IIF updated when higher preferred + overlapping RP deleted + + Topology used: + _______r2 + | + iperf | + r0-----r1 + | + |_______r4 + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (loopback interface) for the group range " "224.0.0.0/4") + step("Configure RP on r4 (loopback interface) for the group range " "225.1.1.1/32") + + step("r3 : Make all interface not reachable") + dut = "r3" + intf = "r3-r1-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + intf = "r3-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + intf = "r3-r4-eth2" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "r2" + intf = "r2-r3-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "r4" + intf = "r4-r3-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + dut = "r1" + intf = "r1-r3-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1 : Verify joinTx count before sending join") + state_dict = {"r1": {"r1-r4-eth3": ["joinTx"], "r1-r2-eth1": ["pruneTx"]}} + + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("r0 : Send IGMP join for 225.1.1.1") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Configure static RP for group 225.1.1.1/32") + input_dict = { + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": ["225.1.1.1/32"],}] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify RP info for group 224.0.0.0/4") + rp_address_1 = "1.0.2.17" + iif = "r1-r2-eth1" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address_1, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify RP info for group 225.1.1.1") + rp_address_2 = "1.0.4.17" + iif = "r1-r4-eth3" + result = verify_pim_rp_info(tgen, topo, dut, GROUP_RANGE, iif, rp_address_2, SOURCE) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify join is sent to higher preferred RP") + step("r1 : Verify prune is sent to lower preferred RP") + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + step("r1 : Verify ip mroutes") + iif = "r1-r4-eth3" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify PIM 
state") + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + clear_ip_pim_interface_traffic(tgen, topo) + + step("r1 : Verify joinTx, pruneTx count before RP gets deleted") + state_dict = {"r1": {"r1-r2-eth1": ["joinTx"], "r1-r4-eth3": ["pruneTx"]}} + + state_before = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_before, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + step("r1 : Delete RP configuration for 225.1.1.1") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.4.17", + "group_addr_range": ["225.1.1.1/32"], + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify rp-info for group 224.0.0.0/4") + iif = "r1-r2-eth1" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address_1, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1 : Verify rp-info for group 225.1.1.1") + iif = "r1-r4-eth3" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE, oif, rp_address_2, SOURCE, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step( + "r1 : Verify RPF interface updated in mroute when higher preferred" + "RP gets deleted" + ) + iif = "r1-r2-eth1" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + logger.info("Expected behavior: {}".format(result)) + + step( + "r1 : Verify IIF and OIL in show ip pim state updated when higher" + "preferred overlapping RP is deleted" + ) + result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "r1 : Verfiy upstream IIF updated when higher preferred overlapping" + "RP deleted" + ) + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "r1 : Verify upstream join state and join timer updated when higher" + "preferred overlapping RP deleted" + ) + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "r1 : Verify join is sent to lower preferred RP, when higher" + "preferred RP gets deleted" + ) + step( + "r1 : Verify prune is sent to higher preferred RP when higher" + " preferred RP gets deleted" + ) + state_after = verify_pim_interface_traffic(tgen, state_dict) + assert isinstance( + state_after, dict + ), "Testcase{} : Failed \n state_before is not dictionary \n " + "Error: {}".format(tc_name, result) + + result = verify_state_incremented(state_before, state_after) + assert result is True, "Testcase{} : Failed Error: 
{}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_RP_configured_as_LHR_1_p1(request): + """ + TC_21_1_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP configure + in LHR router + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + + r1 : LHR/RP + r3 : FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send the IGMP join from r0") + step("Send multicast traffic from r5") + + step("r1 , r2, r3, r4: Delete existing RP configuration" "configure r1(LHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r3": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r4": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Configure r1(LHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r2": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r3": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + shutdown_bringup_interface(tgen, "r1", "lo", False) + sleep(5) + shutdown_bringup_interface(tgen, "r1", "lo", True) + sleep(5) + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.1.17" + iif = "lo" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + 
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_RP_configured_as_LHR_2_p1(request): + """ + TC_21_2_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP configure + in LHR router + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + + r1 : LHR/RP + r3 : FHR + + """ + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send multicast traffic from r5") + step("Send the IGMP join from r0") + + step("r1, r2, r3, r4: Delete existing RP configuration," "configure r1(LHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r3": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r4": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1, r2, r3, r4: Configure r1(LHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r2": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r3": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.1.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.1.17" + iif = "lo" + result = verify_pim_rp_info(tgen, topo, dut, GROUP_ADDRESS, iif, rp_address, SOURCE) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} 
:Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_RP_configured_as_FHR_1_p1(request): + """ + TC_22_1_P1: Verify OIF and RPF for (*,G) and (S,G) when static RP is configured + in FHR router + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + + r1 : LHR + r3 : FHR/RP + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don't run this test if we have any failure.
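+    # With the RP co-located on the FHR (r3, 1.0.3.17), r1 resolves both the
+    # (*, G) and the (S, G) RPF via r1-r3-eth2, so the same IIF is checked for
+    # both entries below.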
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send the IGMP join from r0") + step("Send multicast traffic from r5") + + step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r3": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r4": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + } + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1, r2, r3, r4: Configure r3(FHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r2": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r3": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.3.17" + iif = "r1-r3-eth2" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, 
"Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_RP_configured_as_FHR_2_p2(request): + """ + TC_22_2_P2: Verify OIF and RFP for (*,G) and (S,G) when static RP configure + in FHR router + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + + r1 : LHR + r3 : FHR/RP + """ + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send multicast traffic from r5") + step("Send the IGMP join from r0") + + step("r1, r2, r3, r4: Delete existing RP configuration" "configure r3(FHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r3": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + "r4": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1, r2, r3, r4: Configure r3(FHR) as RP") + input_dict = { + "r1": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r2": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r3": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.3.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + }, + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.3.17" + iif = "r1-r3-eth2" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is 
True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_SPT_RPT_path_different_p1(request): + """ + TC_23_P1: Verify (*,G) and (S,G) populated correctly when RPT and SPT path + are different + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + + r1: LHR + r2: RP + r3: FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1") + step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send multicast traffic from r3") + + step("r2: Verify RP info") + dut = "r2" + rp_address = "1.0.2.17" + iif = "lo" + result = verify_pim_rp_info(tgen, topo, dut, GROUP_ADDRESS, iif, rp_address, SOURCE) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: 
{}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream IIF interface") + dut = "r2" + iif = "r2-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_clear_pim_configuration_p1(request): + """ + TC_25_P1: Verify (*,G) and (S,G) populated correctly after clearing the + PIM,IGMP and mroutes joins + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + | | + |_____________| + r4 + r1 : LHR + r2 : RP + r3 : FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send the IGMP join from r0") + step("Send multicast traffic from r5") + + step("r2: Verify RP info") + dut = "r2" + rp_address = "1.0.2.17" + oif = "lo" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + iif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, iif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups timer restarted") + result = clear_ip_igmp_interfaces(tgen, dut) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify PIM neighbor timer restarted") + result = clear_ip_pim_interfaces(tgen, dut) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify PIM mroute timer restarted") + result = clear_ip_mroute_verify(tgen, dut) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + # Uncomment next line for debugging + # tgen.mininet_cli() + + write_test_footer(tc_name) + + +def test_restart_pimd_process_p2(request): + """ + TC_26_P2: Restart the PIMd process and verify PIM upstream and mroutes + entries + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + | | + |_____________| + r4 + r1 : LHR + r2 : RP + r3 : FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
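+    # The pimd restart is validated by sampling mroute uptimes with
+    # return_uptime=True before killing pimd and again after restarting it,
+    # then using verify_mroute_repopulated to confirm the entries were
+    # re-learned (fresh, lower uptimes) rather than left over.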
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1") + step("Configure RP on r3 (loopback interface) for the group range" " 224.0.0.0/4") + step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers") + step("Send multicast traffic from R3") + step("Restart the PIMd process") + + step("r2: Verify RP info") + dut = "r2" + rp_address = "1.0.2.17" + oif = "lo" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result 
is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + dut = "r1" + iif = "r1-r2-eth1" + oil = "r1-r0-eth0" + logger.info("waiting for 10 sec to make sure old mroute time is higher") + sleep(10) + uptime_before = verify_ip_mroutes( + tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, wait=60 + ) + assert isinstance(uptime_before, dict), "Testcase{} : Failed Error: {}".format( + tc_name, result + ) + + step("r1: Kill pimd process") + kill_router_daemons(tgen, "r1", ["pimd"]) + + step("r1 : Start pimd process") + start_router_daemons(tgen, "r1", ["pimd"]) + + logger.info("Waiting for 5sec to get PIMd restarted and mroute" " re-learned..") + sleep(5) + + uptime_after = verify_ip_mroutes( + tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, wait=10 + ) + assert isinstance(uptime_after, dict), "Testcase{} : Failed Error: {}".format( + tc_name, result + ) + + result = verify_mroute_repopulated(uptime_before, uptime_after) + assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_multiple_groups_same_RP_address_p2(request): + """ + TC_27_P2: Configure multiple groups (10 grps) with same RP address + + Topology used: + ________r2_____ + | | + iperf | | iperf + r0-----r1-------------r3-----r5 + + r1 : LHR + r2 : RP + r3 : FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1") + step("Configure RP on r2 (loopback interface) for the group range" "225.1.1.0/24") + step("Enable the PIM on all the interfaces of r1-r2-r3") + step("Send multicast traffic from r5 to all the groups") + step("r1 : Remove the groups to RP mapping one by one") + step("r1: Shut the upstream interfaces") + step("r1: No shut the upstream interfaces") + step("r1: Configure the RP again") + step("r1: Shut the receiver interfaces") + step("r1: No Shut the receiver interfaces") + step("r2: Verify RP info") + + step("r2: verify rp-info") + dut = "r2" + rp_address = "1.0.2.17" + oif = "lo" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, oif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + GROUP_ADDRESS_LIST = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2 + step("r0: Send IGMP join for 10 groups") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS_LIST, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS_LIST, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is 
True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream IIF interface") + dut = "r2" + iif = "r2-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Delete RP configuration") + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Shut the interface r1-r2-eth1 from R1 to R2") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No Shut the interface r1-r2-eth1 from R1 to R2") + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Configure RP") + input_dict = { + "r1": { + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_ALL,}] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Shut the interface r1-r0-eth0 from R1 to R2") + intf = "r1-r0-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No Shut the interface r1-r0-eth0 from R1 to R2") + intf = "r1-r0-eth0" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result 
= verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream IIF interface") + dut = "r2" + iif = "r2-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_multiple_groups_different_RP_address_p2(request): + """ + TC_28_P2: Verify IIF and OIL in updated in mroute when upstream interface + configure as RP + + Topology used: + ________r2_____ + | | + iperf | | 
iperf + r0-----r1-------------r3-----r5 + | | + |_____________| + r4 + r1 : LHR + r2 & r4 : RP + r3 : FHR + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Delete existing RP configuration") + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + } + } + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "r2": { + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_LIST_1,}] + } + }, + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": GROUP_RANGE_LIST_2,}] + } + }, + } + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify RP info") + dut = "r2" + rp_address = "1.0.2.17" + oif = "lo" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_LIST_1, oif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify RP info") + dut = "r4" + rp_address = "1.0.4.17" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_LIST_2, oif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + GROUP_ADDRESS_LIST = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2 + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS_LIST, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS_LIST) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r5: Send multicast traffic for group 225.1.1.1") + result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS_LIST, 32, 2500) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) 
upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1 + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream IIF interface") + iif = "r2-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r4-eth3" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2) + assert 
result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2 + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (*, G) upstream IIF interface") + dut = "r4" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (*, G) ip mroutes") + oif = "r4-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (S, G) upstream IIF interface") + iif = "r4-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r4: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Delete RP configuration") + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_LIST_1, + "delete": True, + } + ] + } + }, + "r4": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.4.17", + "group_addr_range": GROUP_RANGE_LIST_2, + "delete": True, + } + ] + } + }, + } + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1, r2, r3, r4: Re-configure RP") + input_dict = { + "r2": { + "pim": { + "rp": [{"rp_addr": "1.0.2.17", "group_addr_range": GROUP_RANGE_LIST_1,}] + } + }, + "r4": { + "pim": { + "rp": [{"rp_addr": "1.0.4.17", "group_addr_range": GROUP_RANGE_LIST_2,}] + } + }, + } + result = 
create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Shut the interface r1-r2-eth1 from R1 to R2") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No shut the interface r1-r2-eth1 from R1 to R2") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Shut the interface r1-r4-eth3 from R1 to R4") + dut = "r1" + intf = "r1-r4-eth3" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No shut the interface r1-r4-eth3 from R1 to R4") + dut = "r1" + intf = "r1-r4-eth3" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Shut the interface r1-r0-eth0 from R1 to R0") + dut = "r1" + intf = "r1-r0-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No Shut the interface r1-r0-eth0 from R1 to R0") + dut = "r1" + intf = "r1-r0-eth0" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r2-eth1" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1 + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream IIF interface") + dut = "r2" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream IIF interface") + iif = "r2-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS,
GROUP_ADDRESS_LIST_1, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream IIF interface") + dut = "r1" + iif = "r1-r4-eth3" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream IIF interface") + iif = "r1-r3-eth2" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2 + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (S, G) ip mroutes") + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (*, G) upstream IIF interface") + dut = "r4" + iif = "lo" + result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (*, G) upstream join state and join timer") + result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (*, G) ip mroutes") + oif = "r4-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (S, G) upstream IIF interface") + iif = "r4-r3-eth1" + result = verify_upstream_iif( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, joinState="NotJoined" + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r4: Verify (S, G) upstream 
join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r4: Verify (S, G) ip mroutes") + oif = "none" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream IIF interface") + dut = "r3" + iif = "r3-r5-eth3" + result = verify_upstream_iif(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (S, G) upstream join state and join timer") + result = verify_join_state_and_timer( + tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False + ) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (S, G) ip mroutes") + oif = "r3-r1-eth0" + result = verify_ip_mroutes( + tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_shutdown_primary_path_p1(request): + """ + TC_30_P1: Verify IIF and OIL change to other path after shut the primary + path + + Topology used: + ________r2_____ + | | + iperf | | + r0-----r1-------------r3 + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + # Steps to execute + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4") + step("r1: Shut the link from r1 to r2") + step("r3: Shut the link from r1 to r3") + step("r1: No shut the link from r1 to r2") + step("r3: No shut the link from r1 to r3") + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.2.17" + iif = "r1-r2-eth1" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + dut = "r2" + iif = "lo" + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Shut the interface r1-r2-eth1 from R1 to R2") + dut 
= "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "Verify after shut the R1 to R2 link , verify join is reaching to RP" + "via other path" + ) + + logger.info("Waiting for 110 sec only if test run with crucible") + + step("r1: Verify (*, G) ip mroutes") + dut = "r1" + iif = "r1-r3-eth2" + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + dut = "r2" + iif = "lo" + oif = "r2-r3-eth1" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (*, G) ip mroutes") + dut = "r3" + iif = "r3-r2-eth1" + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Shut the link from R1 to R3 from R3 node") + dut = "r3" + intf = "r3-r1-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "Verify after shut of R1 to R3 link , verify (*,G) entries got" + "cleared from all the node R1, R2, R3" + ) + + step("r1: Verify (*, G) ip mroutes") + dut = "r1" + iif = "r1-r3-eth2" + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (*, G) ip mroutes") + dut = "r2" + iif = "lo" + oif = "r2-r3-eth1" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: Verify (*, G) ip mroutes") + dut = "r3" + iif = "r3-r2-eth1" + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r3: No shutdown the link from R1 to R3 from R3 node") + dut = "r3" + intf = "r3-r1-eth0" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Verify (*, G) ip mroutes") + dut = "r1" + iif = "r1-r3-eth2" + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + dut = "r2" + iif = "lo" + oif = "r2-r3-eth1" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r3: Verify (*, G) ip mroutes") + dut = "r3" + iif = "r3-r2-eth1" + oif = "r3-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: No shutdown the link from R1 to R2 from R1 node") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Verify (*, G) ip mroutes") + dut = "r1" + iif = "r1-r2-eth1" + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes") + dut = "r2" + iif = "lo" + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + 
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +def test_delete_RP_shut_noshut_upstream_interface_p1(request): + """ + TC_31_P1: Verify RP info and (*,G) mroute after deleting the RP and shut / + no shut the RPF interface. + Topology used: + ________r2_____ + | | + iperf | | + r0-----r1-------------r3 + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4") + step("r1: Delete the RP config") + step("r1: Shut and no shut the upstream interface (R1-R2) connected link") + step("r1: Shut and no shut the OIL interface") + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.2.17" + iif = "r1-r2-eth1" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + dut = "r1" + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes created") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes created") + dut = "r2" + iif = "lo" + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Delete RP configuration") + + # Delete RP configuration + input_dict = { + "r1": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Shut the interface r1-r2-eth1 from R1 to R2") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No shutdown the interface r1-r2-eth1 from R1 to R2") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Shutdown the OIL interface r1-r0-eth0 from R1 to R0 ") + dut = "r1" + intf = "r1-r0-eth0" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: No shutdown the OIL interface r1-r0-eth0 from R1 to R0") + dut = "r1" + intf = "r1-r0-eth0" + shutdown_bringup_interface(tgen, dut, intf, True) + + step("r1: Verify (*, G) ip mroutes cleared") + dut = "r1" + iif = "r1-r2-eth1" + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is 
not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (*, G) ip mroutes cleared") + dut = "r2" + iif = "lo" + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_delete_RP_shut_noshut_RP_interface_p1(request): + """ + TC_32_P1: Verify RP info and (*,G) mroute after deleting the RP and shut/ + no shut the RPF inteface + + Topology used: + ________r2_____ + | | + iperf | | + r0-----r1-------------r3 + """ + + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + # Don"t run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Creating configuration from JSON") + reset_config_on_routers(tgen) + kill_iperf(tgen) + clear_ip_mroute(tgen) + clear_ip_pim_interface_traffic(tgen, topo) + + step("pre-configuration to send IGMP join and multicast traffic") + result = config_to_send_igmp_join_and_traffic(tgen, tc_name) + assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result) + + step("Enable IGMP on r1 interface") + step("Configure RP on r2 (lo) for the group range" " 224.0.0.0/4") + step("r2: Delete the RP configuration") + step("r2: Shut the RP interface (lo)") + step("r1: Shut the interface(r1-r2-eth1, r1-r3-eth2) towards rp") + + step("r1: Verify RP info") + dut = "r1" + rp_address = "1.0.2.17" + iif = "r1-r2-eth1" + result = verify_pim_rp_info( + tgen, topo, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE + ) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r0: Send IGMP join") + result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify IGMP groups") + oif = "r1-r0-eth0" + result = verify_igmp_groups(tgen, dut, oif, GROUP_ADDRESS) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r1: Verify (*, G) ip mroutes created") + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Verify (*, G) ip mroutes created") + dut = "r2" + iif = "lo" + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Delete RP configuration") + + # Delete RP configuration + input_dict = { + "r2": { + "pim": { + "rp": [ + { + "rp_addr": "1.0.2.17", + "group_addr_range": GROUP_RANGE_ALL, + "delete": True, + } + ] + } + } + } + + result = create_pim_config(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("r2: Shut the RP interface lo") + dut = "r2" + intf = "lo" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: Shut the interface r1-r2-eth1 towards RP") + dut = "r1" + intf = "r1-r2-eth1" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: Shut the interface r1-r3-eth2 towards RP") + dut = "r1" + intf = "r1-r3-eth2" + shutdown_bringup_interface(tgen, dut, intf, False) + + step("r1: Verify (*, G) ip mroutes cleared") + dut = "r1" + iif = "r1-r2-eth1" + oif = "r1-r0-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) 
+ assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("r2: Verify (*, G) ip mroutes cleared") + dut = "r2" + iif = "lo" + oif = "r2-r1-eth0" + result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False) + assert result is not True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot new file mode 100644 index 0000000000..2c6d0aab16 --- /dev/null +++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.dot @@ -0,0 +1,107 @@ +## Color coding: +######################### +## Main FRR: #f08080 red +## Switches: #d0e0d0 gray +## RIP: #19e3d9 Cyan +## RIPng: #fcb314 dark yellow +## OSPFv2: #32b835 Green +## OSPFv3: #19e3d9 Cyan +## ISIS IPv4 #fcb314 dark yellow +## ISIS IPv6 #9a81ec purple +## BGP IPv4 #eee3d3 beige +## BGP IPv6 #fdff00 yellow +##### Colors (see http://www.color-hex.com/) + +graph ospf_topo1 { + label="ospf dual stack"; + + # Routers + r1 [ + label="r1\nrtr-id 1.1.1.1/32", + shape=doubleoctagon, + fillcolor="#f08080", + style=filled, + ]; + r2 [ + label="r2\nrtr-id 2.2.2.2/32", + shape=doubleoctagon, + fillcolor="#f08080", + style=filled, + ]; + r3 [ + label="r3\nrtr-id 3.3.3.3/32", + shape=doubleoctagon, + fillcolor="#f08080", + style=filled, + ]; + r4 [ + label="r4\nrtr-id 4.4.4.4/32", + shape=doubleoctagon, + fillcolor="#f08080", + style=filled, + ]; + r5 [ + label="r5\nrtr-id 5.5.5.5/32", + shape=doubleoctagon, + fillcolor="#f08080", + style=filled, + ]; + + # Switches + s1 [ + label="s1\n10.0.13.0/24\n2013:13::/64", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s2 [ + label="s2\n10.0.23.0/24\n2023:23::/64", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s3 [ + label="s3\n10.0.34.0/24\n2034:34::/64", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s4 [ + label="s4\n10.0.24.0/24\n2024:24::/64", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + s5 [ + label="s5\n10.0.45.0/24\n2045:45::/64", + shape=oval, + fillcolor="#d0e0d0", + style=filled, + ]; + + # Connections + subgraph cluster1 { + label="area 1.1.1.1" + + r1 -- s1 [label="eth0\n.1\n::1"]; + r3 -- s1 [label="eth0\n.3\n::3"]; + r3 -- s2 [label="eth1\n.3\n::3"]; + r2 -- s2 [label="eth0\n.2\n::2"]; + } + + subgraph cluster0 { + label="area 0.0.0.0" + + r3 -- s3 [label="eth2\n.3\n::3"]; + r4 -- s3 [label="eth0\n.4\n::4"]; + r2 -- s4 [label="eth1\n.2\n::2"]; + r4 -- s4 [label="eth1\n.4\n::4"]; + } + + subgraph cluster2 { + label="area 2.2.2.2" + + r4 -- s5 [label="eth2\n.4\n::4"]; + r5 -- s5 [label="eth0\n.5\n::5"]; + } +} diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpg b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpg Binary files differnew file mode 100644 index 0000000000..44efda8390 --- /dev/null +++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.jpg diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json new file mode 100644 index 0000000000..c8a3ce783b --- /dev/null +++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.json @@ -0,0 +1,255 @@ +{ + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 24 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32 + }, + "routers": { + 
"r1": { + "links": { + "r3": { + "ipv4": "10.0.13.1/24", + "ipv6": "2013:13::1/64", + "ospf": { + "area": "1.1.1.1", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf": { + "router_id": "1.1.1.1", + "neighbors": { + "r3": {} + } + }, + "ospf6": { + "router_id": "1.1.1.1", + "neighbors": { + "r3": { + "area": "1.1.1.1" + } + } + } + }, + "r2": { + "links": { + "r3": { + "ipv4": "10.0.23.2/24", + "ipv6": "2023:23::2/64", + "ospf": { + "area": "1.1.1.1", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r4": { + "ipv4": "10.0.24.2/24", + "ipv6": "2034:34::2/64", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf": { + "router_id": "2.2.2.2", + "neighbors": { + "r3": {}, + "r4": {} + } + }, + "ospf6": { + "router_id": "2.2.2.2", + "neighbors": { + "r3": { "area": "1.1.1.1" }, + "r4": { "area": "0.0.0.0" } + } + } + }, + "r3": { + "links": { + "r1": { + "ipv4": "10.0.13.3/24", + "ipv6": "2013:13::3/64", + "ospf": { + "area": "1.1.1.1", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r2": { + "ipv4": "10.0.23.3/24", + "ipv6": "2023:23::3/64", + "ospf": { + "area": "1.1.1.1", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r4": { + "ipv4": "10.0.34.3/24", + "ipv6": "2034:34::3/64", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf": { + "router_id": "3.3.3.3", + "neighbors": { + "r1": {}, + "r2": {}, + "r4": {} + } + }, + "ospf6": { + "router_id": "3.3.3.3", + "neighbors": { + "r1": { "area": "1.1.1.1" }, + "r2": { "area": "1.1.1.1" }, + "r4": { "area": "0.0.0.0" } + } + } + }, + "r4": { + "links": { + "r2": { + "ipv4": "10.0.24.4/24", + "ipv6": "2024:24::4/64", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r3": { + "ipv4": "10.0.34.4/24", + "ipv6": "2034:34::4/64", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + }, + "r5": { + "ipv4": "10.0.45.4/24", + "ipv6": "2045:45::4/64", + "ospf": { + "area": "2.2.2.2", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf": { + "router_id": "4.4.4.4", + "neighbors": { + "r2": {}, + "r3": {}, + "r5": {} + } + }, + "ospf6": { + "router_id": "4.4.4.4", + "neighbors": { + "r2": { "area": "0.0.0.0" }, + "r3": { "area": "0.0.0.0" }, + "r5": { "area": "2.2.2.2" } + } + } + }, + "r5": { + "links": { + "r4": { + "ipv4": "10.0.45.5/24", + "ipv6": "2045:45::5/64", + "ospf": { + "area": 
"2.2.2.2", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + }, + "ospf6": { + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-point" + } + } + }, + "ospf": { + "router_id": "5.5.5.5", + "neighbors": { + "r4": {} + } + }, + "ospf6": { + "router_id": "5.5.5.5", + "neighbors": { + "r4": { "area": "2.2.2.2" } + } + } + } + } +} diff --git a/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py new file mode 100644 index 0000000000..5e7802fa04 --- /dev/null +++ b/tests/topotests/ospf-dual-stack/test_ospf_dual_stack.py @@ -0,0 +1,152 @@ +#!/usr/bin/python + +import os +import sys +import time +import pytest +import json + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + stop_router, + start_router, + verify_rib, + create_static_routes, + step, + start_router_daemons, + shutdown_bringup_interface, + topo_daemons, + create_prefix_lists, + create_interfaces_cfg, + run_frr_cmd, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.ospf import ( + verify_ospf_neighbor, + verify_ospf6_neighbor, + create_router_ospf, + create_router_ospf6, + verify_ospf_summary, + redistribute_ospf, + verify_ospf_database, +) + +# Global variables +topo = None + +# Reading the data from JSON File for topology creation +jsonFile = "{}/test_ospf_dual_stack.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + + +class CreateTopo(Topo): + """Test topology builder.""" + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """Sets up the pytest environment.""" + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether OSPF converged + ospf_covergence_ipv4 = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence_ipv4 is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence_ipv4 + ) + + # Api call verify whether OSPF6 converged + ospf_covergence_ipv6 = verify_ospf6_neighbor(tgen, topo) + assert ospf_covergence_ipv6 is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence_ipv6 + ) + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. 
+ + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + tgen = get_topogen() + # Stop topology and remove tmp files + tgen.stop_topology() + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +# +# ################################## +# Test cases start here. +# ################################## +# +# +def test_ospf_dual_stack(request): + """OSPF test dual stack.""" + + tc_name = request.node.name + write_test_header(tc_name) + + # Don't run this test if we have any failure. + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + + step("Bring up the base configuration as per the JSON topology") + reset_config_on_routers(tgen) + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-tilfa-topo1/__init__.py b/tests/topotests/ospf-tilfa-topo1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/__init__.py diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf new file mode 100644 index 0000000000..eaef49225f --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/ospfd.conf @@ -0,0 +1,27 @@ +debug ospf sr +debug ospf ti-lfa +! +interface lo +! +interface eth-rt2 + ip ospf network point-to-point +! +interface eth-rt3 + ip ospf network point-to-point +! +router ospf + ospf router-id 1.1.1.1 + network 1.1.1.0/24 area 0.0.0.0 + network 10.0.0.0/16 area 0.0.0.0 + area 0.0.0.0 range 10.0.0.0/16 + area 0.0.0.0 range 1.1.1.0/24 + capability opaque + mpls-te on + mpls-te router-address 1.1.1.1 + router-info area 0.0.0.0 + passive-interface lo + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 1.1.1.1/32 index 10 +! 
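Note on the ospf-tilfa-topo1 configuration files: the five ospfd.conf files added for this topotest (rt1 above, rt2 through rt5 further below) differ only in the OSPF router-id, the MPLS-TE router-address and the Prefix-SID index (10, 20, 30, 40 and 50); the segment-routing global-block (16000 23999) and node-msd are identical on every router. A minimal Python sketch of that pattern, purely illustrative and not part of the patch (the helper name sr_stanza is invented here):

def sr_stanza(router_id, index, srgb=(16000, 23999), msd=8):
    """Render the per-router segment-routing stanza used by rt1-rt5."""
    prefix = "{}/32".format(router_id)  # loopback prefixes are /32 in this topology
    return "\n".join([
        "segment-routing on",
        "segment-routing global-block {} {}".format(*srgb),
        "segment-routing node-msd {}".format(msd),
        "segment-routing prefix {} index {}".format(prefix, index),
    ])

# e.g. sr_stanza("1.1.1.1", 10) reproduces the last four lines of rt1/ospfd.conf above.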
diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref new file mode 100644 index 0000000000..0ad2aaeade --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/step1/show_ip_route_initial.ref @@ -0,0 +1,156 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + }, + { + "prefix":"1.1.1.1\/32", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "1.1.1.2\/32":[ + { + "prefix":"1.1.1.2\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.3\/32":[ + { + "prefix":"1.1.1.3\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "1.1.1.4\/32":[ + { + "prefix":"1.1.1.4\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.5\/32":[ + { + "prefix":"1.1.1.5\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + }, + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ] +} diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref new file mode 100644 index 0000000000..0ad2aaeade --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_initial.ref @@ -0,0 +1,156 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + }, + { + "prefix":"1.1.1.1\/32", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "1.1.1.2\/32":[ + { + "prefix":"1.1.1.2\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.3\/32":[ + { + "prefix":"1.1.1.3\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "1.1.1.4\/32":[ + { + "prefix":"1.1.1.4\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.5\/32":[ + { + "prefix":"1.1.1.5\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } 
+ ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + }, + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ] +} diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref new file mode 100644 index 0000000000..968570e193 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/step2/show_ip_route_link_protection.ref @@ -0,0 +1,226 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + }, + { + "prefix":"1.1.1.1\/32", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "1.1.1.2\/32":[ + { + "prefix":"1.1.1.2\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ], + "backupNexthops":[ + { + "ip":"10.0.2.2", + "labels":[ + 16050 + ] + } + ] + } + ], + "1.1.1.3\/32":[ + { + "prefix":"1.1.1.3\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ], + "backupNexthops":[ + { + "ip":"10.0.1.2", + "labels":[ + 16040 + ] + } + ] + } + ], + "1.1.1.4\/32":[ + { + "prefix":"1.1.1.4\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2", + "labels":[ + 16040 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.2.2", + "labels":[ + 16050, + 16040 + ] + } + ] + } + ], + "1.1.1.5\/32":[ + { + "prefix":"1.1.1.5\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3", + "labels":[ + 16050 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.1.2", + "labels":[ + 16040, + 16050 + ] + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ], + "backupNexthops":[ 
+ { + "ip":"10.0.2.2", + "labels":[ + 16050 + ] + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ], + "backupNexthops":[ + { + "ip":"10.0.1.2", + "labels":[ + 16040 + ] + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + }, + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ], + "backupNexthops":[ + { + "ip":"10.0.1.2", + "labels":[ + 16040 + ] + }, + { + "ip":"10.0.2.2", + "labels":[ + 16050 + ] + } + ] + } + ] +} diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref new file mode 100644 index 0000000000..0ad2aaeade --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_initial.ref @@ -0,0 +1,156 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + }, + { + "prefix":"1.1.1.1\/32", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "1.1.1.2\/32":[ + { + "prefix":"1.1.1.2\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.3\/32":[ + { + "prefix":"1.1.1.3\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "1.1.1.4\/32":[ + { + "prefix":"1.1.1.4\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.5\/32":[ + { + "prefix":"1.1.1.5\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + }, + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ] +} diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref new file mode 100644 index 0000000000..46a80d298e --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/step3/show_ip_route_node_protection.ref @@ -0,0 +1,192 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"lo" + } + ] + }, + { + "prefix":"1.1.1.1\/32", + "protocol":"connected", + "nexthops":[ + 
{ + "directlyConnected":true, + "interfaceName":"lo" + } + ] + } + ], + "1.1.1.2\/32":[ + { + "prefix":"1.1.1.2\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "1.1.1.3\/32":[ + { + "prefix":"1.1.1.3\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "1.1.1.4\/32":[ + { + "prefix":"1.1.1.4\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2", + "labels":[ + 16040 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.2.2", + "labels":[ + 16050 + ] + } + ] + } + ], + "1.1.1.5\/32":[ + { + "prefix":"1.1.1.5\/32", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3", + "labels":[ + 16050 + ] + } + ], + "backupNexthops":[ + { + "ip":"10.0.1.2", + "labels":[ + 16040 + ] + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + }, + { + "prefix":"10.0.1.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + }, + { + "prefix":"10.0.2.0\/24", + "protocol":"connected", + "nexthops":[ + { + "directlyConnected":true, + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"ospf", + "nexthops":[ + { + "ip":"10.0.1.2", + "interfaceName":"eth-rt2" + }, + { + "ip":"10.0.2.2", + "interfaceName":"eth-rt3" + } + ], + "backupNexthops":[ + { + "ip":"10.0.2.2", + "labels":[ + 16050 + ] + }, + { + "ip":"10.0.1.2", + "labels":[ + 16040 + ] + } + ] + } + ] +} diff --git a/tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf new file mode 100644 index 0000000000..bf0e77a17b --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt1/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname rt1 +! +interface lo + ip address 1.1.1.1/32 +! +interface eth-rt2 + ip address 10.0.1.1/24 +! +interface eth-rt3 + ip address 10.0.2.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf new file mode 100644 index 0000000000..7548aad7f8 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt2/ospfd.conf @@ -0,0 +1,27 @@ +debug ospf sr +debug ospf ti-lfa +! +interface lo +! +interface eth-rt1 + ip ospf network point-to-point +! +interface eth-rt4 + ip ospf network point-to-point +! +router ospf + ospf router-id 1.1.1.2 + network 1.1.1.0/24 area 0.0.0.0 + network 10.0.0.0/16 area 0.0.0.0 + area 0.0.0.0 range 10.0.0.0/16 + area 0.0.0.0 range 1.1.1.0/24 + capability opaque + mpls-te on + mpls-te router-address 1.1.1.2 + router-info area 0.0.0.0 + passive-interface lo + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 1.1.1.2/32 index 20 +! 
diff --git a/tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf new file mode 100644 index 0000000000..add2933571 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt2/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname rt2 +! +interface lo + ip address 1.1.1.2/32 +! +interface eth-rt1 + ip address 10.0.1.2/24 +! +interface eth-rt4 + ip address 10.0.3.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf new file mode 100644 index 0000000000..6258295b6f --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt3/ospfd.conf @@ -0,0 +1,27 @@ +debug ospf sr +debug ospf ti-lfa +! +interface lo +! +interface eth-rt1 + ip ospf network point-to-point +! +interface eth-rt5 + ip ospf network point-to-point +! +router ospf + ospf router-id 1.1.1.3 + network 1.1.1.0/24 area 0.0.0.0 + network 10.0.0.0/16 area 0.0.0.0 + area 0.0.0.0 range 10.0.0.0/16 + area 0.0.0.0 range 1.1.1.0/24 + capability opaque + mpls-te on + mpls-te router-address 1.1.1.3 + router-info area 0.0.0.0 + passive-interface lo + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 1.1.1.3/32 index 30 +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf new file mode 100644 index 0000000000..1bb64bc585 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt3/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname rt3 +! +interface lo + ip address 1.1.1.3/32 +! +interface eth-rt1 + ip address 10.0.2.2/24 +! +interface eth-rt5 + ip address 10.0.4.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf new file mode 100644 index 0000000000..ad02214017 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt4/ospfd.conf @@ -0,0 +1,27 @@ +debug ospf sr +debug ospf ti-lfa +! +interface lo +! +interface eth-rt2 + ip ospf network point-to-point +! +interface eth-rt5 + ip ospf network point-to-point +! +router ospf + ospf router-id 1.1.1.4 + network 1.1.1.0/24 area 0.0.0.0 + network 10.0.0.0/16 area 0.0.0.0 + area 0.0.0.0 range 10.0.0.0/16 + area 0.0.0.0 range 1.1.1.0/24 + capability opaque + mpls-te on + mpls-te router-address 1.1.1.4 + router-info area 0.0.0.0 + passive-interface lo + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 1.1.1.4/32 index 40 +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf new file mode 100644 index 0000000000..306f0d4925 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt4/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname rt4 +! +interface lo + ip address 1.1.1.4/32 +! +interface eth-rt2 + ip address 10.0.3.2/24 +! +interface eth-rt5 + ip address 10.0.5.1/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf b/tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf new file mode 100644 index 0000000000..1b95858f53 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt5/ospfd.conf @@ -0,0 +1,27 @@ +debug ospf sr +debug ospf ti-lfa +! +interface lo +! +interface eth-rt3 + ip ospf network point-to-point +! +interface eth-rt4 + ip ospf network point-to-point +! 
+router ospf + ospf router-id 1.1.1.5 + network 1.1.1.0/24 area 0.0.0.0 + network 10.0.0.0/16 area 0.0.0.0 + area 0.0.0.0 range 10.0.0.0/16 + area 0.0.0.0 range 1.1.1.0/24 + capability opaque + mpls-te on + mpls-te router-address 1.1.1.5 + router-info area 0.0.0.0 + passive-interface lo + segment-routing on + segment-routing global-block 16000 23999 + segment-routing node-msd 8 + segment-routing prefix 1.1.1.5/32 index 50 +! diff --git a/tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf b/tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf new file mode 100644 index 0000000000..46f759580e --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/rt5/zebra.conf @@ -0,0 +1,17 @@ +log file zebra.log +! +hostname rt5 +! +interface lo + ip address 1.1.1.5/32 +! +interface eth-rt3 + ip address 10.0.4.2/24 +! +interface eth-rt4 + ip address 10.0.5.2/24 +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py b/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py new file mode 100644 index 0000000000..eb3ad5d995 --- /dev/null +++ b/tests/topotests/ospf-tilfa-topo1/test_ospf_tilfa_topo1.py @@ -0,0 +1,242 @@ +#!/usr/bin/env python + +# +# test_ospf_tilfa_topo1.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by +# Network Device Education Foundation, Inc. ("NetDEF") +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_ospf_tilfa_topo1.py: + +This topology is intentionally kept simple, its main purpose is to verify that +generated backup label stacks are inserted correctly into the RIB. For fancy +topologies please use the unit test framework provided in `/tests/ospfd`. + + + +---------+ +---------+ + | | | | + 10.0.1.0/24 eth+rt1| RT2 |eth+rt4 eth+rt2| RT2 | + +---------------------+ 2.2.2.2 +---------------------+ 4.4.4.4 | + | | | 10.0.3.0/24 | | + |eth+rt2 +---------+ +---------+ + +---------+ eth+rt5| + | | | + | RT1 | 10.0.5.0/24| + | 1.1.1.1 | | + | | | + +---------+ eth+rt4| + |eth+rt3 +---------+ +---------+ + | | | 10.0.4.0/24 | | + +---------------------+ RT3 +---------------------+ RT5 | + 10.0.2.0/24 eth+rt1| 3.3.3.3 |eth+rt5 eth-rt3| 5.5.5.5 | + | | | | + +---------+ +---------+ +""" + +import os +import sys +import pytest +import json +import re +from time import sleep +from functools import partial + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. 
+from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # + # Define FRR Routers + # + for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]: + tgen.add_router(router) + + # + # Define connections + # + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1") + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1") + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2") + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3") + + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(TemplateTopo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + # For all registered routers, load the zebra configuration file + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 60 seconds. 
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +def test_ospf_initial_convergence_step1(): + logger.info("Test (step 1): check initial convergence") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_compare_json_output( + "rt1", + "show ip route json", + "step1/show_ip_route_initial.ref", + ) + +def test_ospf_link_protection_step2(): + logger.info("Test (step 2): check OSPF link protection") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # enable TI-LFA link protection on all interfaces + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router ospf" -c "fast-reroute ti-lfa"' + ) + + router_compare_json_output( + "rt1", + "show ip route json", + "step2/show_ip_route_link_protection.ref", + ) + + # disable TI-LFA link protection on all interfaces + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router ospf" -c "no fast-reroute ti-lfa"' + ) + + # check if we got back to the initial route table + router_compare_json_output( + "rt1", + "show ip route json", + "step2/show_ip_route_initial.ref", + ) + +def test_ospf_node_protection_step3(): + logger.info("Test (step 3): check OSPF node protection") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # enable TI-LFA node protection on all interfaces + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router ospf" -c "fast-reroute ti-lfa node-protection"' + ) + + router_compare_json_output( + "rt1", + "show ip route json", + "step3/show_ip_route_node_protection.ref", + ) + + # disable TI-LFA node protection on all interfaces + tgen.net["rt1"].cmd( + 'vtysh -c "conf t" -c "router ospf" -c "no fast-reroute ti-lfa node-protection"' + ) + + # check if we got back to the initial route table + router_compare_json_output( + "rt1", + "show ip route json", + "step3/show_ip_route_initial.ref", + ) + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py index 6d44d02e5e..2421b312d2 100644 --- a/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py +++ b/tests/topotests/ospf-topo1-vrf/test_ospf_topo1_vrf.py @@ -320,8 +320,10 @@ def test_ospf_link_down_kernel_route(): # Run test function until we get an result. Wait at most 60 seconds. 
test_func = partial(compare_show_ip_route_vrf, router.name, expected) result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5) - assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down: {}'.format( - router.name, diff + assertmsg = ( + 'OSPF IPv4 route mismatch in router "{}" after link down: {}'.format( + router.name, diff + ) ) assert result, assertmsg diff --git a/tests/topotests/ospf-topo1/test_ospf_topo1.py b/tests/topotests/ospf-topo1/test_ospf_topo1.py index 95193afb2a..24806dd8fc 100644 --- a/tests/topotests/ospf-topo1/test_ospf_topo1.py +++ b/tests/topotests/ospf-topo1/test_ospf_topo1.py @@ -310,7 +310,10 @@ def test_ospf_json(): # r4 has more interfaces for area 0.0.0.1 if router.name == "r4": expected["areas"]["0.0.0.1"].update( - {"areaIfActiveCounter": 2, "areaIfTotalCounter": 2,} + { + "areaIfActiveCounter": 2, + "areaIfTotalCounter": 2, + } ) # router 3 has an additional area @@ -372,16 +375,25 @@ def test_ospf_link_down_kernel_route(): } if router.name == "r1" or router.name == "r2": expected.update( - {"10.0.10.0/24": None, "172.16.0.0/24": None, "172.16.1.0/24": None,} + { + "10.0.10.0/24": None, + "172.16.0.0/24": None, + "172.16.1.0/24": None, + } ) elif router.name == "r3" or router.name == "r4": expected.update( - {"10.0.1.0/24": None, "10.0.2.0/24": None,} + { + "10.0.1.0/24": None, + "10.0.2.0/24": None, + } ) # Route '10.0.3.0' is no longer available for r4 since it is down. if router.name == "r4": expected.update( - {"10.0.3.0/24": None,} + { + "10.0.3.0/24": None, + } ) assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format( router.name @@ -443,12 +455,17 @@ def test_ospf6_link_down_kernel_route(): ) elif router.name == "r3" or router.name == "r4": expected.update( - {"2001:db8:1::/64": None, "2001:db8:2::/64": None,} + { + "2001:db8:1::/64": None, + "2001:db8:2::/64": None, + } ) # Route '2001:db8:3::/64' is no longer available for r4 since it is down. 
if router.name == "r4": expected.update( - {"2001:db8:3::/64": None,} + { + "2001:db8:3::/64": None, + } ) assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format( router.name diff --git a/tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref b/tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref new file mode 100644 index 0000000000..032acb5341 --- /dev/null +++ b/tests/topotests/ospf6-topo1/r2/ip_6_address.nhg.ref @@ -0,0 +1,10 @@ +fc00:1111:1111:1111::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium +fc00:1:1:1::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium +fc00:2222:2222:2222::/64 nhid XXXX via fc00:2:2:2::1234 dev r2-stubnet proto XXXX metric 20 pref medium +fc00:2:2:2::/64 dev r2-stubnet proto XXXX metric 256 pref medium +fc00:3333:3333:3333::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium +fc00:3:3:3::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium +fc00:4444:4444:4444::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium +fc00:4:4:4::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium +fc00:a:a:a::/64 dev r2-sw5 proto XXXX metric 256 pref medium +fc00:b:b:b::/64 nhid XXXX via fe80::__(r3-sw5)__ dev r2-sw5 proto XXXX metric 20 pref medium diff --git a/tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref b/tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref new file mode 100644 index 0000000000..101fcc95b4 --- /dev/null +++ b/tests/topotests/ospf6-topo1/r3/ip_6_address.nhg.ref @@ -0,0 +1,10 @@ +fc00:1111:1111:1111::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium +fc00:1:1:1::/64 nhid XXXX via fe80::__(r1-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium +fc00:2222:2222:2222::/64 nhid XXXX via fe80::__(r2-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium +fc00:2:2:2::/64 nhid XXXX via fe80::__(r2-sw5)__ dev r3-sw5 proto XXXX metric 20 pref medium +fc00:3333:3333:3333::/64 nhid XXXX via fc00:3:3:3::1234 dev r3-stubnet proto XXXX metric 20 pref medium +fc00:3:3:3::/64 dev r3-stubnet proto XXXX metric 256 pref medium +fc00:4444:4444:4444::/64 nhid XXXX via fe80::__(r4-sw6)__ dev r3-sw6 proto XXXX metric 20 pref medium +fc00:4:4:4::/64 nhid XXXX via fe80::__(r4-sw6)__ dev r3-sw6 proto XXXX metric 20 pref medium +fc00:a:a:a::/64 dev r3-sw5 proto XXXX metric 256 pref medium +fc00:b:b:b::/64 dev r3-sw6 proto XXXX metric 256 pref medium diff --git a/tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref b/tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref new file mode 100644 index 0000000000..4f11670ce3 --- /dev/null +++ b/tests/topotests/ospf6-topo1/r4/ip_6_address.nhg.ref @@ -0,0 +1,10 @@ +fc00:1111:1111:1111::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:1:1:1::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:2222:2222:2222::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:2:2:2::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:3333:3333:3333::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:3:3:3::/64 nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:4444:4444:4444::/64 nhid XXXX via fc00:4:4:4::1234 dev r4-stubnet proto XXXX metric 20 pref medium +fc00:4:4:4::/64 dev r4-stubnet proto XXXX metric 256 pref medium +fc00:a:a:a::/64 
nhid XXXX via fe80::__(r3-sw6)__ dev r4-sw6 proto XXXX metric 20 pref medium +fc00:b:b:b::/64 dev r4-sw6 proto XXXX metric 256 pref medium diff --git a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py index 8e3a329f10..c3efb6ff22 100644 --- a/tests/topotests/ospf6-topo1/test_ospf6_topo1.py +++ b/tests/topotests/ospf6-topo1/test_ospf6_topo1.py @@ -383,7 +383,15 @@ def test_linux_ipv6_kernel_routingTable(): "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff) ) + else: + logger.error( + "r{} failed - no nhid ref file: {}".format(i, refTableFile) + ) + assert False, ( + "Linux Kernel IPv6 Routing Table verification failed for router r%s\n" + % (i) + ) def test_shutdown_check_stderr(): diff --git a/tests/topotests/ospf_basic_functionality/ospf_chaos.json b/tests/topotests/ospf_basic_functionality/ospf_chaos.json new file mode 100644 index 0000000000..ed199f181b --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/ospf_chaos.json @@ -0,0 +1,166 @@ +{ + + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 24 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32 + }, + "routers": { + "r0": { + "links": { + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r2": {}, + "r3": {} + }, + "redistribute": [{ + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "r1": { + "links": { + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf": { + "router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4 + } + } + }, + "ospf": { + "router_id": "100.1.1.3", + "neighbors": { + "r0": {}, + "r1": {}, + "r2": {} + } + } + } + } +} diff --git a/tests/topotests/ospf_basic_functionality/ospf_p2mp.json b/tests/topotests/ospf_basic_functionality/ospf_p2mp.json new file mode 100644 index 0000000000..40815f36f3 --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/ospf_p2mp.json @@ -0,0 +1,198 @@ +{ + + "ipv4base": "10.0.0.0", + "ipv4mask": 24, + 
"link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 24 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32 + }, + "routers": { + "r0": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + } + }, + "ospf": { + "router_id": "100.1.1.0", + "neighbors": { + "r1": {}, + "r2": {}, + "r3": {} + } + } + }, + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r3-link0": { + "ipv4": "auto", + "description": "DummyIntftoR3" + } + }, + "ospf": { + "router_id": "100.1.1.1", + "neighbors": { + "r0": {}, + "r2": {}, + "r3": {} + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r3": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + } + }, + "ospf": { + "router_id": "100.1.1.2", + "neighbors": { + "r1": {}, + "r0": {}, + "r3": {} + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "type": "loopback" + }, + "r0": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r1": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r2": { + "ipv4": "auto", + "ospf": { + "area": "0.0.0.0", + "hello_interval": 1, + "dead_interval": 4, + "network": "point-to-multipoint" + } + }, + "r1-link0": { + "ipv4": "auto", + "description": "DummyIntftoR1", + "ospf": { + "area": "0.0.0.0" + } + } + }, + "ospf": { + "router_id": "100.1.1.3", + "neighbors": { + "r0": {}, + "r1": {}, + "r2": {} + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py index e92baefabf..9c3be58937 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_authentication.py @@ -29,6 +29,7 @@ import pytest from time import sleep from copy import deepcopy import json +from lib.topotest import frr_unicode # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) @@ -318,7 +319,7 @@ def test_ospf_authentication_simple_pass_tc28_p1(request): topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r1"]["links"]["r2"]["ipv4"] topo_modify_change_ip["routers"]["r1"]["links"]["r2"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) @@ -529,7 +530,7 @@ def test_ospf_authentication_md5_tc29_p1(request): intf_ip = topo_modify_change_ip["routers"]["r1"]["links"]["r2"]["ipv4"] topo_modify_change_ip["routers"]["r1"]["links"]["r2"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py new file mode 100644 index 0000000000..37b7528490 --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/test_ospf_chaos.py @@ -0,0 +1,576 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +from copy import deepcopy +import json + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + step, + shutdown_bringup_interface, + topo_daemons, + verify_rib, + stop_router, start_router, + create_static_routes, + start_router_daemons, + kill_router_daemons +) + +from lib.ospf import ( + verify_ospf_neighbor, verify_ospf_rib, + create_router_ospf) + +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json +from ipaddress import IPv4Address + + + +# Global variables +topo = None + +NETWORK = { + "ipv4": ["11.0.20.1/32", "11.0.20.2/32", "11.0.20.3/32", "11.0.20.4/32", + "11.0.20.5/32"] +} +""" +Topology: + Please view in a fixed-width font such as Courier. + +---+ A1 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A2 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A3 +---+ + +TESTCASES = +1. Verify ospf functionality after restart ospfd. +2. Verify ospf functionality after restart FRR service. +3. Verify ospf functionality when staticd is restarted. + """ + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospf_chaos.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +class CreateTopo(Topo): + """ + Test topology builder. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons needs to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. 
+ + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +# ################################## +# Test cases start here. +# ################################## +def test_ospf_chaos_tc31_p1(request): + """Verify ospf functionality after restart ospfd.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Create static routes(10.0.20.1/32) in R1 and redistribute " + "to OSPF using route map.") + + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK['ipv4'][0], + "no_of_ip": 5, + "next_hop": 'Null0', + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + ospf_red_r0 = { + "r0": { + "ospf": { + "redistribute": [{ + "redist_type": "static" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r0) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Verify OSPF neighbors after base config is done.") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step("Verify that route is advertised to R1.") + dut = 'r1' + protocol = 'ospf' + nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0] + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Kill OSPFd daemon on R0.") + kill_router_daemons(tgen, "r0", ["ospfd"]) + + step("Verify OSPF neighbors are down after killing ospfd in R0") + dut = 'r0' + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, + expected=False) + assert ospf_covergence is not True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step("Verify that route advertised to R1 are deleted from RIB and FIB.") + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Bring up OSPFd daemon on R0.") + start_router_daemons(tgen, "r0", ["ospfd"]) + + step("Verify OSPF neighbors are up after bringing back ospfd in R0") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "All the neighbours are up and routes are installed before the" + " restart. 
Verify OSPF route table and ip route table.") + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Kill OSPFd daemon on R1.") + kill_router_daemons(tgen, "r1", ["ospfd"]) + + step("Verify OSPF neighbors are down after killing ospfd in R1") + dut = 'r1' + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut, + expected=False) + assert ospf_covergence is not True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step("Bring up OSPFd daemon on R1.") + start_router_daemons(tgen, "r1", ["ospfd"]) + + step("Verify OSPF neighbors are up after bringing back ospfd in R1") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "All the neighbours are up and routes are installed before the" + " restart. Verify OSPF route table and ip route table.") + + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + write_test_footer(tc_name) + + +def test_ospf_chaos_tc32_p1(request): + """Verify ospf functionality after restart FRR service. 
""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Create static routes(10.0.20.1/32) in R1 and redistribute " + "to OSPF using route map.") + + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK['ipv4'][0], + "no_of_ip": 5, + "next_hop": 'Null0', + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + ospf_red_r0 = { + "r0": { + "ospf": { + "redistribute": [{ + "redist_type": "static" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r0) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Verify OSPF neighbors after base config is done.") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step("Verify that route is advertised to R1.") + dut = 'r1' + protocol = 'ospf' + + nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0] + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Restart frr on R0") + stop_router(tgen, 'r0') + start_router(tgen, 'r0') + + step("Verify OSPF neighbors are up after restarting R0") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "All the neighbours are up and routes are installed before the" + " restart. Verify OSPF route table and ip route table.") + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Restart frr on R1") + stop_router(tgen, 'r1') + start_router(tgen, 'r1') + + step("Verify OSPF neighbors are up after restarting R1") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "All the neighbours are up and routes are installed before the" + " restart. Verify OSPF route table and ip route table.") + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + write_test_footer(tc_name) + + +def test_ospf_chaos_tc34_p1(request): + """ + verify ospf functionality when staticd is restarted. + + Verify ospf functionalitywhen staticroutes are + redistributed & Staticd is restarted. 
+ """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + + step( + "Create static routes(10.0.20.1/32) in R1 and redistribute " + "to OSPF using route map.") + + # Create Static routes + input_dict = { + "r0": { + "static_routes": [ + { + "network": NETWORK['ipv4'][0], + "no_of_ip": 5, + "next_hop": 'Null0', + } + ] + } + } + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + ospf_red_r0 = { + "r0": { + "ospf": { + "redistribute": [{ + "redist_type": "static" + }] + } + } + } + result = create_router_ospf(tgen, topo, ospf_red_r0) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Verify OSPF neighbors after base config is done.") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step("Verify that route is advertised to R1.") + dut = 'r1' + protocol = 'ospf' + nh = topo['routers']['r0']['links']['r1']['ipv4'].split('/')[0] + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Kill staticd daemon on R0.") + kill_router_daemons(tgen, "r0", ["staticd"]) + + step("Verify that route advertised to R1 are deleted from RIB and FIB.") + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + expected=False) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Bring up staticd daemon on R0.") + start_router_daemons(tgen, "r0", ["staticd"]) + + step("Verify OSPF neighbors are up after bringing back ospfd in R0") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "All the neighbours are up and routes are installed before the" + " restart. Verify OSPF route table and ip route table.") + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + step("Kill staticd daemon on R1.") + kill_router_daemons(tgen, "r1", ["staticd"]) + + step("Bring up staticd daemon on R1.") + start_router_daemons(tgen, "r1", ["staticd"]) + + step("Verify OSPF neighbors are up after bringing back ospfd in R1") + # Api call verify whether OSPF is converged + ospf_covergence = verify_ospf_neighbor(tgen, topo) + assert ospf_covergence is True, ("setup_module :Failed \n Error:" + " {}".format(ospf_covergence)) + + step( + "All the neighbours are up and routes are installed before the" + " restart. 
Verify OSPF route table and ip route table.") + + dut = 'r1' + protocol = 'ospf' + result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, + next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py index 3b37b8a92f..441368e8fa 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp.py @@ -65,6 +65,7 @@ from lib.ospf import ( verify_ospf_rib, create_router_ospf, verify_ospf_interface, + redistribute_ospf, ) topo = None @@ -184,38 +185,6 @@ def teardown_module(mod): logger.info("=" * 40) -def red_static(dut, config=True): - """Local def for Redstribute static routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}} - else: - ospf_red = { - dut: {"ospf": {"redistribute": [{"redist_type": "static", "delete": True}]}} - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase : Failed \n Error: {}".format(result) - - -def red_connected(dut, config=True): - """Local def for Redstribute connected routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "connected", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase: Failed \n Error: {}".format(result) - - # ################################## # Test cases start here. # ################################## @@ -252,7 +221,11 @@ def test_ospf_ecmp_tc16_p0(request): input_dict = { "r0": { "static_routes": [ - {"network": NETWORK["ipv4"][0], "no_of_ip": 5, "next_hop": "Null0",} + { + "network": NETWORK["ipv4"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } ] } } @@ -260,7 +233,7 @@ def test_ospf_ecmp_tc16_p0(request): assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) dut = "r0" - red_static(dut) + redistribute_ospf(tgen, topo, dut, "static") step("Verify that route in R2 in stalled with 8 next hops.") nh = [] @@ -341,7 +314,7 @@ def test_ospf_ecmp_tc16_p0(request): step(" Un configure static route on R0") dut = "r0" - red_static(dut, config=False) + redistribute_ospf(tgen, topo, dut, "static", delete=True) # Wait for R0 to flush external LSAs. 
sleep(10) @@ -372,7 +345,7 @@ def test_ospf_ecmp_tc16_p0(request): step("Re configure the static route in R0.") dut = "r0" - red_static(dut) + redistribute_ospf(tgen, topo, dut, "static") dut = "r1" result = verify_ospf_rib(tgen, dut, input_dict, next_hop=nh) @@ -415,7 +388,11 @@ def test_ospf_ecmp_tc17_p0(request): input_dict = { "r0": { "static_routes": [ - {"network": NETWORK["ipv4"][0], "no_of_ip": 5, "next_hop": "Null0",} + { + "network": NETWORK["ipv4"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } ] } } @@ -423,7 +400,7 @@ def test_ospf_ecmp_tc17_p0(request): assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) dut = "r0" - red_static(dut) + redistribute_ospf(tgen, topo, dut, "static") step("Verify that route in R2 in stalled with 2 next hops.") @@ -442,7 +419,7 @@ def test_ospf_ecmp_tc17_p0(request): step(" Un configure static route on R0") dut = "r0" - red_static(dut, config=False) + redistribute_ospf(tgen, topo, dut, "static", delete=True) # sleep till the route gets withdrawn sleep(10) @@ -472,7 +449,7 @@ def test_ospf_ecmp_tc17_p0(request): step("Reconfigure the static route in R0.Change ECMP value to 2.") dut = "r0" - red_static(dut) + redistribute_ospf(tgen, topo, dut, "static") step("Configure cost on R0 as 100") r0_ospf_cost = {"r0": {"links": {"r1": {"ospf": {"cost": 100}}}}} diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py index 967bc44879..2da1dcd21a 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_ecmp_lan.py @@ -66,6 +66,7 @@ from lib.ospf import ( verify_ospf_rib, create_router_ospf, verify_ospf_interface, + redistribute_ospf, ) from ipaddress import IPv4Address @@ -187,42 +188,6 @@ def teardown_module(): pass -def red_static(dut, config=True): - """Local def for Redstribute static routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "static", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase : Failed \n Error: {}".format(result) - - -def red_connected(dut, config=True): - """Local def for Redstribute connected routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "connected", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase: Failed \n Error: {}".format(result) - - # ################################## # Test cases start here. # ################################## @@ -275,7 +240,7 @@ def test_ospf_lan_ecmp_tc18_p0(request): ) dut = rtr - red_static(dut) + redistribute_ospf(tgen, topo, dut, "static") step( "Verify that route in R0 in stalled with 8 hops. 
" diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py index 1357a86c81..dac32090bc 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_lan.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_lan.py @@ -29,6 +29,7 @@ import pytest import json from copy import deepcopy import ipaddress +from lib.topotest import frr_unicode # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) @@ -182,42 +183,6 @@ def teardown_module(): pass -def red_static(dut, config=True): - """Local def for Redstribute static routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "static", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase : Failed \n Error: {}".format(result) - - -def red_connected(dut, config=True): - """Local def for Redstribute connected routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "connected", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase: Failed \n Error: {}".format(result) - - # ################################## # Test cases start here. # ################################## @@ -468,7 +433,7 @@ def test_ospf_lan_tc1_p0(request): topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) @@ -601,7 +566,7 @@ def test_ospf_lan_tc2_p0(request): topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) @@ -652,7 +617,7 @@ def test_ospf_lan_tc2_p0(request): topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["s1"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(int(intf_ip.split("/")[1]) + 1) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py index 82a34d046c..3644bff3dc 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_nssa.py @@ -30,6 +30,7 @@ from lib.ospf import ( verify_ospf_rib, create_router_ospf, verify_ospf_interface, + redistribute_ospf, ) from lib.topojson import build_topo_from_json, build_config_from_json from lib.topolog import logger @@ -181,38 +182,6 @@ def 
teardown_module(mod): logger.info("=" * 40) -def red_static(dut, config=True): - """Local def for Redstribute static routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}} - else: - ospf_red = { - dut: {"ospf": {"redistribute": [{"redist_type": "static", "delete": True}]}} - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase : Failed \n Error: {}".format(result) - - -def red_connected(dut, config=True): - """Local def for Redstribute connected routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "connected", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase: Failed \n Error: {}".format(result) - - # ################################## # Test cases start here. # ################################## @@ -268,7 +237,7 @@ def test_ospf_learning_tc15_p0(request): step("Redistribute static route in R2 ospf.") dut = "r2" - red_static(dut) + redistribute_ospf(tgen, topo, dut, "static") step("Verify that Type 5 LSA is originated by R2.") dut = "r0" diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py new file mode 100644 index 0000000000..c90275ecb0 --- /dev/null +++ b/tests/topotests/ospf_basic_functionality/test_ospf_p2mp.py @@ -0,0 +1,416 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. +# ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +"""OSPF Basic Functionality Automation.""" +import os +import sys +import time +import pytest +import json +from copy import deepcopy +from ipaddress import IPv4Address + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +import ipaddress + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + step, + create_route_maps, + shutdown_bringup_interface, + create_interfaces_cfg, + topo_daemons, +) +from lib.topolog import logger +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.topotest import frr_unicode + +from lib.ospf import ( + verify_ospf_neighbor, + config_ospf_interface, + clear_ospf, + verify_ospf_rib, + create_router_ospf, + verify_ospf_interface, + verify_ospf_database, +) + +# Global variables +topo = None + +# Reading the data from JSON File for topology creation +jsonFile = "{}/ospf_p2mp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +""" +TOPOLOGY = + Please view in a fixed-width font such as Courier. + +---+ A1 +---+ + +R1 +------------+R2 | + +-+-+- +--++ + | -- -- | + | -- A0 -- | + A0| ---- | + | ---- | A2 + | -- -- | + | -- -- | + +-+-+- +-+-+ + +R0 +-------------+R3 | + +---+ A3 +---+ + +TESTCASES = +1. OSPF P2MP -Verify state change events on p2mp network. + """ + + +class CreateTopo(Topo): + """ + Test topology builder. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # get list of daemons that need to be started for this suite. + daemons = topo_daemons(tgen, topo) + + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen, daemons) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop topology and remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +# ################################## +# Test cases start here. +# ################################## + + +def test_ospf_p2mp_tc1_p0(request): + """OSPF IFSM -Verify state change events on p2mp network.""" + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure.
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global topo + step("Bring up the base config as per the topology") + reset_config_on_routers(tgen) + step( + "Verify that OSPF is subscribed to multicast services " + "(All SPF, all DR Routers)." + ) + step("Verify that interface is enabled in ospf.") + step("Verify that config is successful.") + dut = "r0" + input_dict = { + "r0": { + "links": { + "r3": {"ospf": {"mcastMemberOspfAllRouters": True, "ospfEnabled": True}} + } + } + } + result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Delete the ip address") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv4": topo["routers"]["r0"]["links"]["r3"]["ipv4"], + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Change the ip on the R0 interface") + + topo_modify_change_ip = deepcopy(topo) + intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] + topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str( + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 + ) + "/{}".format(intf_ip.split("/")[1]) + + build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) + step("Verify that interface is enabled in ospf.") + dut = "r0" + input_dict = { + "r0": { + "links": { + "r3": { + "ospf": { + "ipAddress": topo_modify_change_ip["routers"]["r0"]["links"][ + "r3" + ]["ipv4"].split("/")[0], + "ipAddressPrefixlen": int( + topo_modify_change_ip["routers"]["r0"]["links"]["r3"][ + "ipv4" + ].split("/")[1] + ), + } + } + } + } + } + result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Modify the mask on the R0 interface") + ip_addr = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] + mask = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] + step("Delete the ip address") + topo1 = { + "r0": { + "links": { + "r3": { + "ipv4": ip_addr, + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Change the ip on the R0 interface") + + topo_modify_change_ip = deepcopy(topo) + intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] + topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str( + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 + ) + "/{}".format(int(intf_ip.split("/")[1]) + 1) + + build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) + step("Verify that interface is enabled in ospf.") + dut = "r0" + input_dict = { + "r0": { + "links": { + "r3": { + "ospf": { + "ipAddress": topo_modify_change_ip["routers"]["r0"]["links"][ + "r3" + ]["ipv4"].split("/")[0], + "ipAddressPrefixlen": int( + topo_modify_change_ip["routers"]["r0"]["links"]["r3"][ + "ipv4" + ].split("/")[1] + ), + } + } + } + } + } + result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + topo1 = { + "r0": { + "links": { + "r3": { + "ipv4": topo_modify_change_ip["routers"]["r0"]["links"]["r3"][ + "ipv4" + ], + "interface": 
topo_modify_change_ip["routers"]["r0"]["links"]["r3"][ + "interface" + ], + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, topo1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + build_config_from_json(tgen, topo, save_bkup=False) + + step("Change the area id on the interface") + input_dict = { + "r0": { + "links": { + "r3": { + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "ospf": {"area": "0.0.0.0"}, + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "r0": { + "links": { + "r3": { + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "ospf": {"area": "0.0.0.1"}, + } + } + } + } + + result = create_interfaces_cfg(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + step("Verify that interface is enabled in ospf.") + dut = "r0" + input_dict = { + "r0": {"links": {"r3": {"ospf": {"area": "0.0.0.1", "ospfEnabled": True}}}} + } + result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "r0": { + "links": { + "r3": { + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "ospf": {"area": "0.0.0.1"}, + "delete": True, + } + } + } + } + + result = create_interfaces_cfg(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "r0": { + "links": { + "r3": { + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "ospf": {"area": "0.0.0.0"}, + } + } + } + } + + result = create_interfaces_cfg(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify if interface is enabled with network type P2MP") + input_dict = { + "r0": { + "links": { + "r3": { + "interface": topo["routers"]["r0"]["links"]["r3"]["interface"], + "ospf": { + "area": "0.0.0.0", + "networkType":"POINTOMULTIPOINT" + }, + } + } + } + } + result = create_interfaces_cfg(tgen, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py index 04b1f4b878..ceadb3975b 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_routemaps.py @@ -62,6 +62,7 @@ from lib.ospf import ( verify_ospf_rib, create_router_ospf, verify_ospf_database, + redistribute_ospf, ) # Global variables @@ -197,6 +198,7 @@ def teardown_module(mod): # Test cases start here. # ################################## + def test_ospf_routemaps_functionality_tc19_p0(request): """ OSPF Route map - Verify OSPF route map support functionality. 
@@ -215,121 +217,76 @@ def test_ospf_routemaps_functionality_tc19_p0(request): "r0": { "static_routes": [ { - "network": NETWORK['ipv4'][0], + "network": NETWORK["ipv4"][0], "no_of_ip": 5, - "next_hop": 'Null0', + "next_hop": "Null0", } ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - ospf_red_r1 = { - "r0": { - "ospf": { - "redistribute": [{ - "redist_type": "static" - }] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red_r1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) - - dut = 'r1' - lsid = NETWORK['ipv4'][0].split("/")[0] - rid = routerids[0] - protocol = 'ospf' + redistribute_ospf(tgen, topo, "r0", "static") + + dut = "r1" + lsid = NETWORK["ipv4"][0].split("/")[0] + rid = routerids[0] + protocol = "ospf" result = verify_ospf_rib(tgen, dut, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - ospf_red_r1 = { - "r0": { - "ospf": { - "redistribute": [{ - "redist_type": "static", - "del_action": True - }] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red_r1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + redistribute_ospf(tgen, topo, "r0", "static", delete=True) step( - 'Create prefix-list in R0 to permit 10.0.20.1/32 prefix &' - ' deny 10.0.20.2/32') + "Create prefix-list in R0 to permit 10.0.20.1/32 prefix &" " deny 10.0.20.2/32" + ) # Create ip prefix list pfx_list = { "r0": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": NETWORK['ipv4'][0], - "action": "permit" - }, - { - "seqid": 11, - "network": "any", - "action": "deny" - } + "pf_list_1_ipv4": [ + { + "seqid": 10, + "network": NETWORK["ipv4"][0], + "action": "permit", + }, + {"seqid": 11, "network": "any", "action": "deny"}, ] } } } } result = create_prefix_lists(tgen, pfx_list) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map routemaps = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ + "r0": { + "route_maps": { + "rmap_ipv4": [ + { "action": "permit", - "match": { - "ipv4": { - "prefix_lists": - "pf_list_1_ipv4" - } - } - }] - } + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + } + ] } + } } result = create_route_maps(tgen, routemaps) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "Configure route map rmap1 and redistribute static routes to" - " ospf using route map rmap1") + " ospf using route map rmap1" + ) - ospf_red_r1 = { - "r0": { - "ospf": { - "redistribute": [{ - "redist_type": "static", - "route_map": "rmap_ipv4" - }] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red_r1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") step("Change 
prefix rules to permit 10.0.20.2 and deny 10.0.20.1") # Create ip prefix list @@ -337,65 +294,54 @@ def test_ospf_routemaps_functionality_tc19_p0(request): "r0": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": NETWORK['ipv4'][1], - "action": "permit" - }, - { - "seqid": 11, - "network": "any", - "action": "deny" - } + "pf_list_1_ipv4": [ + { + "seqid": 10, + "network": NETWORK["ipv4"][1], + "action": "permit", + }, + {"seqid": 11, "network": "any", "action": "deny"}, ] } } } } result = create_prefix_lists(tgen, pfx_list) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify that route 10.0.20.2 is allowed and 10.0.20.1 is denied.") - dut = 'r1' + dut = "r1" input_dict = { "r0": { "static_routes": [ - { - "network": NETWORK['ipv4'][1], - "no_of_ip": 1, - "next_hop": 'Null0' - } + {"network": NETWORK["ipv4"][1], "no_of_ip": 1, "next_hop": "Null0"} ] } } result = verify_ospf_rib(tgen, dut, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) input_dict = { "r0": { "static_routes": [ - { - "network": NETWORK['ipv4'][0], - "no_of_ip": 1, - "next_hop": 'Null0' - } + {"network": NETWORK["ipv4"][0], "no_of_ip": 1, "next_hop": "Null0"} ] } } result = verify_ospf_rib(tgen, dut, input_dict, expected=False) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, - expected=False) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Delete and reconfigure prefix list.") # Create ip prefix list @@ -403,114 +349,101 @@ def test_ospf_routemaps_functionality_tc19_p0(request): "r0": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": NETWORK['ipv4'][1], - "action": "permit", - "delete": True - }, - { - "seqid": 11, - "network": "any", - "action": "deny", - "delete": True - } + "pf_list_1_ipv4": [ + { + "seqid": 10, + "network": NETWORK["ipv4"][1], + "action": "permit", + "delete": True, + }, + { + "seqid": 11, + "network": "any", + "action": "deny", + "delete": True, + }, ] } } } } result = create_prefix_lists(tgen, pfx_list) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_prefix_lists(tgen, pfx_list) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) input_dict = { "r0": { "static_routes": [ - { - "network": NETWORK['ipv4'][0], - "no_of_ip": 5, - "next_hop": 'Null0' - } + {"network": NETWORK["ipv4"][0], "no_of_ip": 5, "next_hop": "Null0"} ] } } result = verify_ospf_rib(tgen, dut, input_dict, expected=False) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, 
result + ) - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, - expected=False) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) pfx_list = { "r0": { "prefix_lists": { "ipv4": { - "pf_list_1_ipv4": [{ - "seqid": 10, - "network": NETWORK['ipv4'][1], - "action": "permit" - }, - { - "seqid": 11, - "network": "any", - "action": "deny" - } + "pf_list_1_ipv4": [ + { + "seqid": 10, + "network": NETWORK["ipv4"][1], + "action": "permit", + }, + {"seqid": 11, "network": "any", "action": "deny"}, ] } } } } result = create_prefix_lists(tgen, pfx_list) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify that route 10.0.20.2 is allowed and 10.0.20.1 is denied.") - dut = 'r1' + dut = "r1" input_dict = { "r0": { "static_routes": [ - { - "network": NETWORK['ipv4'][1], - "no_of_ip": 1, - "next_hop": 'Null0' - } + {"network": NETWORK["ipv4"][1], "no_of_ip": 1, "next_hop": "Null0"} ] } } result = verify_ospf_rib(tgen, dut, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) input_dict = { "r0": { "static_routes": [ - { - "network": NETWORK['ipv4'][0], - "no_of_ip": 1, - "next_hop": 'Null0' - } + {"network": NETWORK["ipv4"][0], "no_of_ip": 1, "next_hop": "Null0"} ] } } result = verify_ospf_rib(tgen, dut, input_dict, expected=False) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) - result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, - expected=False) + result = verify_rib( + tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False + ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) write_test_footer(tc_name) @@ -535,7 +468,11 @@ def test_ospf_routemaps_functionality_tc20_p0(request): input_dict = { "r0": { "static_routes": [ - {"network": NETWORK["ipv4"][0], "no_of_ip": 5, "next_hop": "Null0",} + { + "network": NETWORK["ipv4"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } ] } } @@ -543,15 +480,7 @@ def test_ospf_routemaps_functionality_tc20_p0(request): assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Redistribute to ospf using route map ( non existent route map)") - ospf_red_r1 = { - "r0": { - "ospf": { - "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red_r1) - assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") step( "Verify that routes are not allowed in OSPF even tough no " @@ -663,318 +592,221 @@ def test_ospf_routemaps_functionality_tc21_p0(request): step( "Create static routes(10.0.20.1/32) in R1 and redistribute " - "to OSPF using route map.") + "to OSPF using route map." 
+ ) # Create Static routes input_dict = { "r0": { "static_routes": [ { - "network": NETWORK['ipv4'][0], + "network": NETWORK["ipv4"][0], "no_of_ip": 5, - "next_hop": 'Null0', + "next_hop": "Null0", } ] } } result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - ospf_red_r0 = { - "r0": { - "ospf": { - "redistribute": [{ - "redist_type": "static", - "route_map": "rmap_ipv4" - }] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red_r0) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") # Create route map routemaps = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - "seq_id": 10 - }] - } - } + "r0": {"route_maps": {"rmap_ipv4": [{"action": "permit", "seq_id": 10}]}} } result = create_route_maps(tgen, routemaps) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify that route is advertised to R2.") - dut = 'r1' - protocol = 'ospf' + dut = "r1" + protocol = "ospf" result = verify_ospf_rib(tgen, dut, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map routemaps = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - "delete": True, - "seq_id": 10 - }] + "r0": { + "route_maps": { + "rmap_ipv4": [{"action": "permit", "delete": True, "seq_id": 10}] + } } } - } result = create_route_maps(tgen, routemaps) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - step(' Configure route map with set clause (set metric)') + step(" Configure route map with set clause (set metric)") # Create route map routemaps = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - "set": { - "med": 123 - }, - "seq_id": 10 - }] + "r0": { + "route_maps": { + "rmap_ipv4": [{"action": "permit", "set": {"med": 123}, "seq_id": 10}] + } } } - } result = create_route_maps(tgen, routemaps) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Verify that configured metric is applied to ospf routes.") - dut = 'r1' - protocol = 'ospf' + dut = "r1" + protocol = "ospf" result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "Configure route map with match clause (match metric) with " - "some actions(change metric).") + "some actions(change metric)." 
+ ) # Create route map routemaps = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - "match": { - "med": 123 - }, - "set": { - "med": 150 - }, - "seq_id": 10 - }] + "r0": { + "route_maps": { + "rmap_ipv4": [ + { + "action": "permit", + "match": {"med": 123}, + "set": {"med": 150}, + "seq_id": 10, + } + ] + } } } - } result = create_route_maps(tgen, routemaps) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure route map with call clause") # Create ip prefix list input_dict_2 = { - 'r0': { - 'prefix_lists': { - 'ipv4': { - 'pf_list_1_ipv4': [{ - 'seqid': 10, - 'network': 'any', - 'action': 'permit' - }] - } + "r0": { + "prefix_lists": { + "ipv4": { + "pf_list_1_ipv4": [ + {"seqid": 10, "network": "any", "action": "permit"} + ] + } } } } result = create_prefix_lists(tgen, input_dict_2) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map input_dict_3 = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } - }, - "set": { - "med": 150 - }, - "call": "rmap_match_pf_2_ipv4", - "seq_id": 10 - }], - "rmap_match_pf_2_ipv4": [{ - "action": "permit", - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } - }, - "set": { - "med": 200 - }, - "seq_id": 10 - }] + "r0": { + "route_maps": { + "rmap_ipv4": [ + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 150}, + "call": "rmap_match_pf_2_ipv4", + "seq_id": 10, + } + ], + "rmap_match_pf_2_ipv4": [ + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 200}, + "seq_id": 10, + } + ], + } } } - } result = create_route_maps(tgen, input_dict_3) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_ospf_rib(tgen, dut, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) # Create route map - routemaps = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "delete": True - }] - } - } - } + routemaps = {"r0": {"route_maps": {"rmap_ipv4": [{"delete": True}]}}} result = create_route_maps(tgen, routemaps) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure route map with continue clause") # Create route map input_dict_3 = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - 'seq_id': '10', - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } - }, - "set": { - "med": 150 - }, - "continue": "30", - "seq_id": 10 - }, - { - "action": "permit", - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } - }, - "set": { - "med": 100 + "r0": { + "route_maps": { + "rmap_ipv4": [ + { + "action": "permit", + "seq_id": "10", + "match": 
{"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 150}, + "continue": "30", + "seq_id": 10, }, - "seq_id": 20 - }, - { - "action": "permit", - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 100}, + "seq_id": 20, }, - "set": { - "med": 50 + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 50}, + "seq_id": 30, }, - "seq_id": 30 - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_ospf_rib(tgen, dut, input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("Configure route map with goto clause") # Create route map input_dict_3 = { - "r0": { - "route_maps": { - "rmap_ipv4": [{ - "action": "permit", - 'seq_id': '10', - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } + "r0": { + "route_maps": { + "rmap_ipv4": [ + { + "action": "permit", + "seq_id": "10", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "goto": "30", }, - "goto": "30", - }, - { - "action": "permit", - 'seq_id': '20', - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } + { + "action": "permit", + "seq_id": "20", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 100}, }, - "set": { - "med": 100 - } - }, - { - "action": "permit", - 'seq_id': '30', - "match": { - "ipv4": { - "prefix_lists": "pf_list_1_ipv4" - } + { + "action": "permit", + "seq_id": "30", + "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}}, + "set": {"med": 200}, }, - "set": { - "med": 200 - } - } - ] + ] + } } } - } result = create_route_maps(tgen, input_dict_3) - assert result is True, 'Testcase {} : Failed \n Error: {}'.format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) @@ -1003,22 +835,18 @@ def test_ospf_routemaps_functionality_tc24_p0(request): input_dict = { "r0": { "static_routes": [ - {"network": NETWORK["ipv4"][0], "no_of_ip": 1, "next_hop": "Null0",} + { + "network": NETWORK["ipv4"][0], + "no_of_ip": 1, + "next_hop": "Null0", + } ] } } result = create_static_routes(tgen, input_dict) assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - ospf_red_r0 = { - "r0": { - "ospf": { - "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red_r0) - assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") # Create ip prefix list pfx_list = { @@ -1037,9 +865,10 @@ def test_ospf_routemaps_functionality_tc24_p0(request): step("verify that 
prefix-list is created in R0.") result = verify_prefix_lists(tgen, pfx_list) - assert result is not True, ( - "Testcase {} : Failed \n Prefix list not " - "present. Error: {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format( + tc_name, result ) # Create route map @@ -1105,9 +934,10 @@ def test_ospf_routemaps_functionality_tc24_p0(request): step("verify that prefix-list is created in R0.") result = verify_prefix_lists(tgen, pfx_list) - assert result is not True, ( - "Testcase {} : Failed \n Prefix list not " - "present. Error: {}".format(tc_name, result) + assert ( + result is not True + ), "Testcase {} : Failed \n Prefix list not " "present. Error: {}".format( + tc_name, result ) # Create route map diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py index 6ac0b515df..5aa2779aee 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_rte_calc.py @@ -61,6 +61,7 @@ from lib.ospf import ( clear_ospf, verify_ospf_rib, create_router_ospf, + redistribute_ospf, ) # Global variables @@ -183,42 +184,6 @@ def teardown_module(mod): logger.info("=" * 40) -def red_static(dut, config=True): - """Local def for Redstribute static routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "static", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase : Failed \n Error: {}".format(result) - - -def red_connected(dut, config=True): - """Local def for Redstribute connected routes inside ospf.""" - global topo - tgen = get_topogen() - if config: - ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}} - else: - ospf_red = { - dut: { - "ospf": { - "redistribute": [{"redist_type": "connected", "del_action": True}] - } - } - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase: Failed \n Error: {}".format(result) - - # ################################## # Test cases start here. 
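The hunks above replace the per-file red_static()/red_connected() helpers with the shared redistribute_ospf() now imported from lib/ospf.py. The shared helper itself is not part of this diff; judging from the call sites (e.g. redistribute_ospf(tgen, topo, "r0", "static", route_map="rmap_ipv4") and redistribute_ospf(tgen, topo, rtr, "static", delete=True)) and the removed local helpers, a minimal sketch of what it presumably does is:

# Hypothetical sketch only -- the real helper lives in tests/topotests/lib/ospf.py
# and is not shown in this diff; it mirrors the removed local helpers above.
from lib.ospf import create_router_ospf

def redistribute_ospf(tgen, topo, dut, red_type, **kwargs):
    """Enable (or, with delete=True, remove) 'redistribute <red_type>' in OSPF on dut."""
    red = {"redist_type": red_type}
    red.update(kwargs)  # optional knobs seen at the call sites: route_map=..., delete=True
    ospf_red = {dut: {"ospf": {"redistribute": [red]}}}
    result = create_router_ospf(tgen, topo, ospf_red)
    assert result is True, "Testcase : Failed \n Error: {}".format(result)

Centralising this removes the near-identical dict-building boilerplate that each test module previously carried.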
# ################################## @@ -407,7 +372,13 @@ def test_ospf_redistribution_tc6_p0(request): protocol = "ospf" result = verify_rib( - tgen, "ipv4", dut, input_dict, protocol=protocol, next_hop=nh, expected=False, + tgen, + "ipv4", + dut, + input_dict, + protocol=protocol, + next_hop=nh, + expected=False, ) assert result is not True, "Testcase {} : Failed \n Error: {}".format( tc_name, result @@ -480,8 +451,8 @@ def test_ospf_redistribution_tc8_p1(request): "advertised/exchaged via ospf" ) for rtr in topo["routers"]: - red_static(rtr) - red_connected(rtr) + redistribute_ospf(tgen, topo, rtr, "static") + redistribute_ospf(tgen, topo, rtr, "connected") for node in topo["routers"]: input_dict = { "r0": { @@ -538,18 +509,16 @@ def test_ospf_redistribution_tc8_p1(request): ) for rtr in topo["routers"]: - ospf_red = { - rtr: {"ospf": {"redistribute": [{"redist_type": "static", "delete": True}]}} - } - result = create_router_ospf(tgen, topo, ospf_red) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result - ) + redistribute_ospf(tgen, topo, rtr, "static", delete=True) input_dict = { "r0": { "static_routes": [ - {"network": NETWORK["ipv4"][0], "no_of_ip": 5, "next_hop": "Null0",} + { + "network": NETWORK["ipv4"][0], + "no_of_ip": 5, + "next_hop": "Null0", + } ] } } diff --git a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py index 3a269d853c..6f6b119abc 100644 --- a/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py +++ b/tests/topotests/ospf_basic_functionality/test_ospf_single_area.py @@ -29,6 +29,7 @@ import pytest import json from copy import deepcopy from ipaddress import IPv4Address +from lib.topotest import frr_unicode # Save the Current Working Directory to find configuration files. CWD = os.path.dirname(os.path.realpath(__file__)) @@ -233,7 +234,7 @@ def test_ospf_p2p_tc3_p0(request): topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(intf_ip.split("/")[1]) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) @@ -284,7 +285,7 @@ def test_ospf_p2p_tc3_p0(request): topo_modify_change_ip = deepcopy(topo) intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str( - IPv4Address(unicode(intf_ip.split("/")[0])) + 3 + IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3 ) + "/{}".format(int(intf_ip.split("/")[1]) + 1) build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False) @@ -776,7 +777,6 @@ def test_ospf_show_p1(request): write_test_footer(tc_name) - def test_ospf_dead_tc11_p0(request): """ OSPF timers. 
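The test_ospf_single_area.py hunks swap the bare Python 2 builtin unicode() for frr_unicode(), imported from lib/topotest.py, so the IPv4Address() arithmetic keeps working under both Python 2 and Python 3. The shim's implementation is not shown in this diff; the assumed behaviour is roughly:

# Assumed behaviour only -- the real frr_unicode() is defined in
# tests/topotests/lib/topotest.py and is not part of this diff.
import sys

def frr_unicode(s):
    """Return a text string accepted by ipaddress.IPv4Address on py2 and py3."""
    if sys.version_info[0] >= 3:
        return s              # str is already unicode on Python 3
    return unicode(s)         # noqa: F821 -- Python 2 builtin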
@@ -798,224 +798,146 @@ def test_ospf_dead_tc11_p0(request): step("modify dead interval from default value to some other value on r1") topo1 = { - 'r1': { - 'links': { - 'r0': { - 'interface': topo['routers']['r1']['links']['r0'][ - 'interface'], - 'ospf': { - 'hello_interval': 12, - 'dead_interval': 48 - } + "r1": { + "links": { + "r0": { + "interface": topo["routers"]["r1"]["links"]["r0"]["interface"], + "ospf": {"hello_interval": 12, "dead_interval": 48}, } } } } - result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( "verify that new timer value is configured and applied using " - "the show ip ospf interface command.") - dut = 'r1' - input_dict= { - 'r1': { - 'links':{ - 'r0': { - 'ospf':{ - 'timerDeadSecs': 48 - } - } - } - } - } + "the show ip ospf interface command." + ) + dut = "r1" + input_dict = {"r1": {"links": {"r0": {"ospf": {"timerDeadSecs": 48}}}}} result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - step( - "modify dead interval from default value to r1" - "dead interval timer on r2") + step("modify dead interval from default value to r1" "dead interval timer on r2") topo1 = { - 'r0': { - 'links': { - 'r1': { - 'interface': topo['routers']['r0']['links']['r1'][ - 'interface'], - 'ospf': { - 'dead_interval': 48, - 'hello_interval': 12 - } + "r0": { + "links": { + "r1": { + "interface": topo["routers"]["r0"]["links"]["r1"]["interface"], + "ospf": {"dead_interval": 48, "hello_interval": 12}, } } } } result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) - + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("verify that new timer value is configured.") - input_dict= { - 'r0': { - 'links':{ - 'r1': { - 'ospf':{ - 'timerDeadSecs': 48 - } - } - } - } - } - dut = 'r0' + input_dict = {"r0": {"links": {"r1": {"ospf": {"timerDeadSecs": 48}}}}} + dut = "r0" result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("verify that ospf neighbours are full") ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut) - assert ospf_covergence is True, ("setup_module :Failed \n Error:" - " {}".format(ospf_covergence)) + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) - step( - "reconfigure the default dead interval timer value to " - "default on r1 and r2") + step("reconfigure the default dead interval timer value to " "default on r1 and r2") topo1 = { - 'r0': { - 'links': { - 'r1': { - 'interface': topo['routers']['r0']['links']['r1'][ - 'interface'], - 'ospf': { - 'dead_interval': 40 - } + "r0": { + "links": { + "r1": { + "interface": topo["routers"]["r0"]["links"]["r1"]["interface"], + "ospf": {"dead_interval": 40}, } } } } result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) topo1 = { - 'r1': { - 'links': { - 
'r0': { - 'interface': topo['routers']['r1']['links']['r0'][ - 'interface'], - 'ospf': { - 'dead_interval': 40 - } + "r1": { + "links": { + "r0": { + "interface": topo["routers"]["r1"]["links"]["r0"]["interface"], + "ospf": {"dead_interval": 40}, } } } } result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("verify that new timer value is configured.") - input_dict= { - 'r0': { - 'links':{ - 'r1': { - 'ospf':{ - 'timerDeadSecs': 40 - } - } - } - } - } - dut = 'r0' + input_dict = {"r0": {"links": {"r1": {"ospf": {"timerDeadSecs": 40}}}}} + dut = "r0" result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) - + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("verify that ospf neighbours are full") ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut) - assert ospf_covergence is True, ("setup_module :Failed \n Error:" - " {}".format(ospf_covergence)) - + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) step(" Configure dead timer = 65535 on r1 and r2") topo1 = { - 'r0': { - 'links': { - 'r1': { - 'interface': topo['routers']['r0']['links']['r1'][ - 'interface'], - 'ospf': { - 'dead_interval': 65535 - } + "r0": { + "links": { + "r1": { + "interface": topo["routers"]["r0"]["links"]["r1"]["interface"], + "ospf": {"dead_interval": 65535}, } } } } result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) topo1 = { - 'r1': { - 'links': { - 'r0': { - 'interface': topo['routers']['r1']['links']['r0'][ - 'interface'], - 'ospf': { - 'dead_interval': 65535 - } + "r1": { + "links": { + "r0": { + "interface": topo["routers"]["r1"]["links"]["r0"]["interface"], + "ospf": {"dead_interval": 65535}, } } } } result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("verify that new timer value is configured.") - input_dict= { - 'r0': { - 'links':{ - 'r1': { - 'ospf':{ - 'timerDeadSecs': 65535 - } - } - } - } - } - dut = 'r0' + input_dict = {"r0": {"links": {"r1": {"ospf": {"timerDeadSecs": 65535}}}}} + dut = "r0" result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step("verify that ospf neighbours are full") ospf_covergence = verify_ospf_neighbor(tgen, topo, dut=dut) - assert ospf_covergence is True, ("setup_module :Failed \n Error:" - " {}".format(ospf_covergence)) - + assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format( + ospf_covergence + ) step(" Try configuring timer values outside range for example 65536") topo1 = { - 'r0': { - 'links': { - 'r1': { - 'interface': topo['routers']['r0']['links']['r1'][ - 'interface'], - 'ospf': { - 'dead_interval': 65536 - } + "r0": { + "links": { + "r1": { + "interface": topo["routers"]["r0"]["links"]["r1"]["interface"], + "ospf": {"dead_interval": 65536}, } } } @@ -1023,47 
+945,33 @@ def test_ospf_dead_tc11_p0(request): result = create_interfaces_cfg(tgen, topo1) assert result is not True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + tc_name, result + ) step("Unconfigure the dead timer from the interface from r1 and r2.") topo1 = { - 'r1': { - 'links': { - 'r0': { - 'interface': topo['routers']['r1']['links']['r0'][ - 'interface'], - 'ospf': { - 'dead_interval': 65535 - }, - 'delete': True + "r1": { + "links": { + "r0": { + "interface": topo["routers"]["r1"]["links"]["r0"]["interface"], + "ospf": {"dead_interval": 65535}, + "delete": True, } } } } result = create_interfaces_cfg(tgen, topo1) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) step( - "Verify that timer value is deleted from intf & " - "set to default value 40 sec.") - input_dict= { - 'r1': { - 'links':{ - 'r0': { - 'ospf':{ - 'timerDeadSecs': 40 - } - } - } - } - } - dut = 'r1' + "Verify that timer value is deleted from intf & " "set to default value 40 sec." + ) + input_dict = {"r1": {"links": {"r0": {"ospf": {"timerDeadSecs": 40}}}}} + dut = "r1" result = verify_ospf_interface(tgen, topo, dut=dut, input_dict=input_dict) - assert result is True, "Testcase {} : Failed \n Error: {}".format( - tc_name, result) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) write_test_footer(tc_name) diff --git a/tests/topotests/pbr-topo1/test_pbr_topo1.py b/tests/topotests/pbr-topo1/test_pbr_topo1.py index fcbe3c0adf..5161d5eec7 100644 --- a/tests/topotests/pbr-topo1/test_pbr_topo1.py +++ b/tests/topotests/pbr-topo1/test_pbr_topo1.py @@ -80,7 +80,7 @@ class NetworkTopo(Topo): ## ##################################################### - +@pytest.mark.pbr def setup_module(module): "Setup topology" tgen = Topogen(NetworkTopo, module.__name__) diff --git a/tests/topotests/pim-basic/test_pim.py b/tests/topotests/pim-basic/test_pim.py index e8a9f72b48..224b82f1fb 100644 --- a/tests/topotests/pim-basic/test_pim.py +++ b/tests/topotests/pim-basic/test_pim.py @@ -31,6 +31,8 @@ import pytest import json from functools import partial +pytestmark = pytest.mark.pimd + CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini index 6e8e749092..0c45a09445 100644 --- a/tests/topotests/pytest.ini +++ b/tests/topotests/pytest.ini @@ -2,6 +2,29 @@ [pytest] norecursedirs = .git example-test example-topojson-test lib docker +# Markers +# +# Please consult the documentation and discuss with TSC members before applying +# any changes to this list. 
+markers = + babeld: Tests that run against BABELD + bfdd: Tests that run against BFDD + bgpd: Tests that run against BGPD + eigrpd: Tests that run against EIGRPD + isisd: Tests that run against ISISD + ldpd: Tests that run against LDPD + nhrpd: Tests that run against NHRPD + ospf6d: Tests that run against OSPF6D + ospfd: Tests that run against OSPFD + pathd: Tests that run against PATHD + pbrd: Tests that run against PBRD + pimd: Tests that run against PIMD + ripd: Tests that run against RIPD + ripngd: Tests that run against RIPNGD + sharpd: Tests that run against SHARPD + staticd: Tests that run against STATICD + vrrpd: Tests that run against VRRPD + [topogen] # Default configuration values # diff --git a/tests/topotests/rip-topo1/test_rip_topo1.py b/tests/topotests/rip-topo1/test_rip_topo1.py index de11b78824..edad1ff65d 100644 --- a/tests/topotests/rip-topo1/test_rip_topo1.py +++ b/tests/topotests/rip-topo1/test_rip_topo1.py @@ -104,7 +104,7 @@ class NetworkTopo(Topo): ## ##################################################### - +@pytest.mark.rip def setup_module(module): global topo, net @@ -352,9 +352,11 @@ def test_zebra_ipv4_routingTable(): else: print("r%s ok" % i) - assert failures == 0, ( - "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" - % (i, diff) + assert ( + failures == 0 + ), "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" % ( + i, + diff, ) # Make sure that all daemons are still running diff --git a/tests/topotests/ripng-topo1/test_ripng_topo1.py b/tests/topotests/ripng-topo1/test_ripng_topo1.py index 2976cdefe4..47b63e5b26 100644 --- a/tests/topotests/ripng-topo1/test_ripng_topo1.py +++ b/tests/topotests/ripng-topo1/test_ripng_topo1.py @@ -104,7 +104,7 @@ class NetworkTopo(Topo): ## ##################################################### - +@pytest.mark.rip def setup_module(module): global topo, net @@ -373,9 +373,11 @@ def test_zebra_ipv6_routingTable(): else: print("r%s ok" % i) - assert failures == 0, ( - "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" - % (i, diff) + assert ( + failures == 0 + ), "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" % ( + i, + diff, ) # Make sure that all daemons are running diff --git a/tests/topotests/simple-snmp-test/r1/bgpd.conf b/tests/topotests/simple-snmp-test/r1/bgpd.conf new file mode 100644 index 0000000000..00d1e17670 --- /dev/null +++ b/tests/topotests/simple-snmp-test/r1/bgpd.conf @@ -0,0 +1,6 @@ +log file /tmp/bgpd.log debugging +! +router bgp 100 + bgp router-id 1.1.1.1 + +agentx diff --git a/tests/topotests/simple-snmp-test/r1/isisd.conf b/tests/topotests/simple-snmp-test/r1/isisd.conf new file mode 100644 index 0000000000..b5ca993da3 --- /dev/null +++ b/tests/topotests/simple-snmp-test/r1/isisd.conf @@ -0,0 +1,46 @@ +log stdout debugging +! +debug isis route-events +debug isis events +! +interface r1-eth0 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface r1-eth1 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! +interface r1-eth2 + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + no isis hello padding + isis hello-interval 1 + isis hello-multiplier 3 + isis network point-to-point +! 
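The pytest.ini hunk above registers one marker per daemon, and the pbr-topo1, pim-basic, rip-topo1 and ripng-topo1 hunks start tagging test modules with them. As an illustration (the marker names used here are among those registered above, but this snippet itself is not part of the diff), tagging and selecting tests works like this:

# Illustrative only: mirrors the module-level tagging added in test_pim.py.
import pytest

pytestmark = pytest.mark.ospfd     # every test in this module is selectable via "-m ospfd"

@pytest.mark.bgpd                  # individual tests may carry an additional marker
def test_example():
    assert True

# Run only the tagged subset with a marker expression, e.g.:
#   pytest -m "ospfd or bgpd" tests/topotests/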
+interface lo + ip router isis ISIS1 + ipv6 router isis ISIS1 + isis circuit-type level-1 + isis passive + no isis hello padding +! +router isis ISIS1 + net 01.1111.0000.0000.0001.00 + is-type level-1 + topology ipv6-unicast +! +line vty +! diff --git a/tests/topotests/simple-snmp-test/r1/snmpd.conf b/tests/topotests/simple-snmp-test/r1/snmpd.conf new file mode 100644 index 0000000000..b37911da36 --- /dev/null +++ b/tests/topotests/simple-snmp-test/r1/snmpd.conf @@ -0,0 +1,15 @@ +agentAddress udp:1.1.1.1:161 + +com2sec public 1.1.1.1 public + +group public_group v1 public +group public_group v2c public + +access public_group "" any noauth prefix all all none + +view all included .1 + +iquerySecName frr +rouser frr + +master agentx diff --git a/tests/topotests/simple-snmp-test/r1/zebra.conf b/tests/topotests/simple-snmp-test/r1/zebra.conf new file mode 100644 index 0000000000..5281d0055d --- /dev/null +++ b/tests/topotests/simple-snmp-test/r1/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +interface r1-eth0 + ip address 192.168.12.12/24 + ipv6 address 2000:1:1:12::12/64 +! +interface r1-eth1 + ip address 192.168.13.13/24 + ipv6 address 2000:1:1:13::13/64 +! +interface r1-eth2 + ip address 192.168.14.14/24 + ipv6 address 2000:1:1:14::14/64 +! +! +interface lo + ip address 1.1.1.1/32 + ipv6 address 2000:1:1:1::1/128 +! +! +! +line vty diff --git a/tests/topotests/simple-snmp-test/test_simple_snmp.py b/tests/topotests/simple-snmp-test/test_simple_snmp.py new file mode 100755 index 0000000000..88ff01bf0a --- /dev/null +++ b/tests/topotests/simple-snmp-test/test_simple_snmp.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python + +# +# test_simple_snmp.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2020 by Volta Networks +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +test_bgp_simple snmp.py: Test snmp infrastructure. +""" + +import os +import sys +import json +from functools import partial +from time import sleep +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.snmptest import SnmpTester + +# Required to instantiate the topology builder class. +from mininet.topo import Topo + + +class TemplateTopo(Topo): + "Test topology builder" + + def build(self, *_args, **_opts): + "Build function" + tgen = get_topogen(self) + + # This function only purpose is to define allocation and relationship + # between routers, switches and hosts. 
+ # + # + # Create routers + tgen.add_router("r1") + + # r1-eth0 + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + + # r1-eth1 + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + # r1-eth2 + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r1"]) + + +def setup_module(mod): + "Sets up the pytest environment" + + # skip tests is SNMP not installed + if not os.path.isfile("/usr/sbin/snmpd"): + error_msg = "SNMP not installed - skipping" + pytest.skip(error_msg) + # This function initiates the topology build with Topogen... + tgen = Topogen(TemplateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + tgen.start_topology() + + r1 = tgen.gears["r1"] + + router_list = tgen.routers() + + # For all registred routers, load the zebra configuration file + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, + os.path.join(CWD, "{}/bgpd.conf".format(rname)), + "-M snmp", + ) + router.load_config( + TopoRouter.RD_SNMP, + os.path.join(CWD, "{}/snmpd.conf".format(rname)), + "-Le -Ivacm_conf,usmConf,iquery -V -DAgentX,trap", + ) + + # After loading the configurations, this function loads configured daemons. + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. + tgen.stop_topology() + + +def test_r1_bgp_version(): + "Wait for protocol convergence" + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # tgen.mininet_cli() + r1 = tgen.net.get("r1") + r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c") + assert r1_snmp.test_oid("bgpVersin", None) + assert r1_snmp.test_oid("bgpVersion", "10") + assert r1_snmp.test_oid_walk("bgpVersion", ["10"]) + assert r1_snmp.test_oid_walk("bgpVersion", ["10"], ["0"]) + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ebgp/static_routes_topo1_ebgp.json b/tests/topotests/static_routing_with_ebgp/static_routes_topo1_ebgp.json new file mode 100644 index 0000000000..7099043adc --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/static_routes_topo1_ebgp.json @@ -0,0 +1,157 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + } + } +}
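static_routes_topo1_ebgp.json above (like the topo2/topo3/topo4 variants that follow) is consumed by the generic topojson machinery rather than by hand-written router configs. Condensed from the new test_static_routes_topo1_ebgp.py later in this diff, the consumption pattern is:

# Condensed from test_static_routes_topo1_ebgp.py (later in this diff); shows how
# the JSON topology files are loaded and turned into a running topology.
import json, os
from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.common_config import start_topology
from lib.topojson import build_topo_from_json, build_config_from_json

CWD = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(CWD, "static_routes_topo1_ebgp.json")) as f:
    topo = json.load(f)

class CreateTopo(Topo):
    def build(self, *_args, **_opts):
        build_topo_from_json(get_topogen(self), topo)   # routers, switches, links from JSON

def setup_module(mod):
    tgen = Topogen(CreateTopo, mod.__name__)   # instantiate the mininet topology
    start_topology(tgen)                       # start routers and their daemons
    build_config_from_json(tgen, topo)         # push per-router FRR config (BGP, links, ...)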
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ebgp/static_routes_topo2_ebgp.json b/tests/topotests/static_routing_with_ebgp/static_routes_topo2_ebgp.json new file mode 100644 index 0000000000..91820b0491 --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/static_routes_topo2_ebgp.json @@ -0,0 +1,363 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link7": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + }, + "redistribute": [{ + "redist_type": "static" + }] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + }, 
+ "redistribute": [{ + "redist_type": "static" + }] + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ebgp/static_routes_topo3_ebgp.json b/tests/topotests/static_routing_with_ebgp/static_routes_topo3_ebgp.json new file mode 100644 index 0000000000..5246a311ea --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/static_routes_topo3_ebgp.json @@ -0,0 +1,189 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 29, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link7": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ebgp/static_routes_topo4_ebgp.json b/tests/topotests/static_routing_with_ebgp/static_routes_topo4_ebgp.json new file mode 100644 index 0000000000..bb72578ff3 --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/static_routes_topo4_ebgp.json @@ -0,0 +1,428 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm4": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + "r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + }, + "r3": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + "r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + "r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + }, + "r3": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + 
"r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm5": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + } + } + } + }, + "vm1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm6": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py new file mode 100644 index 0000000000..a33257de65 --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo1_ebgp.py @@ -0,0 +1,1261 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# +""" + + -Verify static route ECMP functionality with 2 next hop. + + -Verify static route functionality with 2 next hop and different AD + value. + + -Verify RIB status when same route advertise via BGP and static route. + +""" +import sys +import json +import time +import os +import pytest +import platform +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +from lib.topotest import version_cmp +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_interfaces_cfg, + shutdown_bringup_interface, + stop_router, + start_router, +) +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo1_ebgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]} +NETWORK2 = {"ipv4": "11.0.20.1/32", "ipv6": "2::1/128"} + +PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"} + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Sets up the pytest environment. 
+ + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. (have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def populate_nh(): + NEXT_HOP_IP = { + "nh1": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0], + }, + "nh2": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0], + }, + } + return NEXT_HOP_IP + + +##################################################### +# +# Testcases +# +##################################################### + + +def test_static_route_2nh_p0_tc_1_ebgp(request): + """ + Verify static route ECMP functionality with 2 next hop. + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step( + "Configure IPv4 static route (10.1.1.1) in R2 with next hop N1" + "(28.1.1.2 ) and N2 (29.1.1.2) , Static route next-hop present on" + "R1" + ) + step("ex :- ip route 10.1.1.1/24 28.1.1.2 & ip route 10.1.1.1/24 29.1.1.1") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + }, + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + }, + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, static route installed in RIB using show ip route" + " with 2 ECMP next hop " + ) + nh = [NEXT_HOP_IP["nh1"][addr_type], NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert ( + result is True + ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name) + + step("Configure IBGP IPv4 peering between R2 and R3 router.") + step("Configure redistribute static in BGP on R2 router") + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N1 from running config") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N1 , " + "route become active with nexthop N2 and vice versa." 
+ ) + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert ( + result is not True + ), "Testcase {} : Failed" "Error: Routes is still present in RIB".format( + tc_name + ) + + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure the static route with nexthop N1") + + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N2 from" "running config") + + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N2 , " + "route become active with nexthop N1 and vice versa." + ) + nh = NEXT_HOP_IP["nh2"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure the static route with nexthop N2") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Shut nexthop interface N1") + intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"] + + shutdown_bringup_interface(tgen, dut, intf, False) + + step("Only one the nexthops should be active in RIB.") + + nh = NEXT_HOP_IP["nh2"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + protocol=protocol, + next_hop=nh, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still 
present in RIB".format(tc_name) + + dut = "r2" + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + dut = "r2" + step("No shut the nexthop interface N1") + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." + ) + nh = [NEXT_HOP_IP["nh1"][addr_type], NEXT_HOP_IP["nh2"][addr_type]] + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Shut nexthop interface N2") + intf = topo["routers"]["r2"]["links"]["r1-link1"]["interface"] + dut = "r2" + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + " after shut of nexthop N1 , route become active with " + "nexthop N2 and vice versa." + ) + nh = NEXT_HOP_IP["nh2"][addr_type] + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + step("No shut nexthop interface N2") + dut = "r2" + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." + ) + nh = [NEXT_HOP_IP["nh1"][addr_type], NEXT_HOP_IP["nh2"][addr_type]] + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + dut = "r2" + step( + "After reload of FRR router , static route installed" + " in RIB and FIB properly ." 
+ ) + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_2nh_admin_dist_p0_tc_2_ebgp(request): + """ + Verify static route functionality with 2 next hop & different AD value. + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + step( + "Configure IPv4 static route (10.1.1.1) in R2 with next hop N1" + "(28.1.1.2 ) AD 10 and N2 (29.1.1.2) AD 20 , Static route next-hop" + "present on R1 \n ex :- ip route 10.1.1.1/24 28.1.1.2 10 & " + "ip route 10.1.1.1/24 29.1.1.2 20" + ) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + }, + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, static route installed in RIB using " + "show ip route with 2 next hop , lowest AD nexthop is active " + ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + step("Configure IBGP IPv4 peering between R2 and R3 router.") + step("Explicit route is added in R3 for R2 nexthop rechability") + rt3_rtes = { + "r3": { + "static_routes": [ + { + "network": NEXT_HOP_IP["nh1"][addr_type] + "/32", + "next_hop": topo["routers"]["r2"]["links"]["r3"][addr_type], + }, + { + "network": NEXT_HOP_IP["nh2"][addr_type] + "/32", + "next_hop": topo["routers"]["r2"]["links"]["r3"][addr_type], + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, rt3_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("Configure 
redistribute static in BGP on R2 router") + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N1 from" "running config") + rt1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, rt1_nh1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N1 , " + "route become active with nexthop N2 and vice versa." + ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte1_nh1, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte2_nh2, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + step("Configure the static route with nexthop N1") + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, rte1_nh1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N2 from" "running config") + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, rte2_nh2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N2 , " + "route become active with nexthop N1 and vice versa." 
+ ) + nh = NEXT_HOP_IP["nh2"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure the static route with nexthop N2") + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, rte2_nh2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Shut nexthop interface N1") + intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"] + + shutdown_bringup_interface(tgen, dut, intf, False) + + step("after shut of nexthop N1 , route become active with nexthop N2") + + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + rte1_nh1, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte2_nh2, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("No shut the nexthop interface N1") + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." + ) + nh = [NEXT_HOP_IP["nh1"][addr_type]] + + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Shut nexthop interface N2") + intf = topo["routers"]["r2"]["links"]["r1-link1"]["interface"] + + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + " after shut of nexthop N1 , route become active with " + "nexthop N2 and vice versa." + ) + nh = NEXT_HOP_IP["nh2"][addr_type] + + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("No shut nexthop interface N2") + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." 
+ ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + dut = "r3" + protocol = "bgp" + + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + dut = "r2" + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + step( + "After reload of FRR router , static route installed" + " in RIB and FIB properly ." + ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, rte1_nh1, next_hop=nh) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, rte2_nh2, next_hop=nh) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_same_rte_from_bgp_static_p0_tc5_ebgp(request): + """ + Verify RIB status when same route advertise via BGP and static + route + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
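A note on the assertion style seen throughout these tests: several assertions split the failure message across two source lines without wrapping the whole message in parentheses, e.g. assert result is True, "Testcase {} : Failed \nError: Routes is" followed by " missing in RIB".format(tc_name) on the next line. In Python the second line is then a separate, discarded expression statement, so the assert message is only the first fragment and the {} placeholder is never filled. A minimal sketch of the parenthesized form (illustrative only, not part of this patch; tc_name and result mirror the surrounding test):

# Sketch: keep the whole failure message inside one parenthesized expression
# so .format() applies to the complete string and no dead statement is left.
assert result is True, (
    "Testcase {} : Failed\nError: Routes are missing in RIB".format(tc_name)
)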
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + NEXT_HOP_IP = populate_nh() + step("Configure EBGP IPv4 peering between R2 and R3 router.") + + step( + "Configure IPv4 static route (10.1.1.1/24) in R2 with next hop" + "N1 (28.1.1.2 ) and N2 (29.1.1.2) , Static route next-hop present" + "on R1" + ) + + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + }, + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure redistribute static in BGP.") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("Verify on R3 , route receive on R3 BGP table ") + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + step("Verify route installed in the RIB and FIB of R3") + protocol = "bgp" + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + step( + "Configure 2 links/interfaces between R1 and R3 , keep one" + "interface in shut (active) state and other interface in no shut" + "(inactive) state" + ) + dut = "r3" + intf = topo["routers"]["r3"]["links"]["r1-link0"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "Configure same static route (10.1.1.1/24) in R3 with inactive" + "nexthop interface" + ) + + step( + "Configure same static route 10.1.1.1/24) again in R3 with" + "active nexthop interface" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": topo["routers"]["r1"]["links"]["r3-link0"][ + addr_type + ], + }, + { + "network": NETWORK[addr_type], + "next_hop": topo["routers"]["r1"]["links"]["r3-link1"][ + addr_type + ], + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Verify when static route configure with inactive nexthop , " + "verify BGP received route is active in the RIB and FIB" + ) + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in BGP RIB".format(tc_name) + + protocol = "bgp" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + step("Remove the static route on R3 configured with active" "interface") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": topo["routers"]["r1"]["links"]["r3-link0"][ + addr_type + ], + "delete": True, + }, + { + "network": 
NETWORK[addr_type], + "next_hop": topo["routers"]["r1"]["links"]["r3-link1"][ + addr_type + ], + "delete": True, + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step( + "After removing the static route with active nexthop verify " + "BGP received route is became active in RIB and FIB" + ) + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in BGP RIB".format(tc_name) + + protocol = "bgp" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py new file mode 100644 index 0000000000..93320df327 --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo2_ebgp.py @@ -0,0 +1,1711 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +""" + -Verify static route functionality with 8 next hop different AD value + and BGP ECMP + + -Verify 8 static route functionality with 8 next hop different AD + + -Verify static route with 8 next hop with different AD value and 8 + EBGP neighbors + + -Verify static route with 8 next hop with different AD value and 8 + IBGP neighbors + + -Delete the static route and verify the RIB and FIB state + + -Verify 8 static route functionality with 8 ECMP next hop +""" +import sys +import json +import time +import os +import pytest +import platform +import random +from lib.topotest import version_cmp + +# Save the Current Working Directory to find configuration files. 
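The tests in this new module lean on the same negative-verification idiom as the file above: the verify helpers appear to return True on success and an error message otherwise (the surrounding assertions rely on exactly that), so an "absent route" check passes expected=False and asserts the result is not True. A minimal sketch of the idiom, with names borrowed from the surrounding tests (illustrative only, not part of the patch):

# Sketch: negative check, the route is expected to be absent from the RIB.
result = verify_rib(
    tgen, "ipv4", "r2", input_dict, next_hop=nh, protocol="static",
    expected=False,   # lookup is expected to fail
)
assert result is not True, (
    "route unexpectedly present in RIB: {}".format(result)
)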
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + shutdown_bringup_interface, + stop_router, + start_router, +) +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo2_ebgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + "11.0.20.6/32", + "11.0.20.7/32", + "11.0.20.8/32", + ], + "ipv6": [ + "2::1/128", + "2::2/128", + "2::3/128", + "2::4/128", + "2::5/128", + "2::6/128", + "2::7/128", + "2::8/128", + ], +} +PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"} +PREFIX2 = {"ipv4": "110.0.20.2/32", "ipv6": "20::2/128"} +NEXT_HOP_IP = [] +topo_diag = """ + Please view in a fixed-width font such as Courier. + +------+ +------+ +------+ + | +--------------+ +--------------+ | + | | | | | | + | R1 +---8 links----+ R2 +---8 links----+ R3 | + | | | | | | + | +--------------+ +--------------+ | + +------+ +------+ +------+ + +""" + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + + Set up the pytest environment. + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. (have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + # Don't run this test if we have any failure. 
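Each test case below runs the same scenario once per address family reported by check_address_types(), iterating over ADDR_TYPES and indexing NETWORK/PREFIX1/PREFIX2 with the family name. A minimal sketch of that per-family loop (illustrative only; PREFIX1 and logger are the module-level names defined above):

# Sketch: the per-address-family loop used by every test case in this module.
for addr_type in ADDR_TYPES:                 # typically "ipv4", then "ipv6"
    prefix = PREFIX1[addr_type]              # "110.0.20.1/32" or "20::1/128"
    logger.info("exercising %s with prefix %s", addr_type, prefix)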
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def populate_nh(): + NEXT_HOP_IP = { + "nh1": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0], + }, + "nh2": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0], + }, + "nh3": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link2"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link2"]["ipv6"].split("/")[0], + }, + "nh4": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link3"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link3"]["ipv6"].split("/")[0], + }, + "nh5": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link4"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link4"]["ipv6"].split("/")[0], + }, + "nh6": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link5"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link5"]["ipv6"].split("/")[0], + }, + "nh7": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link6"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link6"]["ipv6"].split("/")[0], + }, + "nh8": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link7"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link7"]["ipv6"].split("/")[0], + }, + } + return NEXT_HOP_IP + + +##################################################### +# +# Testcases +# +##################################################### + + +def test_static_rte_with_8ecmp_nh_p1_tc9_ebgp(request): + """ + Verify 8 static route functionality with 8 ECMP next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
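populate_nh() above builds its eight entries by repeating the same per-link lookup once for nh1 through nh8. An equivalent, more compact form is sketched below as a behaviour-preserving alternative (illustrative only, not part of the patch; the helper name populate_nh_compact is hypothetical):

# Sketch: derive nh1..nh8 from the r1->r2 link addresses in one comprehension
# (r2-link0 maps to nh1, ..., r2-link7 maps to nh8).
def populate_nh_compact():
    return {
        "nh{}".format(i + 1): {
            af: topo["routers"]["r1"]["links"]["r2-link{}".format(i)][af].split("/")[0]
            for af in ("ipv4", "ipv6")
        }
        for i in range(8)
    }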
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + NEXT_HOP_IP = populate_nh() + step("Configure 8 interfaces / links between R1 and R2") + step("Configure 8 interfaces / links between R2 and R3") + step("Configure 8 IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + + step( + "Configure 8 IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) , N2(22.1.1.2) , N3(23.1.1.2) , N4(24.1.1.2) ," + "N5(25.1.1.2) , N6(26.1.1.2) , N7(27.1.1.2) , N8(28.1.1.2) ," + "Static route next-hop present on R1" + ) + nh_all = {} + for addr_type in ADDR_TYPES: + # Enable static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + nh_all[addr_type] = [ + NEXT_HOP_IP["nh1"][addr_type], + NEXT_HOP_IP["nh2"][addr_type], + NEXT_HOP_IP["nh3"][addr_type], + NEXT_HOP_IP["nh4"][addr_type], + NEXT_HOP_IP["nh5"][addr_type], + NEXT_HOP_IP["nh6"][addr_type], + NEXT_HOP_IP["nh7"][addr_type], + NEXT_HOP_IP["nh8"][addr_type], + ] + + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + dut = "r2" + protocol = "static" + step( + "After removing the static route with N1 to N8 one by one , " + "verify that entry is removed from RIB and FIB of R3 " + ) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "verify that entry is removed from RIB and FIB of R3 " + ) + nh = NEXT_HOP_IP["nh" + str(nhp)][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed\nError: Routes is" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + 
str(nhp)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + nh = NEXT_HOP_IP["nh" + str(nhp)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed\nError: Routes are" + " missing in RIB".format(tc_name) + + protocol = "static" + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + # Shutdown interface + dut = "r2" + step( + " interface which is about to be shut no shut between r1 and r2 is " "%s", + topo["routers"]["r2"]["links"]["r1-link{}".format(randnum)]["interface"], + ) + intf = topo["routers"]["r2"]["links"]["r1-link{}".format(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + step("Random no shut of the nexthop interfaces") + # Bringup interface + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "After random shut/no shut of nexthop , only that " + "nexthop deleted/added from all the routes , other nexthop remain " + "unchanged" + ) + dut = "r2" + protocol = "static" + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Remove random static route with all the nexthop") + dut = "r2" + randnum = random.randint(1, 7) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum)][addr_type], + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After delete of random route , that route only got deleted from" + " RIB/FIB other route are showing properly" + ) + nh = NEXT_HOP_IP["nh{}".format(randnum)][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + start_router(tgen, "r2") + + step( + "After reload of FRR router , static route " + "installed in RIB and FIB properly ." 
+ ) + for addr_type in ADDR_TYPES: + # Enable static routes + nhp = 1 + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Verifying %s routes on r2", addr_type) + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the BGP neighbor or redistribute static knob , " + "verify route got clear from RIB and FIB of R3 routes " + ) + dut = "r3" + protocol = "bgp" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc6_ebgp(request): + """ + Verify static route functionality with 8 next hop different AD + value and BGP ECMP + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 8 interfaces / links between R1 and R2 ,") + step("Configure 8 interlaces/links between R2 and R3") + step( + "Configure IBGP IPv4 peering over loopback interface between" + "R2 and R3 router." 
+ ) + step("Configure redistribute static in BGP on R2 router") + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80, Static route next-hop" + "present on R1" + ) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + wait=2, + attempts=3, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop which " + "got removed is not shown in RIB and FIB" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + 
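For reference, the admin-distance entries configured in this test correspond to staticd configuration of roughly the shape shown in the step descriptions (ip route PREFIX NEXTHOP DISTANCE), with only the lowest-distance next hop expected in the FIB. A small sketch that prints the equivalent vtysh-style lines from the same dictionaries (illustrative only; names taken from this module):

# Sketch: the per-nexthop dicts above roughly translate to
# "ip route 110.0.20.1/32 <nh1> 10" ... "ip route 110.0.20.1/32 <nh8> 80".
for nhp in range(1, 9):
    print("ip route {} {} {}".format(
        PREFIX1["ipv4"], NEXT_HOP_IP["nh" + str(nhp)]["ipv4"], 10 * nhp))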
+ step( + " After configuring them, route is always active with lowest AD" + " value and all the nexthop populated in RIB and FIB again" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + wait=2, + attempts=3, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + dut = "r2" + protocol = "static" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert ( + result is True + ), "Testcase {}: Failed \n " "Error: Routes are missing in RIB".format(tc_name) + + protocol = "bgp" + dut = "r3" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert ( + result is True + ), "Testcase {}: Failed \n " "Error: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + + write_test_footer(tc_name) + + +def test_static_route_8nh_diff_AD_ebgp_ecmp_p1_tc8_ebgp(request): + """ + Verify static route with 8 next hop with different AD value and 8 + EBGP neighbors + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = 
get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 8 interfaces / links between R1 and R2") + step("Configure 8 interlaces/links between R2 and R3") + step("Configure 8 EBGP IPv4 peering between R2 and R3") + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80, Static route next-hop" + "present on R1" + ) + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop which " + "got removed is not shown in RIB and FIB" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, 
"Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + " After configuring them, route is always active with lowest AD" + " value and all the nexthop populated in RIB and FIB again" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + dut = "r2" + protocol = "static" + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + write_test_footer(tc_name) + + +def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc10_ebgp(request): + """ + Verify 8 static route functionality with 8 next hop different AD' + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2 ") + step("Configure 8 IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80" + ) + step( + "Configure nexthop AD in such way for static route S1 , N1 is" + "preferred and for S2 , N2 is preferred and so on .." + ) + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + second_rte = { + "r2": { + "static_routes": [ + { + "network": PREFIX2[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, second_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Verify that highest AD nexthop are inactive") + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + wait=2, + attempts=3, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, 
"Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop which" + "got removed is not shown in RIB and FIB" + ) + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + " After configuring them, route is always active with lowest AD" + " value and all the nexthop populated in RIB and FIB again" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type],}]}} + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + "lowest AD is missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + step("Remove random static route with all the nexthop") + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop " + "which got removed is not shown in RIB and FIB" + ) + nh = NEXT_HOP_IP["nh" + str(nhp)][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + 
input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Route " + " is still present in RIB".format(tc_name) + + step("Reconfigure the deleted routes and verify they are installed") + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed \nError: Route " + " is still present in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + step("After reloading, verify that routes are still present in R2.") + result = verify_rib( + tgen, + addr_type, + dut, + second_rte, + next_hop=nh, + protocol=protocol, + fib=True, + ) + assert result is True, ( + "Testcase {} : Failed \nError: Route " + " is missing in RIB".format(tc_name) + ) + + write_test_footer(tc_name) + + +def test_static_route_delete_p0_tc11_ebgp(request): + """ + Delete the static route and verify the RIB and FIB state + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2 ") + step("Configure 8 IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80" + ) + step( + "Configure nexthop AD in such way for static route S1 , N1 is" + "preferred and for S2 , N2 is preferred and so on .." 
+ ) + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + second_rte = { + "r2": { + "static_routes": [ + { + "network": PREFIX2[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, second_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Verify that highest AD nexthop are inactive") + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Verify after removing the redistribute static from BGP all the" + "routes got delete from RIB and FIB of R3 " + ) + + dut = "r3" + protocol = "bgp" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, 
"Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + second_rte = { + "r2": { + "static_routes": [ + { + "network": PREFIX2[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, second_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + " After removing all the routes and nexthop from R2 , " + " verify R2 RIB and FIB is cleared" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still active in RIB".format(tc_name) + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py new file mode 100644 index 0000000000..255bb073b4 --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo3_ebgp.py @@ -0,0 +1,1333 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# +""" + -Verify static route ECMP functionality with 8 next hop + + -Verify static route functionality with 8 next hop different AD value + + -Verify static route with tag option + + -Verify BGP did not install the static route when it receive route + with local next hop + +""" +import sys +import json +import time +import os +import pytest +import random +import platform +from lib.topotest import version_cmp + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_interfaces_cfg, + shutdown_bringup_interface, + stop_router, + start_router, + create_route_maps, + verify_ip_nht, +) +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo3_ebgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + "11.0.20.6/32", + "11.0.20.7/32", + "11.0.20.8/32", + ], + "ipv6": [ + "2::1/128", + "2::2/128", + "2::3/128", + "2::4/128", + "2::5/128", + "2::6/128", + "2::7/128", + "2::8/128", + ], +} +PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"} +NETWORK2 = {"ipv4": ["11.0.20.1/32"], "ipv6": ["2::1/128"]} +NEXT_HOP_IP = [] + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + + Set up the pytest environment. + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), "4.19") < 0: + error_msg = ( + 'These tests will not run. (have kernel "{}", ' + "requires kernel >= 4.19)".format(platform.release()) + ) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def populate_nh(): + NEXT_HOP_IP = { + "nh1": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0], + }, + "nh2": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0], + }, + "nh3": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link2"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link2"]["ipv6"].split("/")[0], + }, + "nh4": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link3"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link3"]["ipv6"].split("/")[0], + }, + "nh5": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link4"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link4"]["ipv6"].split("/")[0], + }, + "nh6": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link5"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link5"]["ipv6"].split("/")[0], + }, + "nh7": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link6"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link6"]["ipv6"].split("/")[0], + }, + "nh8": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link7"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link7"]["ipv6"].split("/")[0], + }, + } + return NEXT_HOP_IP + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_staticroute_with_ecmp_p0_tc3_ebgp(request): + """ + Verify static route ECMP functionality with 8 next hop' + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2,") + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2), N2(22.1.1.2), N3(23.1.1.2), N4(24.1.1.2)," + "N5(25.1.1.2), N6(26.1.1.2), N7(27.1.1.2),N8(28.1.1.2), Static" + "route next-hop present on R1" + ) + + step("Configure IBGP IPv4 peering between R2 and R3 router.") + + for addr_type in ADDR_TYPES: + # Enable static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + nh = [ + NEXT_HOP_IP["nh1"][addr_type], + NEXT_HOP_IP["nh2"][addr_type], + NEXT_HOP_IP["nh3"][addr_type], + NEXT_HOP_IP["nh4"][addr_type], + NEXT_HOP_IP["nh5"][addr_type], + NEXT_HOP_IP["nh6"][addr_type], + NEXT_HOP_IP["nh7"][addr_type], + NEXT_HOP_IP["nh8"][addr_type], + ] + + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by" "one") + + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + 
str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + start_router(tgen, "r2") + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_staticroute_with_ecmp_with_diff_AD_p0_tc4_ebgp(request): + """ + Verify static route ECMP functionality with 8 next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2,") + step("Configure IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80, Static route next-hop" + "present on R1" + ) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop, lowest AD nexthop is active" + ) + step("On R2, static route with lowest AD nexthop installed in FIB") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in 
RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " with high AD are active in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + + logger.info("Configuring redistribute static") + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After configuring them, route is always active with lowest AD" + "value and all the nexthop populated in RIB and FIB again " + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one, " + "route become active with next preferred nexthop and nexthop which " + "got removed is not shown in RIB and FIB" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by" "one") + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("On R2, static route with lowest AD nexthop installed in FIB") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in RIB".format(tc_name) + + nh = [] + for nhp in 
range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " with high AD are active in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + start_router(tgen, "r2") + + step( + "After reload of FRR router, static route installed " + "in RIB and FIB properly ." + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " with high AD are active in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_bgp_local_nexthop_p1_tc14_ebgp(request): + """ + Verify BGP did not install the static route when it receive route + with local next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + step("Configure BGP IPv4 session between R2 and R3") + step("Configure IPv4 static route on R2") + reset_config_on_routers(tgen) + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": topo["routers"]["r3"]["links"]["r2-link0"][ + addr_type + ].split("/")[0], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure redistribute static in the BGP") + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + 
addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify R2 BGP table has IPv4 route") + dut = "r2" + result = verify_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB of R2".format(tc_name) + + step(" Verify route did not install in the R3 BGP table, RIB/FIB") + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4, expected=False) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in BGP RIB of R2".format(tc_name) + + result = verify_rib(tgen, addr_type, dut, input_dict_4, expected=False) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB of R2".format(tc_name) + + write_test_footer(tc_name) + + +def test_frr_intf_name_as_gw_gap_tc4_ebgp_p0(request): + """ + Verify static route configure with interface name as gateway' + 'address' + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + + dut = "r1" + intf = topo["routers"]["r1"]["links"]["r2-link0"]["interface"] + nh = topo["routers"]["r1"]["links"]["r2-link0"] + ip_list = { + "ipv4": [(dut, intf, ["1.1.1.1/32"], nh["ipv4"].split("/")[0])], + "ipv6": [(dut, intf, ["4001::32/128"], nh["ipv6"].split("/")[0])], + } + + step( + "Configure IPv4 and IPv6 static route in FRR with different next" + "hop (ens224 as nexthop))" + ) + step("ip route 2.2.2.0/24 20.1.1.1 ens224 ----from FRR cli") + step("ipv6 route 2000::1/120 5000::1 ens224 ----from FRR cli") + + for addr_type in ADDR_TYPES: + # Enable static routes + nh = topo["routers"]["r2"]["links"]["r1-link0"][addr_type].split("/")[0] + input_dict_4 = { + "r1": { + "static_routes": [ + {"network": ip_list[addr_type][0][2][0], "next_hop": nh} + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "IPv4 and IPv6 Static route added in FRR verify using " + "show ip route , nexthop is resolved using show nht" + ) + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, next_hop=nh + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + input_dict_nh = { + "r1": { + nh: { + "Address": nh, + "resolvedVia": "connected", + "nexthops": {"nexthop1": {"Interfcae": intf}}, + } + } + } + result = verify_ip_nht(tgen, input_dict_nh) + assert result is True, "Testcase {} : Failed \nError: Nexthop is" + " missing in RIB".format(tc_name) + + step( + "Shut / no shut IPv4 and IPv6 static next hop interface from" + "kernel and FRR CLI" + ) + + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + "After shut of nexthop interface, IPv4 and IPv6 route got removed " + "from RIB verify using show ip route show nht" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + protocol=protocol, + next_hop=nh, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "After 
no shut route got added again in RIB /FIB using " + "show ip route nexthop is resolved using show nht" + ) + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed".format(tc_name) + + for addr_type in ADDR_TYPES: + nh = topo["routers"]["r2"]["links"]["r1-link0"][addr_type].split("/")[0] + input_dict_4 = { + "r1": { + "static_routes": [ + { + "network": ip_list[addr_type][0][2][0], + "next_hop": nh, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Removing FRR configured static route verify FRR route also " + "removed from FRR" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + protocol=protocol, + next_hop=nh, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes" + " still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_with_tag_p0_tc_13_ebgp(request): + """ + Verify static route with tag option + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 8 links between R1 and R2") + step("Configure 1 links between R2 and R3") + NEXT_HOP_IP = populate_nh() + + step( + "Configure 2 IPv4 static route (S1 and S2) in R2 with same" + "next hop N1 28.1.1.2" + ) + step("Configure static route S1 with tag 1 and static route S2 with" "tag2") + step("S1= ip route 10.1.1.1/24 28.1.1.2 tag 1") + step("S2= ip route 20.1.1.1/24 28.1.1.2 tag 2") + step("Enable redistribute static in BGP with route-map") + reset_config_on_routers(tgen) + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "tag": 4001, + }, + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "tag": 4002, + }, + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("verify routes are present in RIB") + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Configure route-map on R2 with allow tag1 and deny tag2") + + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + { + "action": "permit", + "seq_id": 10, + "match": {addr_type: {"tag": "4001"}}, + }, + { + "action": "deny", + "seq_id": 20, + "match": {addr_type: {"tag": "4002"}}, + }, + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + # Configure neighbor for route map + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "route_maps": [ + { + "name": "rmap_match_tag_1_ipv4", + "direction": "out", + } + ] + } + } + } + }, + "redistribute": [{"redist_type": "static"}], + } + } + } + } + } 
+ } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Verify static route S1 advetised in BGP table when tag1 permit" + "in route-map else it is denied" + ) + dut = "r3" + input_dict_0 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "tag": 4002, + } + ] + } + } + + result = verify_rib( + tgen, addr_type, dut, input_dict_0, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route with " + "tag 4002 is still present in RIB".format(tc_name) + + dut = "r2" + input_dict_1 = { + "r2": {"static_routes": [{"network": NETWORK[addr_type], "tag": 4001}]} + } + + result = verify_rib(tgen, addr_type, dut, input_dict_0, protocol=protocol) + assert result is True, "Testcase {} : Failed \nError: Route with " + "tag 4001 is missing in RIB".format(tc_name) + + step("Modify the route-map to allow tag2 and deny tag1") + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + { + "action": "deny", + "seq_id": 10, + "match": {addr_type: {"tag": "4001"}}, + }, + { + "action": "permit", + "seq_id": 20, + "match": {addr_type: {"tag": "4002"}}, + }, + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r3" + step( + "Verify static route S2 advertised in BGP table when tag2" + "permit in route-map else it is denied" + ) + protocol = "bgp" + input_dict_0 = { + "r2": {"static_routes": [{"network": NETWORK2[addr_type], "tag": 4002}]} + } + + result = verify_rib(tgen, addr_type, dut, input_dict_0, protocol=protocol) + assert result is True, "Testcase {} : Failed \nError: Route with " + "tag 4002 is missing in RIB".format(tc_name) + + input_dict_1 = { + "r2": {"static_routes": [{"network": NETWORK[addr_type], "tag": 4001}]} + } + result = verify_rib( + tgen, addr_type, dut, input_dict_1, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route with " + "tag 4001 is still present in RIB".format(tc_name, result) + + step("Configure one static route with 2 ECMP nexthop N1 and N2") + step("For N1 configure tag 1 and for N2 configure tag 2") + step("S1= ip route 10.1.1.1/24 28.1.1.2 tag 1") + step("S1= ip route 10.1.1.1/24 29.1.1.2 tag 2") + step("configure the route-map to allow tag1 and deny tag 2") + step("Modify the route-map to allow tag2 and deny tag1") + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "tag": 4001, + }, + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "tag": 4002, + }, + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("shut/no shut of tag1 and tag2 nexthop") + + intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + 
shutdown_bringup_interface(tgen, dut, intf, True) + + step("configure one static route with 3 next-hop") + step("N1-tag1, N2-tag2, N3-tag3") + step("S1= ip route 10.1.1.1/24 28.1.1.2 tag 1") + step("S1= ip route 10.1.1.1/24 29.1.1.2 tag 2") + step("S1= ip route 10.1.1.1/24 28.1.1.2 tag 3") + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "tag": 4001, + }, + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "tag": 4002, + }, + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh3"][addr_type], + "tag": 4003, + }, + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r2" + protocol = "static" + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + + step("configure the route-map to allow tag2 & tag3 and deny tag1") + # Create route map + input_dict_3 = { + "r2": { + "route_maps": { + "rmap_match_tag_1_{}".format(addr_type): [ + { + "action": "deny", + "seq_id": 10, + "match": {addr_type: {"tag": "4001"}}, + }, + { + "action": "permit", + "seq_id": 20, + "match": {addr_type: {"tag": "4002"}}, + }, + { + "action": "permit", + "seq_id": 30, + "match": {addr_type: {"tag": "4003"}}, + }, + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Verify static route advertised in BGP table with tag3" + " nexthop if tag2 is down" + ) + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("shut / no shut of tag2 and tag3 next-hop") + + intf = topo["routers"]["r2"]["links"]["r1-link1"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + intf = topo["routers"]["r2"]["links"]["r1-link2"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + step("shut/no shut of tag2 and tag3 nexthop") + intf = topo["routers"]["r2"]["links"]["r1-link1"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + + intf = topo["routers"]["r2"]["links"]["r1-link2"]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + + step("Verify after shut/noshut of nexthop BGP table updated correctly") + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py new file mode 100644 index 0000000000..75657a8895 --- /dev/null +++ b/tests/topotests/static_routing_with_ebgp/test_static_routes_topo4_ebgp.py @@ -0,0 +1,994 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. 
+# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" + +Following tests are covered in the script. + +- Verify static route are blocked from route-map and prefix-list + applied in BGP nbrs +- Verify Static route when FRR connected to 2 TOR +""" + +import sys +import json +import time +import os +import pytest +import platform +import ipaddress +from copy import deepcopy + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +from lib.topotest import version_cmp + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + check_address_types, + step, + create_prefix_lists, + create_route_maps, + create_interfaces_cfg, + verify_prefix_lists, + verify_route_maps, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + clear_bgp, +) +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo4_ebgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"} +NEXT_HOP_IP = {} + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Set up the pytest environment. + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. 
(have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. + + * `mod`: module name + """ + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Tests starting +# +##################################################### +def static_routes_rmap_pfxlist_p0_tc7_ebgp(request): + """ + Verify static route are blocked from route-map & prefix-list applied in BGP + nbrs + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + reset_config_on_routers(tgen) + step("Configure holddown timer = 1 keep alive = 3 in all the neighbors") + step("verify bgp convergence before starting test case") + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Configure 4 IPv4 and 4 IPv6 nbrs with password with mismatch " + " authentication between FRR routers " + ) + + for addr_type in ADDR_TYPES: + # Api call to modfiy BGP timerse + input_dict = { + "r2": { + "bgp": { + "local_as": "200", + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": {"password": "r2"}, + "r2-link1": {"password": "r2"}, + "r2-link2": {"password": "r2"}, + "r2-link3": {"password": "r2"}, + } + }, + "r3": { + "dest_link": { + "r2-link0": {"password": "r2"}, + "r2-link1": {"password": "r2"}, + "r2-link2": {"password": "r2"}, + "r2-link3": {"password": "r2"}, + } + }, + } + } + } + }, + } + } + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + clear_bgp(tgen, addr_type, "r2") + + step(" All BGP nbrs are down as authentication is mismatch on both" " the sides") + + bgp_convergence = verify_bgp_convergence(tgen, topo, expected=False) + assert bgp_convergence is not True, "Testcase {} : " + "Failed \n BGP nbrs must be down. 
Error: {}".format(tc_name, bgp_convergence) + + step( + "Configure 4 IPv4 and 4 IPv6 nbrs with macthing password " + " authentication between FRR routers " + ) + for addr_type in ADDR_TYPES: + input_dict = { + "r2": { + "bgp": { + "local_as": "200", + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": {"password": "r1"}, + "r2-link1": {"password": "r1"}, + "r2-link2": {"password": "r1"}, + "r2-link3": {"password": "r1"}, + } + }, + "r3": { + "dest_link": { + "r2-link0": {"password": "r1"}, + "r2-link1": {"password": "r1"}, + "r2-link2": {"password": "r1"}, + "r2-link3": {"password": "r1"}, + } + }, + } + } + } + }, + } + } + } + result = create_router_bgp(tgen, topo, deepcopy(input_dict)) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("All BGP nbrs are up as authentication is matched now") + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n " "Error: {}".format( + tc_name, bgp_convergence + ) + + step("Create prefix list P1 to permit VM3 & deny VM1 v4 & v6 routes") + step("Create prefix list P2 to permit VM6 IPv4 and IPv6 routes") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_list_1_{}".format(addr_type): [ + { + "seqid": 10, + "network": topo["routers"]["r2"]["links"]["vm3"][ + addr_type + ], + "action": "permit", + }, + { + "seqid": 20, + "network": topo["routers"]["r2"]["links"]["vm1"][ + addr_type + ], + "action": "deny", + }, + ], + "pf_list_2_{}".format(addr_type): [ + { + "seqid": 10, + "network": topo["routers"]["r2"]["links"]["vm6"][ + addr_type + ], + "action": "permit", + } + ], + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Prefix list created with matching networks deny or permit " + "show ip prefix list" + ) + result = verify_prefix_lists(tgen, input_dict_2) + assert result is not True, "Testcase {} : Failed \n" + " Error: {}".format(tc_name, result) + + step("Redistribute all the routes (connected, static)") + input_dict_2_r1 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2_r2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2_r3 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute connected in Router BGP") + + input_dict_2_r1 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "connected"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2_r3 = { + "r3": { + "bgp": { + 
"address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "connected"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "connected"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply prefix list P1 on BGP neighbors 1 2 3 4 connected from " "frr r1") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply prefix list P2 on BGP nbrs 5 & 6 connected from FRR-2") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + clear_bgp_and_verify(tgen, topo, "r2") + + step( + "VM1 IPv4 and IPv6 Route which is denied using prefix list is " + "not present on FRR1 side routing table , also not able to " + "ping the routes show ip route" + ) + + dut = "r1" + protocol = "bgp" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm1"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result4 is not True, "Testcase {} : Failed , VM1 route is " + "not filtered out via prefix list. 
\n Error: {}".format(tc_name, result4) + + step( + "VM4 and VM6 IPV4 and IPv6 address are present in local and " + "FRR2 routing table show ip bgp show ip route" + ) + + dut = "r2" + ntwk_r2_vm6 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm6"][addr_type]) + ).network + ) + input_dict = {"r3": {"static_routes": [{"network": ntwk_r2_vm6}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict) + assert result4 is True, "Testcase {} : Failed.\n Error: {}".format( + tc_name, result4 + ) + + step("Remove prefix list from all the neighbors") + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + clear_bgp_and_verify(tgen, topo, "r2") + + step("Create RouteMap_1 with prefix list P1 and weight 50") + # Create route map + rmap_dict = { + "r2": { + "route_maps": { + "rmap_pf_list_1_{}".format(addr_type): [ + { + "action": "permit", + "set": {"weight": 50}, + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + } + ] + } + } + } + result = create_route_maps(tgen, rmap_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Create RouteMap_2 with prefix list P2 and weight 50") + # Create route map + rmap_dict = { + "r2": { + "route_maps": { + "rmap_pf_list_2_{}".format(addr_type): [ + { + "action": "permit", + "set": {"weight": 50}, + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, + } + ] + } + } + } + result = create_route_maps(tgen, rmap_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify Route-map created verify using show route-map") + # verify rmap_pf_list_1 and rmap_pf_list_2 are present in router r2 + input_dict = { + "r2": { + "route_maps": [ + 
"rmap_pf_list_1_{}".format(addr_type), + "rmap_pf_list_2_{}".format(addr_type), + ] + } + } + result = verify_route_maps(tgen, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply policy RouteMap_1 nbrs 1 2 3 4 to FRR 1") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link1": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link2": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link3": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply policy RouteMap_2 nbrs 5 and 6 to FRR2") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link1": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link2": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link3": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After applying to BGP neighbors verify VM1 IPv4 and IPv6 Route" + " which is denied using prefix list is not present on FRR side" + " routing table , also not able to ping the routes show ip route" + " and VM4 and VM6 IPV4 and IPv6 address are present in local and" + " FRR routing table show ip bgp show ip route" + ) + + dut = "r1" + protocol = "bgp" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm1"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result4 is not True, "Testcase {} : Failed \n" "Error: {}".format( + tc_name, result4 + ) + + step("vm4 should be present in FRR1") + dut = "r1" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r1"]["links"]["vm4"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict) + assert result4 is True, "Testcase {} : Failed , VM1 route is " + "not filtered out via prefix list. 
\n Error: {}".format(tc_name, result4) + + step("vm4 should be present in FRR2") + dut = "r2" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r1"]["links"]["vm4"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict) + assert result4 is True, "Testcase {} : Failed , VM1 route is " + "not filtered out via prefix list. \n Error: {}".format(tc_name, result4) + + dut = "r3" + protocol = "bgp" + ntwk_r2_vm6 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm6"][addr_type]) + ).network + ) + input_dict = {"r3": {"static_routes": [{"network": ntwk_r2_vm6}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed.\n Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ibgp/static_routes_topo1_ibgp.json b/tests/topotests/static_routing_with_ibgp/static_routes_topo1_ibgp.json new file mode 100644 index 0000000000..99b197366a --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/static_routes_topo1_ibgp.json @@ -0,0 +1,157 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ibgp/static_routes_topo2_ibgp.json b/tests/topotests/static_routing_with_ibgp/static_routes_topo2_ibgp.json new file mode 100644 index 0000000000..47596a0a1a --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/static_routes_topo2_ibgp.json @@ -0,0 +1,371 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link7": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + + + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + }, + "redistribute": [{ + "redist_type": "static" + }] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + + + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r2-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } 
+ } + }, + "redistribute": [{ + "redist_type": "static" + }] + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + + + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + + + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link1": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link2": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link3": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link4": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link5": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link6": { + "keepalivetimer": 1, + "holddowntimer": 4 + }, + "r3-link7": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ibgp/static_routes_topo3_ibgp.json b/tests/topotests/static_routing_with_ibgp/static_routes_topo3_ibgp.json new file mode 100644 index 0000000000..4e27229f34 --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/static_routes_topo3_ibgp.json @@ -0,0 +1,189 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 29, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link7": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link5": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link7": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "keepalivetimer": 1, + "holddowntimer": 4 + } + } + } + } + } + } + } + } + } + } +}
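Each of these topologies pins aggressive per-link BGP timers (keepalive 1 s, hold 4 s here). A helper along the following lines, written only for illustration and not part of the shared topotest libraries, walks the same JSON layout and yields every configured timer pair, which makes an inconsistent link easy to spot:

    def iter_bgp_timers(topo):
        """Yield (router, family, peer, link, keepalive, holddown) for
        every dest_link that sets timers in a topojson-style dict."""
        for rtr, data in topo["routers"].items():
            families = data.get("bgp", {}).get("address_family", {})
            for family, fam_cfg in families.items():
                neighbors = fam_cfg.get("unicast", {}).get("neighbor", {})
                for peer, peer_cfg in neighbors.items():
                    links = peer_cfg.get("dest_link", {})
                    for link, link_cfg in links.items():
                        if "keepalivetimer" in link_cfg:
                            yield (rtr, family, peer, link,
                                   link_cfg["keepalivetimer"],
                                   link_cfg.get("holddowntimer"))

    # import json
    # for row in iter_bgp_timers(json.load(open("static_routes_topo3_ibgp.json"))):
    #     print(row)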
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ibgp/static_routes_topo4_ibgp.json b/tests/topotests/static_routing_with_ibgp/static_routes_topo4_ibgp.json new file mode 100644 index 0000000000..bb72578ff3 --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/static_routes_topo4_ibgp.json @@ -0,0 +1,428 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:db8:f::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm4": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r1-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + } + } + } + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r1-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm6": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3-link3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + "r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + }, + "r3": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + "r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + "r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + }, + "r3": { + "dest_link": { + "r2-link0": { + "password": "r1" + }, + 
"r2-link1": { + "password": "r1" + }, + "r2-link2": { + "password": "r1" + }, + "r2-link3": { + "password": "r1" + } + } + } + } + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2-link0": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2-link3": { + "ipv4": "auto", + "ipv6": "auto" + }, + "vm5": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r3-link0": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link1": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link2": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + }, + "r3-link3": { + "password": "r1", + "holddowntimer": 3, + "keepalivetimer": 1 + } + } + } + } + } + } + } + } + }, + "vm1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + } + }, + "vm6": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + } + } + } + } +}
\ No newline at end of file diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py new file mode 100644 index 0000000000..130f4fd9aa --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo1_ibgp.py @@ -0,0 +1,1083 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# +""" + + -Verify static route ECMP functionality with 2 next hop + + -Verify static route functionality with 2 next hop and different AD + value + + -Verify RIB status when same route advertise via BGP and static route + +""" +import sys +import json +import time +import os +import pytest +import platform +from copy import deepcopy + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +from lib.topotest import version_cmp + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_interfaces_cfg, + shutdown_bringup_interface, + stop_router, + start_router, +) +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo1_ibgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]} +NETWORK2 = {"ipv4": "11.0.20.1/32", "ipv6": "2::1/128"} + +PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"} + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + + Set up the pytest environment. 
+ + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. (have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def populate_nh(): + NEXT_HOP_IP = { + "nh1": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0], + }, + "nh2": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0], + }, + } + return NEXT_HOP_IP + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_static_route_2nh_p0_tc_1_ibgp(request): + """ + Verify static route ECMP functionality with 2 next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
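    # Illustration only, not part of this test file: populate_nh() above
    # pulls bare next-hop addresses out of the "address/prefixlen" strings
    # stored in the topology, e.g.
    #
    #     >>> "10.0.0.2/30".split("/")[0]
    #     '10.0.0.2'
    #
    # The ipaddress module used elsewhere in this suite gives the same
    # result a little more explicitly:
    #
    #     >>> import ipaddress
    #     >>> str(ipaddress.ip_interface(u"10.0.0.2/30").ip)
    #     '10.0.0.2'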
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step( + "Configure IPv4 static route (10.1.1.1) in R2 with next hop N1" + "(28.1.1.2 ) and N2 (29.1.1.2) , Static route next-hop present on" + "R1" + ) + step("ex :- ip route 10.1.1.1/24 28.1.1.2 & ip route 10.1.1.1/24 29.1.1.1") + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + }, + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + }, + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, static route installed in RIB using show ip route" + " with 2 ECMP next hop " + ) + nh = [NEXT_HOP_IP["nh1"][addr_type], NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure IBGP IPv4 peering between R2 and R3 router.") + step("Configure redistribute static in BGP on R2 router") + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N1 from running config") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N1 , " + "route become active with nexthop N2 and vice versa." 
+ ) + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure the static route with nexthop N1") + + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N2 from running config") + + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N2 , " + "route become active with nexthop N1 and vice versa." + ) + nh = NEXT_HOP_IP["nh2"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure the static route with nexthop N2") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Shut nexthop interface N1") + intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"] + + shutdown_bringup_interface(tgen, dut, intf, False) + + step("Only one the nexthops should be active in RIB.") + + nh = NEXT_HOP_IP["nh2"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + protocol=protocol, + next_hop=nh, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in 
RIB".format(tc_name) + + dut = "r2" + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + dut = "r2" + step("No shut the nexthop interface N1") + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." + ) + nh = [NEXT_HOP_IP["nh1"][addr_type], NEXT_HOP_IP["nh2"][addr_type]] + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Shut nexthop interface N2") + intf = topo["routers"]["r2"]["links"]["r1-link1"]["interface"] + dut = "r2" + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + " after shut of nexthop N1 , route become active with " + "nexthop N2 and vice versa." + ) + nh = NEXT_HOP_IP["nh2"][addr_type] + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + step("No shut nexthop interface N2") + dut = "r2" + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." + ) + nh = [NEXT_HOP_IP["nh1"][addr_type], NEXT_HOP_IP["nh2"][addr_type]] + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " missing in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + dut = "r2" + step( + "After reload of FRR router , static route installed" + " in RIB and FIB properly ." 
+ ) + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Route is" + " still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_2nh_admin_dist_p0_tc_2_ibgp(request): + """ + Verify static route functionality with 2 next hop & different AD value + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + step( + "Configure IPv4 static route (10.1.1.1) in R2 with next hop N1" + "(28.1.1.2 ) AD 10 and N2 (29.1.1.2) AD 20 , Static route next-hop" + "present on R1 \n ex :- ip route 10.1.1.1/24 28.1.1.2 10 & " + "ip route 10.1.1.1/24 29.1.1.2 20" + ) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + }, + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, static route installed in RIB using " + "show ip route with 2 next hop , lowest AD nexthop is active " + ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + step("Configure IBGP IPv4 peering between R2 and R3 router.") + step("Explicit route is added in R3 for R2 nexthop rechability") + rt3_rtes = { + "r3": { + "static_routes": [ + { + "network": NEXT_HOP_IP["nh1"][addr_type] + "/32", + "next_hop": topo["routers"]["r2"]["links"]["r3"][addr_type], + }, + { + "network": NEXT_HOP_IP["nh2"][addr_type] + "/32", + "next_hop": topo["routers"]["r2"]["links"]["r3"][addr_type], + }, + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, rt3_rtes) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("Configure 
redistribute static in BGP on R2 router") + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N1 from running config") + rt1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, rt1_nh1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N1 , " + "route become active with nexthop N2 and vice versa." + ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte1_nh1, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte2_nh2, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + step("Configure the static route with nexthop N1") + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, rte1_nh1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the static route configured with nexthop N2 from running config") + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, rte2_nh2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "On R2, after removing the static route with N2 , " + "route become active with nexthop N1 and vice versa." 
+ ) + nh = NEXT_HOP_IP["nh2"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Configure the static route with nexthop N2") + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, rte2_nh2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Shut nexthop interface N1") + intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"] + + shutdown_bringup_interface(tgen, dut, intf, False) + + step("after shut of nexthop N1 , route become active with nexthop N2") + + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + rte1_nh1, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh2"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte2_nh2, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("No shut the nexthop interface N1") + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." + ) + nh = [NEXT_HOP_IP["nh1"][addr_type]] + + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("Shut nexthop interface N2") + intf = topo["routers"]["r2"]["links"]["r1-link1"]["interface"] + + shutdown_bringup_interface(tgen, dut, intf, False) + + step( + " after shut of nexthop N1 , route become active with " + "nexthop N2 and vice versa." + ) + nh = NEXT_HOP_IP["nh2"][addr_type] + + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB".format(tc_name) + + nh = [NEXT_HOP_IP["nh1"][addr_type]] + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB".format(tc_name) + + step("No shut nexthop interface N2") + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "after shut of nexthop N1 , route become active " + "with nexthop N2 and vice versa." 
+ ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + dut = "r3" + protocol = "bgp" + + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + dut = "r2" + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + step( + "After reload of FRR router , static route installed" + " in RIB and FIB properly ." + ) + rte1_nh1 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + nh = [NEXT_HOP_IP["nh1"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, rte1_nh1, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, rte1_nh1, next_hop=nh) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "missing in RIB".format(tc_name) + + rte2_nh2 = { + "r2": { + "static_routes": [ + { + "network": NETWORK2[addr_type], + "next_hop": NEXT_HOP_IP["nh2"][addr_type], + "admin_distance": 20, + } + ] + } + } + nh = [NEXT_HOP_IP["nh2"][addr_type]] + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, rte2_nh2, next_hop=nh) + assert result is True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + result = verify_rib( + tgen, + addr_type, + dut, + rte2_nh2, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + "not active in RIB".format(tc_name) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py new file mode 100644 index 0000000000..0a757c9f28 --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo2_ibgp.py @@ -0,0 +1,1974 @@ +#!/usr/bin/python + +# +# Copyright (c) 
2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + + +""" + -Verify static route functionality with 8 next hop different AD value + and BGP ECMP + + -Verify 8 static route functionality with 8 next hop different AD + + -Verify static route with 8 next hop with different AD value and 8 + EBGP neighbors + + -Verify static route with 8 next hop with different AD value and 8 + IBGP neighbors + + -Delete the static route and verify the RIB and FIB state + + -Verify 8 static route functionality with 8 ECMP next hop +""" +import sys +import json +import time +import os +import pytest +import platform +from time import sleep +import random + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +# Import topoJson from lib, to create topology and initial configuration +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_interfaces_cfg, + shutdown_bringup_interface, + stop_router, + start_router, +) +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.topotest import version_cmp + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo2_ibgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + "11.0.20.6/32", + "11.0.20.7/32", + "11.0.20.8/32", + ], + "ipv6": [ + "2::1/128", + "2::2/128", + "2::3/128", + "2::4/128", + "2::5/128", + "2::6/128", + "2::7/128", + "2::8/128", + ], +} +PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"} +PREFIX2 = {"ipv4": "110.0.20.2/32", "ipv6": "20::2/128"} +NEXT_HOP_IP = [] +topo_diag = """ + Please view in a fixed-width font such as Courier. + +------+ +------+ +------+ + | +--------------+ +--------------+ | + | | | | | | + | R1 +---8 links----+ R2 +---8 links----+ R3 | + | | | | | | + | +--------------+ +--------------+ | + +------+ +------+ +------+ + +""" + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. 
+ + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + + Set up the pytest environment. + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. (have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def populate_nh(): + NEXT_HOP_IP = { + "nh1": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0], + }, + "nh2": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0], + }, + "nh3": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link2"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link2"]["ipv6"].split("/")[0], + }, + "nh4": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link3"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link3"]["ipv6"].split("/")[0], + }, + "nh5": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link4"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link4"]["ipv6"].split("/")[0], + }, + "nh6": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link5"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link5"]["ipv6"].split("/")[0], + }, + "nh7": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link6"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link6"]["ipv6"].split("/")[0], + }, + "nh8": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link7"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link7"]["ipv6"].split("/")[0], + }, + } + return NEXT_HOP_IP + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_static_rte_with_8ecmp_nh_p1_tc9_ibgp(request): + """ + Verify 8 static 
route functionality with 8 ECMP next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + NEXT_HOP_IP = populate_nh() + step("Configure 8 interfaces / links between R1 and R2") + step("Configure 8 interfaces / links between R2 and R3") + step("Configure 8 IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + + step( + "Configure 8 IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) , N2(22.1.1.2) , N3(23.1.1.2) , N4(24.1.1.2) ," + "N5(25.1.1.2) , N6(26.1.1.2) , N7(27.1.1.2) , N8(28.1.1.2) ," + "Static route next-hop present on R1" + ) + nh_all = {} + for addr_type in ADDR_TYPES: + # Enable static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + nh_all[addr_type] = [ + NEXT_HOP_IP["nh1"][addr_type], + NEXT_HOP_IP["nh2"][addr_type], + NEXT_HOP_IP["nh3"][addr_type], + NEXT_HOP_IP["nh4"][addr_type], + NEXT_HOP_IP["nh5"][addr_type], + NEXT_HOP_IP["nh6"][addr_type], + NEXT_HOP_IP["nh7"][addr_type], + NEXT_HOP_IP["nh8"][addr_type], + ] + + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r3" + protocol = "bgp" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + dut = "r2" + protocol = "static" + step( + "After removing the static route with N1 to N8 one by one , " + "verify that entry is removed from RIB and FIB of R3 " + ) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "verify that entry is removed from RIB and FIB of R3 " + ) + nh = NEXT_HOP_IP["nh" + str(nhp)][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed\nError: Routes is" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") 
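    # Aside, shown only as an illustration: several asserts in these tests
    # split their failure message across two source lines, e.g.
    #
    #     assert result is True, "Testcase {} : Failed \nError: Routes are"
    #     " missing in RIB".format(tc_name)
    #
    # The assert statement ends on the first line, so the second line is a
    # free-standing expression and its formatted text never becomes part
    # of the assertion message. Parenthesizing the whole message keeps it
    # attached:
    #
    #     assert result is True, (
    #         "Testcase {} : Failed\nError: Routes are missing in RIB"
    #     ).format(tc_name)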
+ for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + nh = NEXT_HOP_IP["nh" + str(nhp)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed\nError: Routes are" + " missing in RIB".format(tc_name) + + protocol = "static" + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + # Shutdown interface + dut = "r2" + step( + " interface which is about to be shut no shut between r1 and r2 is " "%s", + topo["routers"]["r2"]["links"]["r1-link{}".format(randnum)]["interface"], + ) + intf = topo["routers"]["r2"]["links"]["r1-link{}".format(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + + step("Random no shut of the nexthop interfaces") + # Bringup interface + shutdown_bringup_interface(tgen, dut, intf, True) + + step( + "After random shut/no shut of nexthop , only that " + "nexthop deleted/added from all the routes , other nexthop remain " + "unchanged" + ) + dut = "r2" + protocol = "static" + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Remove random static route with all the nexthop") + dut = "r2" + randnum = random.randint(1, 7) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum)][addr_type], + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After delete of random route , that route only got deleted from" + " RIB/FIB other route are showing properly" + ) + nh = NEXT_HOP_IP["nh{}".format(randnum)][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + start_router(tgen, "r2") + + step( + "After reload of FRR router , static route " + "installed in RIB and FIB properly ." 
+ ) + for addr_type in ADDR_TYPES: + # Enable static routes + nhp = 1 + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Verifying %s routes on r2", addr_type) + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the BGP neighbor or redistribute static knob , " + "verify route got clear from RIB and FIB of R3 routes " + ) + dut = "r3" + protocol = "bgp" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc6_ibgp(request): + """ + Verify static route functionality with 8 next hop different AD + value and BGP ECMP + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 8 interfaces / links between R1 and R2 ,") + step("Configure 8 interlaces/links between R2 and R3") + step( + "Configure IBGP IPv4 peering over loopback interface between" + "R2 and R3 router." 
+ ) + step("Configure redistribute static in BGP on R2 router") + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80, Static route next-hop" + "present on R1" + ) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + wait=2, + attempts=3, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop which " + "got removed is not shown in RIB and FIB" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + 
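+    # Administrative distance drives next-hop selection in this test: nh1 is
+    # added with AD 10, nh2 with 20, up to nh8 with 80, so only nh1 should be
+    # installed in the FIB while the higher-AD next hops stay inactive in the
+    # RIB.  A small local sanity sketch of that mapping (the expected_ad name
+    # is illustrative only; no router interaction):
+    expected_ad = {"nh" + str(nhp): 10 * nhp for nhp in range(1, 9)}
+    assert min(expected_ad, key=expected_ad.get) == "nh1"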
+ step( + " After configuring them, route is always active with lowest AD" + " value and all the nexthop populated in RIB and FIB again" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + wait=2, + attempts=3, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + protocol = "bgp" + # this is next hop reachability route in r3 as we are using ibgp + dut = "r3" + for addr_type in ADDR_TYPES: + nh_as_rte = NEXT_HOP_IP["nh1"][addr_type] + "/32" + # add static routes + nh_static_rte = { + "r3": {"static_routes": [{"network": nh_as_rte, "next_hop": "Null0"}]} + } + logger.info("Configure static routes") + result = create_static_routes(tgen, nh_static_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After each interface shut and no shut between R2 -R3 ,verify static" + "route is present in the RIB & FIB of R3 & R2 RIB/FIB is remain" + " unchanged" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + protocol = "static" + dut = "r2" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert ( + result is True + ), "Testcase {}: Failed \n " "Error: Routes are missing in RIB".format(tc_name) + + protocol = "bgp" + dut = "r3" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": 
[{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert ( + result is True + ), "Testcase {}: Failed \n " "Error: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + step("BGP neighbor remove and add") + for rtr in ["r2", "r3"]: + if "bgp" in topo["routers"][rtr].keys(): + delete_bgp = {rtr: {"bgp": {"delete": True}}} + result = create_router_bgp(tgen, topo, delete_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + create_router_bgp(tgen, topo["routers"]) + + NEXT_HOP_IP = populate_nh() + step("Verify routes are still present after delete and add bgp") + dut = "r2" + protocol = "static" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + dut = "r3" + protocol = "bgp" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + + logger.info("Remove redistribute static") + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("verify that routes are deleted from R3 routing table") + + dut = "r3" + protocol = "bgp" + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " strill present in RIB of R3".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_8nh_diff_AD_ibgp_ecmp_p1_tc7_ibgp(request): + """ + Verify static route with 8 next hop with different AD value and 8 + IBGP neighbors + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + step("Configure 8 interfaces / links between R1 and R2") + step("Configure 8 interlaces/links between R2 and R3") + step("Configure 8 IBGP IPv4 peering between R2 and R3") + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80, Static route next-hop" + "present on R1" + ) + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop which " + "got removed is not shown in RIB and FIB" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in 
RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + " After configuring them, route is always active with lowest AD" + " value and all the nexthop populated in RIB and FIB again" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + dut = "r2" + protocol = "bgp" + + # this is next hop reachability route in r3 as we are using ibgp + dut = "r3" + for addr_type in ADDR_TYPES: + nh_as_rte = NEXT_HOP_IP["nh1"][addr_type] + "/32" + # add static routes + nh_static_rte = { + "r3": {"static_routes": [{"network": nh_as_rte, "next_hop": "Null0"}]} + } + logger.info("Configure static routes") + result = create_static_routes(tgen, nh_static_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After each interface shut and no shut between R2 -R3 ,verify static" + "route is present in the RIB & FIB of R3 & R2 RIB/FIB is remain" + " unchanged" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + 
assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + protocol = "static" + dut = "r2" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert ( + result is True + ), "Testcase {}: Failed \n " "Error: Routes are missing in RIB".format(tc_name) + + protocol = "bgp" + dut = "r3" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert ( + result is True + ), "Testcase {}: Failed \n " "Error: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + step("BGP neighbor remove and add") + for rtr in ["r2", "r3"]: + if "bgp" in topo["routers"][rtr].keys(): + delete_bgp = {rtr: {"bgp": {"delete": True}}} + result = create_router_bgp(tgen, topo, delete_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + create_router_bgp(tgen, topo["routers"]) + + NEXT_HOP_IP = populate_nh() + step("Verify routes are still present after delete and add bgp") + dut = "r2" + protocol = "static" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + dut = "r3" + protocol = "bgp" + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, ( + "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + ) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + + logger.info("Remove redistribute static") + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("verify that routes are deleted from R3 routing table") + + dut = "r3" + protocol = "bgp" + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type]}]}} + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB of R3".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_8nh_diff_AD_bgp_ecmp_p1_tc10_ibgp(request): + """ + Verify 8 static route functionality with 8 next hop different AD' + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2 ") + step("Configure 8 IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80" + ) + step( + "Configure nexthop AD in such way for static route S1 , N1 is" + "preferred and for S2 , N2 is preferred and so on .." + ) + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + second_rte = { + "r2": { + "static_routes": [ + { + "network": PREFIX2[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, second_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Verify that highest AD nexthop are inactive") + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + wait=2, + attempts=3, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, 
"Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop which" + "got removed is not shown in RIB and FIB" + ) + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by one") + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + " After configuring them, route is always active with lowest AD" + " value and all the nexthop populated in RIB and FIB again" + ) + for addr_type in ADDR_TYPES: + input_dict_4 = {"r2": {"static_routes": [{"network": PREFIX1[addr_type],}]}} + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + "lowest AD is missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + step("Remove random static route with all the nexthop") + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one , " + "route become active with next preferred nexthop and nexthop " + "which got removed is not shown in RIB and FIB" + ) + nh = NEXT_HOP_IP["nh" + str(nhp)][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + 
input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Route " + " is still present in RIB".format(tc_name) + + step("Reconfigure the deleted routes and verify they are installed") + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib(tgen, addr_type, dut, input_dict_4, protocol=protocol) + assert result is True, "Testcase {} : Failed \nError: Route " + " is still present in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + + start_router(tgen, "r2") + + step("After reloading, verify that routes are still present in R2.") + result = verify_rib( + tgen, + addr_type, + dut, + second_rte, + next_hop=nh, + protocol=protocol, + fib=True, + ) + assert result is True, ( + "Testcase {} : Failed \nError: Route " + " is missing in RIB".format(tc_name) + ) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the BGP neighbor or redistribute static knob , " + "verify route got clear from RIB and FIB of R3 routes " + ) + dut = "r3" + protocol = "bgp" + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_static_route_delete_p0_tc11_ibgp(request): + """ + Delete the static route and verify the RIB and FIB state + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2 ") + step("Configure 8 IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80" + ) + step( + "Configure nexthop AD in such way for static route S1 , N1 is" + "preferred and for S2 , N2 is preferred and so on .." 
+ ) + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + second_rte = { + "r2": { + "static_routes": [ + { + "network": PREFIX2[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, second_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop , lowest AD nexthop is active" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Verify that highest AD nexthop are inactive") + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " are missing in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove the redistribute static knob") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Verify after removing the redistribute static from BGP all the" + "routes got delete from RIB and FIB of R3 " + ) + + dut = "r3" + protocol = "bgp" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, protocol=protocol, expected=False + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, 
"Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + second_rte = { + "r2": { + "static_routes": [ + { + "network": PREFIX2[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, second_rte) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + " After removing all the routes and nexthop from R2 , " + " verify R2 RIB and FIB is cleared" + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still active in RIB".format(tc_name) + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py new file mode 100644 index 0000000000..924fb3a598 --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo3_ibgp.py @@ -0,0 +1,875 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# +""" + -Verify static route ECMP functionality with 8 next hop + + -Verify static route functionality with 8 next hop different AD value + + -Verify static route with tag option + + -Verify BGP did not install the static route when it receive route + with local next hop + +""" +import sys +import json +import time +import os +import pytest +import platform +from copy import deepcopy +import random +from re import search as re_search + + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen +from lib.topotest import version_cmp + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + create_static_routes, + check_address_types, + step, + create_interfaces_cfg, + shutdown_bringup_interface, + stop_router, + start_router, +) +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib +from lib.topojson import build_topo_from_json, build_config_from_json + +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo3_ibgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = { + "ipv4": [ + "11.0.20.1/32", + "11.0.20.2/32", + "11.0.20.3/32", + "11.0.20.4/32", + "11.0.20.5/32", + "11.0.20.6/32", + "11.0.20.7/32", + "11.0.20.8/32", + ], + "ipv6": [ + "2::1/128", + "2::2/128", + "2::3/128", + "2::4/128", + "2::5/128", + "2::6/128", + "2::7/128", + "2::8/128", + ], +} +PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"} +NETWORK2 = {"ipv4": ["11.0.20.1/32"], "ipv6": ["2::1/128"]} +NEXT_HOP_IP = [] + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + + Set up the pytest environment. + + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. (have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment + + * `mod`: module name + """ + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +def populate_nh(): + NEXT_HOP_IP = { + "nh1": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0], + }, + "nh2": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0], + }, + "nh3": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link2"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link2"]["ipv6"].split("/")[0], + }, + "nh4": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link3"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link3"]["ipv6"].split("/")[0], + }, + "nh5": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link4"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link4"]["ipv6"].split("/")[0], + }, + "nh6": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link5"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link5"]["ipv6"].split("/")[0], + }, + "nh7": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link6"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link6"]["ipv6"].split("/")[0], + }, + "nh8": { + "ipv4": topo["routers"]["r1"]["links"]["r2-link7"]["ipv4"].split("/")[0], + "ipv6": topo["routers"]["r1"]["links"]["r2-link7"]["ipv6"].split("/")[0], + }, + } + return NEXT_HOP_IP + + +##################################################### +# +# Tests starting +# +##################################################### + + +def test_staticroute_with_ecmp_p0_tc3_ibgp(request): + """ + Verify static route ECMP functionality with 8 next hop' + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2,") + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2), N2(22.1.1.2), N3(23.1.1.2), N4(24.1.1.2)," + "N5(25.1.1.2), N6(26.1.1.2), N7(27.1.1.2),N8(28.1.1.2), Static" + "route next-hop present on R1" + ) + + step("Configure IBGP IPv4 peering between R2 and R3 router.") + + for addr_type in ADDR_TYPES: + # Enable static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + nh = [ + NEXT_HOP_IP["nh1"][addr_type], + NEXT_HOP_IP["nh2"][addr_type], + NEXT_HOP_IP["nh3"][addr_type], + NEXT_HOP_IP["nh4"][addr_type], + NEXT_HOP_IP["nh5"][addr_type], + NEXT_HOP_IP["nh6"][addr_type], + NEXT_HOP_IP["nh7"][addr_type], + NEXT_HOP_IP["nh8"][addr_type], + ] + + dut = "r2" + protocol = "static" + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by" "one") + + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + 
str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \n" + "Error: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \n" + "Error: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + start_router(tgen, "r2") + + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are" + " missing in RIB".format(tc_name) + + write_test_footer(tc_name) + + +def test_staticroute_with_ecmp_with_diff_AD_p0_tc4_ibgp(request): + """ + Verify static route ECMP functionality with 8 next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + + step("Configure 8 interfaces / links between R1 and R2,") + step("Configure IBGP IPv4 peering between R2 and R3 router.") + reset_config_on_routers(tgen) + NEXT_HOP_IP = populate_nh() + nh_all = {} + for addr_type in ADDR_TYPES: + nh_all[addr_type] = [] + for nhp in range(1, 9): + nh_all[addr_type].append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + step( + "Configure IPv4 static route in R2 with 8 next hop" + "N1(21.1.1.2) AD 10, N2(22.1.1.2) AD 20, N3(23.1.1.2) AD 30," + "N4(24.1.1.2) AD 40, N5(25.1.1.2) AD 50, N6(26.1.1.2) AD 60," + "N7(27.1.1.2) AD 70, N8(28.1.1.2) AD 80, Static route next-hop" + "present on R1" + ) + for addr_type in ADDR_TYPES: + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + logger.info("Verifying %s routes on r2", addr_type) + + step( + "On R2, static route installed in RIB using " + "show ip route with 8 next hop, lowest AD nexthop is active" + ) + step("On R2, static route with lowest AD nexthop installed in FIB") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in 
RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes " + " with high AD are active in RIB".format(tc_name) + + step("Configure redistribute static in BGP on R2 router") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + + logger.info("Configuring redistribute static") + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After configuring them, route is always active with lowest AD" + "value and all the nexthop populated in RIB and FIB again " + ) + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in RIB".format(tc_name) + + step( + "Remove the static route configured with nexthop N1 to N8, one" + "by one from running config" + ) + + for addr_type in ADDR_TYPES: + # delete static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + "delete": True, + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After removing the static route with N1 to N8 one by one, " + "route become active with next preferred nexthop and nexthop which " + "got removed is not shown in RIB and FIB" + ) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh_all[addr_type], + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are" + " still present in RIB".format(tc_name) + + step("Configure the static route with nexthop N1 to N8, one by" "one") + for addr_type in ADDR_TYPES: + # add static routes + for nhp in range(1, 9): + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(nhp)][addr_type], + "admin_distance": 10 * nhp, + } + ] + } + } + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("On R2, static route with lowest AD nexthop installed in FIB") + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with " + " lowest AD is missing in RIB".format(tc_name) + + nh = [] + for nhp in 
range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes with high AD are active in RIB".format(tc_name) + + step("Random shut of the nexthop interfaces") + randnum = random.randint(0, 7) + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, False) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + input_dict_5 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type], + } + ] + } + } + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_5, + next_hop=nhip, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are still present in RIB".format(tc_name) + + step("Random no shut of the nexthop interfaces") + for addr_type in ADDR_TYPES: + intf = topo["routers"]["r2"]["links"]["r1-link" + str(randnum)]["interface"] + shutdown_bringup_interface(tgen, dut, intf, True) + nhip = NEXT_HOP_IP["nh" + str(randnum + 1)][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_5, next_hop=nhip, protocol=protocol + ) + assert result is True, "Testcase {} : Failed \nError: Routes are missing in RIB".format(tc_name) + + step("Reload the FRR router") + # stop/start -> restart FRR router and verify + stop_router(tgen, "r2") + start_router(tgen, "r2") + + step( + "After reload of FRR router, static route installed " + "in RIB and FIB properly." + ) + for addr_type in ADDR_TYPES: + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": PREFIX1[addr_type], + "next_hop": NEXT_HOP_IP["nh1"][addr_type], + "admin_distance": 10, + } + ] + } + } + dut = "r2" + protocol = "static" + nh = NEXT_HOP_IP["nh1"][addr_type] + result = verify_rib( + tgen, addr_type, dut, input_dict_4, next_hop=nh, protocol=protocol, fib=True + ) + assert result is True, "Testcase {} : Failed \nError: Route with lowest AD is missing in RIB".format(tc_name) + + nh = [] + for nhp in range(2, 9): + nh.append(NEXT_HOP_IP["nh" + str(nhp)][addr_type]) + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + fib=True, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes with high AD are active in RIB".format(tc_name) + + step("Remove the redistribute static knob") + + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "redistribute": [ + {"redist_type": "static", "delete": True} + ] + } + } + } + } + } + } + + logger.info("Remove redistribute static") + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + step("verify that routes are deleted from R3 routing table") + dut = "r3" + protocol = "bgp" + result = verify_rib( + tgen, + addr_type, + dut, + input_dict_4, + next_hop=nh, + protocol=protocol, + expected=False, + ) + assert result is not True, "Testcase {} : Failed \nError: Routes are still present in RIB of R3".format(tc_name) + + write_test_footer(tc_name) + + +def test_bgp_local_nexthop_p1_tc14_ibgp(request): + """ + Verify BGP did not install the static route when it receives a route + 
with local next hop + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + + step("Configure BGP IPv4 session between R2 and R3") + step("Configure IPv4 static route on R2") + reset_config_on_routers(tgen) + + for addr_type in ADDR_TYPES: + # Enable static routes + input_dict_4 = { + "r2": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": topo["routers"]["r3"]["links"]["r2-link0"][ + addr_type + ].split("/")[0], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure redistribute static in the BGP") + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify R2 BGP table has IPv4 route") + dut = "r2" + result = verify_rib(tgen, addr_type, dut, input_dict_4) + assert result is True, "Testcase {} : Failed \nError: Routes is" + " missing in RIB of R2".format(tc_name) + + step(" Verify route did not install in the R3 BGP table, RIB/FIB") + dut = "r3" + result = verify_bgp_rib(tgen, addr_type, dut, input_dict_4, expected=False) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in BGP RIB of R2".format(tc_name) + + result = verify_rib(tgen, addr_type, dut, input_dict_4, expected=False) + assert result is not True, "Testcase {} : Failed \nError: Routes is" + " still present in RIB of R2".format(tc_name) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py new file mode 100644 index 0000000000..fdbfad25b3 --- /dev/null +++ b/tests/topotests/static_routing_with_ibgp/test_static_routes_topo4_ibgp.py @@ -0,0 +1,991 @@ +#!/usr/bin/python + +# +# Copyright (c) 2020 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" + +Following tests are covered in the script. + +- Verify static route are blocked from route-map and prefix-list + applied in BGP nbrs +""" + +import sys +import json +import time +import os +import pytest +import platform +import ipaddress +from copy import deepcopy + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) +# pylint: disable=C0413 +# Import topogen and topotest helpers +from mininet.topo import Topo +from lib.topogen import Topogen, get_topogen + +from lib.common_config import ( + start_topology, + write_test_header, + write_test_footer, + reset_config_on_routers, + verify_rib, + check_address_types, + step, + create_prefix_lists, + create_route_maps, + create_interfaces_cfg, + verify_prefix_lists, + verify_route_maps, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + clear_bgp_and_verify, + clear_bgp, +) +from lib.topojson import build_topo_from_json, build_config_from_json +from lib.topotest import version_cmp +# Reading the data from JSON File for topology creation +jsonFile = "{}/static_routes_topo4_ibgp.json".format(CWD) +try: + with open(jsonFile, "r") as topoJson: + topo = json.load(topoJson) +except IOError: + assert False, "Could not read file {}".format(jsonFile) + +# Global variables +BGP_CONVERGENCE = False +ADDR_TYPES = check_address_types() +NETWORK = {"ipv4": "2.2.2.2/32", "ipv6": "22:22::2/128"} +NEXT_HOP_IP = {} + + +class CreateTopo(Topo): + """ + Test CreateTopo - topology 1. + + * `Topo`: Topology object + """ + + def build(self, *_args, **_opts): + """Build function.""" + tgen = get_topogen(self) + + # Building topology from json file + build_topo_from_json(tgen, topo) + + +def setup_module(mod): + """ + Set up the pytest environment. + * `mod`: module name + """ + global topo + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + tgen = Topogen(CreateTopo, mod.__name__) + # ... and here it calls Mininet initialization functions. + + # Starting topology, create tmp files which are loaded to routers + # to start deamons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + if version_cmp(platform.release(), '4.19') < 0: + error_msg = ('These tests will not run. (have kernel "{}", ' + 'requires kernel >= 4.19)'.format(platform.release())) + pytest.skip(error_msg) + + # Checking BGP convergence + global BGP_CONVERGENCE + global ADDR_TYPES + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Api call verify whether BGP is converged + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """ + Teardown the pytest environment. 
+ + * `mod`: module name + """ + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop topology and remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# +# Tests starting +# +##################################################### +def test_static_routes_rmap_pfxlist_p0_tc7_ibgp(request): + """ + Verify static routes are blocked from route-map & prefix-list applied in BGP + nbrs + + """ + tc_name = request.node.name + write_test_header(tc_name) + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + reset_config_on_routers(tgen) + step("Configure holddown timer = 1 keep alive = 3 in all the neighbors") + step("verify bgp convergence before starting test case") + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step( + "Configure 4 IPv4 and 4 IPv6 nbrs with mismatched password " + "authentication between FRR routers " + ) + + for addr_type in ADDR_TYPES: + # API call to modify BGP timers + input_dict = { + "r2": { + "bgp": { + "local_as": "200", + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": {"password": "r2"}, + "r2-link1": {"password": "r2"}, + "r2-link2": {"password": "r2"}, + "r2-link3": {"password": "r2"}, + } + }, + "r3": { + "dest_link": { + "r2-link0": {"password": "r2"}, + "r2-link1": {"password": "r2"}, + "r2-link2": {"password": "r2"}, + "r2-link3": {"password": "r2"}, + } + }, + } + } + } + }, + } + } + } + result = create_router_bgp(tgen, topo, deepcopy(input_dict)) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + clear_bgp(tgen, addr_type, "r2") + + step("All BGP nbrs are down as authentication is mismatched on both" " the sides") + + bgp_convergence = verify_bgp_convergence(tgen, topo, expected=False) + assert bgp_convergence is not True, "Testcase {} : Failed \n BGP nbrs must be down. 
Error: {}".format(tc_name, bgp_convergence) + + step( + "Configure 4 IPv4 and 4 IPv6 nbrs with macthing password " + " authentication between FRR routers " + ) + for addr_type in ADDR_TYPES: + input_dict = { + "r2": { + "bgp": { + "local_as": "200", + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": {"password": "r1"}, + "r2-link1": {"password": "r1"}, + "r2-link2": {"password": "r1"}, + "r2-link3": {"password": "r1"}, + } + }, + "r3": { + "dest_link": { + "r2-link0": {"password": "r1"}, + "r2-link1": {"password": "r1"}, + "r2-link2": {"password": "r1"}, + "r2-link3": {"password": "r1"}, + } + }, + } + } + } + }, + } + } + } + result = create_router_bgp(tgen, topo, deepcopy(input_dict)) + assert result is True, "Testcase {} :Failed \n Error: {}".format( + tc_name, result + ) + + step("All BGP nbrs are up as authentication is matched now") + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n " "Error: {}".format( + tc_name, bgp_convergence + ) + + step("Create prefix list P1 to permit VM3 & deny VM1 v4 & v6 routes") + step("Create prefix list P2 to permit VM6 IPv4 and IPv6 routes") + for addr_type in ADDR_TYPES: + input_dict_2 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_list_1_{}".format(addr_type): [ + { + "seqid": 10, + "network": topo["routers"]["r2"]["links"]["vm3"][ + addr_type + ], + "action": "permit", + }, + { + "seqid": 20, + "network": topo["routers"]["r2"]["links"]["vm1"][ + addr_type + ], + "action": "deny", + }, + ], + "pf_list_2_{}".format(addr_type): [ + { + "seqid": 10, + "network": topo["routers"]["r2"]["links"]["vm6"][ + addr_type + ], + "action": "permit", + } + ], + } + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Prefix list created with matching networks deny or permit " + "show ip prefix list" + ) + result = verify_prefix_lists(tgen, input_dict_2) + assert result is not True, "Testcase {} : Failed \n" + " Error: {}".format(tc_name, result) + + step("Redistribute all the routes (connected, static)") + input_dict_2_r1 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2_r2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2_r3 = { + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("configure redistribute connected in Router BGP") + + input_dict_2_r1 = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "connected"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2_r3 = { + "r3": { + "bgp": { + 
"address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "connected"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2_r3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_2 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "connected"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply prefix list P1 on BGP neighbors 1 2 3 4 connected from " "frr r1") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply prefix list P2 on BGP nbrs 5 & 6 connected from FRR-2") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + clear_bgp_and_verify(tgen, topo, "r2") + + step( + "VM1 IPv4 and IPv6 Route which is denied using prefix list is " + "not present on FRR1 side routing table , also not able to " + "ping the routes show ip route" + ) + + dut = "r1" + protocol = "bgp" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm1"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result4 is not True, "Testcase {} : Failed , VM1 route is " + "not filtered out via prefix list. 
\n Error: {}".format(tc_name, result4) + + step( + "VM4 and VM6 IPV4 and IPv6 address are present in local and " + "FRR2 routing table show ip bgp show ip route" + ) + + dut = "r2" + ntwk_r2_vm6 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm6"][addr_type]) + ).network + ) + input_dict = {"r3": {"static_routes": [{"network": ntwk_r2_vm6}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict) + assert result4 is True, "Testcase {} : Failed.\n Error: {}".format( + tc_name, result4 + ) + + step("Remove prefix list from all the neighbors") + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_1_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link1": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link2": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + "r2-link3": { + "prefix_lists": [ + { + "name": "pf_list_2_{}".format( + addr_type + ), + "direction": "out", + "delete": True, + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + clear_bgp_and_verify(tgen, topo, "r2") + + step("Create RouteMap_1 with prefix list P1 and weight 50") + # Create route map + rmap_dict = { + "r2": { + "route_maps": { + "rmap_pf_list_1_{}".format(addr_type): [ + { + "action": "permit", + "set": {"weight": 50}, + "match": { + addr_type: { + "prefix_lists": "pf_list_1_{}".format(addr_type) + } + }, + } + ] + } + } + } + result = create_route_maps(tgen, rmap_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Create RouteMap_2 with prefix list P2 and weight 50") + # Create route map + rmap_dict = { + "r2": { + "route_maps": { + "rmap_pf_list_2_{}".format(addr_type): [ + { + "action": "permit", + "set": {"weight": 50}, + "match": { + addr_type: { + "prefix_lists": "pf_list_2_{}".format(addr_type) + } + }, + } + ] + } + } + } + result = create_route_maps(tgen, rmap_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify Route-map created verify using show route-map") + # verify rmap_pf_list_1 and rmap_pf_list_2 are present in router r2 + input_dict = { + "r2": { + "route_maps": [ + 
"rmap_pf_list_1_{}".format(addr_type), + "rmap_pf_list_2_{}".format(addr_type), + ] + } + } + result = verify_route_maps(tgen, input_dict, expected=False) + assert result is not True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply policy RouteMap_1 nbrs 1 2 3 4 to FRR 1") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2-link0": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link1": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link2": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link3": { + "route_maps": [ + { + "name": "rmap_pf_list_1_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Apply policy RouteMap_2 nbrs 5 and 6 to FRR2") + # Configure prefix list to bgp neighbor + input_dict_4 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r2-link0": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link1": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link2": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + "r2-link3": { + "route_maps": [ + { + "name": "rmap_pf_list_2_" + "{}".format(addr_type), + "direction": "out", + } + ] + }, + } + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "After applying to BGP neighbors verify VM1 IPv4 and IPv6 Route" + " which is denied using prefix list is not present on FRR side" + " routing table , also not able to ping the routes show ip route" + " and VM4 and VM6 IPV4 and IPv6 address are present in local and" + " FRR routing table show ip bgp show ip route" + ) + + dut = "r1" + protocol = "bgp" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm1"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert result4 is not True, "Testcase {} : Failed \n" "Error: {}".format( + tc_name, result4 + ) + + step("vm4 should be present in FRR1") + dut = "r1" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r1"]["links"]["vm4"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict) + assert result4 is True, "Testcase {} : Failed , VM1 route is " + "not filtered out via prefix list. 
\n Error: {}".format(tc_name, result4) + + step("vm4 should be present in FRR2") + dut = "r2" + ntwk_r2_vm1 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r1"]["links"]["vm4"][addr_type]) + ).network + ) + input_dict = {"r1": {"static_routes": [{"network": ntwk_r2_vm1}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict) + assert result4 is True, "Testcase {} : Failed , VM1 route is " + "not filtered out via prefix list. \n Error: {}".format(tc_name, result4) + + dut = "r3" + protocol = "bgp" + ntwk_r2_vm6 = str( + ipaddress.ip_interface( + u"{}".format(topo["routers"]["r2"]["links"]["vm6"][addr_type]) + ).network + ) + input_dict = {"r3": {"static_routes": [{"network": ntwk_r2_vm6}]}} + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed.\n Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/zebra_netlink/test_zebra_netlink.py b/tests/topotests/zebra_netlink/test_zebra_netlink.py index 4da5303641..94baf8438f 100644 --- a/tests/topotests/zebra_netlink/test_zebra_netlink.py +++ b/tests/topotests/zebra_netlink/test_zebra_netlink.py @@ -118,7 +118,12 @@ def test_zebra_netlink_batching(): r1.vtysh_cmd("sharp install routes 2.1.3.7 nexthop 192.168.1.1 100") json_file = "{}/r1/v4_route.json".format(CWD) expected = json.loads(open(json_file).read()) - test_func = partial(topotest.router_json_cmp, r1, "show ip route json", expected,) + test_func = partial( + topotest.router_json_cmp, + r1, + "show ip route json", + expected, + ) _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) assertmsg = '"r1" JSON output mismatches' assert result is None, assertmsg diff --git a/tests/topotests/zebra_rib/r1/iproute.ref b/tests/topotests/zebra_rib/r1/iproute.ref new file mode 100644 index 0000000000..b28182c2d1 --- /dev/null +++ b/tests/topotests/zebra_rib/r1/iproute.ref @@ -0,0 +1,512 @@ +4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272 +4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217 +4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161 +4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561 +10.0.0.0 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.1 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.2 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.3 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.4 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.5 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.6 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.7 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.8 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.9 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.10 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.11 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.12 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.13 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.14 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.15 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 
metric 20 +10.0.0.16 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.17 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.18 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.19 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.20 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.21 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.22 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.23 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.24 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.25 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.26 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.27 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.28 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.29 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.30 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.31 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.32 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.33 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.34 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.35 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.36 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.37 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.38 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.39 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.40 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.41 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.42 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.43 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.45 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.46 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.47 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.48 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.49 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.50 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.51 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.52 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.53 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.54 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.55 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.56 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.57 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.58 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.59 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.60 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.61 via 
192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.62 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.63 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.64 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.65 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.66 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.67 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.68 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.69 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.70 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.71 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.72 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.73 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.74 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.75 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.76 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.77 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.78 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.79 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.80 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.81 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.82 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.83 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.84 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.85 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.86 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.87 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.88 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.89 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.90 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.91 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.92 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.93 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.94 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.95 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.96 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.97 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.98 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.99 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.100 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.101 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.102 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.103 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.104 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.105 via 192.168.216.3 dev r1-eth6 
proto XXXX src 192.168.213.1 metric 20 +10.0.0.106 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.107 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.108 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.109 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.110 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.111 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.112 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.113 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.114 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.115 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.116 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.117 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.118 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.119 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.120 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.121 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.122 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.123 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.124 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.125 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.126 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.127 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.128 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.129 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.130 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.131 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.132 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.133 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.134 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.135 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.136 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.137 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.138 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.139 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.140 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.141 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.142 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.143 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.144 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.145 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.146 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.147 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.148 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.149 via 192.168.216.3 
dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.150 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.151 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.152 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.153 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.154 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.155 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.156 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.157 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.158 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.159 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.160 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.161 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.162 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.163 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.164 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.165 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.166 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.167 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.168 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.169 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.170 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.171 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.172 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.173 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.174 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.175 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.176 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.177 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.178 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.179 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.180 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.181 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.182 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.183 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.184 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.185 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.186 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.187 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.188 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.189 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.190 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.191 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.192 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.193 via 
192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.194 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.195 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.196 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.197 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.198 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.199 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.200 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.201 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.202 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.203 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.204 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.205 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.206 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.207 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.208 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.209 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.210 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.211 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.212 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.213 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.214 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.215 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.216 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.217 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.218 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.219 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.220 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.221 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.222 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.223 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.224 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.225 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.226 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.227 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.228 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.229 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.230 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.231 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.232 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.233 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.234 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.235 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.236 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 
+10.0.0.237 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.238 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.239 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.240 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.241 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.242 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.243 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.244 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.245 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.246 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.247 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.248 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.249 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.250 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.251 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.252 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.253 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.254 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.0.255 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.213.1 metric 20 +10.0.1.0 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.1 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.2 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.3 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.4 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.5 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.6 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.7 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.8 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.9 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.10 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.11 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.12 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.13 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.14 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.15 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.16 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.17 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.18 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.19 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.20 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.21 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.22 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.23 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.24 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.25 via 
192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.26 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.27 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.28 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.29 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.30 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.31 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.32 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.33 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.34 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.35 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.36 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.37 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.38 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.39 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.40 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.41 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.42 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.43 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.44 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.45 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.46 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.47 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.48 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.49 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.50 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.51 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.52 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.53 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.54 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.55 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.56 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.57 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.58 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.59 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.60 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.61 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.62 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.63 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.64 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.65 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.66 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.67 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.68 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.69 via 192.168.216.3 dev r1-eth6 proto 
XXXX src 192.168.214.1 metric 20 +10.0.1.70 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.71 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.72 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.73 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.74 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.75 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.76 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.77 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.78 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.79 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.80 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.81 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.82 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.83 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.84 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.85 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.86 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.87 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.88 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.89 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.90 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.91 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.92 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.93 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.94 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.95 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.96 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.97 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.98 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.99 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.100 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.101 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.102 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.103 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.104 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.105 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.106 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.107 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.108 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.109 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.110 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.111 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.112 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.113 via 192.168.216.3 dev r1-eth6 proto XXXX src 
192.168.214.1 metric 20 +10.0.1.114 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.115 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.116 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.117 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.118 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.119 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.120 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.121 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.122 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.123 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.124 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.125 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.126 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.127 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.128 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.129 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.130 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.131 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.132 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.133 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.134 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.135 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.136 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.137 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.138 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.139 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.140 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.141 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.142 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.143 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.144 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.145 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.146 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.147 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.148 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.149 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.150 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.151 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.152 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.153 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.154 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.155 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.156 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.157 via 192.168.216.3 dev r1-eth6 proto 
XXXX src 192.168.214.1 metric 20 +10.0.1.158 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.159 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.160 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.161 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.162 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.163 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.164 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.165 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.166 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.167 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.168 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.169 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.170 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.171 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.172 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.173 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.174 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.175 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.176 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.177 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.178 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.179 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.180 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.181 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.182 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.183 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.184 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.185 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.186 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.187 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.188 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.189 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.190 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.191 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.192 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.193 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.194 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.195 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.196 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.197 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.198 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.199 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.200 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.201 via 192.168.216.3 dev 
r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.202 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.203 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.204 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.205 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.206 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.207 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.208 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.209 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.210 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.211 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.212 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.213 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.214 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.215 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.216 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.217 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.218 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.219 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.220 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.221 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.222 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.223 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.224 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.225 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.226 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.227 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.228 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.229 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.230 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.231 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.232 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.233 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.234 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.235 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.236 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.237 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.238 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.239 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.240 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.241 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.242 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.0.1.243 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.214.1 metric 20 +10.100.100.100 via 192.168.216.3 dev r1-eth6 proto XXXX src 192.168.215.1 metric 20 +192.168.210.0/24 
dev r1-eth0 proto XXXX scope link src 192.168.210.1 +192.168.211.0/24 dev r1-eth1 proto XXXX scope link src 192.168.211.1 +192.168.212.0/24 dev r1-eth2 proto XXXX scope link src 192.168.212.1 +192.168.213.0/24 dev r1-eth3 proto XXXX scope link src 192.168.213.1 +192.168.214.0/24 dev r1-eth4 proto XXXX scope link src 192.168.214.1 +192.168.215.0/24 dev r1-eth5 proto XXXX scope link src 192.168.215.1 +192.168.216.0/24 dev r1-eth6 proto XXXX scope link src 192.168.216.1 +192.168.217.0/24 dev r1-eth7 proto XXXX scope link src 192.168.217.1 diff --git a/tests/topotests/zebra_rib/r1/sharp_rmap.ref b/tests/topotests/zebra_rib/r1/sharp_rmap.ref new file mode 100644 index 0000000000..47a9eb6a49 --- /dev/null +++ b/tests/topotests/zebra_rib/r1/sharp_rmap.ref @@ -0,0 +1,17 @@ +ZEBRA: +route-map: sharp Invoked: 500 Optimization: enabled Processed Change: false + permit, sequence 10 Invoked 244 + Match clauses: + ip address 10 + Set clauses: + src 192.168.214.1 + Call clause: + Action: + Exit routemap + permit, sequence 20 Invoked 256 + Match clauses: + Set clauses: + src 192.168.213.1 + Call clause: + Action: + Exit routemap diff --git a/tests/topotests/zebra_rib/r1/static_rmap.ref b/tests/topotests/zebra_rib/r1/static_rmap.ref new file mode 100644 index 0000000000..2de98bd514 --- /dev/null +++ b/tests/topotests/zebra_rib/r1/static_rmap.ref @@ -0,0 +1,9 @@ +ZEBRA: +route-map: static Invoked: 2 Optimization: enabled Processed Change: false + permit, sequence 10 Invoked 2 + Match clauses: + Set clauses: + src 192.168.215.1 + Call clause: + Action: + Exit routemap diff --git a/tests/topotests/zebra_rib/test_zebra_rib.py b/tests/topotests/zebra_rib/test_zebra_rib.py index ef4b597206..daf8f7be20 100644 --- a/tests/topotests/zebra_rib/test_zebra_rib.py +++ b/tests/topotests/zebra_rib/test_zebra_rib.py @@ -41,6 +41,7 @@ sys.path.append(os.path.join(CWD, "../")) from lib import topotest from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger +from time import sleep # Required to instantiate the topology builder class. from mininet.topo import Topo @@ -75,8 +76,9 @@ def setup_module(mod): router_list = tgen.routers() for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) - ) + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))) + router.load_config( + TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))) # Initialize all routers. 
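The Invoked counters in sharp_rmap.ref above follow from the 500 consecutive /32 routes that test_route_map_usage (below) installs with "sharp install route 10.0.0.0 nexthop 192.168.216.3 500": the routes that land inside 10.0.1.0/24 (access-list 10) hit route-map sequence 10, and the remainder fall through to sequence 20. A minimal standalone sketch of that split, for illustration only and not part of the test itself:

from ipaddress import ip_address, ip_network

first = ip_address("10.0.0.0")
routes = [first + i for i in range(500)]       # sharp install route 10.0.0.0 nexthop 192.168.216.3 500
acl_10 = ip_network("10.0.1.0/24")             # access-list 10 seq 5 permit 10.0.1.0/24

seq10 = sum(1 for r in routes if r in acl_10)  # routes matching 'match ip address 10'
seq20 = len(routes) - seq10                    # routes falling through to sequence 20
print(seq10, seq20)                            # -> 244 256, the Invoked counts in sharp_rmap.ref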
tgen.start_router() @@ -157,6 +159,111 @@ def test_zebra_kernel_override(): _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5) assert result is None, '"r1" JSON output mismatches' +def test_route_map_usage(): + "Test that FRR only reruns over routes associated with the routemap" + logger.info("Test that FRR runs on selected re's on route-map changes") + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip("Skipped because of previous test failure") + + thisDir = os.path.dirname(os.path.realpath(__file__)) + + r1 = tgen.gears["r1"] + # set the delay timer to 1 to improve test coverage (HA) + r1.vtysh_cmd("conf\nzebra route-map delay-timer 1") + r1.vtysh_cmd("conf\nroute-map static permit 10\nset src 192.168.215.1") + r1.vtysh_cmd("conf\naccess-list 5 seq 5 permit 10.0.0.44/32") + r1.vtysh_cmd("conf\naccess-list 10 seq 5 permit 10.0.1.0/24") + r1.vtysh_cmd("conf\nroute-map sharp permit 10\nmatch ip address 10\nset src 192.168.214.1") + r1.vtysh_cmd("conf\nroute-map sharp permit 20\nset src 192.168.213.1") + r1.vtysh_cmd("conf\nip protocol static route-map static") + r1.vtysh_cmd("conf\nip protocol sharp route-map sharp") + sleep(4) + r1.vtysh_cmd("conf\nip route 10.100.100.100/32 192.168.216.3") + r1.vtysh_cmd("conf\nip route 10.100.100.101/32 10.0.0.44") + r1.vtysh_cmd("sharp install route 10.0.0.0 nexthop 192.168.216.3 500") + sleep(4) + + static_rmapfile = "%s/r1/static_rmap.ref" % (thisDir) + expected = open(static_rmapfile).read().rstrip() + expected = ('\n'.join(expected.splitlines()) + '\n').rstrip() + actual = r1.vtysh_cmd("show route-map static") + actual = ('\n'.join(actual.splitlines()) + '\n').rstrip() + logger.info("Does the show route-map static command run the correct number of times") + + diff = topotest.get_textdiff(actual, expected, + title1 = "Actual Route-map output", + title2 = "Expected Route-map output") + if diff: + logger.info("Actual:") + logger.info(actual) + logger.info("Expected:") + logger.info(expected) + srun = r1.vtysh_cmd("show run") + srun = ('\n'.join(srun.splitlines()) + '\n').rstrip() + logger.info("Show run") + logger.info(srun) + assert 0, "r1 static route processing:\n" + + sharp_rmapfile = "%s/r1/sharp_rmap.ref" % (thisDir) + expected = open(sharp_rmapfile).read().rstrip() + expected = ('\n'.join(expected.splitlines()) + '\n').rstrip() + actual = r1.vtysh_cmd("show route-map sharp") + actual = ('\n'.join(actual.splitlines()) + '\n').rstrip() + logger.info("Does the show route-map sharp command run the correct number of times") + + diff = topotest.get_textdiff(actual, expected, + title1 = "Actual Route-map output", + title2 = "Expected Route-map output") + if diff: + logger.info("Actual:") + logger.info(actual) + logger.info("Expected:") + logger.info(expected) + srun = r1.vtysh_cmd("show run") + srun = ('\n'.join(srun.splitlines()) + '\n').rstrip() + logger.info("Show run:") + logger.info(srun) + assert 0, "r1 sharp route-map processing:\n" + + logger.info("Add a extension to the static route-map to see the static route go away") + r1.vtysh_cmd("conf\nroute-map sharp deny 5\nmatch ip address 5") + sleep(2) + # we are only checking the kernel here as that this will give us the implied + # testing of both the route-map and staticd withdrawing the route + # let's spot check that the routes were installed correctly + # in the kernel + logger.info("Test that the routes installed are correct") + sharp_ipfile = "%s/r1/iproute.ref" % (thisDir) + expected = open(sharp_ipfile).read().rstrip() + expected = 
('\n'.join(expected.splitlines()) + '\n').rstrip() + actual = r1.run("ip route show") + actual = ('\n'.join(actual.splitlines()) + '\n').rstrip() + actual = re.sub(r" nhid [0-9][0-9]", "", actual) + actual = re.sub(r" proto sharp", " proto XXXX", actual) + actual = re.sub(r" proto static", " proto XXXX", actual) + actual = re.sub(r" proto 194", " proto XXXX", actual) + actual = re.sub(r" proto 196", " proto XXXX", actual) + actual = re.sub(r" proto kernel", " proto XXXX", actual) + actual = re.sub(r" proto 2", " proto XXXX", actual) + # Some platforms have double spaces? Why?????? + actual = re.sub(r" proto XXXX ", " proto XXXX ", actual) + actual = re.sub(r" metric", " metric", actual) + actual = re.sub(r" link ", " link ", actual) + diff = topotest.get_textdiff(actual, expected, + title1 = "Actual ip route show", + title2 = "Expected ip route show") + + if diff: + logger.info("Actual:") + logger.info(actual) + logger.info("Expected:") + logger.info(expected) + srun = r1.vtysh_cmd("show run") + srun = ('\n'.join(srun.splitlines()) + '\n').rstrip() + logger.info("Show run:") + logger.info(srun) + assert 0, "r1 ip route show is not correct:" def test_memory_leak(): "Run the memory leak test and report results." diff --git a/tests/zebra/test_lm_plugin.c b/tests/zebra/test_lm_plugin.c new file mode 100644 index 0000000000..4a9344fee4 --- /dev/null +++ b/tests/zebra/test_lm_plugin.c @@ -0,0 +1,134 @@ +/* + * Label Manager tests. + * Copyright (C) 2020 Volta Networks + * Patrick Ruddy + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#include <zebra.h> +#include "zebra/zapi_msg.h" +#include "zebra/label_manager.h" + +/* shim out unused functions/variables to allow the lablemanager to compile*/ +DEFINE_KOOH(zserv_client_close, (struct zserv * client), (client)); +unsigned long zebra_debug_packet = 0; +struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance, + uint32_t session_id) +{ + return NULL; +} + +int zsend_label_manager_connect_response(struct zserv *client, vrf_id_t vrf_id, + unsigned short result) +{ + return 0; +} + +int zsend_assign_label_chunk_response(struct zserv *client, vrf_id_t vrf_id, + struct label_manager_chunk *lmc) +{ + return 0; +} + + +static int test_client_connect(struct zserv *client, vrf_id_t vrf_id) +{ + return 0; +} + +static int test_client_disconnect(struct zserv *client) +{ + return 0; +} + +/* external test hook functions */ +static int lm_get_chunk_pi(struct label_manager_chunk **lmc, + struct zserv *client, uint8_t keep, uint32_t size, + uint32_t base, vrf_id_t vrf_id) +{ + if (base == 0) + *lmc = create_label_chunk(10, 55, 0, 1, 50, 50 + size); + else + *lmc = assign_label_chunk(10, 55, 0, 1, size, base); + + return 0; +} + +static int lm_release_chunk_pi(struct zserv *client, uint32_t start, + uint32_t end) +{ + return release_label_chunk(client->proto, client->instance, + client->session_id, start, end); +} + + +/* use external allocations */ +static void lp_plugin_init() +{ + /* register our own hooks */ + hook_register(lm_client_connect, test_client_connect); + hook_register(lm_client_disconnect, test_client_disconnect); + hook_register(lm_get_chunk, lm_get_chunk_pi); + hook_register(lm_release_chunk, lm_release_chunk_pi); +} + +static void lp_plugin_cleanup() +{ + /* register our own hooks */ + hook_unregister(lm_client_connect, test_client_connect); + hook_unregister(lm_client_disconnect, test_client_disconnect); + hook_unregister(lm_get_chunk, lm_get_chunk_pi); + hook_unregister(lm_release_chunk, lm_release_chunk_pi); +} + + +/* tests */ + +static void test_lp_plugin() +{ + struct label_manager_chunk *lmc; + + lmc = assign_label_chunk(10, 55, 0, 1, 50, 0); + fprintf(stdout, + "chunk: start %u end %u proto %u instance %u session %u keep %s\n", + lmc->start, lmc->end, lmc->proto, lmc->instance, + lmc->session_id, lmc->keep ? "yes" : "no"); + delete_label_chunk(lmc); + + lmc = assign_label_chunk(10, 55, 0, 1, 50, 100); + fprintf(stdout, + "chunk: start %u end %u proto %u instance %u session %u keep %s\n", + lmc->start, lmc->end, lmc->proto, lmc->instance, + lmc->session_id, lmc->keep ? 
"yes" : "no"); + release_label_chunk(10, 55, 0, lmc->start, lmc->end); +} + +int main(int argc, char **argv) +{ + /* set up label manager and release it's hooks */ + label_manager_init(); + lm_hooks_unregister(); + + /* test plugin */ + lp_plugin_init(); + test_lp_plugin(); + lp_plugin_cleanup(); + + /* this keeps the compiler happy */ + hook_call(zserv_client_close, NULL); + return 0; +} diff --git a/tests/zebra/test_lm_plugin.py b/tests/zebra/test_lm_plugin.py new file mode 100644 index 0000000000..bf4f3cef91 --- /dev/null +++ b/tests/zebra/test_lm_plugin.py @@ -0,0 +1,5 @@ +import frrtest + + +class TestLmplugin(frrtest.TestRefOut): + program = "./test_lm_plugin" diff --git a/tests/zebra/test_lm_plugin.refout b/tests/zebra/test_lm_plugin.refout new file mode 100644 index 0000000000..35824f1055 --- /dev/null +++ b/tests/zebra/test_lm_plugin.refout @@ -0,0 +1,2 @@ +chunk: start 16 end 65 proto 10 instance 55 session 0 keep yes +chunk: start 100 end 149 proto 10 instance 55 session 0 keep yes diff --git a/tools/.gitignore b/tools/.gitignore index 85dae7fd36..63a5b61c35 100644 --- a/tools/.gitignore +++ b/tools/.gitignore @@ -6,3 +6,5 @@ /watchfrr.sh /frrinit.sh /frrcommon.sh +/frr.service +/frr@.service diff --git a/tools/checkpatch.pl b/tools/checkpatch.pl index c0624d933e..cf15d00796 100755 --- a/tools/checkpatch.pl +++ b/tools/checkpatch.pl @@ -5300,8 +5300,8 @@ sub process { # uncoalesced string fragments if ($line =~ /$String\s*"/) { - WARN("STRING_FRAGMENTS", - "Consecutive strings are generally better as a single string\n" . $herecurr); + CHK("STRING_FRAGMENTS", + "Consecutive strings are generally better as a single string\n" . $herecurr); } # check for non-standard and hex prefixed decimal printf formats diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf index 11f88e7101..087c35981d 100644 --- a/tools/etc/frr/support_bundle_commands.conf +++ b/tools/etc/frr/support_bundle_commands.conf @@ -8,17 +8,17 @@ PROC_NAME:bgp CMD_LIST_START show bgp summary -show ip bgp -show ip bgp neighbors -show ip bgp summary -show ip bgp statistics +show bgp ipv4 uni +show bgp ipv4 neighbors +show bgp ipv4 summary +show bgp ipv4 statistics -show ip bgp update-groups advertise-queue -show ip bgp update-groups advertised-routes -show ip bgp update-groups packet-queue -show ip bgp update-groups statistics -show ip bgp peer-group -show ip bgp memory +show bgp ipv4 update-groups advertise-queue +show bgp ipv4 update-groups advertised-routes +show bgp ipv4 update-groups packet-queue +show bgp ipv4 update-groups statistics +show bgp peer-group +show bgp memory show bgp ipv6 show bgp ipv6 neighbors @@ -27,8 +27,9 @@ show bgp ipv6 update-groups advertise-queue show bgp ipv6 update-groups advertised-routes show bgp ipv6 update-groups packet-queue show bgp ipv6 update-groups statistics -show ip bgp statistics +show bgp ipv6 statistics show bgp martian next-hop +show bgp nexthop show bgp evpn route CMD_LIST_END @@ -38,13 +39,15 @@ PROC_NAME:zebra CMD_LIST_START show zebra show zebra client summary +show zebra router table summary show ip nht vrf all +show ipv6 nht vrf all +show nexthop-group rib show route-map show memory show interface vrf all show vrf show zebra fpm stats -show error all show work-queues show debugging hashtable show running-config diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 3121551ee5..dca877dbfe 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -221,6 +221,7 @@ ip forwarding for ligne in lines: self.dlines[ligne] = 
True + def get_normalized_es_id(line): """ The es-id or es-sys-mac need to be converted to lower case @@ -233,6 +234,7 @@ def get_normalized_es_id(line): break return line + def get_normalized_mac_ip_line(line): if line.startswith("evpn mh es"): return get_normalized_es_id(line) @@ -242,6 +244,7 @@ def get_normalized_mac_ip_line(line): return line + class Config(object): """ @@ -550,6 +553,7 @@ end "dump ", "enable ", "frr ", + "fpm ", "hostname ", "ip ", "ipv6 ", @@ -576,6 +580,18 @@ end if line.startswith("!") or line.startswith("#"): continue + if (len(ctx_keys) == 2 + and ctx_keys[0].startswith('bfd') + and ctx_keys[1].startswith('profile ') + and line == 'end'): + log.debug('LINE %-50s: popping from sub context, %-50s', line, ctx_keys) + + if main_ctx_key: + self.save_contexts(ctx_keys, current_context_lines) + ctx_keys = copy.deepcopy(main_ctx_key) + current_context_lines = [] + continue + # one line contexts # there is one exception though: ldpd accepts a 'router-id' clause # as part of its 'mpls ldp' config context. If we are processing @@ -626,6 +642,22 @@ end ctx_keys = [] current_context_lines = [] + elif ( + line == "exit" + and len(ctx_keys) > 1 + and ctx_keys[0].startswith("segment-routing") + ): + self.save_contexts(ctx_keys, current_context_lines) + + # Start a new context + ctx_keys = ctx_keys[:-1] + current_context_lines = [] + log.debug( + "LINE %-50s: popping segment routing sub-context to ctx%-50s", + line, + ctx_keys, + ) + elif line in ["exit-address-family", "exit", "exit-vnc"]: # if this exit is for address-family ipv4 unicast, ignore the pop if main_ctx_key: @@ -665,6 +697,7 @@ end current_context_lines = [] new_ctx = False log.debug("LINE %-50s: entering new context, %-50s", line, ctx_keys) + elif ( line.startswith("address-family ") or line.startswith("vnc defaults") @@ -727,6 +760,156 @@ end ) ctx_keys.append(line) + elif ( + line.startswith("traffic-eng") + and len(ctx_keys) == 1 + and ctx_keys[0].startswith("segment-routing") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + log.debug( + "LINE %-50s: entering segment routing sub-context, append to ctx_keys", + line, + ) + ctx_keys.append(line) + + elif ( + line.startswith("segment-list ") + and len(ctx_keys) == 2 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + log.debug( + "LINE %-50s: entering segment routing sub-context, append to ctx_keys", + line, + ) + ctx_keys.append(line) + + elif ( + line.startswith("policy ") + and len(ctx_keys) == 2 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + log.debug( + "LINE %-50s: entering segment routing sub-context, append to ctx_keys", + line, + ) + ctx_keys.append(line) + + elif ( + line.startswith("candidate-path ") + and line.endswith(" dynamic") + and len(ctx_keys) == 3 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + and ctx_keys[2].startswith("policy") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + main_ctx_key = copy.deepcopy(ctx_keys) + log.debug( + "LINE %-50s: entering candidate-path sub-context, append to ctx_keys", + line, + ) + ctx_keys.append(line) + + 
elif ( + line.startswith("pcep") + and len(ctx_keys) == 2 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + main_ctx_key = copy.deepcopy(ctx_keys) + log.debug( + "LINE %-50s: entering pcep sub-context, append to ctx_keys", line + ) + ctx_keys.append(line) + + elif ( + line.startswith("pce-config ") + and len(ctx_keys) == 3 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + and ctx_keys[2].startswith("pcep") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + main_ctx_key = copy.deepcopy(ctx_keys) + log.debug( + "LINE %-50s: entering pce-config sub-context, append to ctx_keys", + line, + ) + ctx_keys.append(line) + + elif ( + line.startswith("pce ") + and len(ctx_keys) == 3 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + and ctx_keys[2].startswith("pcep") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + main_ctx_key = copy.deepcopy(ctx_keys) + log.debug( + "LINE %-50s: entering pce sub-context, append to ctx_keys", line + ) + ctx_keys.append(line) + + elif ( + line.startswith("pcc") + and len(ctx_keys) == 3 + and ctx_keys[0].startswith("segment-routing") + and ctx_keys[1].startswith("traffic-eng") + and ctx_keys[2].startswith("pcep") + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + main_ctx_key = copy.deepcopy(ctx_keys) + log.debug( + "LINE %-50s: entering pcc sub-context, append to ctx_keys", line + ) + ctx_keys.append(line) + + elif ( + line.startswith('profile ') + and len(ctx_keys) == 1 + and ctx_keys[0].startswith('bfd') + ): + + # Save old context first + self.save_contexts(ctx_keys, current_context_lines) + current_context_lines = [] + main_ctx_key = copy.deepcopy(ctx_keys) + log.debug( + "LINE %-50s: entering BFD profile sub-context, append to ctx_keys", + line + ) + ctx_keys.append(line) + else: # Continuing in an existing context, add non-commented lines to it current_context_lines.append(line) @@ -1083,30 +1266,44 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): lines_to_add_to_del.append((ctx[0], None)) """ - ip/ipv6 prefix-list can be specified without a seq number. However, - the running config always adds 'seq x', where x is a number incremented - by 5 for every element, to the prefix list. So, ignore such lines as - well. Sample prefix-list lines: + ip/ipv6 prefix-lists and access-lists can be specified without a seq number. + However, the running config always adds 'seq x', where x is a number + incremented by 5 for every element of the prefix/access list. + So, ignore such lines as well. 
Sample prefix-list and acces-list lines: ip prefix-list PR-TABLE-2 seq 5 permit 20.8.2.0/24 le 32 ip prefix-list PR-TABLE-2 seq 10 permit 20.8.2.0/24 le 32 ipv6 prefix-list vrfdev6-12 permit 2000:9:2::/64 gt 64 + access-list FOO seq 5 permit 2.2.2.2/32 + ipv6 access-list BAR seq 5 permit 2:2:2::2/128 """ - re_ip_pfxlst = re.search( - "^(ip|ipv6)(\s+prefix-list\s+)(\S+\s+)(seq \d+\s+)(permit|deny)(.*)$", + re_acl_pfxlst = re.search( + "^(ip |ipv6 |)(prefix-list|access-list)(\s+\S+\s+)(seq \d+\s+)(permit|deny)(.*)$", ctx_keys[0], ) - if re_ip_pfxlst: + if re_acl_pfxlst: + found = False tmpline = ( - re_ip_pfxlst.group(1) - + re_ip_pfxlst.group(2) - + re_ip_pfxlst.group(3) - + re_ip_pfxlst.group(5) - + re_ip_pfxlst.group(6) + re_acl_pfxlst.group(1) + + re_acl_pfxlst.group(2) + + re_acl_pfxlst.group(3) + + re_acl_pfxlst.group(5) + + re_acl_pfxlst.group(6) ) for ctx in lines_to_add: if ctx[0][0] == tmpline: lines_to_del_to_del.append((ctx_keys, None)) lines_to_add_to_del.append(((tmpline,), None)) + found = True + """ + If prefix-lists or access-lists are being deleted and + not added (see comment above), add command with 'no' to + lines_to_add and remove from lines_to_del to improve + scaling performance. + """ + if found is False: + add_cmd = ("no " + ctx_keys[0],) + lines_to_add.append((add_cmd, None)) + lines_to_del_to_del.append((ctx_keys, None)) if ( len(ctx_keys) == 3 @@ -1244,6 +1441,11 @@ def compare_context_objects(newconf, running): # Compare the two Config objects to find the lines that we need to add/del lines_to_add = [] lines_to_del = [] + pollist_to_del = [] + seglist_to_del = [] + pceconf_to_del = [] + pcclist_to_del = [] + candidates_to_add = [] delete_bgpd = False # Find contexts that are in newconf but not in running @@ -1299,14 +1501,9 @@ def compare_context_objects(newconf, running): # doing vtysh -c inefficient (and can time out.) For # these commands, instead of adding them to lines_to_del, # add the "no " version to lines_to_add. 
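The new re_acl_pfxlst comparison above can be exercised in isolation. The snippet below reproduces the regular expression from the hunk and drops the auto-generated 'seq N' token so that a running-config entry compares equal to the user's unsequenced line; strip_seq is an illustrative helper name and is not part of frr-reload.py:

import re

RE_ACL_PFXLST = re.compile(
    r"^(ip |ipv6 |)(prefix-list|access-list)(\s+\S+\s+)(seq \d+\s+)(permit|deny)(.*)$"
)

def strip_seq(line):
    # Drop only the 'seq N ' token (group 4); keep family, list type, name and rule.
    m = RE_ACL_PFXLST.match(line)
    if not m:
        return line
    return m.group(1) + m.group(2) + m.group(3) + m.group(5) + m.group(6)

running = "ip prefix-list PR-TABLE-2 seq 5 permit 20.8.2.0/24 le 32"
intended = "ip prefix-list PR-TABLE-2 permit 20.8.2.0/24 le 32"
assert strip_seq(running) == intended   # same entry, so no delete/re-add churn

assert strip_seq("access-list FOO seq 5 permit 2.2.2.2/32") == "access-list FOO permit 2.2.2.2/32"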
- elif ( - running_ctx_keys[0].startswith("ip route") - or running_ctx_keys[0].startswith("ipv6 route") - or running_ctx_keys[0].startswith("access-list") - or running_ctx_keys[0].startswith("ipv6 access-list") - or running_ctx_keys[0].startswith("ip prefix-list") - or running_ctx_keys[0].startswith("ipv6 prefix-list") - ): + elif running_ctx_keys[0].startswith("ip route") or running_ctx_keys[ + 0 + ].startswith("ipv6 route"): add_cmd = ("no " + running_ctx_keys[0],) lines_to_add.append((add_cmd, None)) @@ -1321,11 +1518,64 @@ def compare_context_objects(newconf, running): continue # same thing for a pseudowire sub-context inside an l2vpn context - elif (len(running_ctx_keys) > 1 and running_ctx_keys[0].startswith('l2vpn') and - running_ctx_keys[1].startswith('member pseudowire') and - (running_ctx_keys[:1], None) in lines_to_del): + elif ( + len(running_ctx_keys) > 1 + and running_ctx_keys[0].startswith("l2vpn") + and running_ctx_keys[1].startswith("member pseudowire") + and (running_ctx_keys[:1], None) in lines_to_del + ): continue + # Segment routing and traffic engineering never need to be deleted + elif ( + running_ctx_keys[0].startswith("segment-routing") + and len(running_ctx_keys) < 3 + ): + continue + + # Neither the pcep command + elif ( + len(running_ctx_keys) == 3 + and running_ctx_keys[0].startswith("segment-routing") + and running_ctx_keys[2].startswith("pcep") + ): + continue + + # Segment lists can only be deleted after we removed all the candidate paths that + # use them, so add them to a separate array that is going to be appended at the end + elif ( + len(running_ctx_keys) == 3 + and running_ctx_keys[0].startswith("segment-routing") + and running_ctx_keys[2].startswith("segment-list") + ): + seglist_to_del.append((running_ctx_keys, None)) + + # Policies must be deleted after there candidate path, to be sure + # we add them to a separate array that is going to be appended at the end + elif ( + len(running_ctx_keys) == 3 + and running_ctx_keys[0].startswith("segment-routing") + and running_ctx_keys[2].startswith("policy") + ): + pollist_to_del.append((running_ctx_keys, None)) + + # pce-config must be deleted after the pce, to be sure we add them + # to a separate array that is going to be appended at the end + elif ( + len(running_ctx_keys) >= 4 + and running_ctx_keys[0].startswith("segment-routing") + and running_ctx_keys[3].startswith("pce-config") + ): + pceconf_to_del.append((running_ctx_keys, None)) + + # pcc must be deleted after the pce and pce-config too + elif ( + len(running_ctx_keys) >= 4 + and running_ctx_keys[0].startswith("segment-routing") + and running_ctx_keys[3].startswith("pcc") + ): + pcclist_to_del.append((running_ctx_keys, None)) + # Non-global context elif running_ctx_keys and not any( "address-family" in key for key in running_ctx_keys @@ -1340,6 +1590,22 @@ def compare_context_objects(newconf, running): for line in running_ctx.lines: lines_to_del.append((running_ctx_keys, line)) + # if we have some policies commands to delete, append them to lines_to_del + if len(pollist_to_del) > 0: + lines_to_del.extend(pollist_to_del) + + # if we have some segment list commands to delete, append them to lines_to_del + if len(seglist_to_del) > 0: + lines_to_del.extend(seglist_to_del) + + # if we have some pce list commands to delete, append them to lines_to_del + if len(pceconf_to_del) > 0: + lines_to_del.extend(pceconf_to_del) + + # if we have some pcc list commands to delete, append them to lines_to_del + if len(pcclist_to_del) > 0: + 
lines_to_del.extend(pcclist_to_del) + # Find the lines within each context to add # Find the lines within each context to del for (newconf_ctx_keys, newconf_ctx) in iteritems(newconf.contexts): @@ -1349,7 +1615,19 @@ def compare_context_objects(newconf, running): for line in newconf_ctx.lines: if line not in running_ctx.dlines: - lines_to_add.append((newconf_ctx_keys, line)) + + # candidate paths can only be added after the policy and segment list, + # so add them to a separate array that is going to be appended at the end + if ( + len(newconf_ctx_keys) == 3 + and newconf_ctx_keys[0].startswith("segment-routing") + and newconf_ctx_keys[2].startswith("policy ") + and line.startswith("candidate-path ") + ): + candidates_to_add.append((newconf_ctx_keys, line)) + + else: + lines_to_add.append((newconf_ctx_keys, line)) for line in running_ctx.lines: if line not in newconf_ctx.dlines: @@ -1358,10 +1636,27 @@ def compare_context_objects(newconf, running): for (newconf_ctx_keys, newconf_ctx) in iteritems(newconf.contexts): if newconf_ctx_keys not in running.contexts: - lines_to_add.append((newconf_ctx_keys, None)) - for line in newconf_ctx.lines: - lines_to_add.append((newconf_ctx_keys, line)) + # candidate paths can only be added after the policy and segment list, + # so add them to a separate array that is going to be appended at the end + if ( + len(newconf_ctx_keys) == 4 + and newconf_ctx_keys[0].startswith("segment-routing") + and newconf_ctx_keys[3].startswith("candidate-path") + ): + candidates_to_add.append((newconf_ctx_keys, None)) + for line in newconf_ctx.lines: + candidates_to_add.append((newconf_ctx_keys, line)) + + else: + lines_to_add.append((newconf_ctx_keys, None)) + + for line in newconf_ctx.lines: + lines_to_add.append((newconf_ctx_keys, line)) + + # if we have some candidate paths commands to add, append them to lines_to_add + if len(candidates_to_add) > 0: + lines_to_add.extend(candidates_to_add) (lines_to_add, lines_to_del) = check_for_exit_vrf(lines_to_add, lines_to_del) (lines_to_add, lines_to_del) = ignore_delete_re_add_lines( @@ -1523,10 +1818,12 @@ if __name__ == "__main__": "staticd", "vrrpd", "ldpd", + "pathd", + "bfdd", ]: - log.error( - "Daemon %s is not a valid option for 'show running-config'" % args.daemon - ) + msg = "Daemon %s is not a valid option for 'show running-config'" % args.daemon + print(msg) + log.error(msg) sys.exit(1) vtysh = Vtysh(args.bindir, args.confdir, args.vty_socket, args.pathspace) diff --git a/tools/frr.in b/tools/frr.in index b860797d5b..889c075f81 100755 --- a/tools/frr.in +++ b/tools/frr.in @@ -27,7 +27,7 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter # Local Daemon selection may be done by using /etc/frr/daemons. # See /usr/share/doc/frr/README.Debian.gz for further information. # Keep zebra first and do not list watchfrr! 
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd" +DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd" MAX_INSTANCES=5 RELOAD_SCRIPT="$D_PATH/frr-reload.py" diff --git a/tools/frr.service b/tools/frr.service.in index aa45f420fe..836ce06be7 100644 --- a/tools/frr.service +++ b/tools/frr.service.in @@ -17,9 +17,10 @@ WatchdogSec=60s RestartSec=5 Restart=on-abnormal LimitNOFILE=1024 -ExecStart=/usr/lib/frr/frrinit.sh start -ExecStop=/usr/lib/frr/frrinit.sh stop -ExecReload=/usr/lib/frr/frrinit.sh reload +PIDFile=@CFG_STATE@/watchfrr.pid +ExecStart=@CFG_SBIN@/frrinit.sh start +ExecStop=@CFG_SBIN@/frrinit.sh stop +ExecReload=@CFG_SBIN@/frrinit.sh reload [Install] WantedBy=multi-user.target diff --git a/tools/frr@.service b/tools/frr@.service.in index 0fa41c74a3..1e5d252325 100644 --- a/tools/frr@.service +++ b/tools/frr@.service.in @@ -17,9 +17,10 @@ WatchdogSec=60s RestartSec=5 Restart=on-abnormal LimitNOFILE=1024 -ExecStart=/usr/lib/frr/frrinit.sh start %I -ExecStop=/usr/lib/frr/frrinit.sh stop %I -ExecReload=/usr/lib/frr/frrinit.sh reload %I +PIDFile=@CFG_STATE@/%I/watchfrr.pid +ExecStart=@CFG_SBIN@/frrinit.sh start %I +ExecStop=@CFG_SBIN@/frrinit.sh stop %I +ExecReload=@CFG_SBIN@/frrinit.sh reload %I [Install] WantedBy=multi-user.target diff --git a/vrrpd/vrrp.c b/vrrpd/vrrp.c index 7728717e99..f4f489c3dd 100644 --- a/vrrpd/vrrp.c +++ b/vrrpd/vrrp.c @@ -854,7 +854,7 @@ static int vrrp_recv_advertisement(struct vrrp_router *r, struct ipaddr *src, vrrp_pkt_adver_dump(dumpbuf, sizeof(dumpbuf), pkt); DEBUGD(&vrrp_dbg_proto, VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM - "Received VRRP Advertisement from %s:\n%s", + "Received VRRP Advertisement from %s: %s", r->vr->vrid, family2str(r->family), sipstr, dumpbuf); /* Check that VRID matches our configured VRID */ diff --git a/vrrpd/vrrp_arp.c b/vrrpd/vrrp_arp.c index 750050e8c3..749b59cc43 100644 --- a/vrrpd/vrrp_arp.c +++ b/vrrpd/vrrp_arp.c @@ -170,7 +170,7 @@ void vrrp_garp_send_all(struct vrrp_router *r) if (ifp->flags & IFF_NOARP) { zlog_warn( VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM - "Unable to send gratuitous ARP on %s; has IFF_NOARP\n", + "Unable to send gratuitous ARP on %s; has IFF_NOARP", r->vr->vrid, family2str(r->family), ifp->name); return; } diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c index c3fb663f55..e026a28628 100644 --- a/vtysh/vtysh.c +++ b/vtysh/vtysh.c @@ -139,6 +139,7 @@ struct vtysh_client vtysh_client[] = { {.fd = -1, .name = "staticd", .flag = VTYSH_STATICD, .next = NULL}, {.fd = -1, .name = "bfdd", .flag = VTYSH_BFDD, .next = NULL}, {.fd = -1, .name = "vrrpd", .flag = VTYSH_VRRPD, .next = NULL}, + {.fd = -1, .name = "pathd", .flag = VTYSH_PATHD, .next = NULL}, }; /* Searches for client by name, returns index */ @@ -538,6 +539,15 @@ static int vtysh_execute_func(const char *line, int pager) || saved_node == LDP_IPV6_IFACE_NODE) && (tried == 1)) { vtysh_execute("exit"); + } else if ((saved_node == SR_SEGMENT_LIST_NODE + || saved_node == SR_POLICY_NODE + || saved_node == SR_CANDIDATE_DYN_NODE + || saved_node == PCEP_NODE + || saved_node == PCEP_PCE_CONFIG_NODE + || saved_node == PCEP_PCE_NODE + || saved_node == PCEP_PCC_NODE) + && (tried > 0)) { + vtysh_execute("exit"); } else if (tried) { vtysh_execute("end"); vtysh_execute("configure"); @@ -689,6 +699,7 @@ int vtysh_mark_file(const char *filename) int ret; vector vline; int tried = 0; + bool ending; const struct cmd_element 
*cmd; int saved_ret, prev_node; int lineno = 0; @@ -740,6 +751,12 @@ int vtysh_mark_file(const char *filename) vty->node = LDP_L2VPN_NODE; } break; + case SR_CANDIDATE_DYN_NODE: + if (strncmp(vty_buf_copy, " ", 2)) { + vty_out(vty, " exit\n"); + vty->node = SR_POLICY_NODE; + } + break; default: break; } @@ -812,6 +829,31 @@ int vtysh_mark_file(const char *filename) } else if ((prev_node == BFD_PEER_NODE) && (tried == 1)) { vty_out(vty, "exit\n"); + } else if (((prev_node == SEGMENT_ROUTING_NODE) + || (prev_node == SR_TRAFFIC_ENG_NODE) + || (prev_node == SR_SEGMENT_LIST_NODE) + || (prev_node == SR_POLICY_NODE) + || (prev_node == SR_CANDIDATE_DYN_NODE) + || (prev_node == PCEP_NODE) + || (prev_node == PCEP_PCE_CONFIG_NODE) + || (prev_node == PCEP_PCE_NODE) + || (prev_node == PCEP_PCC_NODE)) + && (tried > 0)) { + ending = (vty->node != SEGMENT_ROUTING_NODE) + && (vty->node != SR_TRAFFIC_ENG_NODE) + && (vty->node != SR_SEGMENT_LIST_NODE) + && (vty->node != SR_POLICY_NODE) + && (vty->node != SR_CANDIDATE_DYN_NODE) + && (vty->node != PCEP_NODE) + && (vty->node != PCEP_PCE_CONFIG_NODE) + && (vty->node != PCEP_PCE_NODE) + && (vty->node != PCEP_PCC_NODE); + if (ending) + tried--; + while (tried-- > 0) + vty_out(vty, "exit\n"); + if (ending) + vty_out(vty, "end\n"); } else if (tried) { vty_out(vty, "end\n"); } @@ -1219,6 +1261,73 @@ static struct cmd_node pw_node = { .prompt = "%s(config-pw)# ", }; +#if defined(HAVE_PATHD) +static struct cmd_node segment_routing_node = { + .name = "segment-routing", + .node = SEGMENT_ROUTING_NODE, + .parent_node = CONFIG_NODE, + .prompt = "%s(config-sr)# ", +}; + +static struct cmd_node sr_traffic_eng_node = { + .name = "sr traffic-eng", + .node = SR_TRAFFIC_ENG_NODE, + .parent_node = SEGMENT_ROUTING_NODE, + .prompt = "%s(config-sr-te)# ", +}; + +static struct cmd_node srte_segment_list_node = { + .name = "srte segment-list", + .node = SR_SEGMENT_LIST_NODE, + .parent_node = SR_TRAFFIC_ENG_NODE, + .prompt = "%s(config-sr-te-segment-list)# ", +}; + +static struct cmd_node srte_policy_node = { + .name = "srte policy", + .node = SR_POLICY_NODE, + .parent_node = SR_TRAFFIC_ENG_NODE, + .prompt = "%s(config-sr-te-policy)# ", +}; + +static struct cmd_node srte_candidate_dyn_node = { + .name = "srte candidate-dyn", + .node = SR_CANDIDATE_DYN_NODE, + .parent_node = SR_POLICY_NODE, + .prompt = "%s(config-sr-te-candidate)# ", +}; + +#if defined(HAVE_PATHD_PCEP) +static struct cmd_node pcep_node = { + .name = "srte pcep", + .node = PCEP_NODE, + .parent_node = SR_TRAFFIC_ENG_NODE, + .prompt = "%s(config-sr-te-pcep)# " +}; + +static struct cmd_node pcep_pcc_node = { + .name = "srte pcep pcc", + .node = PCEP_PCC_NODE, + .parent_node = PCEP_NODE, + .prompt = "%s(config-sr-te-pcep-pcc)# ", +}; + +static struct cmd_node pcep_pce_node = { + .name = "srte pcep pce-peer", + .node = PCEP_PCE_NODE, + .parent_node = PCEP_NODE, + .prompt = "%s(config-sr-te-pcep-pce-peer)# ", +}; + +static struct cmd_node pcep_pce_config_node = { + .name = "srte pcep pce-config", + .node = PCEP_PCE_CONFIG_NODE, + .parent_node = PCEP_NODE, + .prompt = "%s(pcep-sr-te-pcep-pce-config)# ", +}; +#endif /* HAVE_PATHD_PCEP */ +#endif /* HAVE_PATHD */ + static struct cmd_node vrf_node = { .name = "vrf", .node = VRF_NODE, @@ -1974,6 +2083,102 @@ DEFUNSH(VTYSH_FABRICD, router_openfabric, router_openfabric_cmd, "router openfab } #endif /* HAVE_FABRICD */ +#if defined(HAVE_PATHD) +DEFUNSH(VTYSH_PATHD, segment_routing, segment_routing_cmd, + "segment-routing", + "Configure segment routing\n") +{ + vty->node = 
SEGMENT_ROUTING_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, sr_traffic_eng, sr_traffic_eng_cmd, + "traffic-eng", + "Configure SR traffic engineering\n") +{ + vty->node = SR_TRAFFIC_ENG_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, srte_segment_list, srte_segment_list_cmd, + "segment-list WORD$name", + "Segment List\n" + "Segment List Name\n") +{ + vty->node = SR_SEGMENT_LIST_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, srte_policy, srte_policy_cmd, + "policy color (0-4294967295) endpoint <A.B.C.D|X:X::X:X>", + "Segment Routing Policy\n" + "SR Policy color\n" + "SR Policy color value\n" + "SR Policy endpoint\n" + "SR Policy endpoint IPv4 address\n" + "SR Policy endpoint IPv6 address\n") +{ + vty->node = SR_POLICY_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, srte_policy_candidate_dyn_path, + srte_policy_candidate_dyn_path_cmd, + "candidate-path preference (0-4294967295) name WORD dynamic", + "Segment Routing Policy Candidate Path\n" + "Segment Routing Policy Candidate Path Preference\n" + "Administrative Preference\n" + "Segment Routing Policy Candidate Path Name\n" + "Symbolic Name\n" + "Dynamic Path\n") +{ + vty->node = SR_CANDIDATE_DYN_NODE; + return CMD_SUCCESS; +} + +#if defined(HAVE_PATHD_PCEP) + +DEFUNSH(VTYSH_PATHD, pcep, pcep_cmd, + "pcep", + "Configure SR pcep\n") +{ + vty->node = PCEP_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, pcep_cli_pcc, pcep_cli_pcc_cmd, + "[no] pcc", + NO_STR + "PCC configuration\n") +{ + vty->node = PCEP_PCC_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, pcep_cli_pce, pcep_cli_pce_cmd, + "[no] pce WORD", + NO_STR + "PCE configuration\n" + "Peer name\n") +{ + vty->node = PCEP_PCE_NODE; + return CMD_SUCCESS; +} + +DEFUNSH(VTYSH_PATHD, pcep_cli_pcep_pce_config, pcep_cli_pcep_pce_config_cmd, + "[no] pce-config WORD", + NO_STR + "PCEP peer Configuration Group\n" + "PCEP peer Configuration Group name\n") +{ + vty->node = PCEP_PCE_CONFIG_NODE; + return CMD_SUCCESS; +} + +#endif /* HAVE_PATHD_PCEP */ + +#endif /* HAVE_PATHD */ + DEFUNSH(VTYSH_RMAP, vtysh_route_map, vtysh_route_map_cmd, "route-map WORD <deny|permit> (1-65535)", "Create route-map or enter route-map command mode\n" @@ -2347,6 +2552,20 @@ DEFUNSH(VTYSH_KEYS, vtysh_quit_keys, vtysh_quit_keys_cmd, "quit", return vtysh_exit_keys(self, vty, argc, argv); } +#if defined(HAVE_PATHD) +DEFUNSH(VTYSH_PATHD, vtysh_exit_pathd, vtysh_exit_pathd_cmd, "exit", + "Exit current mode and down to previous mode\n") +{ + return vtysh_exit(vty); +} + +DEFUNSH(VTYSH_PATHD, vtysh_quit_pathd, vtysh_quit_pathd_cmd, "quit", + "Exit current mode and down to previous mode\n") +{ + return vtysh_exit_pathd(self, vty, argc, argv); +} +#endif /* HAVE_PATHD */ + DEFUNSH(VTYSH_ALL, vtysh_exit_line_vty, vtysh_exit_line_vty_cmd, "exit", "Exit current mode and down to previous mode\n") { @@ -3578,11 +3797,11 @@ DEFUN_HIDDEN(show_cli_graph_vtysh, static void vtysh_install_default(enum node_type node) { - install_element(node, &config_list_cmd); - install_element(node, &find_cmd); - install_element(node, &show_cli_graph_vtysh_cmd); - install_element(node, &vtysh_output_file_cmd); - install_element(node, &no_vtysh_output_file_cmd); + _install_element(node, &config_list_cmd); + _install_element(node, &find_cmd); + _install_element(node, &show_cli_graph_vtysh_cmd); + _install_element(node, &vtysh_output_file_cmd); + _install_element(node, &no_vtysh_output_file_cmd); } /* Making connection to protocol daemon. 
*/ @@ -3782,10 +4001,16 @@ static char *vtysh_completion_entry_function(const char *ignore, void vtysh_readline_init(void) { /* readline related settings. */ + char *disable_bracketed_paste = + XSTRDUP(MTYPE_TMP, "set enable-bracketed-paste off"); + rl_initialize(); + rl_parse_and_bind(disable_bracketed_paste); rl_bind_key('?', (rl_command_func_t *)vtysh_rl_describe); rl_completion_entry_function = vtysh_completion_entry_function; rl_attempted_completion_function = new_completion; + + XFREE(MTYPE_TMP, disable_bracketed_paste); } char *vtysh_prompt(void) @@ -4144,6 +4369,64 @@ void vtysh_init_vty(void) install_element(BFD_PROFILE_NODE, &vtysh_end_all_cmd); #endif /* HAVE_BFDD */ +#if defined(HAVE_PATHD) + install_node(&segment_routing_node); + install_node(&sr_traffic_eng_node); + install_node(&srte_segment_list_node); + install_node(&srte_policy_node); + install_node(&srte_candidate_dyn_node); + + install_element(SEGMENT_ROUTING_NODE, &vtysh_exit_pathd_cmd); + install_element(SEGMENT_ROUTING_NODE, &vtysh_quit_pathd_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &vtysh_exit_pathd_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &vtysh_quit_pathd_cmd); + install_element(SR_SEGMENT_LIST_NODE, &vtysh_exit_pathd_cmd); + install_element(SR_SEGMENT_LIST_NODE, &vtysh_quit_pathd_cmd); + install_element(SR_POLICY_NODE, &vtysh_exit_pathd_cmd); + install_element(SR_POLICY_NODE, &vtysh_quit_pathd_cmd); + install_element(SR_CANDIDATE_DYN_NODE, &vtysh_exit_pathd_cmd); + install_element(SR_CANDIDATE_DYN_NODE, &vtysh_quit_pathd_cmd); + + install_element(SEGMENT_ROUTING_NODE, &vtysh_end_all_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &vtysh_end_all_cmd); + install_element(SR_SEGMENT_LIST_NODE, &vtysh_end_all_cmd); + install_element(SR_POLICY_NODE, &vtysh_end_all_cmd); + install_element(SR_CANDIDATE_DYN_NODE, &vtysh_end_all_cmd); + + install_element(CONFIG_NODE, &segment_routing_cmd); + install_element(SEGMENT_ROUTING_NODE, &sr_traffic_eng_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &srte_segment_list_cmd); + install_element(SR_TRAFFIC_ENG_NODE, &srte_policy_cmd); + install_element(SR_POLICY_NODE, &srte_policy_candidate_dyn_path_cmd); + +#if defined(HAVE_PATHD_PCEP) + install_node(&pcep_node); + install_node(&pcep_pcc_node); + install_node(&pcep_pce_node); + install_node(&pcep_pce_config_node); + + install_element(PCEP_NODE, &vtysh_exit_pathd_cmd); + install_element(PCEP_NODE, &vtysh_quit_pathd_cmd); + install_element(PCEP_PCC_NODE, &vtysh_exit_pathd_cmd); + install_element(PCEP_PCC_NODE, &vtysh_quit_pathd_cmd); + install_element(PCEP_PCE_NODE, &vtysh_exit_pathd_cmd); + install_element(PCEP_PCE_NODE, &vtysh_quit_pathd_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &vtysh_exit_pathd_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &vtysh_quit_pathd_cmd); + + install_element(PCEP_NODE, &vtysh_end_all_cmd); + install_element(PCEP_PCC_NODE, &vtysh_end_all_cmd); + install_element(PCEP_PCE_NODE, &vtysh_end_all_cmd); + install_element(PCEP_PCE_CONFIG_NODE, &vtysh_end_all_cmd); + + install_element(SR_TRAFFIC_ENG_NODE, &pcep_cmd); + install_element(PCEP_NODE, &pcep_cli_pcc_cmd); + install_element(PCEP_NODE, &pcep_cli_pcep_pce_config_cmd); + install_element(PCEP_NODE, &pcep_cli_pce_cmd); +#endif /* HAVE_PATHD_PCEP */ + +#endif /* HAVE_PATHD */ + /* keychain */ install_node(&keychain_node); install_element(CONFIG_NODE, &key_chain_cmd); diff --git a/vtysh/vtysh.h b/vtysh/vtysh.h index b2e37074f7..20e1e1b0e9 100644 --- a/vtysh/vtysh.h +++ b/vtysh/vtysh.h @@ -43,6 +43,7 @@ DECLARE_MGROUP(MVTYSH) #define VTYSH_BFDD 0x10000 #define 
VTYSH_FABRICD 0x20000 #define VTYSH_VRRPD 0x40000 +#define VTYSH_PATHD 0x80000 #define VTYSH_WAS_ACTIVE (-2) @@ -51,7 +52,7 @@ DECLARE_MGROUP(MVTYSH) /* watchfrr is not in ALL since library CLI functions should not be * run on it (logging & co. should stay in a fixed/frozen config, and * things like prefix lists are not even initialised) */ -#define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD +#define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD|VTYSH_PATHD #define VTYSH_ACL VTYSH_BFDD|VTYSH_BABELD|VTYSH_BGPD|VTYSH_EIGRPD|VTYSH_ISISD|VTYSH_FABRICD|VTYSH_LDPD|VTYSH_NHRPD|VTYSH_OSPF6D|VTYSH_OSPFD|VTYSH_PBRD|VTYSH_PIMD|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_VRRPD|VTYSH_ZEBRA #define VTYSH_RMAP VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_EIGRPD|VTYSH_FABRICD #define VTYSH_INTERFACE VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_ISISD|VTYSH_PIMD|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_PBRD|VTYSH_FABRICD|VTYSH_VRRPD diff --git a/watchfrr/watchfrr.c b/watchfrr/watchfrr.c index 319bd6a771..441320b193 100644 --- a/watchfrr/watchfrr.c +++ b/watchfrr/watchfrr.c @@ -1080,6 +1080,9 @@ static int valid_command(const char *cmd) { char *p; + if (cmd == NULL) + return 0; + return ((p = strchr(cmd, '%')) != NULL) && (*(p + 1) == 's') && !strchr(p + 1, '%'); } @@ -1414,7 +1417,7 @@ int main(int argc, char **argv) } break; case OPTION_NETNS: netns_en = true; - if (strchr(optarg, '/')) { + if (optarg && strchr(optarg, '/')) { fprintf(stderr, "invalid network namespace name \"%s\" (may not contain slashes)\n", optarg); diff --git a/yang/frr-bgp-common.yang b/yang/frr-bgp-common.yang index cb1a6a8f56..1840e3728c 100644 --- a/yang/frr-bgp-common.yang +++ b/yang/frr-bgp-common.yang @@ -358,6 +358,13 @@ submodule frr-bgp-common { "Apply administrative shutdown to newly configured peers."; } + leaf suppress-duplicates { + type boolean; + default "true"; + description + "Suppress duplicate updates if the route actually not changed."; + } + leaf ebgp-requires-policy { type boolean; default "true"; diff --git a/yang/frr-bgp.yang b/yang/frr-bgp.yang index 2fb5d13fa7..24998a470d 100644 --- a/yang/frr-bgp.yang +++ b/yang/frr-bgp.yang @@ -598,6 +598,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-multicast" { @@ -626,6 +628,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-multicast" { @@ -654,7 +658,9 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; - } + + uses structure-neighbor-group-filter-config; + } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-labeled-unicast" { uses 
structure-neighbor-group-add-paths; @@ -682,6 +688,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-labeled-unicast" { @@ -710,6 +718,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast" { @@ -734,6 +744,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv6-unicast" { @@ -758,6 +770,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l2vpn-evpn" { @@ -772,6 +786,8 @@ module frr-bgp { uses structure-neighbor-route-server; uses structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-flowspec" { @@ -780,6 +796,8 @@ module frr-bgp { uses structure-neighbor-route-server; uses structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv6-flowspec" { @@ -788,6 +806,8 @@ module frr-bgp { uses structure-neighbor-route-server; uses structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-unicast" { @@ -855,6 +875,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-multicast" { @@ -883,6 +905,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-multicast" { @@ -911,6 +935,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv4-labeled-unicast" { @@ -939,6 +965,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment 
"/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-labeled-unicast" { @@ -1037,6 +1065,8 @@ module frr-bgp { uses structure-neighbor-route-server; uses structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/neighbors/unnumbered-neighbor/afi-safis/afi-safi/ipv6-flowspec" { @@ -1045,6 +1075,8 @@ module frr-bgp { uses structure-neighbor-route-server; uses structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-unicast" { @@ -1112,6 +1144,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-multicast" { @@ -1140,6 +1174,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-multicast" { @@ -1168,6 +1204,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv4-labeled-unicast" { @@ -1196,6 +1234,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-labeled-unicast" { @@ -1224,6 +1264,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv4-unicast" { @@ -1248,6 +1290,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l3vpn-ipv6-unicast" { @@ -1272,6 +1316,8 @@ module frr-bgp { uses structure-neighbor-group-soft-reconfiguration; uses structure-neighbor-weight; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn" { @@ -1294,6 +1340,8 @@ module frr-bgp { uses structure-neighbor-route-server; uses structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } augment "/frr-rt:routing/frr-rt:control-plane-protocols/frr-rt:control-plane-protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/ipv6-flowspec" { @@ -1302,5 +1350,7 @@ module frr-bgp { uses structure-neighbor-route-server; uses 
structure-neighbor-group-soft-reconfiguration; + + uses structure-neighbor-group-filter-config; } } diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang index d751a19f07..8757ab6b8b 100644 --- a/yang/frr-isisd.yang +++ b/yang/frr-isisd.yang @@ -248,6 +248,10 @@ module frr-isisd { type string; } + typedef prefix-list-ref { + type string; + } + grouping redistribute-attributes { description "Common optional attributes of any redistribute entry."; @@ -410,6 +414,19 @@ module frr-isisd { } } + grouping global-config-remote-lfa { + container remote-lfa { + description + "Remote LFA configuration."; + + leaf prefix-list { + type prefix-list-ref; + description + "Filter PQ node router ID based on prefix list."; + } + } + } + grouping interface-config-lfa { container lfa { description @@ -428,6 +445,32 @@ module frr-isisd { } } + grouping interface-config-remote-lfa { + container remote-lfa { + description + "Remote LFA configuration."; + + leaf enable { + type boolean; + default false; + description + "Enables remote LFA computation using LDP tunnels."; + must ". = 'false' or ../../lfa/enable = 'true'" { + error-message + "Remote LFA depends on classic LFA being configured in the interface."; + } + + } + leaf maximum-metric { + type uint32 { + range "1..16777215"; + } + description + "Limit remote LFA node selection within the metric."; + } + } + } + grouping interface-config-ti-lfa { container ti-lfa { description @@ -761,6 +804,7 @@ module frr-isisd { "Can't enable both classic LFA and TI-LFA in the same interface."; } uses interface-config-lfa; + uses interface-config-remote-lfa; uses interface-config-ti-lfa; } container level-2 { @@ -771,6 +815,7 @@ module frr-isisd { "Can't enable both classic LFA and TI-LFA in the same interface."; } uses interface-config-lfa; + uses interface-config-remote-lfa; uses interface-config-ti-lfa; } } @@ -998,9 +1043,24 @@ module frr-isisd { "Dynamic hostname support for IS-IS."; } + leaf attach-send { + type boolean; + default "true"; + description + "If true, attached bits are sent in LSP if L1/L2 router for inter-area traffic."; + } + + leaf attach-receive-ignore { + type boolean; + default "false"; + description + "If false, attached bits received in LSP, cause default route add, if L1 router for inter-area traffic."; + } + leaf attached { type boolean; default "false"; + status deprecated; description "If true, identify as L1/L2 router for inter-area traffic."; } @@ -1394,11 +1454,13 @@ module frr-isisd { description "Level-1 IP Fast-reroute configuration."; uses global-config-lfa; + uses global-config-remote-lfa; } container level-2 { description "Level-2 IP Fast-reroute configuration."; uses global-config-lfa; + uses global-config-remote-lfa; } } diff --git a/yang/frr-nexthop.yang b/yang/frr-nexthop.yang index 619514de7d..2df2e2958e 100644 --- a/yang/frr-nexthop.yang +++ b/yang/frr-nexthop.yang @@ -61,7 +61,7 @@ module frr-nexthop { type union { type inet:ip-address; type string { - pattern ''; + pattern ""; } } } @@ -160,6 +160,7 @@ module frr-nexthop { description "The nexthop vrf name, if different from the route."; } + leaf gateway { type frr-nexthop:optional-ip-address; description @@ -173,15 +174,12 @@ module frr-nexthop { } leaf bh-type { - when "../nh-type = 'blackhole'"; type blackhole-type; description "A blackhole sub-type, if the nexthop is a blackhole type."; } leaf onlink { - when "../nh-type = 'ip4-ifindex' or - ../nh-type = 'ip6-ifindex'"; type boolean; default "false"; description diff --git a/yang/frr-pathd.yang b/yang/frr-pathd.yang 
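The "must" expression on the new remote-lfa enable leaf in frr-isisd encodes a dependency rather than a value check: remote LFA may only be enabled on an interface level that already has classic LFA enabled. The same rule in plain C, as a validation sketch (function and parameter names are illustrative):

    #include <stdbool.h>

    /* remote-lfa enable is valid only when classic LFA is enabled too,
     * i.e. "remote implies classic". */
    static bool remote_lfa_config_valid(bool lfa_enable, bool remote_lfa_enable)
    {
        return !remote_lfa_enable || lfa_enable;
    }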
new file mode 100644 index 0000000000..03f0d3b024 --- /dev/null +++ b/yang/frr-pathd.yang @@ -0,0 +1,480 @@ +module frr-pathd { + yang-version 1.1; + namespace "http://frrouting.org/yang/pathd"; + prefix frr-pathd; + + import ietf-inet-types { + prefix inet; + } + import ietf-yang-types { + prefix yang; + } + import ietf-routing-types { + prefix rt-types; + } + import frr-interface { + prefix frr-interface; + } + + organization + "Free Range Routing"; + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + description + "This module defines a model for managing FRR pathd daemon."; + + revision 2018-11-06 { + description + "Initial revision."; + } + + typedef protocol-origin-type { + description + "Indication for the protocol origin of an object."; + type enumeration { + enum pcep { + value 1; + description "The object was created through PCEP"; + } + enum bgp { + value 2; + description "The object was created through GBP"; + } + enum local { + value 3; + description "The object was created through CLI, Yang model via Netconf, gRPC, etc"; + } + } + } + + typedef originator-type { + type string { + length "1..64"; + } + description + "Identifier of the originator of an object, could be 'config', '1.1.1.1:4189' or '2001:db8:85a3::8a2e:370:7334:4189'"; + } + + container pathd { + container srte { + list segment-list { + key "name"; + description "Segment-list properties"; + leaf name { + type string { + length "1..64"; + } + description "Segment-list name"; + } + leaf protocol-origin { + type protocol-origin-type; + mandatory true; + description + "Indication for the protocol origin of the segment list."; + } + leaf originator { + type originator-type; + mandatory true; + description "Originator of the segment list"; + } + list segment { + key "index"; + description "Configure Segment/hop at the index"; + leaf index { + type uint32; + description "Segment index"; + } + leaf sid-value { + type rt-types:mpls-label; + mandatory true; + description "MPLS label value"; + } + container nai { + presence "The segement has a Node or Adjacency Identifier"; + leaf type { + description "NAI type"; + mandatory true; + type enumeration { + enum ipv4_node { + value 1; + description "IPv4 node identifier"; + } + enum ipv6_node { + value 2; + description "IPv6 node identifier"; + } + enum ipv4_adjacency { + value 3; + description "IPv4 adjacency"; + } + enum ipv6_adjacency { + value 4; + description "IPv6 adjacency"; + } + enum ipv4_unnumbered_adjacency { + value 5; + description "IPv4 unnumbered adjacency"; + } + } + } + leaf local-address { + type inet:ip-address; + mandatory true; + } + leaf local-interface { + type uint32; + mandatory true; + when "../type = 'ipv4_unnumbered_adjacency'"; + } + leaf remote-address { + type inet:ip-address; + mandatory true; + when "../type = 'ipv4_adjacency' or ../type = 'ipv6_adjacency' or ../type = 'ipv4_unnumbered_adjacency'"; + } + leaf remote-interface { + type uint32; + mandatory true; + when "../type = 'ipv4_unnumbered_adjacency'"; + } + } + } + } + list policy { + key "color endpoint"; + unique "name"; + leaf color { + type uint32; + description + "Color of the SR Policy."; + } + leaf endpoint { + type inet:ip-address; + description + "Indication for the endpoint of the SR Policy."; + } + leaf name { + type string { + length "1..64"; + } + description + "Name of the SR Policy."; + } + leaf binding-sid { + type rt-types:mpls-label; + description + "BSID of the SR Policy."; + } + leaf is-operational { 
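Looking back at the segment and NAI portion of the new frr-pathd model: the "when" statements mean the remote address only applies to adjacency-type NAIs, and the interface indexes only to the unnumbered-adjacency type. A rough C mirror of that layout, purely to illustrate which fields go together (these are not pathd's actual structures; addresses are kept in an in6_addr to cover both families):

    #include <stdbool.h>
    #include <stdint.h>
    #include <netinet/in.h>

    enum nai_type {
        NAI_IPV4_NODE = 1,
        NAI_IPV6_NODE = 2,
        NAI_IPV4_ADJACENCY = 3,
        NAI_IPV6_ADJACENCY = 4,
        NAI_IPV4_UNNUMBERED_ADJACENCY = 5,
    };

    struct segment_view {
        uint32_t index;              /* list key */
        uint32_t sid_value;          /* MPLS label */
        bool has_nai;                /* the nai presence container */
        enum nai_type nai_type;
        struct in6_addr local_addr;  /* always present when NAI is set */
        struct in6_addr remote_addr; /* adjacency types only */
        uint32_t local_iface;        /* unnumbered adjacency only */
        uint32_t remote_iface;       /* unnumbered adjacency only */
    };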
+ type boolean; + config false; + description + "True if a valid candidate path of this policy is operational in zebra, False otherwise"; + } + list candidate-path { + unique "name"; + description + "List of Candidate Paths of the SR Policy."; + key "preference"; + leaf preference { + type uint32; + description + "Administrative preference."; + } + leaf name { + type string { + length "1..64"; + } + mandatory true; + description + "Symbolic Name of the Candidate Path."; + } + leaf is-best-candidate-path { + type boolean; + config false; + description + "True if the candidate path is the best candidate path, False otherwise"; + } + leaf protocol-origin { + type protocol-origin-type; + mandatory true; + description + "Indication for the protocol origin of the Candidate Path."; + } + leaf originator { + type originator-type; + mandatory true; + description "Originator of the candidate path"; + } + leaf discriminator { + type uint32; + config false; + description "Candidate path distinguisher"; + } + leaf type { + description + "Type of the Candidate Path."; + mandatory true; + type enumeration { + enum explicit { + value 1; + } + enum dynamic { + value 2; + } + } + } + leaf segment-list-name { + type leafref { + path ../../../segment-list/name; + } + description + "The name of the Segment List to use as LSP."; + } + container constraints { + when "../type = 'dynamic'"; + description + "Generic dynamic path constraints"; + container bandwidth { + presence "If the candidate has a bandwidth constraint"; + description + "The bandwidth needed by the candidate path."; + leaf required { + type boolean; + default "true"; + description + "If the bandwidth limitation is a requirement or only a suggestion"; + } + leaf value { + mandatory true; + type decimal64 { + fraction-digits 6; + } + } + } + container affinity { + description + "Affinity let you configure how the links should be used when calculating a path."; + leaf exclude-any { + type uint32; + description + "A 32-bit vector representing a set of attribute filters which renders a link unacceptable."; + } + leaf include-any { + type uint32; + description + "A 32-bit vector representing a set of attribute filters which renders a link acceptable. A null set (all bits set to zero) automatically passes."; + } + leaf include-all { + type uint32; + description + "A 32-bit vector representing a set of attribute filters which must be present for a link to be acceptable. 
A null set (all bits set to zero) automatically passes."; + } + } + list metrics { + key "type"; + leaf type { + description + "Type of the metric."; + type enumeration { + enum igp { + value 1; + description "IGP metric"; + } + enum te { + value 2; + description "TE metric"; + } + enum hc { + value 3; + description "Hop Counts"; + } + enum abc { + value 4; + description "Aggregate bandwidth consumption"; + } + enum lmll { + value 5; + description "Load of the most loaded link"; + } + enum cigp { + value 6; + description "Cumulative IGP cost"; + } + enum cte { + value 7; + description "Cumulative TE cost"; + } + enum pigp { + value 8; + description "P2MP IGP metric"; + } + enum pte { + value 9; + description "P2MP TE metric"; + } + enum phc { + value 10; + description "P2MP hop count metric"; + } + enum msd { + value 11; + description "Segment-ID (SID) Depth"; + } + enum pd { + value 12; + description "Path Delay metric"; + } + enum pdv { + value 13; + description "Path Delay Variation metric"; + } + enum pl { + value 14; + description "Path Loss metric"; + } + enum ppd { + value 15; + description "P2MP Path Delay metric"; + } + enum ppdv { + value 16; + description "P2MP Path Delay variation metric"; + } + enum ppl { + value 17; + description "P2MP Path Loss metric"; + } + enum nap { + value 18; + description "Number of adaptations on a path"; + } + enum nlp { + value 19; + description "Number of layers on a path"; + } + enum dc { + value 20; + description "Domain Count metric"; + } + enum bnc { + value 21; + description "Border Node Count metric"; + } + } + } + leaf required { + type boolean; + default "true"; + description + "If the metric is a requirement, or if it is only a suggestion"; + } + leaf is-bound { + type boolean; + description + "Defines if the value is a bound (a maximum) for the path metric that must not be exceeded."; + } + leaf is-computed { + type boolean; + description + "Defines if the value has been generated by the originator of the path."; + } + leaf value { + mandatory true; + type decimal64 { + fraction-digits 6; + } + } + } + container objective-function { + presence "If the candidate has an objective function constraint"; + description + "Define objective function constraint as a list of prefered functions"; + leaf required { + type boolean; + default "true"; + description + "If an objective function is a requirement, or if it is only a suggestion"; + } + leaf type { + description + "Type of objective function."; + mandatory true; + type enumeration { + enum mcp { + value 1; + description "Minimum Cost Path"; + } + enum mlp { + value 2; + description "Minimum Load Path"; + } + enum mbp { + value 3; + description "Maximum residual Bandwidth Path"; + } + enum mbc { + value 4; + description "Minimize aggregate Bandwidth Consumption"; + } + enum mll { + value 5; + description "Minimize the Load of the most loaded Link"; + } + enum mcc { + value 6; + description "Minimize the Cumulative Cost of a set of paths"; + } + enum spt { + value 7; + description "Shortest Path Tree"; + } + enum mct { + value 8; + description "Minimum Cost Tree"; + } + enum mplp { + value 9; + description "Minimum Packet Loss Path"; + } + enum mup { + value 10; + description "Maximum Under-Utilized Path"; + } + enum mrup { + value 11; + description "Maximum Reserved Under-Utilized Path"; + } + enum mtd { + value 12; + description "Minimize the number of Transit Domains"; + } + enum mbn { + value 13; + description "Minimize the number of Border Nodes"; + } + enum mctd { + value 14; + 
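One reading of the required/is-bound pair on the metrics list above: a bound metric is a cap the computed path metric must not exceed, and whether exceeding it is fatal depends on required. A small sketch of that interpretation (names are illustrative, not pathd API):

    #include <stdbool.h>
    #include <stdio.h>

    struct metric_constraint {
        double value;
        bool required;   /* requirement vs. suggestion */
        bool is_bound;   /* value is a maximum, not a target */
    };

    static bool metric_acceptable(const struct metric_constraint *c,
                                  double computed)
    {
        if (!c->is_bound)
            return true;          /* optimization target, nothing to enforce */
        if (computed <= c->value)
            return true;          /* within the bound */
        return !c->required;      /* exceeded: ok only if it was a suggestion */
    }

    int main(void)
    {
        struct metric_constraint te = { .value = 50.0, .required = true,
                                        .is_bound = true };

        printf("%d\n", metric_acceptable(&te, 42.0)); /* 1 */
        printf("%d\n", metric_acceptable(&te, 60.0)); /* 0 */
        return 0;
    }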
description "Minimize the number of Common Transit Domains"; + } + enum msl { + value 15; + description "Minimize the number of Shared Links"; + } + enum mss { + value 16; + description "Minimize the number of Shared SRLGs"; + } + enum msn { + value 17; + description "Minimize the number of Shared Nodes"; + } + } + } + } + } + } + } + } + } +} diff --git a/yang/frr-staticd.yang b/yang/frr-staticd.yang index 281b4903c0..98ff3a83c6 100644 --- a/yang/frr-staticd.yang +++ b/yang/frr-staticd.yang @@ -63,7 +63,13 @@ module frr-staticd { grouping staticd-prefix-attributes { list path-list { - key "distance"; + key "table-id distance"; + leaf table-id { + type uint32; + description + "Table-id"; + } + leaf distance { type frr-rt:administrative-distance; description @@ -77,13 +83,6 @@ module frr-staticd { "Route tag"; } - leaf table-id { - type uint32; - default "0"; - description - "Table-id"; - } - uses frr-nexthop:frr-nexthop; } } diff --git a/yang/frr-vrrpd.yang b/yang/frr-vrrpd.yang index c99d6d9877..200eaeb0b5 100644 --- a/yang/frr-vrrpd.yang +++ b/yang/frr-vrrpd.yang @@ -246,7 +246,7 @@ module frr-vrrpd { } uses ip-vrrp-state { - augment "./counter/tx" { + augment "counter/tx" { leaf gratuitous-arp { type yang:zero-based-counter32; description @@ -266,7 +266,7 @@ module frr-vrrpd { } uses ip-vrrp-state { - augment "./counter/tx" { + augment "counter/tx" { leaf neighbor-advertisement { type yang:zero-based-counter32; description diff --git a/yang/subdir.am b/yang/subdir.am index 5be93dc4e9..47fc508901 100644 --- a/yang/subdir.am +++ b/yang/subdir.am @@ -82,3 +82,7 @@ dist_yangmodels_DATA += yang/frr-bgp-bmp.yang dist_yangmodels_DATA += yang/frr-bgp-types.yang dist_yangmodels_DATA += yang/frr-bgp.yang endif + +if PATHD +dist_yangmodels_DATA += yang/frr-pathd.yang +endif diff --git a/zebra/connected.c b/zebra/connected.c index 70ea2e3805..c885c533e6 100644 --- a/zebra/connected.c +++ b/zebra/connected.c @@ -402,10 +402,10 @@ void connected_down(struct interface *ifp, struct connected *ifc) * head. */ rib_delete(afi, SAFI_UNICAST, zvrf->vrf->vrf_id, ZEBRA_ROUTE_CONNECT, 0, - 0, &p, NULL, &nh, 0, zvrf->table_id, 0, 0, false, true); + 0, &p, NULL, &nh, 0, zvrf->table_id, 0, 0, false); rib_delete(afi, SAFI_MULTICAST, zvrf->vrf->vrf_id, ZEBRA_ROUTE_CONNECT, - 0, 0, &p, NULL, &nh, 0, zvrf->table_id, 0, 0, false, true); + 0, 0, &p, NULL, &nh, 0, zvrf->table_id, 0, 0, false); /* Schedule LSP forwarding entries for processing, if appropriate. 
*/ if (zvrf->vrf->vrf_id == VRF_DEFAULT) { diff --git a/zebra/debug.c b/zebra/debug.c index 87a10ea65d..21fa765c63 100644 --- a/zebra/debug.c +++ b/zebra/debug.c @@ -41,6 +41,7 @@ unsigned long zebra_debug_dplane; unsigned long zebra_debug_mlag; unsigned long zebra_debug_nexthop; unsigned long zebra_debug_evpn_mh; +unsigned long zebra_debug_pbr; DEFINE_HOOK(zebra_debug_show_debugging, (struct vty *vty), (vty)); @@ -122,6 +123,9 @@ DEFUN_NOSH (show_debugging_zebra, if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH) vty_out(vty, " Zebra EVPN-MH Neigh debugging is on\n"); + if (IS_ZEBRA_DEBUG_PBR) + vty_out(vty, " Zebra PBR debugging is on\n"); + hook_call(zebra_debug_show_debugging, vty); return CMD_SUCCESS; } @@ -318,6 +322,17 @@ DEFUN (debug_zebra_dplane, return CMD_SUCCESS; } +DEFUN (debug_zebra_pbr, + debug_zebra_pbr_cmd, + "debug zebra pbr", + DEBUG_STR + "Zebra configuration\n" + "Debug zebra pbr events\n") +{ + SET_FLAG(zebra_debug_pbr, ZEBRA_DEBUG_PBR); + return CMD_SUCCESS; +} + DEFPY (debug_zebra_mlag, debug_zebra_mlag_cmd, "[no$no] debug zebra mlag", @@ -508,6 +523,18 @@ DEFUN (no_debug_zebra_dplane, return CMD_SUCCESS; } +DEFUN (no_debug_zebra_pbr, + no_debug_zebra_pbr_cmd, + "no debug zebra pbr", + NO_STR + DEBUG_STR + "Zebra configuration\n" + "Debug zebra pbr events\n") +{ + zebra_debug_pbr = 0; + return CMD_SUCCESS; +} + DEFPY (debug_zebra_nexthop, debug_zebra_nexthop_cmd, "[no$no] debug zebra nexthop [detail$detail]", @@ -650,6 +677,11 @@ static int config_write_debug(struct vty *vty) write++; } + if (IS_ZEBRA_DEBUG_PBR) { + vty_out(vty, "debug zebra pbr\n"); + write++; + } + return write; } @@ -668,6 +700,7 @@ void zebra_debug_init(void) zebra_debug_evpn_mh = 0; zebra_debug_nht = 0; zebra_debug_nexthop = 0; + zebra_debug_pbr = 0; install_node(&debug_node); @@ -686,6 +719,7 @@ void zebra_debug_init(void) install_element(ENABLE_NODE, &debug_zebra_dplane_cmd); install_element(ENABLE_NODE, &debug_zebra_mlag_cmd); install_element(ENABLE_NODE, &debug_zebra_nexthop_cmd); + install_element(ENABLE_NODE, &debug_zebra_pbr_cmd); install_element(ENABLE_NODE, &no_debug_zebra_events_cmd); install_element(ENABLE_NODE, &no_debug_zebra_nht_cmd); install_element(ENABLE_NODE, &no_debug_zebra_mpls_cmd); @@ -696,6 +730,7 @@ void zebra_debug_init(void) install_element(ENABLE_NODE, &no_debug_zebra_rib_cmd); install_element(ENABLE_NODE, &no_debug_zebra_fpm_cmd); install_element(ENABLE_NODE, &no_debug_zebra_dplane_cmd); + install_element(ENABLE_NODE, &no_debug_zebra_pbr_cmd); install_element(ENABLE_NODE, &debug_zebra_evpn_mh_cmd); install_element(CONFIG_NODE, &debug_zebra_events_cmd); @@ -710,6 +745,8 @@ void zebra_debug_init(void) install_element(CONFIG_NODE, &debug_zebra_fpm_cmd); install_element(CONFIG_NODE, &debug_zebra_dplane_cmd); install_element(CONFIG_NODE, &debug_zebra_nexthop_cmd); + install_element(CONFIG_NODE, &debug_zebra_pbr_cmd); + install_element(CONFIG_NODE, &no_debug_zebra_events_cmd); install_element(CONFIG_NODE, &no_debug_zebra_nht_cmd); install_element(CONFIG_NODE, &no_debug_zebra_mpls_cmd); @@ -720,6 +757,7 @@ void zebra_debug_init(void) install_element(CONFIG_NODE, &no_debug_zebra_rib_cmd); install_element(CONFIG_NODE, &no_debug_zebra_fpm_cmd); install_element(CONFIG_NODE, &no_debug_zebra_dplane_cmd); + install_element(CONFIG_NODE, &no_debug_zebra_pbr_cmd); install_element(CONFIG_NODE, &debug_zebra_mlag_cmd); install_element(CONFIG_NODE, &debug_zebra_evpn_mh_cmd); } diff --git a/zebra/debug.h b/zebra/debug.h index 8402224f19..86506846ad 100644 --- a/zebra/debug.h +++ b/zebra/debug.h @@ -67,6 
+67,8 @@ extern "C" { #define ZEBRA_DEBUG_EVPN_MH_MAC 0x04 #define ZEBRA_DEBUG_EVPN_MH_NEIGH 0x08 +#define ZEBRA_DEBUG_PBR 0x01 + /* Debug related macro. */ #define IS_ZEBRA_DEBUG_EVENT (zebra_debug_event & ZEBRA_DEBUG_EVENT) @@ -114,6 +116,8 @@ extern "C" { #define IS_ZEBRA_DEBUG_EVPN_MH_NEIGH \ (zebra_debug_evpn_mh & ZEBRA_DEBUG_EVPN_MH_NEIGH) +#define IS_ZEBRA_DEBUG_PBR (zebra_debug_pbr & ZEBRA_DEBUG_PBR) + extern unsigned long zebra_debug_event; extern unsigned long zebra_debug_packet; extern unsigned long zebra_debug_kernel; @@ -127,6 +131,7 @@ extern unsigned long zebra_debug_dplane; extern unsigned long zebra_debug_mlag; extern unsigned long zebra_debug_nexthop; extern unsigned long zebra_debug_evpn_mh; +extern unsigned long zebra_debug_pbr; extern void zebra_debug_init(void); diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index 261b859bf6..79a5d148a6 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -75,9 +75,6 @@ struct fpm_nl_ctx { int socket; bool disabled; bool connecting; - bool nhg_complete; - bool rib_complete; - bool rmac_complete; bool use_nhg; struct sockaddr_storage addr; @@ -377,7 +374,6 @@ static int fpm_write_config(struct vty *vty) struct sockaddr_in *sin; struct sockaddr_in6 *sin6; int written = 0; - char addrstr[INET6_ADDRSTRLEN]; if (gfnc->disabled) return written; @@ -386,8 +382,7 @@ static int fpm_write_config(struct vty *vty) case AF_INET: written = 1; sin = (struct sockaddr_in *)&gfnc->addr; - inet_ntop(AF_INET, &sin->sin_addr, addrstr, sizeof(addrstr)); - vty_out(vty, "fpm address %s", addrstr); + vty_out(vty, "fpm address %pI4", &sin->sin_addr); if (sin->sin_port != htons(SOUTHBOUND_DEFAULT_PORT)) vty_out(vty, " port %d", ntohs(sin->sin_port)); @@ -396,8 +391,7 @@ static int fpm_write_config(struct vty *vty) case AF_INET6: written = 1; sin6 = (struct sockaddr_in6 *)&gfnc->addr; - inet_ntop(AF_INET, &sin6->sin6_addr, addrstr, sizeof(addrstr)); - vty_out(vty, "fpm address %s", addrstr); + vty_out(vty, "fpm address %pI6", &sin6->sin6_addr); if (sin6->sin6_port != htons(SOUTHBOUND_DEFAULT_PORT)) vty_out(vty, " port %d", ntohs(sin6->sin6_port)); @@ -542,6 +536,13 @@ static int fpm_write(struct thread *t) fnc->connecting = false; + /* + * Starting with LSPs walk all FPM objects, marking them + * as unsent and then replaying them. + */ + thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0, + &fnc->t_lspreset); + /* Permit receiving messages now. */ thread_add_read(fnc->fthread->master, fpm_read, fnc, fnc->socket, &fnc->t_read); @@ -664,9 +665,12 @@ static int fpm_connect(struct thread *t) /* * Starting with LSPs walk all FPM objects, marking them * as unsent and then replaying them. + * + * If we are not connected, then delay the objects reset/send. */ - thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0, - &fnc->t_lspreset); + if (!fnc->connecting) + thread_add_timer(zrouter.master, fpm_lsp_reset, fnc, 0, + &fnc->t_lspreset); return 0; } @@ -908,12 +912,8 @@ static int fpm_lsp_send(struct thread *t) WALK_FINISH(fnc, FNE_LSP_FINISHED); /* Now move onto routes */ - if (fnc->use_nhg) - thread_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0, - &fnc->t_nhgreset); - else - thread_add_timer(zrouter.master, fpm_rib_reset, fnc, 0, - &fnc->t_ribreset); + thread_add_timer(zrouter.master, fpm_nhg_reset, fnc, 0, + &fnc->t_nhgreset); } else { /* Didn't finish - reschedule LSP walk */ thread_add_timer(zrouter.master, fpm_lsp_send, fnc, 0, @@ -966,7 +966,8 @@ static int fpm_nhg_send(struct thread *t) fna.complete = true; /* Send next hops. 
*/ - hash_walk(zrouter.nhgs_id, fpm_nhg_send_cb, &fna); + if (fnc->use_nhg) + hash_walk(zrouter.nhgs_id, fpm_nhg_send_cb, &fna); /* `free()` allocated memory. */ dplane_ctx_fini(&fna.ctx); @@ -1051,10 +1052,10 @@ struct fpm_rmac_arg { bool complete; }; -static void fpm_enqueue_rmac_table(struct hash_bucket *backet, void *arg) +static void fpm_enqueue_rmac_table(struct hash_bucket *bucket, void *arg) { struct fpm_rmac_arg *fra = arg; - zebra_mac_t *zrmac = backet->data; + zebra_mac_t *zrmac = bucket->data; struct zebra_if *zif = fra->zl3vni->vxlan_if->info; const struct zebra_l2info_vxlan *vxl = &zif->l2info.vxl; struct zebra_if *br_zif; @@ -1083,10 +1084,10 @@ static void fpm_enqueue_rmac_table(struct hash_bucket *backet, void *arg) } } -static void fpm_enqueue_l3vni_table(struct hash_bucket *backet, void *arg) +static void fpm_enqueue_l3vni_table(struct hash_bucket *bucket, void *arg) { struct fpm_rmac_arg *fra = arg; - zebra_l3vni_t *zl3vni = backet->data; + zebra_l3vni_t *zl3vni = bucket->data; fra->zl3vni = zl3vni; hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, zl3vni); @@ -1124,7 +1125,6 @@ static int fpm_nhg_reset(struct thread *t) { struct fpm_nl_ctx *fnc = THREAD_ARG(t); - fnc->nhg_complete = false; hash_iterate(zrouter.nhgs_id, fpm_nhg_reset_cb, NULL); /* Schedule next step: send next hop groups. */ @@ -1167,8 +1167,6 @@ static int fpm_rib_reset(struct thread *t) struct route_table *rt; rib_tables_iter_t rt_iter; - fnc->rib_complete = false; - rt_iter.state = RIB_TABLES_ITER_S_INIT; while ((rt = rib_tables_iter_next(&rt_iter))) { for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) { @@ -1190,16 +1188,16 @@ static int fpm_rib_reset(struct thread *t) /* * The next three function will handle RMAC table reset. */ -static void fpm_unset_rmac_table(struct hash_bucket *backet, void *arg) +static void fpm_unset_rmac_table(struct hash_bucket *bucket, void *arg) { - zebra_mac_t *zrmac = backet->data; + zebra_mac_t *zrmac = bucket->data; UNSET_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT); } -static void fpm_unset_l3vni_table(struct hash_bucket *backet, void *arg) +static void fpm_unset_l3vni_table(struct hash_bucket *bucket, void *arg) { - zebra_l3vni_t *zl3vni = backet->data; + zebra_l3vni_t *zl3vni = bucket->data; hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni); } @@ -1208,7 +1206,6 @@ static int fpm_rmac_reset(struct thread *t) { struct fpm_nl_ctx *fnc = THREAD_ARG(t); - fnc->rmac_complete = false; hash_iterate(zrouter.l3vni_table, fpm_unset_l3vni_table, NULL); /* Schedule next event: send RMAC entries. */ @@ -1222,24 +1219,33 @@ static int fpm_process_queue(struct thread *t) { struct fpm_nl_ctx *fnc = THREAD_ARG(t); struct zebra_dplane_ctx *ctx; - - frr_mutex_lock_autounlock(&fnc->ctxqueue_mutex); + bool no_bufs = false; + uint64_t processed_contexts = 0; while (true) { /* No space available yet. */ - if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE) + if (STREAM_WRITEABLE(fnc->obuf) < NL_PKT_BUF_SIZE) { + no_bufs = true; break; + } /* Dequeue next item or quit processing. */ - ctx = dplane_ctx_dequeue(&fnc->ctxqueue); + frr_with_mutex (&fnc->ctxqueue_mutex) { + ctx = dplane_ctx_dequeue(&fnc->ctxqueue); + } if (ctx == NULL) break; - fpm_nl_enqueue(fnc, ctx); + /* + * Intentionally ignoring the return value + * as that we are ensuring that we can write to + * the output data in the STREAM_WRITEABLE + * check above, so we can ignore the return + */ + (void)fpm_nl_enqueue(fnc, ctx); /* Account the processed entries. 
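The fpm_process_queue() rework above narrows the lock scope: instead of holding ctxqueue_mutex across the whole drain loop, frr_with_mutex() wraps only the dequeue, so encoding and output buffering run unlocked. A standalone pthread sketch of that pattern (FRR's frr_with_mutex() provides the same scoped locking):

    #include <pthread.h>
    #include <stddef.h>

    struct item {
        struct item *next;
    };

    static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
    static struct item *q_head;

    /* Hold the mutex only long enough to detach one item. */
    static struct item *dequeue_one(void)
    {
        struct item *it;

        pthread_mutex_lock(&q_mtx);
        it = q_head;
        if (it)
            q_head = it->next;
        pthread_mutex_unlock(&q_mtx);

        return it;
    }

    static void drain_queue(void (*emit)(struct item *))
    {
        struct item *it;

        while ((it = dequeue_one()) != NULL)
            emit(it);   /* potentially slow work, done without the lock */
    }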
*/ - atomic_fetch_add_explicit(&fnc->counters.dplane_contexts, 1, - memory_order_relaxed); + processed_contexts++; atomic_fetch_sub_explicit(&fnc->counters.ctxqueue_len, 1, memory_order_relaxed); @@ -1247,10 +1253,12 @@ static int fpm_process_queue(struct thread *t) dplane_provider_enqueue_out_ctx(fnc->prov, ctx); } - /* Check for more items in the queue. */ - if (atomic_load_explicit(&fnc->counters.ctxqueue_len, - memory_order_relaxed) - > 0) + /* Update count of processed contexts */ + atomic_fetch_add_explicit(&fnc->counters.dplane_contexts, + processed_contexts, memory_order_relaxed); + + /* Re-schedule if we ran out of buffer space */ + if (no_bufs) thread_add_timer(fnc->fthread->master, fpm_process_queue, fnc, 0, &fnc->t_dequeue); @@ -1312,20 +1320,14 @@ static int fpm_process_event(struct thread *t) if (IS_ZEBRA_DEBUG_FPM) zlog_debug("%s: next hop groups walk finished", __func__); - - fnc->nhg_complete = true; break; case FNE_RIB_FINISHED: if (IS_ZEBRA_DEBUG_FPM) zlog_debug("%s: RIB walk finished", __func__); - - fnc->rib_complete = true; break; case FNE_RMAC_FINISHED: if (IS_ZEBRA_DEBUG_FPM) zlog_debug("%s: RMAC walk finished", __func__); - - fnc->rmac_complete = true; break; case FNE_LSP_FINISHED: if (IS_ZEBRA_DEBUG_FPM) @@ -1421,7 +1423,7 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov) struct zebra_dplane_ctx *ctx; struct fpm_nl_ctx *fnc; int counter, limit; - uint64_t cur_queue, peak_queue; + uint64_t cur_queue, peak_queue = 0, stored_peak_queue; fnc = dplane_provider_get_data(prov); limit = dplane_provider_get_work_limit(prov); @@ -1435,22 +1437,22 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov) * anyway. */ if (fnc->socket != -1 && fnc->connecting == false) { - frr_mutex_lock_autounlock(&fnc->ctxqueue_mutex); - dplane_ctx_enqueue_tail(&fnc->ctxqueue, ctx); - - /* Account the number of contexts. */ + /* + * Update the number of queued contexts *before* + * enqueueing, to ensure counter consistency. 
+ */ atomic_fetch_add_explicit(&fnc->counters.ctxqueue_len, 1, memory_order_relaxed); + + frr_with_mutex (&fnc->ctxqueue_mutex) { + dplane_ctx_enqueue_tail(&fnc->ctxqueue, ctx); + } + cur_queue = atomic_load_explicit( &fnc->counters.ctxqueue_len, memory_order_relaxed); - peak_queue = atomic_load_explicit( - &fnc->counters.ctxqueue_len_peak, - memory_order_relaxed); if (peak_queue < cur_queue) - atomic_store_explicit( - &fnc->counters.ctxqueue_len_peak, - cur_queue, memory_order_relaxed); + peak_queue = cur_queue; continue; } @@ -1458,12 +1460,23 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov) dplane_provider_enqueue_out_ctx(prov, ctx); } + /* Update peak queue length, if we just observed a new peak */ + stored_peak_queue = atomic_load_explicit( + &fnc->counters.ctxqueue_len_peak, memory_order_relaxed); + if (stored_peak_queue < peak_queue) + atomic_store_explicit(&fnc->counters.ctxqueue_len_peak, + peak_queue, memory_order_relaxed); + if (atomic_load_explicit(&fnc->counters.ctxqueue_len, memory_order_relaxed) > 0) thread_add_timer(fnc->fthread->master, fpm_process_queue, fnc, 0, &fnc->t_dequeue); + /* Ensure dataplane thread is rescheduled if we hit the work limit */ + if (counter >= limit) + dplane_provider_work_ready(); + return 0; } diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c index e4dd745f42..00471b9645 100644 --- a/zebra/if_netlink.c +++ b/zebra/if_netlink.c @@ -301,7 +301,7 @@ static void netlink_vrf_change(struct nlmsghdr *h, struct rtattr *tb, struct ifinfomsg *ifi; struct rtattr *linkinfo[IFLA_INFO_MAX + 1]; struct rtattr *attr[IFLA_VRF_MAX + 1]; - struct vrf *vrf; + struct vrf *vrf = NULL; struct zebra_vrf *zvrf; uint32_t nl_table_id; @@ -350,11 +350,7 @@ static void netlink_vrf_change(struct nlmsghdr *h, struct rtattr *tb, } } - /* - * vrf_get is implied creation if it does not exist - */ - vrf = vrf_get((vrf_id_t)ifi->ifi_index, - name); // It would create vrf + vrf = vrf_update((vrf_id_t)ifi->ifi_index, name); if (!vrf) { flog_err(EC_LIB_INTERFACE, "VRF %s id %u not created", name, ifi->ifi_index); diff --git a/zebra/interface.c b/zebra/interface.c index 4072eb1568..fc34a6fb9e 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -1029,7 +1029,7 @@ void if_up(struct interface *ifp) /* Notify the protocol daemons. */ if (ifp->ptm_enable && (ifp->ptm_status == ZEBRA_PTM_STATUS_DOWN)) { flog_warn(EC_ZEBRA_PTM_NOT_READY, - "%s: interface %s hasn't passed ptm check\n", + "%s: interface %s hasn't passed ptm check", __func__, ifp->name); return; } @@ -2977,7 +2977,7 @@ int if_ip_address_install(struct interface *ifp, struct prefix *prefix, dplane_res = dplane_intf_addr_set(ifp, ifc); if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) { zlog_debug( - "dplane can't set interface IP address: %s.\n", + "dplane can't set interface IP address: %s.", dplane_res2str(dplane_res)); return NB_ERR; } @@ -3095,7 +3095,7 @@ int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix) /* Check current interface address. 
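Returning to the ctxqueue counters in fpm_nl_process(): the peak length is now tracked in a local variable inside the loop and published to the atomic counter once afterwards, instead of a load/compare/store per context. A minimal C11 sketch of that batching (relaxed ordering, as in the original; the counter is a statistic, so the read-then-store race is tolerated):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t queue_len;
    static _Atomic uint64_t queue_len_peak;

    static void enqueue_batch(unsigned int n)
    {
        uint64_t cur, peak = 0, stored;

        while (n--) {
            cur = atomic_fetch_add_explicit(&queue_len, 1,
                                            memory_order_relaxed) + 1;
            if (cur > peak)
                peak = cur;   /* cheap local comparison per item */
        }

        /* one atomic read and at most one store for the whole batch */
        stored = atomic_load_explicit(&queue_len_peak, memory_order_relaxed);
        if (stored < peak)
            atomic_store_explicit(&queue_len_peak, peak,
                                  memory_order_relaxed);
    }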
*/ ifc = connected_check_ptp(ifp, prefix, NULL); if (!ifc) { - zlog_debug("interface %s Can't find address\n", + zlog_debug("interface %s Can't find address", ifp->name); return -1; } @@ -3106,7 +3106,7 @@ int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix) } if (!ifc) { - zlog_debug("interface %s Can't find address\n", ifp->name); + zlog_debug("interface %s Can't find address", ifp->name); return -1; } UNSET_FLAG(ifc->conf, ZEBRA_IFC_CONFIGURED); @@ -3122,7 +3122,7 @@ int if_ip_address_uinstall(struct interface *ifp, struct prefix *prefix) /* This is real route. */ dplane_res = dplane_intf_addr_unset(ifp, ifc); if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) { - zlog_debug("Can't unset interface IP address: %s.\n", + zlog_debug("Can't unset interface IP address: %s.", dplane_res2str(dplane_res)); return -1; } @@ -3335,7 +3335,7 @@ int if_ipv6_address_install(struct interface *ifp, struct prefix *prefix, dplane_res = dplane_intf_addr_set(ifp, ifc); if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) { zlog_debug( - "dplane can't set interface IP address: %s.\n", + "dplane can't set interface IP address: %s.", dplane_res2str(dplane_res)); return NB_ERR; } diff --git a/zebra/irdp_packet.c b/zebra/irdp_packet.c index 56fd35a736..6134df9c41 100644 --- a/zebra/irdp_packet.c +++ b/zebra/irdp_packet.c @@ -105,7 +105,7 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp) if (iplen < ICMP_MINLEN) { flog_err(EC_ZEBRA_IRDP_LEN_MISMATCH, - "IRDP: RX ICMP packet too short from %pI4\n", + "IRDP: RX ICMP packet too short from %pI4", &src); return; } @@ -116,7 +116,7 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp) len of IP-header) 14+20 */ if (iplen > IRDP_RX_BUF - 34) { flog_err(EC_ZEBRA_IRDP_LEN_MISMATCH, - "IRDP: RX ICMP packet too long from %pI4\n", + "IRDP: RX ICMP packet too long from %pI4", &src); return; } @@ -153,7 +153,7 @@ static void parse_irdp_packet(char *p, int len, struct interface *ifp) && !(irdp->flags & IF_BROADCAST))) { flog_warn( EC_ZEBRA_IRDP_BAD_RX_FLAGS, - "IRDP: RX illegal from %pI4 to %s while %s operates in %s; Please correct settings\n", + "IRDP: RX illegal from %pI4 to %s while %s operates in %s; Please correct settings", &src, ntohl(ip->ip_dst.s_addr) == INADDR_ALLRTRS_GROUP ? 
"multicast" diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c index 5d64f57b3e..c77a357e9f 100644 --- a/zebra/kernel_netlink.c +++ b/zebra/kernel_netlink.c @@ -383,7 +383,7 @@ static int netlink_information_fetch(struct nlmsghdr *h, ns_id_t ns_id, * it to be sent up to us */ flog_err(EC_ZEBRA_UNKNOWN_NLMSG, - "Unknown netlink nlmsg_type %s(%d) vrf %u\n", + "Unknown netlink nlmsg_type %s(%d) vrf %u", nl_msg_type_to_str(h->nlmsg_type), h->nlmsg_type, ns_id); break; @@ -485,10 +485,23 @@ static void netlink_install_filter(int sock, __u32 pid, __u32 dplane_pid) if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) - flog_err_sys(EC_LIB_SOCKET, "Can't install socket filter: %s\n", + flog_err_sys(EC_LIB_SOCKET, "Can't install socket filter: %s", safe_strerror(errno)); } +void netlink_parse_rtattr_flags(struct rtattr **tb, int max, + struct rtattr *rta, int len, unsigned short flags) +{ + unsigned short type; + + while (RTA_OK(rta, len)) { + type = rta->rta_type & ~flags; + if ((type <= max) && (!tb[type])) + tb[type] = rta; + rta = RTA_NEXT(rta, len); + } +} + void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta, int len) { @@ -1123,9 +1136,11 @@ static int nl_batch_read_resp(struct nl_batch *bth) * associated with any dplane context object. */ if (ctx == NULL) { - zlog_debug( - "%s: skipping unassociated response, seq number %d NS %u", - __func__, h->nlmsg_seq, bth->zns->ns_id); + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug( + "%s: skipping unassociated response, seq number %d NS %u", + __func__, h->nlmsg_seq, + bth->zns->ns_id); continue; } @@ -1136,8 +1151,9 @@ static int nl_batch_read_resp(struct nl_batch *bth) dplane_ctx_set_status( ctx, ZEBRA_DPLANE_REQUEST_FAILURE); - zlog_debug("%s: netlink error message seq=%d ", - __func__, h->nlmsg_seq); + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s: netlink error message seq=%d ", + __func__, h->nlmsg_seq); continue; } @@ -1146,9 +1162,11 @@ static int nl_batch_read_resp(struct nl_batch *bth) * the error and instead received some other message in an * unexpected way. 
*/ - zlog_debug("%s: ignoring message type 0x%04x(%s) NS %u", - __func__, h->nlmsg_type, - nl_msg_type_to_str(h->nlmsg_type), bth->zns->ns_id); + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("%s: ignoring message type 0x%04x(%s) NS %u", + __func__, h->nlmsg_type, + nl_msg_type_to_str(h->nlmsg_type), + bth->zns->ns_id); } return 0; diff --git a/zebra/kernel_netlink.h b/zebra/kernel_netlink.h index 696f9be4f6..a7b152b31b 100644 --- a/zebra/kernel_netlink.h +++ b/zebra/kernel_netlink.h @@ -79,6 +79,9 @@ extern void nl_attr_rtnh_end(struct nlmsghdr *n, struct rtnexthop *rtnh); extern void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta, int len); +extern void netlink_parse_rtattr_flags(struct rtattr **tb, int max, + struct rtattr *rta, int len, + unsigned short flags); extern void netlink_parse_rtattr_nested(struct rtattr **tb, int max, struct rtattr *rta); extern const char *nl_msg_type_to_str(uint16_t msg_type); diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c index 9d74aeca28..adbdf54c1f 100644 --- a/zebra/kernel_socket.c +++ b/zebra/kernel_socket.c @@ -1107,7 +1107,7 @@ void rtm_read(struct rt_msghdr *rtm) if (rtm->rtm_type == RTM_CHANGE) rib_delete(afi, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL, 0, zebra_flags, &p, NULL, NULL, 0, RT_TABLE_MAIN, 0, - 0, true, false); + 0, true); if (rtm->rtm_type == RTM_GET || rtm->rtm_type == RTM_ADD || rtm->rtm_type == RTM_CHANGE) rib_add(afi, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL, 0, @@ -1116,7 +1116,7 @@ void rtm_read(struct rt_msghdr *rtm) else rib_delete(afi, SAFI_UNICAST, VRF_DEFAULT, ZEBRA_ROUTE_KERNEL, 0, zebra_flags, &p, NULL, &nh, 0, RT_TABLE_MAIN, 0, - 0, true, false); + 0, true); } /* Interface function for the kernel routing table updates. Support diff --git a/zebra/label_manager.c b/zebra/label_manager.c index feec49ecc2..2634a333ee 100644 --- a/zebra/label_manager.c +++ b/zebra/label_manager.c @@ -91,11 +91,8 @@ static int label_manager_get_chunk(struct label_manager_chunk **lmc, vrf_id_t vrf_id); static int label_manager_release_label_chunk(struct zserv *client, uint32_t start, uint32_t end); -static int release_label_chunk(uint8_t proto, unsigned short instance, - uint32_t session_id, uint32_t start, - uint32_t end); -static void delete_label_chunk(void *val) +void delete_label_chunk(void *val) { XFREE(MTYPE_LM_CHUNK, val); } @@ -178,11 +175,9 @@ void label_manager_init(void) } /* alloc and fill a label chunk */ -static struct label_manager_chunk *create_label_chunk(uint8_t proto, - unsigned short instance, - uint32_t session_id, - uint8_t keep, uint32_t start, - uint32_t end) +struct label_manager_chunk * +create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, + uint8_t keep, uint32_t start, uint32_t end) { /* alloc chunk, fill it and return it */ struct label_manager_chunk *lmc = @@ -305,15 +300,13 @@ assign_specific_label_chunk(uint8_t proto, unsigned short instance, * @param base Desired starting label of the chunk; if MPLS_LABEL_BASE_ANY it does not apply * @return Pointer to the assigned label chunk, or NULL if the request could not be satisfied */ -static struct label_manager_chunk *assign_label_chunk(uint8_t proto, - unsigned short instance, - uint32_t session_id, - uint8_t keep, uint32_t size, - uint32_t base) +struct label_manager_chunk * +assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, + uint8_t keep, uint32_t size, uint32_t base) { struct label_manager_chunk *lmc; struct listnode *node; - uint32_t prev_end = 0; + uint32_t prev_end = 
MPLS_LABEL_UNRESERVED_MIN; /* handle chunks request with a specific base label */ if (base != MPLS_LABEL_BASE_ANY) @@ -335,8 +328,7 @@ static struct label_manager_chunk *assign_label_chunk(uint8_t proto, } /* check if we hadve a "hole" behind us that we can squeeze into */ - if ((lmc->start > prev_end) - && (lmc->start - prev_end >= size)) { + if ((lmc->start > prev_end) && (lmc->start - prev_end > size)) { lmc = create_label_chunk(proto, instance, session_id, keep, prev_end + 1, prev_end + size); @@ -398,7 +390,7 @@ static int label_manager_release_label_chunk(struct zserv *client, * @param end Last label of the chunk * @return 0 on success, -1 otherwise */ -static int release_label_chunk(uint8_t proto, unsigned short instance, +int release_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, uint32_t start, uint32_t end) { struct listnode *node; diff --git a/zebra/label_manager.h b/zebra/label_manager.h index 094155f714..8636c79219 100644 --- a/zebra/label_manager.h +++ b/zebra/label_manager.h @@ -94,6 +94,12 @@ int lm_client_connect_response(uint8_t proto, uint16_t instance, int lm_get_chunk_response(struct label_manager_chunk *lmc, struct zserv *client, vrf_id_t vrf_id); +/* convenience function to allocate an lmc to be consumed by the above API */ +struct label_manager_chunk * +create_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, + uint8_t keep, uint32_t start, uint32_t end); +void delete_label_chunk(void *val); + /* register/unregister callbacks for hooks */ void lm_hooks_register(void); void lm_hooks_unregister(void); @@ -107,6 +113,11 @@ struct label_manager { }; void label_manager_init(void); +struct label_manager_chunk * +assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id, + uint8_t keep, uint32_t size, uint32_t base); +int release_label_chunk(uint8_t proto, unsigned short instance, + uint32_t session_id, uint32_t start, uint32_t end); int lm_client_disconnect_cb(struct zserv *client); int release_daemon_label_chunks(struct zserv *client); void label_manager_close(void); diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 1f075cfb4b..b0f124ed55 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -206,7 +206,7 @@ void redistribute_update(const struct prefix *p, const struct prefix *src_p, afi = family2afi(p->family); if (!afi) { flog_warn(EC_ZEBRA_REDISTRIBUTE_UNKNOWN_AF, - "%s: Unknown AFI/SAFI prefix received\n", __func__); + "%s: Unknown AFI/SAFI prefix received", __func__); return; } if (!zebra_check_addr(p)) { @@ -276,7 +276,7 @@ void redistribute_delete(const struct prefix *p, const struct prefix *src_p, afi = family2afi(p->family); if (!afi) { flog_warn(EC_ZEBRA_REDISTRIBUTE_UNKNOWN_AF, - "%s: Unknown AFI/SAFI prefix received\n", + "%s: Unknown AFI/SAFI prefix received", __func__); return; } @@ -715,7 +715,7 @@ int zebra_del_import_table_entry(struct zebra_vrf *zvrf, struct route_node *rn, rib_delete(afi, SAFI_UNICAST, zvrf->vrf->vrf_id, ZEBRA_ROUTE_TABLE, re->table, re->flags, &p, NULL, re->nhe->nhg.nexthop, re->nhe_id, zvrf->table_id, re->metric, re->distance, - false, false); + false); return 0; } diff --git a/zebra/rib.h b/zebra/rib.h index fe7073656c..86766b8175 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -343,8 +343,8 @@ int route_entry_update_nhe(struct route_entry *re, struct nhg_hash_entry *new_nhghe); /* NHG replace has happend, we have to update route_entry pointers to new one */ -void rib_handle_nhg_replace(struct nhg_hash_entry *old, - struct nhg_hash_entry 
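Two related label-manager fixes above: prev_end now starts at MPLS_LABEL_UNRESERVED_MIN so a squeezed-in chunk cannot start inside the reserved label range, and the gap test uses a strict '>' so a gap exactly equal to the requested size is rejected (the new chunk [prev_end + 1, prev_end + size] would otherwise collide with the next chunk's first label). A worked sketch of that arithmetic, assuming MPLS_LABEL_UNRESERVED_MIN is 16:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MPLS_LABEL_UNRESERVED_MIN 16

    static bool gap_fits(uint32_t prev_end, uint32_t next_start, uint32_t size)
    {
        /* new chunk would be [prev_end + 1, prev_end + size] */
        return next_start > prev_end && next_start - prev_end > size;
    }

    int main(void)
    {
        uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN;

        /* next chunk starts at 116, request 100 labels: gap == size,
         * rejected, since [17, 116] would overlap label 116. */
        printf("%d\n", gap_fits(prev_end, 116, 100));
        /* next chunk starts at 117: gap is 101 > 100, so [17, 116] fits. */
        printf("%d\n", gap_fits(prev_end, 117, 100));
        return 0;
    }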
*new); +void rib_handle_nhg_replace(struct nhg_hash_entry *old_entry, + struct nhg_hash_entry *new_entry); #define route_entry_dump(prefix, src, re) _route_entry_dump(__func__, prefix, src, re) extern void _route_entry_dump(const char *func, union prefixconstptr pp, @@ -393,7 +393,7 @@ extern void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, struct prefix *p, struct prefix_ipv6 *src_p, const struct nexthop *nh, uint32_t nhe_id, uint32_t table_id, uint32_t metric, uint8_t distance, - bool fromkernel, bool connected_down); + bool fromkernel); extern struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id, union g_addr *addr, @@ -406,9 +406,8 @@ extern struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p, vrf_id_t vrf_id); extern void rib_update(enum rib_update_event event); -extern void rib_update_vrf(vrf_id_t vrf_id, enum rib_update_event event); extern void rib_update_table(struct route_table *table, - enum rib_update_event event); + enum rib_update_event event, int rtype); extern int rib_sweep_route(struct thread *t); extern void rib_sweep_table(struct route_table *table); extern void rib_close_table(struct route_table *table); diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index f59fbae3af..1cae0b1f9b 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -77,16 +77,7 @@ /* Re-defining as I am unable to include <linux/if_bridge.h> which has the * UAPI for MAC sync. */ #ifndef _UAPI_LINUX_IF_BRIDGE_H -/* FDB notification bits for NDA_NOTIFY: - * - BR_FDB_NFY_STATIC - notify on activity/expire even for a static entry - * - BR_FDB_NFY_INACTIVE - mark as inactive to avoid double notification, - * used with BR_FDB_NFY_STATIC (kernel controlled) - */ -enum { - BR_FDB_NFY_STATIC, - BR_FDB_NFY_INACTIVE, - BR_FDB_NFY_MAX -}; +#define BR_SPH_LIST_SIZE 10 #endif static vlanid_t filter_vlan = 0; @@ -266,6 +257,10 @@ static inline int zebra2proto(int proto) case ZEBRA_ROUTE_NHG: proto = RTPROT_ZEBRA; break; + case ZEBRA_ROUTE_CONNECT: + case ZEBRA_ROUTE_KERNEL: + proto = RTPROT_KERNEL; + break; default: /* * When a user adds a new protocol this will show up @@ -869,7 +864,7 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, if (nhe_id) { rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, &p, &src_p, NULL, nhe_id, table, metric, - distance, true, false); + distance, true); } else { if (!tb[RTA_MULTIPATH]) { struct nexthop nh; @@ -879,13 +874,13 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, gate, afi, vrf_id); rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, &p, &src_p, &nh, 0, table, - metric, distance, true, false); + metric, distance, true); } else { /* XXX: need to compare the entire list of * nexthops here for NLM_F_APPEND stupidity */ rib_delete(afi, SAFI_UNICAST, vrf_id, proto, 0, flags, &p, &src_p, NULL, 0, table, - metric, distance, true, false); + metric, distance, true); } } } @@ -2486,13 +2481,6 @@ static int netlink_nexthop_process_group(struct rtattr **tb, return count; } -#if 0 - // TODO: Need type for something? 
- zlog_debug("Nexthop group type: %d", - *((uint16_t *)RTA_DATA(tb[NHA_GROUP_TYPE]))); - -#endif - for (int i = 0; ((i < count) && (i < z_grp_size)); i++) { z_grp[i].id = n_grp[i].id; z_grp[i].weight = n_grp[i].weight + 1; @@ -2762,11 +2750,23 @@ static ssize_t netlink_neigh_update_msg_encode( } if (nfy) { - if (!nl_attr_put(&req->n, datalen, NDA_NOTIFY, - &nfy_flags, sizeof(nfy_flags))) + struct rtattr *nest; + + nest = nl_attr_nest(&req->n, datalen, + NDA_FDB_EXT_ATTRS | NLA_F_NESTED); + if (!nest) + return 0; + + if (!nl_attr_put(&req->n, datalen, NFEA_ACTIVITY_NOTIFY, + &nfy_flags, sizeof(nfy_flags))) + return 0; + if (!nl_attr_put(&req->n, datalen, NFEA_DONT_REFRESH, NULL, 0)) return 0; + + nl_attr_nest_end(&req->n, nest); } + if (ext) { if (!nl_attr_put(&req->n, datalen, NDA_EXT_FLAGS, &ext_flags, sizeof(ext_flags))) @@ -2851,7 +2851,8 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id) * validation of the fields. */ memset(tb, 0, sizeof tb); - netlink_parse_rtattr(tb, NDA_MAX, NDA_RTA(ndm), len); + netlink_parse_rtattr_flags(tb, NDA_MAX, NDA_RTA(ndm), len, + NLA_F_NESTED); if (!tb[NDA_LLADDR]) { if (IS_ZEBRA_DEBUG_KERNEL) @@ -2893,14 +2894,21 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id) if (ndm->ndm_state & NUD_STALE) local_inactive = true; - if (tb[NDA_NOTIFY]) { - uint8_t nfy_flags; + if (tb[NDA_FDB_EXT_ATTRS]) { + struct rtattr *attr = tb[NDA_FDB_EXT_ATTRS]; + struct rtattr *nfea_tb[NFEA_MAX + 1] = {0}; - dp_static = true; - nfy_flags = *(uint8_t *)RTA_DATA(tb[NDA_NOTIFY]); - /* local activity has not been detected on the entry */ - if (nfy_flags & (1 << BR_FDB_NFY_INACTIVE)) - local_inactive = true; + netlink_parse_rtattr_nested(nfea_tb, NFEA_MAX, attr); + if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) { + uint8_t nfy_flags; + + nfy_flags = *(uint8_t *)RTA_DATA( + nfea_tb[NFEA_ACTIVITY_NOTIFY]); + if (nfy_flags & FDB_NOTIFY_BIT) + dp_static = true; + if (nfy_flags & FDB_NOTIFY_INACTIVE_BIT) + local_inactive = true; + } } if (IS_ZEBRA_DEBUG_KERNEL) @@ -3202,12 +3210,12 @@ ssize_t netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx, void *data, } else { /* local mac */ if (update_flags & DPLANE_MAC_SET_STATIC) { - nfy_flags |= (1 << BR_FDB_NFY_STATIC); + nfy_flags |= FDB_NOTIFY_BIT; state |= NUD_NOARP; } if (update_flags & DPLANE_MAC_SET_INACTIVE) - nfy_flags |= (1 << BR_FDB_NFY_INACTIVE); + nfy_flags |= FDB_NOTIFY_INACTIVE_BIT; nfy = true; } @@ -3300,6 +3308,8 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) bool is_ext; bool is_router; bool local_inactive; + uint32_t ext_flags = 0; + bool dp_static = false; ndm = NLMSG_DATA(h); @@ -3333,7 +3343,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) netlink_handle_5549(ndm, zif, ifp, &ip, false); if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "\tNeighbor Entry Received is a 5549 entry, finished"); + " Neighbor Entry Received is a 5549 entry, finished"); return 0; } @@ -3362,7 +3372,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) else { if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "\tNeighbor Entry received is not on a VLAN or a BRIDGE, ignoring"); + " Neighbor Entry received is not on a VLAN or a BRIDGE, ignoring"); return 0; } @@ -3391,9 +3401,15 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) is_ext = !!(ndm->ndm_flags & NTF_EXT_LEARNED); is_router = !!(ndm->ndm_flags & NTF_ROUTER); + if (tb[NDA_EXT_FLAGS]) { + ext_flags = *(uint32_t 
*)RTA_DATA(tb[NDA_EXT_FLAGS]); + if (ext_flags & NTF_E_MH_PEER_SYNC) + dp_static = true; + } + if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( - "Rx %s family %s IF %s(%u) vrf %s(%u) IP %s MAC %s state 0x%x flags 0x%x", + "Rx %s family %s IF %s(%u) vrf %s(%u) IP %s MAC %s state 0x%x flags 0x%x ext_flags 0x%x", nl_msg_type_to_str(h->nlmsg_type), nl_family_to_str(ndm->ndm_family), ifp->name, ndm->ndm_ifindex, VRF_LOGNAME(vrf), ifp->vrf_id, @@ -3401,7 +3417,7 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) mac_present ? prefix_mac2str(&mac, buf, sizeof(buf)) : "", - ndm->ndm_state, ndm->ndm_flags); + ndm->ndm_state, ndm->ndm_flags, ext_flags); /* If the neighbor state is valid for use, process as an add or * update @@ -3410,15 +3426,19 @@ static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id) * in re-adding the neighbor if it is a valid "remote" neighbor. */ if (ndm->ndm_state & NUD_VALID) { - local_inactive = !(ndm->ndm_state & NUD_LOCAL_ACTIVE); + if (zebra_evpn_mh_do_adv_reachable_neigh_only()) + local_inactive = + !(ndm->ndm_state & NUD_LOCAL_ACTIVE); + else + /* If EVPN-MH is not enabled we treat STALE + * neighbors as locally-active and advertise + * them + */ + local_inactive = false; - /* XXX - populate dp-static based on the sync flags - * in the kernel - */ return zebra_vxlan_handle_kernel_neigh_update( - ifp, link_if, &ip, &mac, ndm->ndm_state, - is_ext, is_router, local_inactive, - false /* dp_static */); + ifp, link_if, &ip, &mac, ndm->ndm_state, is_ext, + is_router, local_inactive, dp_static); } return zebra_vxlan_handle_kernel_neigh_del(ifp, link_if, &ip); @@ -3689,12 +3709,12 @@ static ssize_t netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx, char buf2[ETHER_ADDR_STRLEN]; zlog_debug( - "Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x", + "Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x %sext_flags 0x%x", nl_msg_type_to_str(cmd), nl_family_to_str(family), dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx), ipaddr2str(ip, buf, sizeof(buf)), mac ? prefix_mac2str(mac, buf2, sizeof(buf2)) : "null", - flags, state); + flags, state, ext ? "ext " : "", ext_flags); } return netlink_neigh_update_msg_encode( diff --git a/zebra/rule_netlink.c b/zebra/rule_netlink.c index a63504992e..08a675ef3a 100644 --- a/zebra/rule_netlink.c +++ b/zebra/rule_netlink.c @@ -79,7 +79,15 @@ netlink_rule_msg_encode(int cmd, const struct zebra_dplane_ctx *ctx, if (buflen < sizeof(*req)) return 0; memset(req, 0, sizeof(*req)); - family = PREFIX_FAMILY(src_ip); + + /* Assume ipv4 if no src/dst set, we only support ipv4/ipv6 */ + if (PREFIX_FAMILY(src_ip)) + family = PREFIX_FAMILY(src_ip); + else if (PREFIX_FAMILY(dst_ip)) + family = PREFIX_FAMILY(dst_ip); + else + family = AF_INET; + bytelen = (family == AF_INET ? 4 : 16); req->n.nlmsg_type = cmd; diff --git a/zebra/sample_plugin.c b/zebra/sample_plugin.c index c96a86cc73..464205f2f3 100644 --- a/zebra/sample_plugin.c +++ b/zebra/sample_plugin.c @@ -92,7 +92,6 @@ static int sample_process(struct zebra_dplane_provider *prov) static int init_sample_plugin(struct thread_master *tm) { int ret; - struct zebra_dplane_provider *prov = NULL; /* Note that we don't use or store the thread_master 'tm'. 
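The rule_netlink.c hunk above derives the address family for a rule from whichever prefix is populated, defaulting to IPv4 when neither source nor destination is set. The same preference order as a standalone helper (illustrative only, not zebra's function):

    #include <sys/socket.h>

    static int rule_family(int src_family, int dst_family)
    {
        if (src_family != 0)
            return src_family;   /* prefer the source prefix family */
        if (dst_family != 0)
            return dst_family;   /* fall back to the destination prefix */
        return AF_INET;          /* neither set: assume IPv4 */
    }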
We * don't use the zebra main pthread: our plugin code will run in diff --git a/zebra/subdir.am b/zebra/subdir.am index cdacabc102..f842a8c0f3 100644 --- a/zebra/subdir.am +++ b/zebra/subdir.am @@ -40,6 +40,11 @@ if LINUX module_LTLIBRARIES += zebra/zebra_cumulus_mlag.la endif +# Dataplane sample plugin +if DEV_BUILD +module_LTLIBRARIES += zebra/dplane_sample_plugin.la +endif + man8 += $(MANBUILD)/frr-zebra.8 ## endif ZEBRA endif @@ -206,6 +211,12 @@ zebra/zebra_fpm_dt.lo: fpm/fpm.pb-c.h qpb/qpb.pb-c.h endif endif +# Sample dataplane plugin +if DEV_BUILD +zebra_dplane_sample_plugin_la_SOURCES = zebra/sample_plugin.c +zebra_dplane_sample_plugin_la_LDFLAGS = -module -shared -avoid-version -export-dynamic +endif + nodist_zebra_zebra_SOURCES = \ yang/frr-zebra.yang.c \ # end diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index f2ff8d53f2..46171df848 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -62,6 +62,8 @@ #include "zebra/zebra_opaque.h" #include "zebra/zebra_srte.h" +static int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg); + /* Encoding helpers -------------------------------------------------------- */ static void zserv_encode_interface(struct stream *s, struct interface *ifp) @@ -102,6 +104,7 @@ static void zserv_encode_vrf(struct stream *s, struct zebra_vrf *zvrf) struct vrf_data data; const char *netns_name = zvrf_ns_name(zvrf); + memset(&data, 0, sizeof(data)); data.l.table_id = zvrf->table_id; if (netns_name) @@ -707,13 +710,13 @@ static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client, return zserv_send_message(client, s); } -static int nhg_notify(uint16_t type, uint16_t instance, uint32_t id, - enum zapi_nhg_notify_owner note) +int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id, + uint32_t id, enum zapi_nhg_notify_owner note) { struct zserv *client; struct stream *s; - client = zserv_find_client(type, instance); + client = zserv_find_client_session(type, instance, session_id); if (!client) { if (IS_ZEBRA_DEBUG_PACKET) { zlog_debug("Not Notifying Owner: %u(%u) about %u(%d)", @@ -722,6 +725,10 @@ static int nhg_notify(uint16_t type, uint16_t instance, uint32_t id, return 0; } + if (IS_ZEBRA_DEBUG_SEND) + zlog_debug("%s: type %d, id %d, note %d", + __func__, type, id, note); + s = stream_new(ZEBRA_MAX_PACKET_SIZ); stream_reset(s); @@ -1139,7 +1146,7 @@ static void zread_rnh_register(ZAPI_HANDLER_ARGS) } else { flog_err( EC_ZEBRA_UNKNOWN_FAMILY, - "rnh_register: Received unknown family type %d\n", + "rnh_register: Received unknown family type %d", p.family); return; } @@ -1230,7 +1237,7 @@ static void zread_rnh_unregister(ZAPI_HANDLER_ARGS) } else { flog_err( EC_ZEBRA_UNKNOWN_FAMILY, - "rnh_register: Received unknown family type %d\n", + "rnh_register: Received unknown family type %d", p.family); return; } @@ -1280,7 +1287,7 @@ static void zread_fec_register(ZAPI_HANDLER_ARGS) if (p.family != AF_INET && p.family != AF_INET6) { flog_err( EC_ZEBRA_UNKNOWN_FAMILY, - "fec_register: Received unknown family type %d\n", + "fec_register: Received unknown family type %d", p.family); return; } @@ -1346,7 +1353,7 @@ static void zread_fec_unregister(ZAPI_HANDLER_ARGS) if (p.family != AF_INET && p.family != AF_INET6) { flog_err( EC_ZEBRA_UNKNOWN_FAMILY, - "fec_unregister: Received unknown family type %d\n", + "fec_unregister: Received unknown family type %d", p.family); return; } @@ -1742,7 +1749,7 @@ static bool zapi_read_nexthops(struct zserv *client, struct prefix *p, return true; } -int zapi_nhg_decode(struct stream *s, int cmd, 
struct zapi_nhg *api_nhg) +static int zapi_nhg_decode(struct stream *s, int cmd, struct zapi_nhg *api_nhg) { uint16_t i; struct zapi_nexthop *znh; @@ -1820,16 +1827,17 @@ static void zread_nhg_del(ZAPI_HANDLER_ARGS) /* * Delete the received nhg id */ - nhe = zebra_nhg_proto_del(api_nhg.id, api_nhg.proto); if (nhe) { zebra_nhg_decrement_ref(nhe); - nhg_notify(api_nhg.proto, client->instance, api_nhg.id, - ZAPI_NHG_REMOVED); + zsend_nhg_notify(api_nhg.proto, client->instance, + client->session_id, api_nhg.id, + ZAPI_NHG_REMOVED); } else - nhg_notify(api_nhg.proto, client->instance, api_nhg.id, - ZAPI_NHG_REMOVE_FAIL); + zsend_nhg_notify(api_nhg.proto, client->instance, + client->session_id, api_nhg.id, + ZAPI_NHG_REMOVE_FAIL); } static void zread_nhg_add(ZAPI_HANDLER_ARGS) @@ -1863,7 +1871,8 @@ static void zread_nhg_add(ZAPI_HANDLER_ARGS) /* * Create the nhg */ - nhe = zebra_nhg_proto_add(api_nhg.id, api_nhg.proto, nhg, 0); + nhe = zebra_nhg_proto_add(api_nhg.id, api_nhg.proto, client->instance, + client->session_id, nhg, 0); nexthop_group_delete(&nhg); zebra_nhg_backup_free(&bnhg); @@ -1874,12 +1883,12 @@ static void zread_nhg_add(ZAPI_HANDLER_ARGS) * * Resolution is going to need some more work. */ - if (nhe) - nhg_notify(api_nhg.proto, client->instance, api_nhg.id, - ZAPI_NHG_INSTALLED); - else - nhg_notify(api_nhg.proto, client->instance, api_nhg.id, - ZAPI_NHG_FAIL_INSTALL); + + /* If there's a failure, notify sender immediately */ + if (nhe == NULL) + zsend_nhg_notify(api_nhg.proto, client->instance, + client->session_id, api_nhg.id, + ZAPI_NHG_FAIL_INSTALL); } static void zread_route_add(ZAPI_HANDLER_ARGS) @@ -2080,7 +2089,7 @@ static void zread_route_del(ZAPI_HANDLER_ARGS) rib_delete(afi, api.safi, zvrf_id(zvrf), api.type, api.instance, api.flags, &api.prefix, src_p, NULL, 0, table_id, api.metric, - api.distance, false, false); + api.distance, false); /* Stats */ switch (api.prefix.family) { diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h index 9822d72022..c03278669a 100644 --- a/zebra/zapi_msg.h +++ b/zebra/zapi_msg.h @@ -108,6 +108,9 @@ extern int zsend_sr_policy_notify_status(uint32_t color, extern int zsend_client_close_notify(struct zserv *client, struct zserv *closed_client); +int zsend_nhg_notify(uint16_t type, uint16_t instance, uint32_t session_id, + uint32_t id, enum zapi_nhg_notify_owner note); + #ifdef __cplusplus } #endif diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index fad3c16244..db2b9e002e 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -2014,16 +2014,6 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op, for (ALL_NEXTHOPS(ctx->u.rinfo.zd_ng, nexthop)) { UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB); - /* Check for available encapsulations. */ - if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE)) - continue; - - zl3vni = zl3vni_from_vrf(nexthop->vrf_id); - if (zl3vni && is_l3vni_oper_up(zl3vni)) { - nexthop->nh_encap_type = NET_VXLAN; - nexthop->nh_encap.vni = zl3vni->vni; - } - /* Optionally capture extra interface info while we're in the * main zebra pthread - a plugin has to ask for this info. */ @@ -2044,6 +2034,16 @@ int dplane_ctx_route_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op, if_extra, link); } } + + /* Check for available evpn encapsulations. 
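
With the session id carried through, the NHG notifications above are routed via zserv_find_client_session(type, instance, session_id) instead of matching on (type, instance) alone. A rough, self-contained sketch of that three-field owner lookup; the client records and names here are hypothetical, not the zserv implementation:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for a zapi client record. */
struct client {
    uint16_t proto;
    uint16_t instance;
    uint32_t session_id;
    const char *name;
};

/* Find the client that owns an NHG: proto + instance + session must match. */
static struct client *find_client_session(struct client *tbl, size_t n,
                                          uint16_t proto, uint16_t instance,
                                          uint32_t session_id)
{
    for (size_t i = 0; i < n; i++) {
        if (tbl[i].proto == proto && tbl[i].instance == instance
            && tbl[i].session_id == session_id)
            return &tbl[i];
    }
    return NULL;    /* owner is gone; the notification is simply dropped */
}

int main(void)
{
    struct client clients[] = {
        { 10, 0, 1, "daemon-session-1" },
        { 10, 0, 2, "daemon-session-2" },
    };

    struct client *c = find_client_session(clients, 2, 10, 0, 2);
    printf("%s\n", c ? c->name : "not found");
    return 0;
}
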
*/ + if (!CHECK_FLAG(re->flags, ZEBRA_FLAG_EVPN_ROUTE)) + continue; + + zl3vni = zl3vni_from_vrf(nexthop->vrf_id); + if (zl3vni && is_l3vni_oper_up(zl3vni)) { + nexthop->nh_encap_type = NET_VXLAN; + nexthop->nh_encap.vni = zl3vni->vni; + } } /* Don't need some info when capturing a system notification */ @@ -3684,7 +3684,7 @@ int dplane_show_helper(struct vty *vty, bool detailed) int dplane_show_provs_helper(struct vty *vty, bool detailed) { struct zebra_dplane_provider *prov; - uint64_t in, in_max, out, out_max; + uint64_t in, in_q, in_max, out, out_q, out_max; vty_out(vty, "Zebra dataplane providers:\n"); @@ -3697,17 +3697,20 @@ int dplane_show_provs_helper(struct vty *vty, bool detailed) in = atomic_load_explicit(&prov->dp_in_counter, memory_order_relaxed); + in_q = atomic_load_explicit(&prov->dp_in_queued, + memory_order_relaxed); in_max = atomic_load_explicit(&prov->dp_in_max, memory_order_relaxed); out = atomic_load_explicit(&prov->dp_out_counter, memory_order_relaxed); + out_q = atomic_load_explicit(&prov->dp_out_queued, + memory_order_relaxed); out_max = atomic_load_explicit(&prov->dp_out_max, memory_order_relaxed); - vty_out(vty, - "%s (%u): in: %" PRIu64 ", q_max: %" PRIu64 - ", out: %" PRIu64 ", q_max: %" PRIu64 "\n", - prov->dp_name, prov->dp_id, in, in_max, out, out_max); + vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n", + prov->dp_name, prov->dp_id, in, in_q, in_max, + out, out_q, out_max); DPLANE_LOCK(); prov = TAILQ_NEXT(prov, dp_prov_link); @@ -3912,11 +3915,24 @@ uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov) void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov, struct zebra_dplane_ctx *ctx) { + uint64_t curr, high; + dplane_provider_lock(prov); TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx, zd_q_entries); + /* Maintain out-queue counters */ + atomic_fetch_add_explicit(&(prov->dp_out_queued), 1, + memory_order_relaxed); + curr = atomic_load_explicit(&prov->dp_out_queued, + memory_order_relaxed); + high = atomic_load_explicit(&prov->dp_out_max, + memory_order_relaxed); + if (curr > high) + atomic_store_explicit(&prov->dp_out_max, curr, + memory_order_relaxed); + dplane_provider_unlock(prov); atomic_fetch_add_explicit(&(prov->dp_out_counter), 1, @@ -4537,6 +4553,7 @@ static int dplane_thread_loop(struct thread *event) struct zebra_dplane_ctx *ctx, *tctx; int limit, counter, error_counter; uint64_t curr, high; + bool reschedule = false; /* Capture work limit per cycle */ limit = zdplane_info.dg_updates_per_cycle; @@ -4673,6 +4690,9 @@ static int dplane_thread_loop(struct thread *event) dplane_provider_unlock(prov); + if (counter >= limit) + reschedule = true; + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) zlog_debug("dplane dequeues %d completed work from provider %s", counter, dplane_provider_get_name(prov)); @@ -4683,6 +4703,13 @@ static int dplane_thread_loop(struct thread *event) DPLANE_UNLOCK(); } + /* + * We hit the work limit while processing at least one provider's + * output queue - ensure we come back and finish it. + */ + if (reschedule) + dplane_provider_work_ready(); + /* After all providers have been serviced, enqueue any completed * work and any errors back to zebra so it can process the results. 
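
The provider stats above add a current-depth counter (dp_out_queued) next to the existing high-watermark, both maintained with relaxed atomics. A self-contained sketch of the same enqueue-side pattern, assuming generic q_len/q_max counters:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Per-queue counters kept with relaxed atomics, as in the provider stats. */
static _Atomic uint64_t q_len;    /* current queue depth */
static _Atomic uint64_t q_max;    /* high-watermark */

static void on_enqueue(void)
{
    uint64_t curr, high;

    atomic_fetch_add_explicit(&q_len, 1, memory_order_relaxed);

    curr = atomic_load_explicit(&q_len, memory_order_relaxed);
    high = atomic_load_explicit(&q_max, memory_order_relaxed);
    if (curr > high)
        atomic_store_explicit(&q_max, curr, memory_order_relaxed);
}

static void on_dequeue(void)
{
    atomic_fetch_sub_explicit(&q_len, 1, memory_order_relaxed);
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        on_enqueue();
    on_dequeue();

    printf("q: %llu, q_max: %llu\n",
           (unsigned long long)atomic_load(&q_len),
           (unsigned long long)atomic_load(&q_max));
    return 0;
}

In the hunk above the watermark update happens while the provider lock is held, so the separate load/compare/store is not racy; a fully lock-free variant would need a compare-exchange loop instead.
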
*/ diff --git a/zebra/zebra_evpn.c b/zebra/zebra_evpn.c index 67df841b21..b232c664bc 100644 --- a/zebra/zebra_evpn.c +++ b/zebra/zebra_evpn.c @@ -1046,6 +1046,9 @@ int zebra_evpn_del(zebra_evpn_t *zevpn) hash_free(zevpn->mac_table); zevpn->mac_table = NULL; + /* Remove references to the zevpn in the MH databases */ + if (zevpn->vxlan_if) + zebra_evpn_vxl_evpn_set(zevpn->vxlan_if->info, zevpn, false); zebra_evpn_es_evi_cleanup(zevpn); /* Free the EVPN hash entry and allocated memory. */ @@ -1333,7 +1336,8 @@ zebra_evpn_process_sync_macip_add(zebra_evpn_t *zevpn, struct ethaddr *macaddr, if (ipa_len) { n = zebra_evpn_neigh_lookup(zevpn, ipaddr); if (n - && !zebra_evpn_neigh_is_bgp_seq_ok(zevpn, n, macaddr, seq)) + && !zebra_evpn_neigh_is_bgp_seq_ok(zevpn, n, macaddr, seq, + true)) return; } diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c index 44394b95aa..5227a480fc 100644 --- a/zebra/zebra_evpn_mac.c +++ b/zebra/zebra_evpn_mac.c @@ -29,6 +29,7 @@ #include "prefix.h" #include "vlan.h" #include "json.h" +#include "printfrr.h" #include "zebra/zserv.h" #include "zebra/debug.h" @@ -254,6 +255,37 @@ static void zebra_evpn_mac_get_access_info(zebra_mac_t *mac, } } +#define MAC_BUF_SIZE 256 +static char *zebra_evpn_zebra_mac_flag_dump(struct zebra_mac_t_ *mac, char *buf, + size_t len) +{ + if (mac->flags == 0) { + snprintfrr(buf, len, "None "); + return buf; + } + + snprintfrr( + buf, len, "%s%s%s%s%s%s%s%s%s%s%s%s", + CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL) ? "LOC " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) ? "REM " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO) ? "AUTO " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY) ? "STICKY " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_RMAC) ? "REM Router " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_DEF_GW) ? "Default GW " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW) ? "REM DEF GW " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE) ? "DUP " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_FPM_SENT) ? "FPM " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE) ? "LOC Active " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_PROXY) ? "PROXY " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE) + ? "LOC Inactive " + : ""); + return buf; +} + static int zebra_evpn_dad_mac_auto_recovery_exp(struct thread *t) { struct zebra_vrf *zvrf = NULL; @@ -278,12 +310,17 @@ static int zebra_evpn_dad_mac_auto_recovery_exp(struct thread *t) if (!mac) return 0; - if (IS_ZEBRA_DEBUG_VXLAN) + if (IS_ZEBRA_DEBUG_VXLAN) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "%s: duplicate addr mac %s flags 0x%x learn count %u host count %u auto recovery expired", + "%s: duplicate addr mac %s flags %slearn count %u host count %u auto recovery expired", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags, mac->dad_count, listcount(mac->neigh_list)); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + mac->dad_count, listcount(mac->neigh_list)); + } /* Remove all IPs as duplicate associcated with this MAC */ for (ALL_LIST_ELEMENTS_RO(mac->neigh_list, node, nbr)) { @@ -350,14 +387,17 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, * Remote MAC event -> hold on installing it. 
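
Several debug paths below switch from printing raw 0x%x flag words to a rendered string via zebra_evpn_zebra_mac_flag_dump(). The general shape of such a dumper, as a standalone sketch with made-up flag bits and plain snprintf() instead of snprintfrr():

#include <stdio.h>
#include <stdint.h>

#define F_LOCAL   0x01
#define F_REMOTE  0x02
#define F_STICKY  0x08

/* Render a flag word as human-readable tokens, like the MAC flag dumper. */
static char *flags_dump(uint32_t flags, char *buf, size_t len)
{
    if (flags == 0) {
        snprintf(buf, len, "None ");
        return buf;
    }

    snprintf(buf, len, "%s%s%s",
             (flags & F_LOCAL) ? "LOC " : "",
             (flags & F_REMOTE) ? "REM " : "",
             (flags & F_STICKY) ? "STICKY " : "");
    return buf;
}

int main(void)
{
    char buf[64];

    printf("flags %s\n", flags_dump(F_LOCAL | F_STICKY, buf, sizeof(buf)));
    return 0;
}
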
*/ if (CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) { - if (IS_ZEBRA_DEBUG_VXLAN) + if (IS_ZEBRA_DEBUG_VXLAN) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "%s: duplicate addr MAC %s flags 0x%x skip update to client, learn count %u recover time %u", + "%s: duplicate addr MAC %s flags %sskip update to client, learn count %u recover time %u", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags, mac->dad_count, - zvrf->dad_freeze_time); - + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + mac->dad_count, zvrf->dad_freeze_time); + } /* For duplicate MAC do not update * client but update neigh due to * this MAC update. @@ -385,12 +425,17 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, } if (reset_params) { - if (IS_ZEBRA_DEBUG_VXLAN) + if (IS_ZEBRA_DEBUG_VXLAN) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "%s: duplicate addr MAC %s flags 0x%x detection time passed, reset learn count %u", + "%s: duplicate addr MAC %s flags %sdetection time passed, reset learn count %u", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags, mac->dad_count); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + mac->dad_count); + } mac->dad_count = 0; /* Start dup. addr detection (DAD) start time, @@ -452,13 +497,18 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, /* Start auto recovery timer for this MAC */ THREAD_OFF(mac->dad_mac_auto_recovery_timer); if (zvrf->dad_freeze && zvrf->dad_freeze_time) { - if (IS_ZEBRA_DEBUG_VXLAN) + if (IS_ZEBRA_DEBUG_VXLAN) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "%s: duplicate addr MAC %s flags 0x%x auto recovery time %u start", + "%s: duplicate addr MAC %s flags %sauto recovery time %u start", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags, zvrf->dad_freeze_time); + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf)), + zvrf->dad_freeze_time); + } thread_add_timer(zrouter.master, zebra_evpn_dad_mac_auto_recovery_exp, @@ -694,7 +744,7 @@ void zebra_evpn_print_mac(zebra_mac_t *mac, void *ctxt, json_object *json) } static char *zebra_evpn_print_mac_flags(zebra_mac_t *mac, char *flags_buf, - uint32_t flags_buf_sz) + size_t flags_buf_sz) { snprintf(flags_buf, flags_buf_sz, "%s%s%s%s", mac->sync_neigh_cnt ? @@ -903,14 +953,19 @@ int zebra_evpn_macip_send_msg_to_client(vni_t vni, struct ethaddr *macaddr, /* Write packet size. */ stream_putw_at(s, 0, stream_get_endp(s)); - if (IS_ZEBRA_DEBUG_VXLAN) + if (IS_ZEBRA_DEBUG_VXLAN) { + char flag_buf[MACIP_BUF_SIZE]; + zlog_debug( - "Send MACIP %s f 0x%x MAC %s IP %s seq %u L2-VNI %u ESI %s to %s", - (cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del", flags, + "Send MACIP %s f %s MAC %s IP %s seq %u L2-VNI %u ESI %s to %s", + (cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del", + zclient_evpn_dump_macip_flags(flags, flag_buf, + sizeof(flag_buf)), prefix_mac2str(macaddr, buf, sizeof(buf)), ipaddr2str(ip, buf2, sizeof(buf2)), seq, vni, es ? 
es->esi_str : "-", zebra_route_string(client->proto)); + } if (cmd == ZEBRA_MACIP_ADD) client->macipadd_cnt++; @@ -982,10 +1037,12 @@ zebra_mac_t *zebra_evpn_mac_add(zebra_evpn_t *zevpn, struct ethaddr *macaddr) mac->uptime = monotime(NULL); if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char buf[ETHER_ADDR_STRLEN]; + char mac_buf[MAC_BUF_SIZE]; - zlog_debug("%s: MAC %s flags 0x%x", __func__, + zlog_debug("%s: MAC %s flags %s", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } return mac; } @@ -999,10 +1056,12 @@ int zebra_evpn_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac) if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char buf[ETHER_ADDR_STRLEN]; + char mac_buf[MAC_BUF_SIZE]; - zlog_debug("%s: MAC %s flags 0x%x", __func__, + zlog_debug("%s: MAC %s flags %s", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } /* If the MAC is freed before the neigh we will end up @@ -1047,11 +1106,13 @@ static bool zebra_evpn_check_mac_del_from_db(struct mac_walk_ctx *wctx, && !listcount(mac->neigh_list)) { if (IS_ZEBRA_DEBUG_VXLAN) { char buf[ETHER_ADDR_STRLEN]; + char mac_buf[MAC_BUF_SIZE]; zlog_debug( - "%s: Del MAC %s flags 0x%x", __func__, + "%s: Del MAC %s flags %s", __func__, prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), - mac->flags); + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); } wctx->uninstall = 0; @@ -1204,24 +1265,34 @@ int zebra_evpn_sync_mac_dp_install(zebra_mac_t *mac, bool set_inactive, zebra_evpn_mac_get_access_info(mac, &ifp, &vid); if (!ifp) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "%s: dp-install sync-mac vni %u mac %pEA es %s 0x%x %sskipped, no access-port", + "%s: dp-install sync-mac vni %u mac %pEA es %s %s%sskipped, no access-port", caller, zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", mac->flags, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), set_inactive ? "inactive " : ""); + } return -1; } zif = ifp->info; br_ifp = zif->brslave_info.br_if; if (!br_ifp) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "%s: dp-install sync-mac vni %u mac %pEA es %s 0x%x %sskipped, no br", + "%s: dp-install sync-mac vni %u mac %pEA es %s %s%sskipped, no br", caller, zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", mac->flags, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), set_inactive ? "inactive " : ""); + } return -1; } @@ -1236,13 +1307,18 @@ int zebra_evpn_sync_mac_dp_install(zebra_mac_t *mac, bool set_inactive, * supported and if the local ES is oper-down. */ if (mac->es && zebra_evpn_es_local_mac_via_network_port(mac->es)) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "dp-%s sync-nw-mac vni %u mac %pEA es %s 0x%x %s", + "dp-%s sync-nw-mac vni %u mac %pEA es %s %s%s", set_static ? "install" : "uninstall", zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", mac->flags, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), set_inactive ? 
"inactive " : ""); + } if (set_static) /* XXX - old_static needs to be computed more * accurately @@ -1256,13 +1332,17 @@ int zebra_evpn_sync_mac_dp_install(zebra_mac_t *mac, bool set_inactive, return 0; } - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) - zlog_debug( - "dp-install sync-mac vni %u mac %pEA es %s 0x%x %s%s", - zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", mac->flags, - set_static ? "static " : "", - set_inactive ? "inactive " : ""); + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + + zlog_debug("dp-install sync-mac vni %u mac %pEA es %s %s%s%s", + zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + set_static ? "static " : "", + set_inactive ? "inactive " : ""); + } dplane_local_mac_add(ifp, br_ifp, vid, &mac->macaddr, sticky, set_static, set_inactive); @@ -1310,12 +1390,17 @@ static int zebra_evpn_mac_hold_exp_cb(struct thread *t) new_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); new_static = zebra_evpn_mac_is_static(mac); - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-mac vni %u mac %s es %s 0x%x hold expired", + "sync-mac vni %u mac %s es %s %shold expired", mac->zevpn->vni, prefix_mac2str(&mac->macaddr, macbuf, sizeof(macbuf)), - mac->es ? mac->es->esi_str : "-", mac->flags); + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); + } /* re-program the local mac in the dataplane if the mac is no * longer static @@ -1340,12 +1425,17 @@ static inline void zebra_evpn_mac_start_hold_timer(zebra_mac_t *mac) if (mac->hold_timer) return; - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-mac vni %u mac %s es %s 0x%x hold started", + "sync-mac vni %u mac %s es %s %shold started", mac->zevpn->vni, prefix_mac2str(&mac->macaddr, macbuf, sizeof(macbuf)), - mac->es ? mac->es->esi_str : "-", mac->flags); + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); + } thread_add_timer(zrouter.master, zebra_evpn_mac_hold_exp_cb, mac, zmh_info->mac_hold_time, &mac->hold_timer); } @@ -1357,12 +1447,18 @@ void zebra_evpn_mac_stop_hold_timer(zebra_mac_t *mac) if (!mac->hold_timer) return; - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-mac vni %u mac %s es %s 0x%x hold stopped", + "sync-mac vni %u mac %s es %s %shold stopped", mac->zevpn->vni, prefix_mac2str(&mac->macaddr, macbuf, sizeof(macbuf)), - mac->es ? mac->es->esi_str : "-", mac->flags); + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); + } + THREAD_OFF(mac->hold_timer); } @@ -1372,13 +1468,18 @@ void zebra_evpn_sync_mac_del(zebra_mac_t *mac) bool old_static; bool new_static; - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-mac del vni %u mac %s es %s seq %d f 0x%x", + "sync-mac del vni %u mac %s es %s seq %d f %s", mac->zevpn->vni, prefix_mac2str(&mac->macaddr, macbuf, sizeof(macbuf)), mac->es ? 
mac->es->esi_str : "-", mac->loc_seq, - mac->flags); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); + } + old_static = zebra_evpn_mac_is_static(mac); UNSET_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_PROXY); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE)) @@ -1395,16 +1496,21 @@ void zebra_evpn_sync_mac_del(zebra_mac_t *mac) static inline bool zebra_evpn_mac_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_mac_t *mac, uint32_t seq, uint16_t ipa_len, - struct ipaddr *ipaddr) + struct ipaddr *ipaddr, + bool sync) { char macbuf[ETHER_ADDR_STRLEN]; char ipbuf[INET6_ADDRSTRLEN]; uint32_t tmp_seq; + const char *n_type; - if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) + if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { tmp_seq = mac->loc_seq; - else + n_type = "local"; + } else { tmp_seq = mac->rem_seq; + n_type = "remote"; + } if (seq < tmp_seq) { /* if the mac was never advertised to bgp we must accept @@ -1413,31 +1519,44 @@ static inline bool zebra_evpn_mac_is_bgp_seq_ok(zebra_evpn_t *zevpn, */ if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL) && !zebra_evpn_mac_is_ready_for_bgp(mac->flags)) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC + || IS_ZEBRA_DEBUG_VXLAN) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-macip accept vni %u mac %s%s%s lower seq %u f 0x%x", - zevpn->vni, + "%s-macip accept vni %u %s-mac %s%s%s lower seq %u f %s", + sync ? "sync" : "rem", zevpn->vni, + n_type, prefix_mac2str(&mac->macaddr, macbuf, sizeof(macbuf)), ipa_len ? " IP " : "", ipa_len ? ipaddr2str(ipaddr, ipbuf, sizeof(ipbuf)) : "", - tmp_seq, mac->flags); + tmp_seq, + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); + } + return true; } - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC || IS_ZEBRA_DEBUG_VXLAN) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-macip ignore vni %u mac %s%s%s as existing has higher seq %u f 0x%x", - zevpn->vni, + "%s-macip ignore vni %u %s-mac %s%s%s as existing has higher seq %u f %s", + sync ? "sync" : "rem", zevpn->vni, n_type, prefix_mac2str(&mac->macaddr, macbuf, sizeof(macbuf)), ipa_len ? " IP " : "", ipa_len ? 
ipaddr2str(ipaddr, ipbuf, sizeof(ipbuf)) : "", - tmp_seq, mac->flags); + tmp_seq, + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); + } return false; } @@ -1518,7 +1637,7 @@ zebra_evpn_proc_sync_mac_update(zebra_evpn_t *zevpn, struct ethaddr *macaddr, return NULL; } if (!zebra_evpn_mac_is_bgp_seq_ok(zevpn, mac, seq, ipa_len, - ipaddr)) { + ipaddr, true)) { ctx->ignore_macip = true; return NULL; } @@ -1568,12 +1687,20 @@ zebra_evpn_proc_sync_mac_update(zebra_evpn_t *zevpn, struct ethaddr *macaddr, memset(&mac->fwd_info, 0, sizeof(mac->fwd_info)); mac->flags = new_flags; - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC && (old_flags != new_flags)) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC && (old_flags != new_flags)) { + char mac_buf[MAC_BUF_SIZE], omac_buf[MAC_BUF_SIZE]; + struct zebra_mac_t_ omac; + + omac.flags = old_flags; zlog_debug( - "sync-mac vni %u mac %s old_f 0x%x new_f 0x%x", + "sync-mac vni %u mac %s old_f %snew_f %s", zevpn->vni, prefix_mac2str(macaddr, macbuf, sizeof(macbuf)), - old_flags, mac->flags); + zebra_evpn_zebra_mac_flag_dump( + &omac, omac_buf, sizeof(omac_buf)), + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); + } /* update es */ es_change = zebra_evpn_es_mac_ref(mac, esi); @@ -1607,13 +1734,18 @@ zebra_evpn_proc_sync_mac_update(zebra_evpn_t *zevpn, struct ethaddr *macaddr, inform_bgp = true; } - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) - zlog_debug("sync-mac %s vni %u mac %s es %s seq %d f 0x%x%s%s", + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + + zlog_debug("sync-mac %s vni %u mac %s es %s seq %d f %s%s%s", ctx->mac_created ? "created" : "updated", zevpn->vni, prefix_mac2str(macaddr, macbuf, sizeof(macbuf)), mac->es ? mac->es->esi_str : "-", mac->loc_seq, - mac->flags, inform_bgp ? " inform_bgp" : "", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + inform_bgp ? "inform_bgp" : "", inform_dataplane ? " inform_dp" : ""); + } if (inform_bgp) zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, @@ -1768,7 +1900,6 @@ int process_mac_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, { char buf[ETHER_ADDR_STRLEN]; char buf1[INET6_ADDRSTRLEN]; - uint32_t tmp_seq; bool sticky; bool remote_gw; int update_mac = 0; @@ -1828,8 +1959,6 @@ int process_mac_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, if (ipa_len) SET_FLAG(mac->flags, ZEBRA_MAC_AUTO); } else { - zebra_evpn_es_mac_ref(mac, esi); - /* When host moves but changes its (MAC,IP) * binding, BGP may install a MACIP entry that * corresponds to "older" location of the host @@ -1838,26 +1967,11 @@ int process_mac_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, * the sequence number and ignore this update * if appropriate. */ - if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) - tmp_seq = mac->loc_seq; - else - tmp_seq = mac->rem_seq; - - if (seq < tmp_seq) { - if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "Ignore remote MACIP ADD VNI %u MAC %s%s%s as existing MAC has higher seq %u flags 0x%x", - zevpn->vni, - prefix_mac2str(macaddr, buf, - sizeof(buf)), - ipa_len ? " IP " : "", - ipa_len ? 
ipaddr2str( - ipaddr, buf1, - sizeof(buf1)) - : "", - tmp_seq, mac->flags); + if (!zebra_evpn_mac_is_bgp_seq_ok( + zevpn, mac, seq, ipa_len, ipaddr, false)) return -1; - } + + zebra_evpn_es_mac_ref(mac, esi); } /* Check MAC's curent state is local (this is the case @@ -1881,14 +1995,20 @@ int process_mac_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { /* force drop the sync flags */ old_static = zebra_evpn_mac_is_static(mac); - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "sync-mac->remote vni %u mac %s es %s seq %d f 0x%x", + "sync-mac->remote vni %u mac %s es %s seq %d f %s", zevpn->vni, prefix_mac2str(macaddr, buf, sizeof(buf)), mac->es ? mac->es->esi_str : "-", - mac->loc_seq, mac->flags); + mac->loc_seq, + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); + } + zebra_evpn_mac_clear_sync_info(mac); zebra_evpn_mac_send_del_to_client(zevpn->vni, macaddr, mac->flags, @@ -1982,14 +2102,18 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn, SET_FLAG(mac->flags, ZEBRA_MAC_STICKY); inform_client = true; } else { - if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "UPD %sMAC %s intf %s(%u) VID %u -> VNI %u %scurFlags 0x%x", + "UPD %sMAC %s intf %s(%u) VID %u -> VNI %u %scurFlags %s", sticky ? "sticky " : "", prefix_mac2str(macaddr, buf, sizeof(buf)), ifp->name, ifp->ifindex, vid, zevpn->vni, local_inactive ? "local-inactive " : "", - mac->flags); + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); + } if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { struct interface *old_ifp; @@ -2137,14 +2261,19 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn, */ if ((old_local_inactive != local_inactive) || (new_bgp_ready != old_bgp_ready)) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "local mac vni %u mac %s es %s seq %d f 0x%x%s", + "local mac vni %u mac %s es %s seq %d f %s%s", zevpn->vni, prefix_mac2str(macaddr, buf, sizeof(buf)), mac->es ? mac->es->esi_str : "", mac->loc_seq, - mac->flags, - local_inactive ? " local-inactive" : ""); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + local_inactive ? "local-inactive" : ""); + } + if (!is_dup_detect) inform_client = true; } @@ -2173,28 +2302,17 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn, return 0; } -int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr, - struct interface *ifp) +int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, zebra_mac_t *mac) { - zebra_mac_t *mac; char buf[ETHER_ADDR_STRLEN]; bool old_bgp_ready; bool new_bgp_ready; - /* If entry doesn't exist, nothing to do. */ - mac = zebra_evpn_mac_lookup(zevpn, macaddr); - if (!mac) - return 0; - - /* Is it a local entry? 
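
The remote MACIP add path above now funnels through zebra_evpn_mac_is_bgp_seq_ok() instead of an open-coded comparison. Its rule, reduced to a self-contained sketch with illustrative field names: reject an update carrying a lower sequence number than the existing entry, except when a local entry was never advertised to BGP.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mac_entry {
    bool is_local;
    bool ready_for_bgp;    /* already advertised to BGP? */
    uint32_t loc_seq;
    uint32_t rem_seq;
};

/*
 * Accept an incoming MACIP update only if its sequence number is not
 * lower than what we already hold -- except that a local entry which
 * was never advertised to BGP must still accept the remote update.
 */
static bool bgp_seq_ok(const struct mac_entry *mac, uint32_t seq)
{
    uint32_t have = mac->is_local ? mac->loc_seq : mac->rem_seq;

    if (seq >= have)
        return true;

    if (mac->is_local && !mac->ready_for_bgp)
        return true;    /* never advertised; accept the lower seq */

    return false;       /* stale update, ignore */
}

int main(void)
{
    struct mac_entry m = { .is_local = true, .ready_for_bgp = true,
                           .loc_seq = 5, .rem_seq = 0 };

    printf("seq 4 ok? %d\n", bgp_seq_ok(&m, 4));    /* 0: ignored */
    printf("seq 6 ok? %d\n", bgp_seq_ok(&m, 6));    /* 1: accepted */
    return 0;
}
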
*/ - if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) - return 0; if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "DEL MAC %s intf %s(%u) VID %u -> VNI %u seq %u flags 0x%x nbr count %u", - prefix_mac2str(macaddr, buf, sizeof(buf)), ifp->name, - ifp->ifindex, mac->fwd_info.local.vid, zevpn->vni, - mac->loc_seq, mac->flags, listcount(mac->neigh_list)); + zlog_debug("DEL MAC %s VNI %u seq %u flags 0x%x nbr count %u", + prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), + zevpn->vni, mac->loc_seq, mac->flags, + listcount(mac->neigh_list)); old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); if (zebra_evpn_mac_is_static(mac)) { @@ -2203,13 +2321,17 @@ int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr, */ memset(&mac->fwd_info, 0, sizeof(mac->fwd_info)); - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { + char mac_buf[MAC_BUF_SIZE]; + zlog_debug( - "re-add sync-mac vni %u mac %s es %s seq %d f 0x%x", + "re-add sync-mac vni %u mac %s es %s seq %d f %s", zevpn->vni, - prefix_mac2str(macaddr, buf, sizeof(buf)), + prefix_mac2str(&mac->macaddr, buf, sizeof(buf)), mac->es ? mac->es->esi_str : "-", mac->loc_seq, - mac->flags); + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, sizeof(mac_buf))); + } /* inform-bgp about change in local-activity if any */ if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE)) { @@ -2232,7 +2354,7 @@ int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr, zebra_evpn_process_neigh_on_local_mac_del(zevpn, mac); /* Remove MAC from BGP. */ - zebra_evpn_mac_send_del_to_client(zevpn->vni, macaddr, mac->flags, + zebra_evpn_mac_send_del_to_client(zevpn->vni, &mac->macaddr, mac->flags, false /* force */); zebra_evpn_es_mac_deref_entry(mac); diff --git a/zebra/zebra_evpn_mac.h b/zebra/zebra_evpn_mac.h index 1730991f1e..242097907f 100644 --- a/zebra/zebra_evpn_mac.h +++ b/zebra/zebra_evpn_mac.h @@ -56,6 +56,7 @@ struct zebra_mac_t_ { /* MAC address. 
*/ struct ethaddr macaddr; + /* When modifying flags please fixup zebra_evpn_zebra_mac_flag_dump */ uint32_t flags; #define ZEBRA_MAC_LOCAL 0x01 #define ZEBRA_MAC_REMOTE 0x02 @@ -252,8 +253,7 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn, struct ethaddr *macaddr, vlanid_t vid, bool sticky, bool local_inactive, bool dp_static); -int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, struct ethaddr *macaddr, - struct interface *ifp); +int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, zebra_mac_t *mac); int zebra_evpn_mac_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn, struct ipaddr *ip, zebra_mac_t **macp, struct ethaddr *macaddr, vlanid_t vlan_id); diff --git a/zebra/zebra_evpn_mh.c b/zebra/zebra_evpn_mh.c index 3a0df1c015..7e712bf1ee 100644 --- a/zebra/zebra_evpn_mh.c +++ b/zebra/zebra_evpn_mh.c @@ -417,15 +417,12 @@ void zebra_evpn_es_evi_show_vni(struct vty *vty, bool uj, vni_t vni, int detail) vty_out(vty, "Type: L local, R remote\n"); vty_out(vty, "%-8s %-30s %-4s\n", "VNI", "ESI", "Type"); } + zebra_evpn_es_evi_show_one_evpn(zevpn, vty, json_array, detail); } else { if (!uj) vty_out(vty, "VNI %d doesn't exist\n", vni); - - return; } - zebra_evpn_es_evi_show_one_evpn(zevpn, vty, json_array, detail); - if (uj) { vty_out(vty, "%s\n", json_object_to_json_string_ext( @@ -1867,6 +1864,8 @@ static void zebra_evpn_es_setup_evis(struct zebra_evpn_es *es) uint16_t vid; struct zebra_evpn_access_bd *acc_bd; + if (!bf_is_inited(zif->vlan_bitmap)) + return; bf_for_each_set_bit(zif->vlan_bitmap, vid, IF_VLAN_BITMAP_MAX) { acc_bd = zebra_evpn_acc_vl_find(vid); @@ -1944,6 +1943,23 @@ static void zebra_evpn_mh_dup_addr_detect_off(void) } } +/* On config of first local-ES turn off advertisement of STALE/DELAY/PROBE + * neighbors + */ +static void zebra_evpn_mh_advertise_reach_neigh_only(void) +{ + if (zmh_info->flags & ZEBRA_EVPN_MH_ADV_REACHABLE_NEIGH_ONLY) + return; + + zmh_info->flags |= ZEBRA_EVPN_MH_ADV_REACHABLE_NEIGH_ONLY; + if (IS_ZEBRA_DEBUG_EVPN_MH_ES) + zlog_debug("evpn-mh: only REACHABLE neigh advertised"); + + /* XXX - if STALE/DELAY/PROBE neighs were previously advertised we + * need to withdraw them + */ +} + static int zebra_evpn_es_df_delay_exp_cb(struct thread *t) { struct zebra_evpn_es *es; @@ -1969,6 +1985,7 @@ static void zebra_evpn_es_local_info_set(struct zebra_evpn_es *es, es->nhg_id, zif->ifp->name); zebra_evpn_mh_dup_addr_detect_off(); + zebra_evpn_mh_advertise_reach_neigh_only(); es->flags |= ZEBRA_EVPNES_LOCAL; listnode_init(&es->local_es_listnode, es); diff --git a/zebra/zebra_evpn_mh.h b/zebra/zebra_evpn_mh.h index 14b2987574..81ae740d49 100644 --- a/zebra/zebra_evpn_mh.h +++ b/zebra/zebra_evpn_mh.h @@ -195,6 +195,11 @@ struct zebra_evpn_mh_info { * first local ES, DAD is turned off */ #define ZEBRA_EVPN_MH_DUP_ADDR_DETECT_OFF (1 << 1) +/* If EVPN MH is enabled we only advertise REACHABLE neigh entries as Type-2 + * routes. As there is no global config knob for enabling EVPN MH we turn + * this flag when the first local ES is detected. 
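
ZEBRA_EVPN_MH_ADV_REACHABLE_NEIGH_ONLY is latched when the first local ES shows up, and the netlink neighbor handler earlier in this patch uses it to decide whether STALE entries still count as locally active. A toy sketch of that decision; the state bits here are placeholders, not the kernel NUD_* values:

#include <stdbool.h>
#include <stdio.h>

/* Simplified neighbor states (values are illustrative only). */
#define ST_REACHABLE 0x1
#define ST_STALE     0x2

static bool adv_reachable_only;    /* set when the first local ES appears */

/*
 * Decide whether a locally learned neighbor counts as inactive for
 * advertisement: with EVPN-MH enabled only REACHABLE entries are
 * advertised, otherwise STALE entries are advertised as well.
 */
static bool neigh_local_inactive(unsigned int state)
{
    if (adv_reachable_only)
        return !(state & ST_REACHABLE);
    return false;
}

int main(void)
{
    printf("stale, MH off: inactive=%d\n", neigh_local_inactive(ST_STALE));

    adv_reachable_only = true;    /* first local ES configured */
    printf("stale, MH on:  inactive=%d\n", neigh_local_inactive(ST_STALE));
    return 0;
}
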
+ */ +#define ZEBRA_EVPN_MH_ADV_REACHABLE_NEIGH_ONLY (1 << 2) /* RB tree of Ethernet segments (used for EVPN-MH) */ struct zebra_es_rb_head es_rb_tree; @@ -275,6 +280,12 @@ static inline bool zebra_evpn_mh_do_dup_addr_detect(void) return !(zmh_info->flags & ZEBRA_EVPN_MH_DUP_ADDR_DETECT_OFF); } +static inline bool zebra_evpn_mh_do_adv_reachable_neigh_only(void) +{ + return !!(zmh_info->flags & ZEBRA_EVPN_MH_ADV_REACHABLE_NEIGH_ONLY); +} + + /*****************************************************************************/ extern esi_t *zero_esi; extern void zebra_evpn_mh_init(void); diff --git a/zebra/zebra_evpn_neigh.c b/zebra/zebra_evpn_neigh.c index 6d72bc570e..1f45b72e3a 100644 --- a/zebra/zebra_evpn_neigh.c +++ b/zebra/zebra_evpn_neigh.c @@ -529,16 +529,21 @@ static void zebra_evpn_local_neigh_deref_mac(zebra_neigh_t *n, } bool zebra_evpn_neigh_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_neigh_t *n, - struct ethaddr *macaddr, uint32_t seq) + struct ethaddr *macaddr, uint32_t seq, + bool sync) { char macbuf[ETHER_ADDR_STRLEN]; char ipbuf[INET6_ADDRSTRLEN]; uint32_t tmp_seq; + const char *n_type; - if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_LOCAL)) + if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_LOCAL)) { tmp_seq = n->loc_seq; - else + n_type = "local"; + } else { tmp_seq = n->rem_seq; + n_type = "remote"; + } if (seq < tmp_seq) { /* if the neigh was never advertised to bgp we must accept @@ -547,10 +552,12 @@ bool zebra_evpn_neigh_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_neigh_t *n, */ if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_LOCAL) && !zebra_evpn_neigh_is_ready_for_bgp(n)) { - if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH) + if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH + || IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "sync-macip accept vni %u mac %s IP %s lower seq %u f 0x%x", - zevpn->vni, + "%s-macip accept vni %u %s mac %s IP %s lower seq %u f 0x%x", + sync ? "sync" : "remote", zevpn->vni, + n_type, prefix_mac2str(macaddr, macbuf, sizeof(macbuf)), ipaddr2str(&n->ip, ipbuf, @@ -559,10 +566,10 @@ bool zebra_evpn_neigh_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_neigh_t *n, return true; } - if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH) + if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH || IS_ZEBRA_DEBUG_VXLAN) zlog_debug( - "sync-macip ignore vni %u mac %s IP %s as existing has higher seq %u f 0x%x", - zevpn->vni, + "%s-macip ignore vni %u %s mac %s IP %s as existing has higher seq %u f 0x%x", + sync ? "sync" : "remote", zevpn->vni, n_type, prefix_mac2str(macaddr, macbuf, sizeof(macbuf)), ipaddr2str(&n->ip, ipbuf, sizeof(ipbuf)), tmp_seq, n->flags); @@ -1453,6 +1460,9 @@ int zebra_evpn_local_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp, new_bgp_ready = zebra_evpn_neigh_is_ready_for_bgp(n); + if (dp_static != new_static) + inform_dataplane = true; + /* Neigh is in freeze state and freeze action * is enabled, do not send update to client. 
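
The local-neigh update path below now re-installs an entry whenever zebra's idea of "static/sync" differs from what the dataplane reported, alongside the existing check of whether BGP readiness changed. Roughly, as a standalone sketch with simplified inputs:

#include <stdbool.h>
#include <stdio.h>

/* Actions derived from a local neighbor update, as in the EVPN neigh path. */
struct neigh_actions {
    bool inform_bgp;          /* advertisement readiness changed */
    bool inform_dataplane;    /* zebra and dataplane disagree on sync/static */
};

static struct neigh_actions neigh_update_actions(bool old_bgp_ready,
                                                 bool new_bgp_ready,
                                                 bool dp_static,
                                                 bool zebra_static)
{
    struct neigh_actions act = {
        .inform_bgp = (old_bgp_ready != new_bgp_ready),
        .inform_dataplane = (dp_static != zebra_static),
    };
    return act;
}

int main(void)
{
    struct neigh_actions act =
        neigh_update_actions(true, true, true, false);

    printf("inform_bgp=%d inform_dataplane=%d\n",
           act.inform_bgp, act.inform_dataplane);
    return 0;
}
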
*/ @@ -1467,6 +1477,12 @@ int zebra_evpn_local_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp, old_bgp_ready, new_bgp_ready, false, false, "flag-update"); + if (inform_dataplane) + zebra_evpn_sync_neigh_dp_install( + n, false /* set_inactive */, + false /* force_clear_static */, + __func__); + /* if the neigh can no longer be advertised * remove it from bgp */ @@ -1578,15 +1594,11 @@ int zebra_evpn_local_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp, else UNSET_FLAG(n->flags, ZEBRA_NEIGH_ROUTER_FLAG); - /* if the dataplane thinks that this is a sync entry but - * zebra doesn't we need to re-concile the diff - * by re-installing the dataplane entry - */ - if (dp_static) { - new_static = zebra_evpn_neigh_is_static(n); - if (!new_static) - inform_dataplane = true; - } + /* if zebra and dataplane don't agree this is a sync entry + * re-install in the dataplane */ + new_static = zebra_evpn_neigh_is_static(n); + if (dp_static != new_static) + inform_dataplane = true; /* Check old and/or new MAC detected as duplicate mark * the neigh as duplicate @@ -2128,7 +2140,6 @@ void process_neigh_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, { zebra_neigh_t *n; int update_neigh = 0; - uint32_t tmp_seq; char buf[ETHER_ADDR_STRLEN]; char buf1[INET6_ADDRSTRLEN]; zebra_mac_t *old_mac = NULL; @@ -2165,8 +2176,6 @@ void process_neigh_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, } } else { - const char *n_type; - /* When host moves but changes its (MAC,IP) * binding, BGP may install a MACIP entry that * corresponds to "older" location of the host @@ -2175,27 +2184,10 @@ void process_neigh_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf, * the sequence number and ignore this update * if appropriate. */ - if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_LOCAL)) { - tmp_seq = n->loc_seq; - n_type = "local"; - } else { - tmp_seq = n->rem_seq; - n_type = "remote"; - } - if (seq < tmp_seq) { - if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "Ignore remote MACIP ADD VNI %u MAC %s%s%s as existing %s Neigh has higher seq %u", - zevpn->vni, - prefix_mac2str(&mac->macaddr, - buf, - sizeof(buf)), - " IP ", - ipaddr2str(ipaddr, buf1, - sizeof(buf1)), - n_type, tmp_seq); + + if (!zebra_evpn_neigh_is_bgp_seq_ok( + zevpn, n, &mac->macaddr, seq, false)) return; - } if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_LOCAL)) { old_static = zebra_evpn_neigh_is_static(n); if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH) diff --git a/zebra/zebra_evpn_neigh.h b/zebra/zebra_evpn_neigh.h index 50efdc0e0d..eac17a09b4 100644 --- a/zebra/zebra_evpn_neigh.h +++ b/zebra/zebra_evpn_neigh.h @@ -237,7 +237,8 @@ int zebra_evpn_neigh_send_del_to_client(vni_t vni, struct ipaddr *ip, struct ethaddr *macaddr, uint32_t flags, int state, bool force); bool zebra_evpn_neigh_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_neigh_t *n, - struct ethaddr *macaddr, uint32_t seq); + struct ethaddr *macaddr, uint32_t seq, + bool sync); int zebra_evpn_neigh_del(zebra_evpn_t *zevpn, zebra_neigh_t *n); void zebra_evpn_sync_neigh_del(zebra_neigh_t *n); zebra_neigh_t * diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c index 2bf48c6277..73534c4332 100644 --- a/zebra/zebra_fpm.c +++ b/zebra/zebra_fpm.c @@ -70,7 +70,7 @@ DEFINE_MTYPE_STATIC(ZEBRA, FPM_MAC_INFO, "FPM_MAC_INFO"); #define ZFPM_STATS_IVL_SECS 10 #define FPM_MAX_MAC_MSG_LEN 512 -static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args); +static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args); /* * Structure that holds state for iterating over all route_node 
@@ -273,6 +273,11 @@ struct zfpm_glob { * If non-zero, the last time when statistics were cleared. */ time_t last_stats_clear_time; + + /* + * Flag to track the MAC dump status to FPM + */ + bool fpm_mac_dump_done; }; static struct zfpm_glob zfpm_glob_space; @@ -517,8 +522,6 @@ static int zfpm_conn_up_thread_cb(struct thread *thread) struct zfpm_rnodes_iter *iter; rib_dest_t *dest; - zfpm_g->t_conn_up = NULL; - iter = &zfpm_g->t_conn_up_state.iter; if (zfpm_g->state != ZFPM_STATE_ESTABLISHED) { @@ -528,8 +531,13 @@ static int zfpm_conn_up_thread_cb(struct thread *thread) goto done; } - /* Enqueue FPM updates for all the RMAC entries */ - hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table, NULL); + if (!zfpm_g->fpm_mac_dump_done) { + /* Enqueue FPM updates for all the RMAC entries */ + hash_iterate(zrouter.l3vni_table, zfpm_iterate_rmac_table, + NULL); + /* mark dump done so that its not repeated after yield */ + zfpm_g->fpm_mac_dump_done = true; + } while ((rnode = zfpm_rnodes_iter_next(iter))) { dest = rib_dest_from_rnode(rnode); @@ -547,7 +555,6 @@ static int zfpm_conn_up_thread_cb(struct thread *thread) zfpm_g->stats.t_conn_up_yields++; zfpm_rnodes_iter_pause(iter); - zfpm_g->t_conn_up = NULL; thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0, &zfpm_g->t_conn_up); return 0; @@ -575,12 +582,13 @@ static void zfpm_connection_up(const char *detail) /* * Start thread to push existing routes to the FPM. */ - assert(!zfpm_g->t_conn_up); + thread_cancel(&zfpm_g->t_conn_up); zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter); + zfpm_g->fpm_mac_dump_done = false; zfpm_debug("Starting conn_up thread"); - zfpm_g->t_conn_up = NULL; + thread_add_timer_msec(zfpm_g->master, zfpm_conn_up_thread_cb, NULL, 0, &zfpm_g->t_conn_up); zfpm_g->stats.t_conn_up_starts++; @@ -1627,10 +1635,10 @@ static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni, * Iterate over all the RMAC entries for the given L3VNI * and enqueue the RMAC for FPM processing. */ -static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *backet, +static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket, void *args) { - zebra_mac_t *zrmac = (zebra_mac_t *)backet->data; + zebra_mac_t *zrmac = (zebra_mac_t *)bucket->data; zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)args; zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added"); @@ -1641,9 +1649,9 @@ static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *backet, * This function iterates over all the L3VNIs to trigger * FPM updates for RMACs currently available. */ -static void zfpm_iterate_rmac_table(struct hash_bucket *backet, void *args) +static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args) { - zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)backet->data; + zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)bucket->data; hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper, (void *)zl3vni); diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c index 44f574073c..f7c5da5dec 100644 --- a/zebra/zebra_fpm_netlink.c +++ b/zebra/zebra_fpm_netlink.c @@ -133,6 +133,7 @@ struct netlink_nh_info { * A structure for holding information for a netlink route message. 
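
fpm_mac_dump_done exists because the conn-up callback yields and re-schedules itself: the RMAC dump must run exactly once per connection, while the route-node walk resumes where it left off. A generic, self-contained sketch of that one-time-phase-plus-resumable-loop shape, with the timer re-scheduling replaced by a plain while loop:

#include <stdbool.h>
#include <stdio.h>

/*
 * A resumable worker: an expensive one-time phase guarded by a "done"
 * flag, followed by an iteration that yields after a work quantum and
 * would normally re-schedule itself.
 */
struct worker {
    bool one_time_done;
    int next_item;
    int total_items;
};

/* Returns true when all work is finished, false when it yielded. */
static bool worker_run(struct worker *w, int quantum)
{
    if (!w->one_time_done) {
        /* e.g. enqueue all RMAC entries once */
        printf("one-time dump\n");
        w->one_time_done = true;
    }

    for (int n = 0; n < quantum && w->next_item < w->total_items; n++)
        printf("item %d\n", w->next_item++);

    return w->next_item >= w->total_items;
}

int main(void)
{
    struct worker w = { false, 0, 5 };

    while (!worker_run(&w, 2))
        ;    /* in FRR this would be a timer re-schedule */
    return 0;
}
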
*/ struct netlink_route_info { + uint32_t nlmsg_pid; uint16_t nlmsg_type; uint8_t rtm_type; uint32_t rtm_table; @@ -244,14 +245,20 @@ static int netlink_route_info_fill(struct netlink_route_info *ri, int cmd, rib_dest_t *dest, struct route_entry *re) { struct nexthop *nexthop; + struct rib_table_info *table_info = + rib_table_info(rib_dest_table(dest)); + struct zebra_vrf *zvrf = table_info->zvrf; memset(ri, 0, sizeof(*ri)); ri->prefix = rib_dest_prefix(dest); ri->af = rib_dest_af(dest); + if (zvrf && zvrf->zns) + ri->nlmsg_pid = zvrf->zns->netlink_dplane.snl.nl_pid; + ri->nlmsg_type = cmd; - ri->rtm_table = rib_table_info(rib_dest_table(dest))->table_id; + ri->rtm_table = table_info->table_id; ri->rtm_protocol = RTPROT_UNSPEC; /* @@ -357,6 +364,7 @@ static int netlink_route_info_encode(struct netlink_route_info *ri, req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); req->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST; + req->n.nlmsg_pid = ri->nlmsg_pid; req->n.nlmsg_type = ri->nlmsg_type; req->r.rtm_family = ri->af; diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 375be45df0..2864b96c83 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -43,6 +43,7 @@ #include "zebra_errors.h" #include "zebra_dplane.h" #include "zebra/interface.h" +#include "zebra/zapi_msg.h" DEFINE_MTYPE_STATIC(ZEBRA, NHG, "Nexthop Group Entry"); DEFINE_MTYPE_STATIC(ZEBRA, NHG_CONNECTED, "Nexthop Group Connected"); @@ -1759,6 +1760,10 @@ static bool nexthop_valid_resolve(const struct nexthop *nexthop, if (!CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_ACTIVE)) return false; + /* Must not be duplicate */ + if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_DUPLICATE)) + return false; + switch (nexthop->type) { case NEXTHOP_TYPE_IPV4_IFINDEX: case NEXTHOP_TYPE_IPV6_IFINDEX: @@ -2105,10 +2110,10 @@ done_with_match: /* This function verifies reachability of one given nexthop, which can be * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored * in nexthop->flags field. The nexthop->ifindex will be updated - * appropriately as well. An existing route map can turn - * (otherwise active) nexthop into inactive, but not vice versa. + * appropriately as well. An existing route map can turn an + * otherwise active nexthop into inactive, but not vice versa. * - * If it finds a nexthop recursivedly, set the resolved_id + * If it finds a nexthop recursively, set the resolved_id * to match that nexthop's nhg_hash_entry ID; * * The return value is the final value of 'ACTIVE' flag. 
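
The FPM encoder below records the nl_pid of zebra's dplane netlink socket in netlink_route_info and writes it into nlmsg_pid, presumably so a consumer can tell which netlink channel a route message is associated with. A minimal standalone example of stamping that header field (Linux headers only; the pid value is made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

struct rt_req {
    struct nlmsghdr n;
    struct rtmsg r;
    char buf[256];
};

/*
 * Build the fixed part of a route message and stamp nlmsg_pid with the
 * netlink port id of the socket that "owns" the route (here just a
 * parameter), so a consumer can tell which channel originated it.
 */
static void encode_route_hdr(struct rt_req *req, uint32_t owner_pid,
                             unsigned char family)
{
    memset(req, 0, sizeof(*req));
    req->n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
    req->n.nlmsg_flags = NLM_F_CREATE | NLM_F_REQUEST;
    req->n.nlmsg_type = RTM_NEWROUTE;
    req->n.nlmsg_pid = owner_pid;
    req->r.rtm_family = family;
}

int main(void)
{
    struct rt_req req;

    encode_route_hdr(&req, 1234 /* e.g. dplane socket's nl_pid */, AF_INET);
    printf("type=%u pid=%u\n", req.n.nlmsg_type, req.n.nlmsg_pid);
    return 0;
}
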
@@ -2119,8 +2124,7 @@ static unsigned nexthop_active_check(struct route_node *rn, { struct interface *ifp; route_map_result_t ret = RMAP_PERMITMATCH; - int family; - char buf[SRCDEST2STR_BUFFER]; + afi_t family; const struct prefix *p, *src_p; struct zebra_vrf *zvrf; @@ -2132,6 +2136,7 @@ static unsigned nexthop_active_check(struct route_node *rn, family = AFI_IP6; else family = 0; + switch (nexthop->type) { case NEXTHOP_TYPE_IFINDEX: ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id); @@ -2230,10 +2235,9 @@ static unsigned nexthop_active_check(struct route_node *rn, zvrf, re->tag); if (ret == RMAP_DENYMATCH) { if (IS_ZEBRA_DEBUG_RIB) { - srcdest_rnode2str(rn, buf, sizeof(buf)); zlog_debug( - "%u:%s: Filtering out with NH out %s due to route map", - re->vrf_id, buf, + "%u:%pRN: Filtering out with NH out %s due to route map", + re->vrf_id, rn, ifindex2ifname(nexthop->ifindex, nexthop->vrf_id)); } @@ -2643,6 +2647,7 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx) EC_ZEBRA_DP_DELETE_FAIL, "Failed to uninstall Nexthop ID (%u) from the kernel", id); + /* We already free'd the data, nothing to do */ break; case DPLANE_OP_NH_INSTALL: @@ -2663,12 +2668,26 @@ void zebra_nhg_dplane_result(struct zebra_dplane_ctx *ctx) SET_FLAG(nhe->flags, NEXTHOP_GROUP_VALID); SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); zebra_nhg_handle_install(nhe); - } else + + /* If daemon nhg, send it an update */ + if (nhe->id >= ZEBRA_NHG_PROTO_LOWER) + zsend_nhg_notify(nhe->type, nhe->zapi_instance, + nhe->zapi_session, nhe->id, + ZAPI_NHG_INSTALLED); + } else { + /* If daemon nhg, send it an update */ + if (nhe->id >= ZEBRA_NHG_PROTO_LOWER) + zsend_nhg_notify(nhe->type, nhe->zapi_instance, + nhe->zapi_session, nhe->id, + ZAPI_NHG_FAIL_INSTALL); + flog_err( EC_ZEBRA_DP_INSTALL_FAIL, "Failed to install Nexthop ID (%u) into the kernel", nhe->id); + } break; + case DPLANE_OP_ROUTE_INSTALL: case DPLANE_OP_ROUTE_UPDATE: case DPLANE_OP_ROUTE_DELETE: @@ -2777,6 +2796,7 @@ bool zebra_nhg_proto_nexthops_only(void) /* Add NHE from upper level proto */ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type, + uint16_t instance, uint32_t session, struct nexthop_group *nhg, afi_t afi) { struct nhg_hash_entry lookup; @@ -2860,6 +2880,10 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type, zebra_nhg_increment_ref(new); + /* Capture zapi client info */ + new->zapi_instance = instance; + new->zapi_session = session; + zebra_nhg_set_valid_if_active(new); zebra_nhg_install_kernel(new); diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index 9382b8c65d..db20f2beaf 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -50,8 +50,14 @@ struct nhg_hash_entry { uint32_t id; afi_t afi; vrf_id_t vrf_id; + + /* Source protocol - zebra or another daemon */ int type; + /* zapi instance and session id, for groups from other daemons */ + uint16_t zapi_instance; + uint32_t zapi_session; + struct nexthop_group nhg; /* If supported, a mapping of backup nexthops. */ @@ -292,6 +298,7 @@ zebra_nhg_rib_find_nhe(struct nhg_hash_entry *rt_nhe, afi_t rt_afi); * Returns allocated NHE on success, otherwise NULL. 
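
After this change the dataplane completion path, not the zapi read path, reports success for protocol-owned groups: any entry with an id at or above ZEBRA_NHG_PROTO_LOWER gets ZAPI_NHG_INSTALLED or ZAPI_NHG_FAIL_INSTALL sent back to its owner. A sketch of that dispatch; the threshold value is illustrative and a printf stands in for the zapi message:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative threshold: ids at or above it belong to client daemons. */
#define NHG_PROTO_LOWER 200000000u

enum nhg_note { NHG_INSTALLED, NHG_FAIL_INSTALL };

/* Stand-in for the zapi notification; real code sends a zclient message. */
static void notify_owner(uint32_t id, enum nhg_note note)
{
    printf("nhg %u -> %s\n", id,
           note == NHG_INSTALLED ? "installed" : "fail-install");
}

/* Handle a dataplane result: only proto-owned groups get a notification. */
static void nhg_dplane_result(uint32_t id, bool success)
{
    if (id < NHG_PROTO_LOWER)
        return;    /* zebra-internal group, nothing to notify */

    notify_owner(id, success ? NHG_INSTALLED : NHG_FAIL_INSTALL);
}

int main(void)
{
    nhg_dplane_result(42, true);                  /* internal, silent */
    nhg_dplane_result(NHG_PROTO_LOWER + 7, true);
    nhg_dplane_result(NHG_PROTO_LOWER + 8, false);
    return 0;
}
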
*/ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type, + uint16_t instance, uint32_t session, struct nexthop_group *nhg, afi_t afi); diff --git a/zebra/zebra_pbr.c b/zebra/zebra_pbr.c index c244d2a955..87ab900092 100644 --- a/zebra/zebra_pbr.c +++ b/zebra/zebra_pbr.c @@ -32,6 +32,7 @@ #include "zebra/zapi_msg.h" #include "zebra/zebra_memory.h" #include "zebra/zserv.h" +#include "zebra/debug.h" /* definitions */ DEFINE_MTYPE_STATIC(ZEBRA, PBR_IPTABLE_IFNAME, "PBR interface list") @@ -499,10 +500,14 @@ void zebra_pbr_add_rule(struct zebra_pbr_rule *rule) */ found = pbr_rule_lookup_unique(rule); - (void)hash_get(zrouter.rules_hash, rule, pbr_rule_alloc_intern); - /* If found, this is an update */ if (found) { + if (IS_ZEBRA_DEBUG_PBR) + zlog_debug( + "%s: seq: %d, prior: %d, unique: %d, ifname: %s -- update", + __func__, rule->rule.seq, rule->rule.priority, + rule->rule.unique, rule->rule.ifname); + (void)dplane_pbr_rule_update(found, rule); if (pbr_rule_release(found)) @@ -510,12 +515,26 @@ void zebra_pbr_add_rule(struct zebra_pbr_rule *rule) "%s: Rule being updated we know nothing about", __PRETTY_FUNCTION__); - } else + } else { + if (IS_ZEBRA_DEBUG_PBR) + zlog_debug( + "%s: seq: %d, prior: %d, unique: %d, ifname: %s -- new", + __func__, rule->rule.seq, rule->rule.priority, + rule->rule.unique, rule->rule.ifname); + (void)dplane_pbr_rule_add(rule); + } + + (void)hash_get(zrouter.rules_hash, rule, pbr_rule_alloc_intern); } void zebra_pbr_del_rule(struct zebra_pbr_rule *rule) { + if (IS_ZEBRA_DEBUG_PBR) + zlog_debug("%s: seq: %d, prior: %d, unique: %d, ifname: %s", + __func__, rule->rule.seq, rule->rule.priority, + rule->rule.unique, rule->rule.ifname); + (void)dplane_pbr_rule_delete(rule); if (pbr_rule_release(rule)) diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 0aea0b6cfa..c5d977017e 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -38,6 +38,7 @@ #include "workqueue.h" #include "nexthop_group_private.h" #include "frr_pthread.h" +#include "printfrr.h" #include "zebra/zebra_router.h" #include "zebra/connected.h" @@ -148,6 +149,30 @@ _rnode_zlog(const char *_func, vrf_id_t vrf_id, struct route_node *rn, zlog(priority, "%s: (%u:%u):%s: %s", _func, vrf_id, table, buf, msgbuf); } +static char *_dump_re_status(const struct route_entry *re, char *buf, + size_t len) +{ + if (re->status == 0) { + snprintfrr(buf, len, "None "); + return buf; + } + + snprintfrr( + buf, len, "%s%s%s%s%s%s%s", + CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED) ? "Removed " : "", + CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED) ? "Changed " : "", + CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED) + ? "Label Changed " + : "", + CHECK_FLAG(re->status, ROUTE_ENTRY_QUEUED) ? "Queued " : "", + CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) ? "Installed " + : "", + CHECK_FLAG(re->status, ROUTE_ENTRY_FAILED) ? "Failed " : "", + CHECK_FLAG(re->status, ROUTE_ENTRY_USE_FIB_NHG) ? "Fib NHG " + : ""); + return buf; +} + #define rnode_debug(node, vrf_id, ...) \ _rnode_zlog(__func__, vrf_id, node, LOG_DEBUG, __VA_ARGS__) #define rnode_info(node, ...) 
\ @@ -250,8 +275,8 @@ done: return ret; } -void rib_handle_nhg_replace(struct nhg_hash_entry *old, - struct nhg_hash_entry *new) +void rib_handle_nhg_replace(struct nhg_hash_entry *old_entry, + struct nhg_hash_entry *new_entry) { struct zebra_router_table *zrt; struct route_node *rn; @@ -259,15 +284,15 @@ void rib_handle_nhg_replace(struct nhg_hash_entry *old, if (IS_ZEBRA_DEBUG_RIB_DETAILED || IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug("%s: replacing routes nhe (%u) OLD %p NEW %p", - __func__, new->id, new, old); + __func__, new_entry->id, new_entry, old_entry); /* We have to do them ALL */ RB_FOREACH (zrt, zebra_router_table_head, &zrouter.tables) { for (rn = route_top(zrt->table); rn; rn = srcdest_route_next(rn)) { RNODE_FOREACH_RE_SAFE (rn, re, next) { - if (re->nhe && re->nhe == old) - route_entry_update_nhe(re, new); + if (re->nhe && re->nhe == old_entry) + route_entry_update_nhe(re, new_entry); } } } @@ -720,7 +745,7 @@ void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq) if (rnh->seqno == seq) { if (IS_ZEBRA_DEBUG_NHT_DETAILED) zlog_debug( - "\tNode processed and moved already"); + " Node processed and moved already"); continue; } @@ -1080,12 +1105,20 @@ static void rib_process(struct route_node *rn) } RNODE_FOREACH_RE_SAFE (rn, re, next) { - if (IS_ZEBRA_DEBUG_RIB_DETAILED) + if (IS_ZEBRA_DEBUG_RIB_DETAILED) { + char flags_buf[128]; + char status_buf[128]; + zlog_debug( - "%s(%u:%u):%s: Examine re %p (%s) status %x flags %x dist %d metric %d", + "%s(%u:%u):%s: Examine re %p (%s) status: %sflags: %sdist %d metric %d", VRF_LOGNAME(vrf), vrf_id, re->table, buf, re, - zebra_route_string(re->type), re->status, - re->flags, re->distance, re->metric); + zebra_route_string(re->type), + _dump_re_status(re, status_buf, + sizeof(status_buf)), + zclient_dump_route_flags(re->flags, flags_buf, + sizeof(flags_buf)), + re->distance, re->metric); + } /* Currently selected re. */ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED)) { @@ -1107,6 +1140,9 @@ static void rib_process(struct route_node *rn) */ if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) { if (!nexthop_active_update(rn, re)) { + const struct prefix *p; + struct rib_table_info *info; + if (re->type == ZEBRA_ROUTE_TABLE) { /* XXX: HERE BE DRAGONS!!!!! 
* In all honesty, I have not yet @@ -1136,6 +1172,11 @@ static void rib_process(struct route_node *rn) ROUTE_ENTRY_REMOVED); } + info = srcdest_rnode_table_info(rn); + srcdest_rnode_prefixes(rn, &p, NULL); + zsend_route_notify_owner(re, p, + ZAPI_ROUTE_FAIL_INSTALL, + info->afi, info->safi); continue; } } else { @@ -2282,14 +2323,6 @@ static void process_subq_route(struct listnode *lnode, uint8_t qindex) UNSET_FLAG(rib_dest_from_rnode(rnode)->flags, RIB_ROUTE_QUEUED(qindex)); -#if 0 - else - { - zlog_debug ("%s: called for route_node (%p, %d) with no ribs", - __func__, rnode, route_node_get_lock_count(rnode)); - zlog_backtrace(LOG_DEBUG); - } -#endif route_unlock_node(rnode); } @@ -2791,6 +2824,8 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, bool is_srcdst = src_p && src_p->prefixlen; char straddr[PREFIX_STRLEN]; char srcaddr[PREFIX_STRLEN]; + char flags_buf[128]; + char status_buf[128]; struct nexthop *nexthop; struct vrf *vrf = vrf_lookup_by_id(re->vrf_id); struct nexthop_group *nhg; @@ -2804,9 +2839,12 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, zlog_debug("%s: uptime == %lu, type == %u, instance == %d, table == %d", straddr, (unsigned long)re->uptime, re->type, re->instance, re->table); - zlog_debug("%s: metric == %u, mtu == %u, distance == %u, flags == %u, status == %u", - straddr, re->metric, re->mtu, re->distance, re->flags, - re->status); + zlog_debug( + "%s: metric == %u, mtu == %u, distance == %u, flags == %sstatus == %s", + straddr, re->metric, re->mtu, re->distance, + zclient_dump_route_flags(re->flags, flags_buf, + sizeof(flags_buf)), + _dump_re_status(re, status_buf, sizeof(status_buf))); zlog_debug("%s: nexthop_num == %u, nexthop_active_num == %u", straddr, nexthop_group_nexthop_num(&(re->nhe->nhg)), nexthop_group_active_nexthop_num(&(re->nhe->nhg))); @@ -2941,8 +2979,10 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, struct nhg_hash_entry *nhe = NULL; struct route_table *table; struct route_node *rn; - struct route_entry *same = NULL; + struct route_entry *same = NULL, *first_same = NULL; int ret = 0; + int same_count = 0; + rib_dest_t *dest; if (!re || !re_nhe) return -1; @@ -3010,14 +3050,22 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, * for the install don't do a route replace. */ RNODE_FOREACH_RE (rn, same) { - if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) + if (CHECK_FLAG(same->status, ROUTE_ENTRY_REMOVED)) { + same_count++; continue; + } /* Compare various route_entry properties */ - if (rib_compare_routes(re, same)) - break; + if (rib_compare_routes(re, same)) { + same_count++; + + if (first_same == NULL) + first_same = same; + } } + same = first_same; + /* If this route is kernel/connected route, notify the dataplane. */ if (RIB_SYSTEM_ROUTE(re)) { /* Notify dataplane */ @@ -3027,8 +3075,9 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, /* Link new re to node.*/ if (IS_ZEBRA_DEBUG_RIB) { rnode_debug(rn, re->vrf_id, - "Inserting route rn %p, re %p (%s) existing %p", - rn, re, zebra_route_string(re->type), same); + "Inserting route rn %p, re %p (%s) existing %p, same_count %d", + rn, re, zebra_route_string(re->type), same, + same_count); if (IS_ZEBRA_DEBUG_RIB_DETAILED) route_entry_dump(p, src_p, re); @@ -3042,6 +3091,24 @@ int rib_add_multipath_nhe(afi_t afi, safi_t safi, struct prefix *p, if (same) rib_delnode(rn, same); + /* See if we can remove some RE entries that are queued for + * removal, but won't be considered in rib processing. 
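The rib_add_multipath_nhe() hunk above now remembers only the first matching route_entry (first_same) while still counting every entry it examined, so the "Inserting route" debug line can report same_count. The toy walk below sketches that selection over a singly linked chain; struct entry, find_first_same() and the removed flag are invented stand-ins for the RNODE_FOREACH_RE iteration and ROUTE_ENTRY_REMOVED.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the route_entry chain hanging off a route_node; only
 * the fields needed for the walk are modelled. */
struct entry {
        int type;
        bool removed;           /* ROUTE_ENTRY_REMOVED in the real code */
        struct entry *next;
};

/* Count every entry considered (matches and removed ones) but keep only
 * the first live match as the entry to replace. */
static struct entry *find_first_same(struct entry *head, int type,
                                     int *same_count)
{
        struct entry *first_same = NULL;

        for (struct entry *e = head; e; e = e->next) {
                if (e->removed) {
                        (*same_count)++;
                        continue;
                }
                if (e->type == type) {
                        (*same_count)++;
                        if (!first_same)
                                first_same = e;
                }
        }
        return first_same;
}

int main(void)
{
        struct entry c = { .type = 2, .removed = false, .next = NULL };
        struct entry b = { .type = 2, .removed = true, .next = &c };
        struct entry a = { .type = 1, .removed = false, .next = &b };
        int same_count = 0;

        struct entry *same = find_first_same(&a, 2, &same_count);

        /* Prints "match: 2, considered: 2" */
        printf("match: %d, considered: %d\n", same ? same->type : -1,
               same_count);
        return 0;
}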
+ */ + dest = rib_dest_from_rnode(rn); + RNODE_FOREACH_RE_SAFE (rn, re, same) { + if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED)) { + /* If the route was used earlier, must retain it. */ + if (dest && re == dest->selected_fib) + continue; + + if (IS_ZEBRA_DEBUG_RIB) + rnode_debug(rn, re->vrf_id, "rn %p, removing unneeded re %p", + rn, re); + + rib_unlink(rn, re); + } + } + route_unlock_node(rn); return ret; } @@ -3088,7 +3155,7 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, unsigned short instance, uint32_t flags, struct prefix *p, struct prefix_ipv6 *src_p, const struct nexthop *nh, uint32_t nhe_id, uint32_t table_id, uint32_t metric, - uint8_t distance, bool fromkernel, bool connected_down) + uint8_t distance, bool fromkernel) { struct route_table *table; struct route_node *rn; @@ -3294,19 +3361,6 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type, rib_delnode(rn, same); } - /* - * This is to force an immediate re-eval of this particular - * node via nexthop tracking. Why? Because there are scenarios - * where the interface is flapping and the normal queuing methodology - * will cause down/up events to very very rarely be combined into - * a non-event from nexthop tracking perspective. Leading - * to some fun timing situations with upper level routing protocol - * trying to and failing to install routes during this blip. Especially - * when zebra is under load. - */ - if (connected_down) - zebra_rib_evaluate_rn_nexthops(rn, - zebra_router_get_next_sequence()); route_unlock_node(rn); return; } @@ -3391,7 +3445,8 @@ static void rib_update_route_node(struct route_node *rn, int type) } /* Schedule routes of a particular table (address-family) based on event. */ -void rib_update_table(struct route_table *table, enum rib_update_event event) +void rib_update_table(struct route_table *table, enum rib_update_event event, + int rtype) { struct route_node *rn; @@ -3404,12 +3459,12 @@ void rib_update_table(struct route_table *table, enum rib_update_event event) : NULL; vrf = zvrf ? zvrf->vrf : NULL; - zlog_debug("%s: %s VRF %s Table %u event %s", __func__, + zlog_debug("%s: %s VRF %s Table %u event %s Route type: %s", __func__, table->info ? afi2str( ((struct rib_table_info *)table->info)->afi) : "Unknown", VRF_LOGNAME(vrf), zvrf ? zvrf->table_id : 0, - rib_update_event2str(event)); + rib_update_event2str(event), zebra_route_string(rtype)); } /* Walk all routes and queue for processing, if appropriate for @@ -3432,7 +3487,7 @@ void rib_update_table(struct route_table *table, enum rib_update_event event) break; case RIB_UPDATE_RMAP_CHANGE: case RIB_UPDATE_OTHER: - rib_update_route_node(rn, ZEBRA_ROUTE_ALL); + rib_update_route_node(rn, rtype); break; default: break; @@ -3440,7 +3495,8 @@ void rib_update_table(struct route_table *table, enum rib_update_event event) } } -static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event) +static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event, + int rtype) { struct route_table *table; @@ -3451,14 +3507,14 @@ static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event) /* Process routes of interested address-families. 
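rib_update_table() and rib_update_handle_vrf() now take an rtype argument so a route-map change can re-queue only routes owned by one protocol, while general events still pass ZEBRA_ROUTE_ALL. A hedged sketch of the matching predicate follows; the enum route_type values and update_matches() helper are invented, standing in for the real ZEBRA_ROUTE_* codes and for whatever per-entry filtering rib_update_route_node() applies.

#include <stdio.h>

/* Illustrative protocol codes; the real ZEBRA_ROUTE_* enum is much larger. */
enum route_type { ROUTE_KERNEL, ROUTE_STATIC, ROUTE_BGP, ROUTE_ALL };

/* Should an entry of origin 'etype' be re-queued for an update scoped to
 * 'rtype'?  ROUTE_ALL keeps the old catch-all behaviour. */
static int update_matches(enum route_type etype, enum route_type rtype)
{
        return rtype == ROUTE_ALL || etype == rtype;
}

int main(void)
{
        /* A route-map change scoped to BGP touches BGP routes only... */
        printf("%d\n", update_matches(ROUTE_BGP, ROUTE_BGP));    /* 1 */
        printf("%d\n", update_matches(ROUTE_STATIC, ROUTE_BGP)); /* 0 */
        /* ...while a general event still hits everything. */
        printf("%d\n", update_matches(ROUTE_STATIC, ROUTE_ALL)); /* 1 */
        return 0;
}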
*/ table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id); if (table) - rib_update_table(table, event); + rib_update_table(table, event, rtype); table = zebra_vrf_table(AFI_IP6, SAFI_UNICAST, vrf_id); if (table) - rib_update_table(table, event); + rib_update_table(table, event, rtype); } -static void rib_update_handle_vrf_all(enum rib_update_event event) +static void rib_update_handle_vrf_all(enum rib_update_event event, int rtype) { struct zebra_router_table *zrt; @@ -3468,7 +3524,7 @@ static void rib_update_handle_vrf_all(enum rib_update_event event) /* Just iterate over all the route tables, rather than vrf lookups */ RB_FOREACH (zrt, zebra_router_table_head, &zrouter.tables) - rib_update_table(zrt->table, event); + rib_update_table(zrt->table, event, rtype); } struct rib_update_ctx { @@ -3502,9 +3558,9 @@ static int rib_update_handler(struct thread *thread) ctx = THREAD_ARG(thread); if (ctx->vrf_all) - rib_update_handle_vrf_all(ctx->event); + rib_update_handle_vrf_all(ctx->event, ZEBRA_ROUTE_ALL); else - rib_update_handle_vrf(ctx->vrf_id, ctx->event); + rib_update_handle_vrf(ctx->vrf_id, ctx->event, ZEBRA_ROUTE_ALL); rib_update_ctx_fini(&ctx); @@ -3517,26 +3573,6 @@ static int rib_update_handler(struct thread *thread) */ static struct thread *t_rib_update_threads[RIB_UPDATE_MAX]; -/* Schedule a RIB update event for specific vrf */ -void rib_update_vrf(vrf_id_t vrf_id, enum rib_update_event event) -{ - struct rib_update_ctx *ctx; - - ctx = rib_update_ctx_init(vrf_id, event); - - /* Don't worry about making sure multiple rib updates for specific vrf - * are scheduled at once for now. If it becomes a problem, we can use a - * lookup of some sort to keep track of running threads via t_vrf_id - * like how we are doing it in t_rib_update_threads[]. - */ - thread_add_event(zrouter.master, rib_update_handler, ctx, 0, NULL); - - if (IS_ZEBRA_DEBUG_EVENT) - zlog_debug("%s: Scheduled VRF %s, event %s", __func__, - vrf_id_to_name(ctx->vrf_id), - rib_update_event2str(event)); -} - /* Schedule a RIB update event for all vrfs */ void rib_update(enum rib_update_event event) { diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c index 3c4dbc5e9c..48e2bafe44 100644 --- a/zebra/zebra_rnh.c +++ b/zebra/zebra_rnh.c @@ -1124,7 +1124,7 @@ int zebra_send_rnh_update(struct rnh *rnh, struct zserv *client, break; default: flog_err(EC_ZEBRA_RNH_UNKNOWN_FAMILY, - "%s: Unknown family (%d) notification attempted\n", + "%s: Unknown family (%d) notification attempted", __func__, rn->p.family); goto failure; } diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c index 0ce724f608..17a9bf97f9 100644 --- a/zebra/zebra_routemap.c +++ b/zebra/zebra_routemap.c @@ -267,7 +267,8 @@ static int ip_protocol_rm_add(struct zebra_vrf *zvrf, const char *rmap, /* Process routes of interested address-families. */ table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id); if (table) - rib_update_table(table, RIB_UPDATE_RMAP_CHANGE); + rib_update_table(table, RIB_UPDATE_RMAP_CHANGE, + rtype); } return CMD_SUCCESS; @@ -294,7 +295,8 @@ static int ip_protocol_rm_del(struct zebra_vrf *zvrf, const char *rmap, /* Process routes of interested address-families. 
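The reworded help string above ("0 means route-map changes are run immediately instead of delaying") describes the delay-timer semantics: a non-zero timer coalesces a burst of route-map edits into one run, while 0 applies them at once. The toy model below illustrates only that decision; struct rmap_state and mark_update() are invented names, and the real code arms a thread timer rather than printing.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the route-map delay timer: names and fields are invented. */
struct rmap_state {
        unsigned int delay;     /* seconds; 0 = apply changes at once */
        bool timer_armed;
};

static void mark_update(struct rmap_state *s)
{
        if (s->delay == 0) {
                /* "0 means route-map changes are run immediately" */
                printf("processing route-map change now\n");
                return;
        }
        /* (Re)arm the timer so a burst of changes collapses into one run. */
        s->timer_armed = true;
        printf("update deferred for %u seconds\n", s->delay);
}

int main(void)
{
        struct rmap_state deferred = { .delay = 30, .timer_armed = false };
        struct rmap_state immediate = { .delay = 0, .timer_armed = false };

        mark_update(&deferred);  /* update deferred for 30 seconds */
        mark_update(&immediate); /* processing route-map change now */
        return 0;
}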
*/ table = zebra_vrf_table(afi, safi, zvrf->vrf->vrf_id); if (table) - rib_update_table(table, RIB_UPDATE_RMAP_CHANGE); + rib_update_table(table, RIB_UPDATE_RMAP_CHANGE, + rtype); } XFREE(MTYPE_ROUTE_MAP_NAME, PROTO_RM_NAME(zvrf, afi, rtype)); } @@ -576,7 +578,7 @@ DEFUN (zebra_route_map_timer, ZEBRA_STR "Set route-map parameters\n" "Time to wait before route-map updates are processed\n" - "0 means event-driven updates are disabled\n") + "0 means route-map changes are run immediately instead of delaying\n") { int idx_number = 3; uint32_t rmap_delay_timer; @@ -594,7 +596,7 @@ DEFUN (no_zebra_route_map_timer, ZEBRA_STR "Set route-map parameters\n" "Reset delay-timer to default value, 30 secs\n" - "0 means event-driven updates are disabled\n") + "0 means route-map changes are run immediately instead of delaying\n") { zebra_route_map_set_delay_timer(ZEBRA_RMAP_DEFAULT_UPDATE_TIMER); @@ -1454,8 +1456,6 @@ static void zebra_rib_table_rm_update(const char *rmap) struct vrf *vrf = NULL; struct zebra_vrf *zvrf = NULL; char *rmap_name; - char afi_ip = 0; - char afi_ipv6 = 0; struct route_map *old = NULL; RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { @@ -1486,16 +1486,12 @@ static void zebra_rib_table_rm_update(const char *rmap) PROTO_RM_MAP(zvrf, AFI_IP, i)); /* There is single rib table for all protocols */ - if (afi_ip == 0) { - table = zvrf->table[AFI_IP] - [SAFI_UNICAST]; - if (table) { - - afi_ip = 1; - rib_update_table( - table, - RIB_UPDATE_RMAP_CHANGE); - } + table = zvrf->table[AFI_IP][SAFI_UNICAST]; + if (table) { + rib_update_table( + table, + RIB_UPDATE_RMAP_CHANGE, + i); } } rmap_name = PROTO_RM_NAME(zvrf, AFI_IP6, i); @@ -1515,16 +1511,12 @@ static void zebra_rib_table_rm_update(const char *rmap) PROTO_RM_MAP(zvrf, AFI_IP6, i)); /* There is single rib table for all protocols */ - if (afi_ipv6 == 0) { - table = zvrf->table[AFI_IP6] - [SAFI_UNICAST]; - if (table) { - - afi_ipv6 = 1; - rib_update_table( - table, - RIB_UPDATE_RMAP_CHANGE); - } + table = zvrf->table[AFI_IP6][SAFI_UNICAST]; + if (table) { + rib_update_table( + table, + RIB_UPDATE_RMAP_CHANGE, + i); } } } @@ -1628,8 +1620,6 @@ static void zebra_route_map_process_update_cb(char *rmap_name) static int zebra_route_map_update_timer(struct thread *thread) { - zebra_t_rmap_update = NULL; - if (IS_ZEBRA_DEBUG_EVENT) zlog_debug("Event driven route-map update triggered"); @@ -1654,8 +1644,8 @@ static void zebra_route_map_set_delay_timer(uint32_t value) if (!value && zebra_t_rmap_update) { /* Event driven route map updates is being disabled */ /* But there's a pending timer. 
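zebra_rib_table_rm_update() above drops the afi_ip/afi_ipv6 "only once" guards and instead re-queues the unicast table once per protocol index bound to the changed route-map. A small sketch of that per-protocol dispatch follows; the proto_rmap[] binding table, MAX_PROTO and update_table_for_proto() are illustrative stand-ins for PROTO_RM_NAME() and rib_update_table().

#include <stdio.h>
#include <string.h>

#define MAX_PROTO 4

/* Invented per-protocol route-map bindings; stand-in for PROTO_RM_NAME(). */
static const char *proto_rmap[MAX_PROTO] = {
        [0] = "RM-KERNEL",
        [2] = "RM-BGP",
};

static void update_table_for_proto(int proto)
{
        printf("re-queueing routes of protocol %d\n", proto);
}

/* When route-map 'rmap' changes, schedule work for every protocol bound
 * to it instead of once per address-family. */
static void table_rm_update(const char *rmap)
{
        for (int i = 0; i < MAX_PROTO; i++) {
                if (proto_rmap[i] && strcmp(proto_rmap[i], rmap) == 0)
                        update_table_for_proto(i);
        }
}

int main(void)
{
        table_rm_update("RM-BGP"); /* only protocol 2 is re-queued */
        return 0;
}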
Fire it off now */ - thread_cancel(&zebra_t_rmap_update); - zebra_route_map_update_timer(zebra_t_rmap_update); + THREAD_OFF(zebra_t_rmap_update); + zebra_route_map_update_timer(NULL); } } @@ -1664,20 +1654,12 @@ void zebra_routemap_finish(void) /* Set zebra_rmap_update_timer to 0 so that it wont schedule again */ zebra_rmap_update_timer = 0; /* Thread off if any scheduled already */ - thread_cancel(&zebra_t_rmap_update); + THREAD_OFF(zebra_t_rmap_update); route_map_finish(); } -void zebra_route_map_write_delay_timer(struct vty *vty) -{ - if (vty && (zebra_rmap_update_timer != ZEBRA_RMAP_DEFAULT_UPDATE_TIMER)) - vty_out(vty, "zebra route-map delay-timer %d\n", - zebra_rmap_update_timer); - return; -} - route_map_result_t -zebra_route_map_check(int family, int rib_type, uint8_t instance, +zebra_route_map_check(afi_t family, int rib_type, uint8_t instance, const struct prefix *p, struct nexthop *nexthop, struct zebra_vrf *zvrf, route_tag_t tag) { @@ -1788,12 +1770,11 @@ route_map_result_t zebra_nht_route_map_check(afi_t afi, int client_proto, static void zebra_route_map_mark_update(const char *rmap_name) { /* rmap_update_timer of 0 means don't do route updates */ - if (zebra_rmap_update_timer && !zebra_t_rmap_update) { - zebra_t_rmap_update = NULL; - thread_add_timer(zrouter.master, zebra_route_map_update_timer, - NULL, zebra_rmap_update_timer, - &zebra_t_rmap_update); - } + if (zebra_rmap_update_timer) + THREAD_OFF(zebra_t_rmap_update); + + thread_add_timer(zrouter.master, zebra_route_map_update_timer, + NULL, zebra_rmap_update_timer, &zebra_t_rmap_update); } static void zebra_route_map_add(const char *rmap_name) @@ -1870,7 +1851,8 @@ void zebra_routemap_config_write_protocol(struct vty *vty, vty_out(vty, "%sipv6 nht %s route-map %s\n", space, "any", NHT_RM_NAME(zvrf, AFI_IP6, ZEBRA_ROUTE_MAX)); - if (zebra_rmap_update_timer != ZEBRA_RMAP_DEFAULT_UPDATE_TIMER) + if (zvrf_id(zvrf) == VRF_DEFAULT + && zebra_rmap_update_timer != ZEBRA_RMAP_DEFAULT_UPDATE_TIMER) vty_out(vty, "zebra route-map delay-timer %d\n", zebra_rmap_update_timer); } diff --git a/zebra/zebra_routemap.h b/zebra/zebra_routemap.h index 56e805ea03..c016d95875 100644 --- a/zebra/zebra_routemap.h +++ b/zebra/zebra_routemap.h @@ -36,15 +36,13 @@ extern void zebra_add_import_table_route_map(afi_t afi, const char *rmap_name, uint32_t table); extern void zebra_del_import_table_route_map(afi_t afi, uint32_t table); -extern void zebra_route_map_write_delay_timer(struct vty *); - extern route_map_result_t zebra_import_table_route_map_check(int family, int rib_type, uint8_t instance, const struct prefix *p, struct nexthop *nexthop, vrf_id_t vrf_id, route_tag_t tag, const char *rmap_name); extern route_map_result_t -zebra_route_map_check(int family, int rib_type, uint8_t instance, +zebra_route_map_check(afi_t family, int rib_type, uint8_t instance, const struct prefix *p, struct nexthop *nexthop, struct zebra_vrf *zvrf, route_tag_t tag); extern route_map_result_t diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 266050784a..f18d8fbb6d 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -1331,7 +1331,7 @@ DEFUN (ip_nht_default_route, zvrf->zebra_rnh_ip_default_route = 1; - zebra_evaluate_rnh(zvrf, AFI_IP, 1, RNH_NEXTHOP_TYPE, NULL); + zebra_evaluate_rnh(zvrf, AFI_IP, 0, RNH_NEXTHOP_TYPE, NULL); return CMD_SUCCESS; } @@ -1653,7 +1653,7 @@ DEFUN (no_ip_nht_default_route, return CMD_SUCCESS; zvrf->zebra_rnh_ip_default_route = 0; - zebra_evaluate_rnh(zvrf, AFI_IP, 1, RNH_NEXTHOP_TYPE, NULL); + zebra_evaluate_rnh(zvrf, AFI_IP, 0, 
RNH_NEXTHOP_TYPE, NULL); return CMD_SUCCESS; } @@ -1673,7 +1673,7 @@ DEFUN (ipv6_nht_default_route, return CMD_SUCCESS; zvrf->zebra_rnh_ipv6_default_route = 1; - zebra_evaluate_rnh(zvrf, AFI_IP6, 1, RNH_NEXTHOP_TYPE, NULL); + zebra_evaluate_rnh(zvrf, AFI_IP6, 0, RNH_NEXTHOP_TYPE, NULL); return CMD_SUCCESS; } @@ -1695,7 +1695,7 @@ DEFUN (no_ipv6_nht_default_route, return CMD_SUCCESS; zvrf->zebra_rnh_ipv6_default_route = 0; - zebra_evaluate_rnh(zvrf, AFI_IP6, 1, RNH_NEXTHOP_TYPE, NULL); + zebra_evaluate_rnh(zvrf, AFI_IP6, 0, RNH_NEXTHOP_TYPE, NULL); return CMD_SUCCESS; } diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 28bed846cf..424c00d5eb 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -1992,7 +1992,10 @@ static void zevpn_add_to_l3vni_list(struct hash_bucket *bucket, void *ctxt) } /* - * handle transition of vni from l2 to l3 and vice versa + * Handle transition of vni from l2 to l3 and vice versa. + * This function handles only the L2VNI add/delete part of + * the above transition. + * L3VNI add/delete is handled by the calling functions. */ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni, int add) @@ -2033,11 +2036,71 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni, return -1; } } else { - /* TODO_MITESH: This needs to be thought through. We don't have - * enough information at this point to reprogram the vni as - * l2-vni. One way is to store the required info in l3-vni and - * used it solely for this purpose - */ + struct zebra_ns *zns; + struct route_node *rn; + struct interface *ifp; + struct zebra_if *zif; + struct zebra_l2info_vxlan *vxl; + struct interface *vlan_if; + bool found = false; + + if (IS_ZEBRA_DEBUG_VXLAN) + zlog_debug("Adding L2-VNI %u - transition from L3-VNI", + vni); + + /* Find VxLAN interface for this VNI. */ + zns = zebra_ns_lookup(NS_DEFAULT); + for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) { + ifp = (struct interface *)rn->info; + if (!ifp) + continue; + zif = ifp->info; + if (!zif || zif->zif_type != ZEBRA_IF_VXLAN) + continue; + + vxl = &zif->l2info.vxl; + if (vxl->vni == vni) { + found = true; + break; + } + } + + if (!found) { + if (IS_ZEBRA_DEBUG_VXLAN) + zlog_err( + "Adding L2-VNI - Failed to find VxLAN interface for VNI %u", + vni); + return -1; + } + + /* Create VNI hash entry for L2VNI */ + zevpn = zebra_evpn_lookup(vni); + if (zevpn) + return 0; + + zevpn = zebra_evpn_add(vni); + if (!zevpn) { + flog_err(EC_ZEBRA_VNI_ADD_FAILED, + "Adding L2-VNI - Failed to add VNI hash, VNI %u", + vni); + + return -1; + } + + /* Find bridge interface for the VNI */ + vlan_if = zvni_map_to_svi(vxl->access_vlan, + zif->brslave_info.br_if); + if (vlan_if) + zevpn->vrf_id = vlan_if->vrf_id; + + zevpn->vxlan_if = ifp; + zevpn->local_vtep_ip = vxl->vtep_ip; + + /* Inform BGP if the VNI is up and mapped to a bridge. 
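The L3VNI-to-L2VNI transition added above locates the VXLAN interface that carries the VNI by walking the default namespace's interface table before creating the EVPN hash entry. The sketch below shows only that lookup shape; struct vxl_if and find_vxlan_if() are invented stand-ins for the route_top()/route_next() walk over zns->if_table and the zebra_l2info_vxlan data.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for zebra's interface and VXLAN l2info structures;
 * only the fields used by the lookup are modelled. */
struct vxl_if {
        const char *name;
        bool is_vxlan;
        unsigned int vni;
};

/* Linear scan over an interface table for the VXLAN interface carrying
 * the requested VNI, the same shape as the if_table walk above. */
static const struct vxl_if *find_vxlan_if(const struct vxl_if *ifs, int n,
                                          unsigned int vni)
{
        for (int i = 0; i < n; i++) {
                if (!ifs[i].is_vxlan)
                        continue;
                if (ifs[i].vni == vni)
                        return &ifs[i];
        }
        return NULL;
}

int main(void)
{
        const struct vxl_if ifs[] = {
                { "eth0", false, 0 },
                { "vxlan100", true, 100 },
                { "vxlan200", true, 200 },
        };
        const struct vxl_if *ifp = find_vxlan_if(ifs, 3, 200);

        printf("%s\n", ifp ? ifp->name : "not found"); /* vxlan200 */
        return 0;
}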
*/ + if (if_is_operative(ifp) && zif->brslave_info.br_if) { + zebra_evpn_send_add_to_client(zevpn); + zebra_evpn_read_mac_neigh(zevpn, ifp); + } } return 0; @@ -3270,7 +3333,7 @@ int zebra_vxlan_clear_dup_detect_vni(struct zebra_vrf *zvrf, vni_t vni) zevpn = zebra_evpn_lookup(vni); if (!zevpn) { - zlog_warn("VNI %u does not exist\n", vni); + zlog_warn("VNI %u does not exist", vni); return CMD_WARNING; } @@ -3678,13 +3741,13 @@ int zebra_vxlan_handle_kernel_neigh_update(struct interface *ifp, if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_NEIGH) zlog_debug( - "Add/Update neighbor %s MAC %s intf %s(%u) state 0x%x %s%s%s-> L2-VNI %u", + "Add/Update neighbor %s MAC %s intf %s(%u) state 0x%x %s%s%s%s-> L2-VNI %u", ipaddr2str(ip, buf2, sizeof(buf2)), prefix_mac2str(macaddr, buf, sizeof(buf)), ifp->name, ifp->ifindex, state, is_ext ? "ext-learned " : "", is_router ? "router " : "", local_inactive ? "local_inactive " : "", - zevpn->vni); + dp_static ? "peer_sync " : "", zevpn->vni); /* Is this about a local neighbor or a remote one? */ if (!is_ext) @@ -4008,7 +4071,6 @@ int zebra_vxlan_dp_network_mac_add(struct interface *ifp, * 1. readd the remote MAC if we have it * 2. local MAC with does ES may also need to be re-installed */ -static int zebra_vxlan_do_local_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac); int zebra_vxlan_dp_network_mac_del(struct interface *ifp, struct interface *br_if, struct ethaddr *macaddr, vlanid_t vid) @@ -4059,72 +4121,7 @@ int zebra_vxlan_dp_network_mac_del(struct interface *ifp, if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) zlog_debug("dpDel local-nw-MAC %pEA VNI %u", macaddr, vni); - zebra_vxlan_do_local_mac_del(zevpn, mac); - } - - return 0; -} - -static int zebra_vxlan_do_local_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac) -{ - bool old_bgp_ready; - bool new_bgp_ready; - - if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug("DEL MAC %pEA VNI %u seq %u flags 0x%x nbr count %u", - &mac->macaddr, zevpn->vni, mac->loc_seq, mac->flags, - listcount(mac->neigh_list)); - - old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); - if (zebra_evpn_mac_is_static(mac)) { - /* this is a synced entry and can only be removed when the - * es-peers stop advertising it. - */ - memset(&mac->fwd_info, 0, sizeof(mac->fwd_info)); - - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) - zlog_debug( - "re-add sync-mac vni %u mac %pEA es %s seq %d f 0x%x", - zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", mac->loc_seq, - mac->flags); - - /* inform-bgp about change in local-activity if any */ - if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE)) { - SET_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE); - new_bgp_ready = - zebra_evpn_mac_is_ready_for_bgp(mac->flags); - zebra_evpn_mac_send_add_del_to_client( - mac, old_bgp_ready, new_bgp_ready); - } - - /* re-install the entry in the kernel */ - zebra_evpn_sync_mac_dp_install(mac, false /* set_inactive */, - false /* force_clear_static */, - __func__); - - return 0; - } - - /* Update all the neigh entries associated with this mac */ - zebra_evpn_process_neigh_on_local_mac_del(zevpn, mac); - - /* Remove MAC from BGP. 
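The local-MAC delete logic removed above (and, per the following hunk, consolidated behind zebra_evpn_del_local_mac()) boils down to a three-way decision: peer-synced entries are kept and marked locally inactive, entries still referenced by neighbors are downgraded to AUTO, and the rest are freed. The sketch below models only that decision; struct mac_entry, local_mac_del() and the returned strings are illustrative, not the zebra data structures.

#include <stdbool.h>
#include <stdio.h>

/* Toy MAC entry carrying just the state the delete path cares about. */
struct mac_entry {
        bool peer_synced;       /* zebra_evpn_mac_is_static() in FRR */
        int neigh_count;        /* listcount(mac->neigh_list) */
};

/* Three-way decision of the consolidated local-MAC delete path; the
 * returned strings are purely illustrative. */
static const char *local_mac_del(const struct mac_entry *mac)
{
        if (mac->peer_synced)
                return "keep: re-install as sync-mac, mark local-inactive";
        if (mac->neigh_count > 0)
                return "keep: clear local flags, mark AUTO";
        return "delete: no neighbors reference this MAC";
}

int main(void)
{
        struct mac_entry synced = { .peer_synced = true, .neigh_count = 0 };
        struct mac_entry busy = { .peer_synced = false, .neigh_count = 2 };
        struct mac_entry idle = { .peer_synced = false, .neigh_count = 0 };

        printf("%s\n", local_mac_del(&synced));
        printf("%s\n", local_mac_del(&busy));
        printf("%s\n", local_mac_del(&idle));
        return 0;
}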
*/ - zebra_evpn_mac_send_del_to_client(zevpn->vni, &mac->macaddr, mac->flags, - false /* force */); - - zebra_evpn_es_mac_deref_entry(mac); - - /* - * If there are no neigh associated with the mac delete the mac - * else mark it as AUTO for forward reference - */ - if (!listcount(mac->neigh_list)) { - zebra_evpn_mac_del(zevpn, mac); - } else { - UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_LOCAL_FLAGS); - UNSET_FLAG(mac->flags, ZEBRA_MAC_STICKY); - SET_FLAG(mac->flags, ZEBRA_MAC_AUTO); + return zebra_evpn_del_local_mac(zevpn, mac); } return 0; @@ -4161,7 +4158,7 @@ int zebra_vxlan_local_mac_del(struct interface *ifp, struct interface *br_if, if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) return 0; - return zebra_vxlan_do_local_mac_del(zevpn, mac); + return zebra_evpn_del_local_mac(zevpn, mac); } /* @@ -5201,6 +5198,7 @@ int zebra_vxlan_process_vrf_vni_cmd(struct zebra_vrf *zvrf, vni_t vni, if (add) { + /* Remove L2VNI if present */ zebra_vxlan_handle_vni_transition(zvrf, vni, add); /* check if the vni is already present under zvrf */ @@ -5295,6 +5293,7 @@ int zebra_vxlan_process_vrf_vni_cmd(struct zebra_vrf *zvrf, vni_t vni, zvrf->l3vni = 0; zl3vni_del(zl3vni); + /* Add L2VNI for this VNI */ zebra_vxlan_handle_vni_transition(zvrf, vni, add); } return 0; @@ -6050,9 +6049,9 @@ static void zebra_vxlan_sg_ref(struct in_addr local_vtep_ip, zebra_vxlan_sg_do_ref(zvrf, local_vtep_ip, mcast_grp); } -static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *backet, void *arg) +static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *bucket, void *arg) { - zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data; + zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data; /* increment the ref count against (*,G) to prevent them from being * deleted @@ -6061,9 +6060,9 @@ static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *backet, void *arg) ++vxlan_sg->ref_cnt; } -static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *backet, void *arg) +static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *bucket, void *arg) { - zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data; + zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data; /* decrement the dummy ref count against (*,G) to delete them */ if (vxlan_sg->sg.src.s_addr == INADDR_ANY) { @@ -6074,9 +6073,9 @@ static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *backet, void *arg) } } -static void zebra_vxlan_sg_cleanup(struct hash_bucket *backet, void *arg) +static void zebra_vxlan_sg_cleanup(struct hash_bucket *bucket, void *arg) { - zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data; + zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data; zebra_vxlan_sg_del(vxlan_sg); } @@ -6094,9 +6093,9 @@ static void zebra_vxlan_cleanup_sg_table(struct zebra_vrf *zvrf) hash_iterate(zvrf->vxlan_sg_table, zebra_vxlan_xg_post_cleanup, NULL); } -static void zebra_vxlan_sg_replay_send(struct hash_bucket *backet, void *arg) +static void zebra_vxlan_sg_replay_send(struct hash_bucket *bucket, void *arg) { - zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data; + zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data; zebra_vxlan_sg_send(vxlan_sg->zvrf, &vxlan_sg->sg, vxlan_sg->sg_str, ZEBRA_VXLAN_SG_ADD); diff --git a/zebra/zserv.c b/zebra/zserv.c index 484d94fac8..6c5eebe6fe 100644 --- a/zebra/zserv.c +++ b/zebra/zserv.c @@ -55,6 +55,7 @@ #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */ #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... 
*/ #include "lib/lib_errors.h" /* for generic ferr ids */ +#include "lib/printfrr.h" /* for string functions */ #include "zebra/debug.h" /* for various debugging macros */ #include "zebra/rib.h" /* for rib_score_proto */ @@ -1186,6 +1187,7 @@ static void zebra_show_stale_client_detail(struct vty *vty, static void zebra_show_client_brief(struct vty *vty, struct zserv *client) { + char client_string[80]; char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF]; char wbuf[ZEBRA_TIME_BUF]; time_t connect_time, last_read_time, last_write_time; @@ -1197,8 +1199,15 @@ static void zebra_show_client_brief(struct vty *vty, struct zserv *client) last_write_time = (time_t)atomic_load_explicit(&client->last_write_time, memory_order_relaxed); - vty_out(vty, "%-10s%12s %12s%12s%8d/%-8d%8d/%-8d\n", - zebra_route_string(client->proto), + if (client->instance || client->session_id) + snprintfrr(client_string, sizeof(client_string), "%s[%u:%u]", + zebra_route_string(client->proto), client->instance, + client->session_id); + else + snprintfrr(client_string, sizeof(client_string), "%s", + zebra_route_string(client->proto)); + + vty_out(vty, "%-10s%12s %12s%12s%8d/%-8d%8d/%-8d\n", client_string, zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF), zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF), zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF), |
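The zserv.c hunk above widens the brief client display so multi-instance or multi-session clients show up as proto[instance:session]. A minimal sketch of that label formatting follows; client_label() is an invented name and plain snprintf stands in for FRR's snprintfrr.

#include <stdio.h>

/* Build the client label the way the brief client display now does:
 * append "[instance:session]" only when either field is non-zero. */
static void client_label(char *buf, size_t len, const char *proto,
                         unsigned int instance, unsigned int session_id)
{
        if (instance || session_id)
                snprintf(buf, len, "%s[%u:%u]", proto, instance, session_id);
        else
                snprintf(buf, len, "%s", proto);
}

int main(void)
{
        char buf[80];

        client_label(buf, sizeof(buf), "ospf", 0, 0);
        printf("%s\n", buf);    /* ospf */

        client_label(buf, sizeof(buf), "bgp", 1, 3);
        printf("%s\n", buf);    /* bgp[1:3] */
        return 0;
}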