345 files changed, 15703 insertions, 2987 deletions
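Much of the diff below mechanically converts open-coded bit operations to FRR's flag helpers: "x & F" becomes CHECK_FLAG(x, F), "x |= F" becomes SET_FLAG(x, F), and "x &= ~F" becomes UNSET_FLAG(x, F). Because CHECK_FLAG expands to a plain bitwise AND, even pure masking such as (frr_weak_random() & 0xFFFF) converts without changing the computed value. The short standalone sketch below illustrates that equivalence; the macro definitions are assumed to mirror lib/zebra.h, and the BGP_ATTR_FLAG_EXTLEN value is the on-wire bit from RFC 4271, chosen here only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror FRR's lib/zebra.h helpers so the sketch is
 * self-contained: CHECK_FLAG is a plain bitwise AND, SET_FLAG a |=,
 * UNSET_FLAG an &= ~. */
#define CHECK_FLAG(V, F) ((V) & (F))
#define SET_FLAG(V, F)   (V) |= (F)
#define UNSET_FLAG(V, F) (V) &= ~(F)

/* On-wire BGP path attribute flag bit (RFC 4271), illustration only. */
#define BGP_ATTR_FLAG_EXTLEN 0x10

int main(void)
{
	uint32_t rnd = 0x12345678; /* stands in for frr_weak_random() */
	uint8_t flags = 0x90;      /* Optional + Extended Length bits set */

	/* Old: seqno = (rnd & 0xFFFF);  New form yields the identical value. */
	uint16_t seqno = CHECK_FLAG(rnd, 0xFFFF);

	/* Old: if (flags & BGP_ATTR_FLAG_EXTLEN) */
	if (CHECK_FLAG(flags, BGP_ATTR_FLAG_EXTLEN))
		printf("extended length set, seqno 0x%04x\n", (unsigned)seqno);

	/* Old: flags &= ~BGP_ATTR_FLAG_EXTLEN;  flags |= 0x01; */
	UNSET_FLAG(flags, BGP_ATTR_FLAG_EXTLEN);
	SET_FLAG(flags, 0x01);
	printf("flags now 0x%02x\n", (unsigned)flags);
	return 0;
}

Since the macros are textual wrappers around the same operators, the conversion is value-preserving; the remaining hunks (SRv6 SID handling, BMP memory types, EVPN sync flags, and so on) carry the functional changes.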
diff --git a/babeld/babel_interface.c b/babeld/babel_interface.c index 943ae9def1..b83c7b1908 100644 --- a/babeld/babel_interface.c +++ b/babeld/babel_interface.c @@ -1351,7 +1351,7 @@ babel_interface_allocate (void) /* All flags are unset */ babel_ifp->bucket_time = babel_now.tv_sec; babel_ifp->bucket = BUCKET_TOKENS_MAX; - babel_ifp->hello_seqno = (frr_weak_random() & 0xFFFF); + babel_ifp->hello_seqno = CHECK_FLAG(frr_weak_random(), 0xFFFF); babel_ifp->rtt_decay = BABEL_DEFAULT_RTT_DECAY; babel_ifp->rtt_min = BABEL_DEFAULT_RTT_MIN; babel_ifp->rtt_max = BABEL_DEFAULT_RTT_MAX; diff --git a/babeld/babeld.c b/babeld/babeld.c index 73deb1dd92..b562f0b70c 100644 --- a/babeld/babeld.c +++ b/babeld/babeld.c @@ -204,7 +204,7 @@ static void babel_read_protocol(struct event *thread) making these inits have sense. */ static void babel_init_routing_process(struct event *thread) { - myseqno = (frr_weak_random() & 0xFFFF); + myseqno = CHECK_FLAG(frr_weak_random(), 0xFFFF); babel_get_myid(); babel_load_state_file(); debugf(BABEL_DEBUG_COMMON, "My ID is : %s.", format_eui64(myid)); @@ -443,8 +443,8 @@ babel_fill_with_next_timeout(struct timeval *tv) #if (defined NO_DEBUG) #define printIfMin(a,b,c,d) #else -#define printIfMin(a, b, c, d) \ - if (unlikely(debug & BABEL_DEBUG_TIMEOUT)) { \ +#define printIfMin(a, b, c, d) \ + if (unlikely(CHECK_FLAG(debug, BABEL_DEBUG_TIMEOUT))) { \ printIfMin(a, b, c, d); \ } diff --git a/babeld/message.c b/babeld/message.c index 1b83eb9ebb..5a33d5c288 100644 --- a/babeld/message.c +++ b/babeld/message.c @@ -324,8 +324,8 @@ parse_request_subtlv(int ae, const unsigned char *a, int alen, have_src_prefix = 1; } else { debugf(BABEL_DEBUG_COMMON,"Received unknown%s Route Request sub-TLV %d.", - ((type & 0x80) != 0) ? " mandatory" : "", type); - if((type & 0x80) != 0) + (CHECK_FLAG(type, 0x80) != 0) ? " mandatory" : "", type); + if(CHECK_FLAG(type, 0x80) != 0) return -1; } @@ -588,7 +588,7 @@ parse_packet(const unsigned char *from, struct interface *ifp, else rc = -1; if(rc < 0) { - if(message[3] & 0x80) + if(CHECK_FLAG(message[3], 0x80)) have_v4_prefix = have_v6_prefix = 0; goto fail; } @@ -596,7 +596,7 @@ parse_packet(const unsigned char *from, struct interface *ifp, plen = message[4] + (message[2] == 1 ? 96 : 0); - if(message[3] & 0x80) { + if(CHECK_FLAG(message[3], 0x80)) { if(message[2] == 1) { memcpy(v4_prefix, prefix, 16); have_v4_prefix = 1; @@ -605,7 +605,7 @@ parse_packet(const unsigned char *from, struct interface *ifp, have_v6_prefix = 1; } } - if(message[3] & 0x40) { + if(CHECK_FLAG(message[3], 0x40)) { if(message[2] == 1) { memset(router_id, 0, 4); memcpy(router_id + 4, prefix + 12, 4); @@ -620,8 +620,8 @@ parse_packet(const unsigned char *from, struct interface *ifp, goto fail; } debugf(BABEL_DEBUG_COMMON,"Received update%s%s for %s from %s on %s.", - (message[3] & 0x80) ? "/prefix" : "", - (message[3] & 0x40) ? "/id" : "", + ((CHECK_FLAG(message[3], 0x80)) ? "/prefix" : ""), + ((CHECK_FLAG(message[3], 0x40)) ? "/id" : ""), format_prefix(prefix, plen), format_address(from), ifp->name); @@ -1059,7 +1059,7 @@ void send_hello_noupdate(struct interface *ifp, unsigned interval) babel_ifp->hello_seqno, interval, ifp->name); start_message(ifp, MESSAGE_HELLO, - (babel_ifp->flags & BABEL_IF_TIMESTAMPS) ? 12 : 6); + (CHECK_FLAG(babel_ifp->flags, BABEL_IF_TIMESTAMPS) ? 
12 : 6)); babel_ifp->buffered_hello = babel_ifp->buffered - 2; accumulate_short(ifp, 0); accumulate_short(ifp, babel_ifp->hello_seqno); diff --git a/babeld/util.c b/babeld/util.c index 4facdabbc6..f5edb0ed1f 100644 --- a/babeld/util.c +++ b/babeld/util.c @@ -211,8 +211,8 @@ mask_prefix(unsigned char *restrict ret, memset(ret, 0, 16); memcpy(ret, prefix, plen / 8); if(plen % 8 != 0) - ret[plen / 8] = - (prefix[plen / 8] & ((0xFF << (8 - (plen % 8))) & 0xFF)); + ret[plen / 8] = CHECK_FLAG(prefix[plen / 8], + CHECK_FLAG((0xFF << (8 - (plen % 8))), 0xFF)); return ret; } @@ -353,12 +353,13 @@ martian_prefix(const unsigned char *prefix, int plen) { return (plen >= 8 && prefix[0] == 0xFF) || - (plen >= 10 && prefix[0] == 0xFE && (prefix[1] & 0xC0) == 0x80) || + (plen >= 10 && prefix[0] == 0xFE && + (CHECK_FLAG(prefix[1], 0xC0) == 0x80)) || (plen >= 128 && memcmp(prefix, zeroes, 15) == 0 && (prefix[15] == 0 || prefix[15] == 1)) || (plen >= 96 && v4mapped(prefix) && ((plen >= 104 && (prefix[12] == 127 || prefix[12] == 0)) || - (plen >= 100 && (prefix[12] & 0xE0) == 0xE0))); + (plen >= 100 && CHECK_FLAG(prefix[12], 0xE0) == 0xE0))); } int diff --git a/babeld/util.h b/babeld/util.h index ddc6a70d43..2242032c4b 100644 --- a/babeld/util.h +++ b/babeld/util.h @@ -47,19 +47,19 @@ seqno_compare(unsigned short s1, unsigned short s2) if(s1 == s2) return 0; else - return ((s2 - s1) & 0x8000) ? 1 : -1; + return (CHECK_FLAG((s2 - s1), 0x8000)) ? 1 : -1; } static inline short seqno_minus(unsigned short s1, unsigned short s2) { - return (short)((s1 - s2) & 0xFFFF); + return (short)(CHECK_FLAG((s1 - s2), 0xFFFF)); } static inline unsigned short seqno_plus(unsigned short s, int plus) { - return ((s + plus) & 0xFFFF); + return CHECK_FLAG((s + plus), 0xFFFF); } /* Returns a time in microseconds on 32 bits (thus modulo 2^32, @@ -130,7 +130,7 @@ is_default(const unsigned char *prefix, int plen) #define debugf(level, ...) 
\ do { \ - if (unlikely(debug & level)) \ + if (unlikely(CHECK_FLAG(debug, level))) \ zlog_debug(__VA_ARGS__); \ } while (0) diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c index d79ccf9644..7cdf98cba7 100644 --- a/bgpd/bgp_attr.c +++ b/bgpd/bgp_attr.c @@ -892,19 +892,16 @@ bool attrhash_cmp(const void *p1, const void *p2) const struct attr *attr1 = p1; const struct attr *attr2 = p2; - if (attr1->flag == attr2->flag && attr1->origin == attr2->origin - && attr1->nexthop.s_addr == attr2->nexthop.s_addr - && attr1->aspath == attr2->aspath - && bgp_attr_get_community(attr1) - == bgp_attr_get_community(attr2) - && attr1->med == attr2->med - && attr1->local_pref == attr2->local_pref - && attr1->rmap_change_flags == attr2->rmap_change_flags) { + if (attr1->flag == attr2->flag && attr1->origin == attr2->origin && + attr1->nexthop.s_addr == attr2->nexthop.s_addr && + attr1->aspath == attr2->aspath && + bgp_attr_get_community(attr1) == bgp_attr_get_community(attr2) && + attr1->med == attr2->med && attr1->local_pref == attr2->local_pref && + attr1->rmap_change_flags == attr2->rmap_change_flags) { if (attr1->aggregator_as == attr2->aggregator_as && attr1->aggregator_addr.s_addr == attr2->aggregator_addr.s_addr && - attr1->weight == attr2->weight && - attr1->tag == attr2->tag && + attr1->weight == attr2->weight && attr1->tag == attr2->tag && attr1->label_index == attr2->label_index && attr1->mp_nexthop_len == attr2->mp_nexthop_len && bgp_attr_get_ecommunity(attr1) == @@ -913,10 +910,8 @@ bool attrhash_cmp(const void *p1, const void *p2) bgp_attr_get_ipv6_ecommunity(attr2) && bgp_attr_get_lcommunity(attr1) == bgp_attr_get_lcommunity(attr2) && - bgp_attr_get_cluster(attr1) == - bgp_attr_get_cluster(attr2) && - bgp_attr_get_transit(attr1) == - bgp_attr_get_transit(attr2) && + bgp_attr_get_cluster(attr1) == bgp_attr_get_cluster(attr2) && + bgp_attr_get_transit(attr1) == bgp_attr_get_transit(attr2) && bgp_attr_get_aigp_metric(attr1) == bgp_attr_get_aigp_metric(attr2) && attr1->rmap_table_id == attr2->rmap_table_id && @@ -948,8 +943,7 @@ bool attrhash_cmp(const void *p1, const void *p2) srv6_vpn_same(attr1->srv6_vpn, attr2->srv6_vpn) && attr1->srte_color == attr2->srte_color && attr1->nh_type == attr2->nh_type && - attr1->bh_type == attr2->bh_type && - attr1->otc == attr2->otc) + attr1->bh_type == attr2->bh_type && attr1->otc == attr2->otc) return true; } @@ -1161,14 +1155,14 @@ struct attr *bgp_attr_default_set(struct attr *attr, struct bgp *bgp, memset(attr, 0, sizeof(struct attr)); attr->origin = origin; - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGIN)); attr->aspath = aspath_empty(bgp->asnotation); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS_PATH)); attr->weight = BGP_ATTR_DEFAULT_WEIGHT; attr->tag = 0; attr->label_index = BGP_INVALID_LABEL_INDEX; attr->label = MPLS_INVALID_LABEL; - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)); attr->mp_nexthop_len = IPV6_MAX_BYTELEN; attr->local_pref = bgp->default_local_pref; @@ -1190,18 +1184,18 @@ struct attr *bgp_attr_aggregate_intern( /* Origin attribute. */ attr.origin = origin; - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGIN)); /* MED */ attr.med = 0; - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)); /* AS path attribute. 
*/ if (aspath) attr.aspath = aspath_intern(aspath); else attr.aspath = aspath_empty(bgp->asnotation); - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_AS_PATH)); if (community) { uint32_t gshut = COMMUNITY_GSHUT; @@ -1231,8 +1225,8 @@ struct attr *bgp_attr_aggregate_intern( attr.weight = BGP_ATTR_DEFAULT_WEIGHT; attr.mp_nexthop_len = IPV6_MAX_BYTELEN; if (!aggregate->as_set || atomic_aggregate) - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE); - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)); if (CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION)) attr.aggregator_as = bgp->confed_id; else @@ -1250,7 +1244,7 @@ struct attr *bgp_attr_aggregate_intern( */ if (p->family == AF_INET) { /* Next hop attribute. */ - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)); attr.mp_nexthop_len = IPV4_MAX_BYTELEN; } @@ -1567,8 +1561,8 @@ bgp_attr_flags_diagnose(struct bgp_attr_parser_args *args, uint8_t real_flags = args->flags; const uint8_t attr_code = args->type; - desired_flags &= ~BGP_ATTR_FLAG_EXTLEN; - real_flags &= ~BGP_ATTR_FLAG_EXTLEN; + UNSET_FLAG(desired_flags, BGP_ATTR_FLAG_EXTLEN); + UNSET_FLAG(real_flags, BGP_ATTR_FLAG_EXTLEN); for (i = 0; i <= 2; i++) /* O,T,P, but not E */ if (CHECK_FLAG(desired_flags, attr_flag_str[i].key) != CHECK_FLAG(real_flags, attr_flag_str[i].key)) { @@ -1682,7 +1676,7 @@ static bool bgp_attr_flag_invalid(struct bgp_attr_parser_args *args) && CHECK_FLAG(flags, BGP_ATTR_FLAG_TRANS)) SET_FLAG(mask, BGP_ATTR_FLAG_PARTIAL); - if ((flags & ~mask) == attr_flags_values[attr_code]) + if (CHECK_FLAG(flags, ~mask) == attr_flags_values[attr_code]) return false; bgp_attr_flags_diagnose(args, attr_flags_values[attr_code]); @@ -1724,7 +1718,7 @@ bgp_attr_origin(struct bgp_attr_parser_args *args) } /* Set oring attribute flag. */ - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGIN); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGIN)); return 0; } @@ -1774,7 +1768,7 @@ static int bgp_attr_aspath(struct bgp_attr_parser_args *args) } /* Set aspath attribute flag. */ - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS_PATH); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS_PATH)); return BGP_ATTR_PARSE_PROCEED; } @@ -1878,7 +1872,7 @@ static int bgp_attr_as4_path(struct bgp_attr_parser_args *args, } /* Set aspath attribute flag. */ - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH)); return BGP_ATTR_PARSE_PROCEED; } @@ -1928,7 +1922,7 @@ bgp_attr_nexthop(struct bgp_attr_parser_args *args) } attr->nexthop.s_addr = stream_get_ipv4(peer->curr); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)); return BGP_ATTR_PARSE_PROCEED; } @@ -1951,7 +1945,7 @@ static enum bgp_attr_parse_ret bgp_attr_med(struct bgp_attr_parser_args *args) attr->med = stream_getl(peer->curr); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)); return BGP_ATTR_PARSE_PROCEED; } @@ -1989,7 +1983,7 @@ bgp_attr_local_pref(struct bgp_attr_parser_args *args) STREAM_GETL(peer->curr, attr->local_pref); /* Set the local-pref flag. 
*/ - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)); return BGP_ATTR_PARSE_PROCEED; @@ -2018,7 +2012,7 @@ static int bgp_attr_atomic(struct bgp_attr_parser_args *args) goto atomic_ignore; /* Set atomic aggregate flag. */ - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)); return BGP_ATTR_PARSE_PROCEED; @@ -2076,7 +2070,7 @@ static int bgp_attr_aggregator(struct bgp_attr_parser_args *args) zlog_debug("%s: attributes: %s", __func__, attr_str); } } else { - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)); } return BGP_ATTR_PARSE_PROCEED; @@ -2127,7 +2121,7 @@ bgp_attr_as4_aggregator(struct bgp_attr_parser_args *args, zlog_debug("%s: attributes: %s", __func__, attr_str); } } else { - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR)); } return BGP_ATTR_PARSE_PROCEED; @@ -2166,12 +2160,13 @@ bgp_attr_munge_as4_attrs(struct peer *const peer, struct attr *const attr, * should not send them */ if (BGP_DEBUG(as4, AS4)) { - if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH))) + if (CHECK_FLAG(attr->flag, + (ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH)))) zlog_debug("[AS4] %s %s AS4_PATH", peer->host, "AS4 capable peer, yet it sent"); - if (attr->flag - & (ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR))) + if (CHECK_FLAG(attr->flag, + (ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR)))) zlog_debug("[AS4] %s %s AS4_AGGREGATOR", peer->host, "AS4 capable peer, yet it sent"); @@ -2183,8 +2178,9 @@ bgp_attr_munge_as4_attrs(struct peer *const peer, struct attr *const attr, /* We have a asn16 peer. First, look for AS4_AGGREGATOR * because that may override AS4_PATH */ - if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR))) { - if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR))) { + if (CHECK_FLAG(attr->flag, (ATTR_FLAG_BIT(BGP_ATTR_AS4_AGGREGATOR)))) { + if (CHECK_FLAG(attr->flag, + (ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)))) { /* received both. 
* if the as_number in aggregator is not AS_TRANS, * then AS4_AGGREGATOR and AS4_PATH shall be ignored @@ -2224,13 +2220,14 @@ bgp_attr_munge_as4_attrs(struct peer *const peer, struct attr *const attr, attr->aggregator_as = as4_aggregator; /* sweep it under the carpet and simulate a "good" * AGGREGATOR */ - attr->flag |= (ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)); + SET_FLAG(attr->flag, + (ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR))); } } /* need to reconcile NEW_AS_PATH and AS_PATH */ - if (!ignore_as4_path - && (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH)))) { + if (!ignore_as4_path && + (CHECK_FLAG(attr->flag, (ATTR_FLAG_BIT(BGP_ATTR_AS4_PATH))))) { newpath = aspath_reconcile_as4(attr->aspath, as4_path); if (!newpath) return BGP_ATTR_PARSE_ERROR; @@ -2315,7 +2312,7 @@ bgp_attr_originator_id(struct bgp_attr_parser_args *args) attr->originator_id.s_addr = stream_get_ipv4(peer->curr); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID)); return BGP_ATTR_PARSE_PROCEED; @@ -2573,7 +2570,7 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args, stream_forward_getp(s, nlri_len); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_MP_REACH_NLRI); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_REACH_NLRI)); return BGP_ATTR_PARSE_PROCEED; #undef LEN_LEFT @@ -2625,7 +2622,7 @@ int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args, stream_forward_getp(s, withdraw_len); - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_MP_UNREACH_NLRI); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_UNREACH_NLRI)); return BGP_ATTR_PARSE_PROCEED; } @@ -2685,10 +2682,9 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args) args->total); } - ecomm = ecommunity_parse( - stream_pnt(peer->curr), length, - CHECK_FLAG(peer->flags, - PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE)); + ecomm = ecommunity_parse(stream_pnt(peer->curr), length, + CHECK_FLAG(peer->flags, + PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE)); bgp_attr_set_ecommunity(attr, ecomm); /* XXX: fix ecommunity_parse to use stream API */ stream_forward_getp(peer->curr, length); @@ -2719,7 +2715,7 @@ bgp_attr_ext_communities(struct bgp_attr_parser_args *args) /* Check EVPN Neighbor advertisement flags, R-bit */ bgp_attr_evpn_na_flag(attr, &proxy); if (proxy) - attr->es_flags |= ATTR_ES_PROXY_ADVERT; + SET_FLAG(attr->es_flags, ATTR_ES_PROXY_ADVERT); /* Extract the Rmac, if any */ if (bgp_attr_rmac(attr, &attr->rmac)) { @@ -3410,7 +3406,7 @@ bgp_attr_pmsi_tunnel(struct bgp_attr_parser_args *args) } } - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL)); bgp_attr_set_pmsi_tnl_type(attr, tnl_type); stream_get(&attr->label, peer->curr, BGP_LABEL_BYTES); @@ -3493,7 +3489,7 @@ static enum bgp_attr_parse_ret bgp_attr_otc(struct bgp_attr_parser_args *args) args->total); } - attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_OTC); + SET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_OTC)); return BGP_ATTR_PARSE_PROCEED; @@ -3690,18 +3686,17 @@ enum bgp_attr_parse_ret bgp_attr_parse(struct peer *peer, struct attr *attr, * unused. They MUST be zero when sent and MUST be ignored when * received. 
*/ - flag = 0xF0 & stream_getc(BGP_INPUT(peer)); + flag = CHECK_FLAG(0xF0, stream_getc(BGP_INPUT(peer))); type = stream_getc(BGP_INPUT(peer)); /* Check whether Extended-Length applies and is in bounds */ if (CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN) && ((endp - startp) < (BGP_ATTR_MIN_LEN + 1))) { - flog_warn( - EC_BGP_EXT_ATTRIBUTE_TOO_SMALL, - "%s: Extended length set, but just %lu bytes of attr header", - peer->host, - (unsigned long)(endp - - stream_pnt(BGP_INPUT(peer)))); + flog_warn(EC_BGP_EXT_ATTRIBUTE_TOO_SMALL, + "%s: Extended length set, but just %lu bytes of attr header", + peer->host, + (unsigned long)(endp - + stream_pnt(BGP_INPUT(peer)))); if (peer->sort != BGP_PEER_EBGP) { bgp_notify_send(peer->connection, @@ -4048,7 +4043,7 @@ enum bgp_attr_parse_ret bgp_attr_parse(struct peer *peer, struct attr *attr, * Finally do the checks on the aspath we did not do yet * because we waited for a potentially synthesized aspath. */ - if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS_PATH))) { + if (CHECK_FLAG(attr->flag, (ATTR_FLAG_BIT(BGP_ATTR_AS_PATH)))) { ret = bgp_attr_aspath_check(peer, attr); if (ret != BGP_ATTR_PARSE_PROCEED) goto done; @@ -4226,8 +4221,8 @@ size_t bgp_packet_mpattr_start(struct stream *s, struct peer *peer, afi_t afi, case SAFI_MULTICAST: case SAFI_LABELED_UNICAST: case SAFI_EVPN: { - if (attr->mp_nexthop_len - == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) { + if (attr->mp_nexthop_len == + BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) { stream_putc(s, BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL); stream_put(s, &attr->mp_nexthop_global, @@ -4448,12 +4443,12 @@ static void bgp_packet_mpattr_tea(struct bgp *bgp, struct peer *peer, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_EXTLEN); stream_putc(s, attrtype); - stream_putw(s, attrlenfield & 0xffff); + stream_putw(s, CHECK_FLAG(attrlenfield, 0xffff)); } else { /* 1-octet length field */ stream_putc(s, BGP_ATTR_FLAG_TRANS | BGP_ATTR_FLAG_OPTIONAL); stream_putc(s, attrtype); - stream_putc(s, attrlenfield & 0xff); + stream_putc(s, CHECK_FLAG(attrlenfield, 0xff)); } if (attrtype == BGP_ATTR_ENCAP) { @@ -4695,15 +4690,15 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, && !peer_cap_enhe(peer, afi, safi)) { afi_t nh_afi = BGP_NEXTHOP_AFI_FROM_NHLEN(attr->mp_nexthop_len); - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP))) { stream_putc(s, BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_NEXT_HOP); bpacket_attr_vec_arr_set_vec(vecarr, BGP_ATTR_VEC_NH, s, attr); stream_putc(s, 4); stream_put_ipv4(s, attr->nexthop.s_addr); - } else if (peer_cap_enhe(from, afi, safi) - || (nh_afi == AFI_IP6)) { + } else if (peer_cap_enhe(from, afi, safi) || + (nh_afi == AFI_IP6)) { /* * Likely this is the case when an IPv4 prefix was * received with Extended Next-hop capability in this @@ -4725,8 +4720,8 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, } /* MED attribute. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC) - || bgp->maxmed_active) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) || + bgp->maxmed_active) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL); stream_putc(s, BGP_ATTR_MULTI_EXIT_DISC); stream_putc(s, 4); @@ -4744,14 +4739,14 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, } /* Atomic aggregate. 
*/ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))) { stream_putc(s, BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_ATOMIC_AGGREGATE); stream_putc(s, 0); } /* Aggregator. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR))) { /* Common to BGP_ATTR_AGGREGATOR, regardless of ASN size */ stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_AGGREGATOR); @@ -4782,8 +4777,8 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, } /* Community attribute. */ - if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY) - && (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES))) { + if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY) && + CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES))) { struct community *comm = NULL; comm = bgp_attr_get_community(attr); @@ -4807,8 +4802,8 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, * Large Community attribute. */ if (CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_SEND_LARGE_COMMUNITY) - && (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES))) { + PEER_FLAG_SEND_LARGE_COMMUNITY) && + CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES))) { if (lcom_length(bgp_attr_get_lcommunity(attr)) > 255) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS @@ -4838,7 +4833,8 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, stream_putc(s, BGP_ATTR_ORIGINATOR_ID); stream_putc(s, 4); - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID)) + if (CHECK_FLAG(attr->flag, + ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID))) stream_put_in_addr(s, &attr->originator_id); else stream_put_in_addr(s, &from->remote_id); @@ -4851,7 +4847,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, stream_putc(s, cluster->length + 4); /* If this peer configuration's parent BGP has * cluster_id. */ - if (bgp->config & BGP_CONFIG_CLUSTER_ID) + if (CHECK_FLAG(bgp->config, BGP_CONFIG_CLUSTER_ID)) stream_put_in_addr(s, &bgp->cluster_id); else stream_put_in_addr(s, &bgp->router_id); @@ -4860,7 +4856,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, stream_putc(s, 4); /* If this peer configuration's parent BGP has * cluster_id. 
*/ - if (bgp->config & BGP_CONFIG_CLUSTER_ID) + if (CHECK_FLAG(bgp->config, BGP_CONFIG_CLUSTER_ID)) stream_put_in_addr(s, &bgp->cluster_id); else stream_put_in_addr(s, &bgp->router_id); @@ -5028,7 +5024,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, } /* PMSI Tunnel */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL))) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_PMSI_TUNNEL); stream_putc(s, 9); // Length @@ -5041,7 +5037,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, } /* OTC */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_OTC)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_OTC))) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_OTC); stream_putc(s, 4); @@ -5049,7 +5045,7 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer, } /* AIGP */ - if (bpi && attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AIGP) && + if (bpi && CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AIGP)) && (CHECK_FLAG(peer->flags, PEER_FLAG_AIGP) || peer->sub_sort == BGP_PEER_EBGP_OAD || peer->sort != BGP_PEER_EBGP)) { @@ -5192,7 +5188,7 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* MED attribute. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MULTI_EXIT_DISC))) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL); stream_putc(s, BGP_ATTR_MULTI_EXIT_DISC); stream_putc(s, 4); @@ -5200,7 +5196,7 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* Local preference. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF))) { stream_putc(s, BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_LOCAL_PREF); stream_putc(s, 4); @@ -5208,14 +5204,14 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* Atomic aggregate. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))) { stream_putc(s, BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_ATOMIC_AGGREGATE); stream_putc(s, 0); } /* Aggregator. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR))) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_AGGREGATOR); stream_putc(s, 8); @@ -5224,7 +5220,7 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* Community attribute. */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_COMMUNITIES))) { struct community *comm = NULL; comm = bgp_attr_get_community(attr); @@ -5235,9 +5231,8 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, stream_putc(s, BGP_ATTR_COMMUNITIES); stream_putw(s, comm->size * 4); } else { - stream_putc(s, - BGP_ATTR_FLAG_OPTIONAL - | BGP_ATTR_FLAG_TRANS); + stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | + BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_COMMUNITIES); stream_putc(s, comm->size * 4); } @@ -5245,7 +5240,7 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* Large Community attribute. 
*/ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES))) { if (lcom_length(bgp_attr_get_lcommunity(attr)) > 255) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS @@ -5254,9 +5249,8 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, stream_putw(s, lcom_length(bgp_attr_get_lcommunity(attr))); } else { - stream_putc(s, - BGP_ATTR_FLAG_OPTIONAL - | BGP_ATTR_FLAG_TRANS); + stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | + BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_LARGE_COMMUNITIES); stream_putc(s, lcom_length(bgp_attr_get_lcommunity(attr))); @@ -5300,11 +5294,10 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* Prefix SID */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID))) { if (attr->label_index != BGP_INVALID_LABEL_INDEX) { - stream_putc(s, - BGP_ATTR_FLAG_OPTIONAL - | BGP_ATTR_FLAG_TRANS); + stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | + BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_PREFIX_SID); stream_putc(s, 10); stream_putc(s, BGP_PREFIX_SID_LABEL_INDEX); @@ -5316,7 +5309,7 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* OTC */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_OTC)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_OTC))) { stream_putc(s, BGP_ATTR_FLAG_OPTIONAL | BGP_ATTR_FLAG_TRANS); stream_putc(s, BGP_ATTR_OTC); stream_putc(s, 4); @@ -5324,7 +5317,7 @@ void bgp_dump_routes_attr(struct stream *s, struct bgp_path_info *bpi, } /* AIGP */ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_AIGP)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_AIGP))) { /* At the moment only AIGP Metric TLV exists for AIGP * attribute. If more comes in, do not forget to update * attr_len variable to include new ones. diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index 213de45c55..a8ca8c1fa6 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -353,7 +353,7 @@ struct transit { __builtin_choose_expr((X) >= 1 && (X) <= 64, 1ULL << ((X)-1), (void)0) #define BGP_CLUSTER_LIST_LENGTH(attr) \ - (((attr)->flag & ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST)) \ + (CHECK_FLAG((attr)->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST)) \ ? 
bgp_attr_get_cluster((attr))->length \ : 0) diff --git a/bgpd/bgp_attr_evpn.c b/bgpd/bgp_attr_evpn.c index fc7548d9bf..a3ffe61eb8 100644 --- a/bgpd/bgp_attr_evpn.c +++ b/bgpd/bgp_attr_evpn.c @@ -218,7 +218,8 @@ uint32_t bgp_attr_mac_mobility_seqnum(struct attr *attr) continue; flags = *pnt++; - if (flags & ECOMMUNITY_EVPN_SUBTYPE_MACMOBILITY_FLAG_STICKY) + if (CHECK_FLAG(flags, + ECOMMUNITY_EVPN_SUBTYPE_MACMOBILITY_FLAG_STICKY)) SET_FLAG(attr->evpn_flags, ATTR_EVPN_FLAG_STICKY); else UNSET_FLAG(attr->evpn_flags, ATTR_EVPN_FLAG_STICKY); @@ -258,11 +259,12 @@ void bgp_attr_evpn_na_flag(struct attr *attr, bool *proxy) sub_type == ECOMMUNITY_EVPN_SUBTYPE_ND) { val = *pnt++; - if (val & ECOMMUNITY_EVPN_SUBTYPE_ND_ROUTER_FLAG) + if (CHECK_FLAG(val, + ECOMMUNITY_EVPN_SUBTYPE_ND_ROUTER_FLAG)) SET_FLAG(attr->evpn_flags, ATTR_EVPN_FLAG_ROUTER); - if (val & ECOMMUNITY_EVPN_SUBTYPE_PROXY_FLAG) + if (CHECK_FLAG(val, ECOMMUNITY_EVPN_SUBTYPE_PROXY_FLAG)) *proxy = true; break; diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index 675e4765e4..556738a606 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -2543,9 +2543,9 @@ DEFPY(bmp_monitor_cfg, bmp_monitor_cmd, prev = bt->afimon[afi][safi]; if (no) - bt->afimon[afi][safi] &= ~flag; + UNSET_FLAG(bt->afimon[afi][safi], flag); else - bt->afimon[afi][safi] |= flag; + SET_FLAG(bt->afimon[afi][safi], flag); if (prev == bt->afimon[afi][safi]) return CMD_SUCCESS; @@ -2743,7 +2743,7 @@ DEFPY(show_bmp, } out = ttable_dump(tt, "\n"); vty_out(vty, "%s", out); - XFREE(MTYPE_TMP, out); + XFREE(MTYPE_TMP_TTABLE, out); ttable_del(tt); vty_out(vty, "\n %zu connected clients:\n", @@ -2770,7 +2770,7 @@ DEFPY(show_bmp, } out = ttable_dump(tt, "\n"); vty_out(vty, "%s", out); - XFREE(MTYPE_TMP, out); + XFREE(MTYPE_TMP_TTABLE, out); ttable_del(tt); vty_out(vty, "\n"); } diff --git a/bgpd/bgp_btoa.c b/bgpd/bgp_btoa.c index 1d5034efd2..32823cc376 100644 --- a/bgpd/bgp_btoa.c +++ b/bgpd/bgp_btoa.c @@ -69,7 +69,7 @@ static void attr_parse(struct stream *s, uint16_t len) flag = stream_getc(s); type = stream_getc(s); - if (flag & BGP_ATTR_FLAG_EXTLEN) + if (CHECK_FLAG(flag, BGP_ATTR_FLAG_EXTLEN)) length = stream_getw(s); else length = stream_getc(s); diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 153cbd6e50..ad154e638b 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -496,8 +496,8 @@ static char *community_str_get(struct community *com, int i) break; default: str = XSTRDUP(MTYPE_COMMUNITY_STR, "65536:65535"); - as = (comval >> 16) & 0xFFFF; - val = comval & 0xFFFF; + as = CHECK_FLAG((comval >> 16), 0xFFFF); + val = CHECK_FLAG(comval, 0xFFFF); snprintf(str, strlen(str), "%u:%d", as, val); break; } diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c index 8e4c430555..602c1437af 100644 --- a/bgpd/bgp_community.c +++ b/bgpd/bgp_community.c @@ -416,13 +416,12 @@ static void set_community_string(struct community *com, bool make_json, } break; default: - as = (comval >> 16) & 0xFFFF; - val = comval & 0xFFFF; + as = CHECK_FLAG((comval >> 16), 0xFFFF); + val = CHECK_FLAG(comval, 0xFFFF); char buf[32]; snprintf(buf, sizeof(buf), "%u:%d", as, val); const char *com2alias = - translate_alias ? bgp_community2alias(buf) - : buf; + translate_alias ? 
bgp_community2alias(buf) : buf; strlcat(str, com2alias, len); if (make_json) { diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c index 6228432bd2..97c3e5740f 100644 --- a/bgpd/bgp_debug.c +++ b/bgpd/bgp_debug.c @@ -2558,7 +2558,7 @@ static int bgp_debug_per_prefix(const struct prefix *p, struct bgp_debug_filter *filter; struct listnode *node, *nnode; - if (term_bgp_debug_type & BGP_DEBUG_TYPE) { + if (CHECK_FLAG(term_bgp_debug_type, BGP_DEBUG_TYPE)) { /* We are debugging all prefixes so return true */ if (!per_prefix_list || list_isempty(per_prefix_list)) return 1; @@ -2591,7 +2591,7 @@ static bool bgp_debug_per_peer(char *host, const struct prefix *p, struct bgp_debug_filter *filter; struct listnode *node, *nnode; - if (term_bgp_debug_type & BGP_DEBUG_TYPE) { + if (CHECK_FLAG(term_bgp_debug_type, BGP_DEBUG_TYPE)) { /* We are debugging all peers so return true */ if (!per_peer_list || list_isempty(per_peer_list)) return true; diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index 1beb0307d2..547dcdf7f3 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -515,7 +515,7 @@ static int ecommunity_encode_internal(uint8_t type, uint8_t sub_type, /* Fill in the values. */ eval->val[0] = type; if (!trans) - eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE; + SET_FLAG(eval->val[0], ECOMMUNITY_FLAG_NON_TRANSITIVE); eval->val[1] = sub_type; if (type == ECOMMUNITY_ENCODE_AS) { encode_route_target_as(as, val, eval, trans); @@ -1293,11 +1293,12 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) == ECOMMUNITY_EVPN_SUBTYPE_ESI_LABEL) { uint8_t flags = *++pnt; - snprintf(encbuf, - sizeof(encbuf), "ESI-label-Rt:%s", - (flags & - ECOMMUNITY_EVPN_SUBTYPE_ESI_SA_FLAG) ? - "SA":"AA"); + snprintf(encbuf, sizeof(encbuf), + "ESI-label-Rt:%s", + CHECK_FLAG(flags, + ECOMMUNITY_EVPN_SUBTYPE_ESI_SA_FLAG) + ? 
"SA" + : "AA"); } else if (*pnt == ECOMMUNITY_EVPN_SUBTYPE_DF_ELECTION) { uint8_t alg; @@ -1337,38 +1338,37 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) char buf[ECOMMUNITY_STRLEN]; memset(buf, 0, sizeof(buf)); - ecommunity_rt_soo_str_internal(buf, sizeof(buf), - (const uint8_t *)pnt, - type & - ~ECOMMUNITY_ENCODE_TRANS_EXP, - ECOMMUNITY_ROUTE_TARGET, - format, - ecom->unit_size); + ecommunity_rt_soo_str_internal( + buf, sizeof(buf), (const uint8_t *)pnt, + CHECK_FLAG(type, + ~ECOMMUNITY_ENCODE_TRANS_EXP), + ECOMMUNITY_ROUTE_TARGET, format, + ecom->unit_size); snprintf(encbuf, sizeof(encbuf), "%s", buf); } else if (sub_type == ECOMMUNITY_FLOWSPEC_REDIRECT_IPV6) { char buf[64]; memset(buf, 0, sizeof(buf)); - ecommunity_rt_soo_str_internal(buf, sizeof(buf), - (const uint8_t *)pnt, - type & - ~ECOMMUNITY_ENCODE_TRANS_EXP, - ECOMMUNITY_ROUTE_TARGET, - ECOMMUNITY_FORMAT_DISPLAY, - ecom->unit_size); + ecommunity_rt_soo_str_internal( + buf, sizeof(buf), (const uint8_t *)pnt, + CHECK_FLAG(type, + ~ECOMMUNITY_ENCODE_TRANS_EXP), + ECOMMUNITY_ROUTE_TARGET, + ECOMMUNITY_FORMAT_DISPLAY, + ecom->unit_size); snprintf(encbuf, sizeof(encbuf), "FS:redirect VRF %s", buf); } else if (sub_type == ECOMMUNITY_REDIRECT_VRF) { char buf[16]; memset(buf, 0, sizeof(buf)); - ecommunity_rt_soo_str(buf, sizeof(buf), - (const uint8_t *)pnt, - type & - ~ECOMMUNITY_ENCODE_TRANS_EXP, - ECOMMUNITY_ROUTE_TARGET, - ECOMMUNITY_FORMAT_DISPLAY); + ecommunity_rt_soo_str( + buf, sizeof(buf), (const uint8_t *)pnt, + CHECK_FLAG(type, + ~ECOMMUNITY_ENCODE_TRANS_EXP), + ECOMMUNITY_ROUTE_TARGET, + ECOMMUNITY_FORMAT_DISPLAY); snprintf(encbuf, sizeof(encbuf), "FS:redirect VRF %s", buf); snprintf(encbuf, sizeof(encbuf), @@ -1640,12 +1640,13 @@ int ecommunity_fill_pbr_action(struct ecommunity_val *ecom_eval, } else if (ecom_eval->val[1] == ECOMMUNITY_TRAFFIC_ACTION) { api->action = ACTION_TRAFFIC_ACTION; /* else distribute code is set by default */ - if (ecom_eval->val[5] & (1 << FLOWSPEC_TRAFFIC_ACTION_TERMINAL)) - api->u.za.filter |= TRAFFIC_ACTION_TERMINATE; + if (CHECK_FLAG(ecom_eval->val[5], + (1 << FLOWSPEC_TRAFFIC_ACTION_TERMINAL))) + SET_FLAG(api->u.za.filter, TRAFFIC_ACTION_TERMINATE); else - api->u.za.filter |= TRAFFIC_ACTION_DISTRIBUTE; + SET_FLAG(api->u.za.filter, TRAFFIC_ACTION_DISTRIBUTE); if (ecom_eval->val[5] == 1 << FLOWSPEC_TRAFFIC_ACTION_SAMPLE) - api->u.za.filter |= TRAFFIC_ACTION_SAMPLE; + SET_FLAG(api->u.za.filter, TRAFFIC_ACTION_SAMPLE); } else if (ecom_eval->val[1] == ECOMMUNITY_TRAFFIC_MARKING) { api->action = ACTION_MARKING; @@ -1940,7 +1941,7 @@ struct ecommunity *ecommunity_replace_linkbw(as_t as, struct ecommunity *ecom, return new; type = *eval; - if (type & ECOMMUNITY_FLAG_NON_TRANSITIVE) + if (CHECK_FLAG(type, ECOMMUNITY_FLAG_NON_TRANSITIVE)) return new; /* Transitive link-bandwidth exists, replace with the passed diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h index 929e4e60be..67c16aeb9e 100644 --- a/bgpd/bgp_ecommunity.h +++ b/bgpd/bgp_ecommunity.h @@ -155,12 +155,12 @@ struct ecommunity_ip6 { /* Extended community value is eight octet. */ struct ecommunity_val { - char val[ECOMMUNITY_SIZE]; + uint8_t val[ECOMMUNITY_SIZE]; }; /* IPv6 Extended community value is eight octet. 
*/ struct ecommunity_val_ipv6 { - char val[IPV6_ECOMMUNITY_SIZE]; + uint8_t val[IPV6_ECOMMUNITY_SIZE]; }; #define ecom_length_size(X, Y) ((X)->size * (Y)) diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c index cb5c898315..4317eb9bab 100644 --- a/bgpd/bgp_evpn.c +++ b/bgpd/bgp_evpn.c @@ -117,7 +117,7 @@ int vni_list_cmp(void *p1, void *p2) static unsigned int vrf_import_rt_hash_key_make(const void *p) { const struct vrf_irt_node *irt = p; - const char *pnt = irt->rt.val; + const uint8_t *pnt = irt->rt.val; return jhash(pnt, 8, 0x5abc1234); } @@ -229,7 +229,7 @@ static int is_vrf_present_in_irt_vrfs(struct list *vrfs, struct bgp *bgp_vrf) static unsigned int import_rt_hash_key_make(const void *p) { const struct irt_node *irt = p; - const char *pnt = irt->rt.val; + const uint8_t *pnt = irt->rt.val; return jhash(pnt, 8, 0xdeadbeef); } @@ -621,7 +621,7 @@ static void form_auto_rt(struct bgp *bgp, vni_t vni, struct list *rtl, struct listnode *node; if (bgp->advertise_autort_rfc8365) - vni |= EVPN_AUTORT_VXLAN; + SET_FLAG(vni, EVPN_AUTORT_VXLAN); encode_route_target_as((bgp->as & 0xFFFF), vni, &eval, true); ecomadd = ecommunity_new(); @@ -1314,12 +1314,11 @@ enum zclient_send_status evpn_zebra_install(struct bgp *bgp, struct bgpevpn *vpn * flag set install the local entry as a router entry */ if (is_evpn_prefix_ipaddr_v6(p) && - (pi->attr->es_flags & - ATTR_ES_PEER_ROUTER)) + CHECK_FLAG(pi->attr->es_flags, ATTR_ES_PEER_ROUTER)) SET_FLAG(flags, ZEBRA_MACIP_TYPE_ROUTER_FLAG); - if (!(pi->attr->es_flags & ATTR_ES_PEER_ACTIVE)) + if (!CHECK_FLAG(pi->attr->es_flags, ATTR_ES_PEER_ACTIVE)) SET_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT); } @@ -1897,42 +1896,44 @@ static void update_evpn_route_entry_sync_info(struct bgp *bgp, mac); attr->mm_sync_seqnum = max_sync_seq; if (active_on_peer) - attr->es_flags |= ATTR_ES_PEER_ACTIVE; + SET_FLAG(attr->es_flags, ATTR_ES_PEER_ACTIVE); else - attr->es_flags &= ~ATTR_ES_PEER_ACTIVE; + UNSET_FLAG(attr->es_flags, ATTR_ES_PEER_ACTIVE); if (proxy_from_peer) - attr->es_flags |= ATTR_ES_PEER_PROXY; + SET_FLAG(attr->es_flags, ATTR_ES_PEER_PROXY); else - attr->es_flags &= ~ATTR_ES_PEER_PROXY; + UNSET_FLAG(attr->es_flags, ATTR_ES_PEER_PROXY); if (peer_router) - attr->es_flags |= ATTR_ES_PEER_ROUTER; + SET_FLAG(attr->es_flags, ATTR_ES_PEER_ROUTER); else - attr->es_flags &= ~ATTR_ES_PEER_ROUTER; + UNSET_FLAG(attr->es_flags, ATTR_ES_PEER_ROUTER); if (BGP_DEBUG(evpn_mh, EVPN_MH_RT)) { char esi_buf[ESI_STR_LEN]; - zlog_debug( - "setup sync info for %pFX es %s max_seq %d %s%s%s", - evp, - esi_to_str(esi, esi_buf, - sizeof(esi_buf)), - max_sync_seq, - (attr->es_flags & ATTR_ES_PEER_ACTIVE) - ? "peer-active " - : "", - (attr->es_flags & ATTR_ES_PEER_PROXY) - ? "peer-proxy " - : "", - (attr->es_flags & ATTR_ES_PEER_ROUTER) - ? "peer-router " - : ""); + zlog_debug("setup sync info for %pFX es %s max_seq %d %s%s%s", + evp, + esi_to_str(esi, esi_buf, + sizeof(esi_buf)), + max_sync_seq, + CHECK_FLAG(attr->es_flags, + ATTR_ES_PEER_ACTIVE) + ? "peer-active " + : "", + CHECK_FLAG(attr->es_flags, + ATTR_ES_PEER_PROXY) + ? "peer-proxy " + : "", + CHECK_FLAG(attr->es_flags, + ATTR_ES_PEER_ROUTER) + ? 
"peer-router " + : ""); } } } else { attr->mm_sync_seqnum = 0; - attr->es_flags &= ~ATTR_ES_PEER_ACTIVE; - attr->es_flags &= ~ATTR_ES_PEER_PROXY; + UNSET_FLAG(attr->es_flags, ATTR_ES_PEER_ACTIVE); + UNSET_FLAG(attr->es_flags, ATTR_ES_PEER_PROXY); } } @@ -2124,22 +2125,18 @@ static void evpn_zebra_reinstall_best_route(struct bgp *bgp, } } - if (curr_select && curr_select->type == ZEBRA_ROUTE_BGP - && (curr_select->sub_type == BGP_ROUTE_IMPORTED || - bgp_evpn_attr_is_sync(curr_select->attr))) - if (curr_select && curr_select->type == ZEBRA_ROUTE_BGP && - (curr_select->sub_type == BGP_ROUTE_IMPORTED || - bgp_evpn_attr_is_sync(curr_select->attr))) { - if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)) - evpn_zebra_install(bgp, vpn, - (const struct prefix_evpn *) - bgp_dest_get_prefix( - dest), - curr_select); - else - bgp_zebra_route_install(dest, curr_select, bgp, - true, vpn, false); - } + if (curr_select && curr_select->type == ZEBRA_ROUTE_BGP && + (curr_select->sub_type == BGP_ROUTE_IMPORTED || + bgp_evpn_attr_is_sync(curr_select->attr))) { + if (CHECK_FLAG(bgp->flags, BGP_FLAG_DELETE_IN_PROGRESS)) + evpn_zebra_install(bgp, vpn, + (const struct prefix_evpn *) + bgp_dest_get_prefix(dest), + curr_select); + else + bgp_zebra_route_install(dest, curr_select, bgp, true, + vpn, false); + } } /* @@ -2221,16 +2218,16 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn, if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_ROUTER_FLAG)) SET_FLAG(attr.evpn_flags, ATTR_EVPN_FLAG_ROUTER); if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT)) - attr.es_flags |= ATTR_ES_PROXY_ADVERT; + SET_FLAG(attr.es_flags, ATTR_ES_PROXY_ADVERT); if (esi && bgp_evpn_is_esi_valid(esi)) { memcpy(&attr.esi, esi, sizeof(esi_t)); - attr.es_flags |= ATTR_ES_IS_LOCAL; + SET_FLAG(attr.es_flags, ATTR_ES_IS_LOCAL); } /* PMSI is only needed for type-3 routes */ if (p->prefix.route_type == BGP_EVPN_IMET_ROUTE) { - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL)); bgp_attr_set_pmsi_tnl_type(&attr, PMSI_TNLTYPE_INGR_REPL); } @@ -2262,8 +2259,12 @@ static int update_evpn_route(struct bgp *bgp, struct bgpevpn *vpn, * IPv4 or IPv6 global addresses and we're advertising L3VNI with * these routes. */ - add_l3_ecomm = bgp_evpn_route_add_l3_ecomm_ok( - vpn, p, (attr.es_flags & ATTR_ES_IS_LOCAL) ? &attr.esi : NULL); + add_l3_ecomm = + bgp_evpn_route_add_l3_ecomm_ok(vpn, p, + CHECK_FLAG(attr.es_flags, + ATTR_ES_IS_LOCAL) + ? &attr.esi + : NULL); if (bgp->evpn_info) macvrf_soo = bgp->evpn_info->soo; @@ -2535,9 +2536,12 @@ void bgp_evpn_update_type2_route_entry(struct bgp *bgp, struct bgpevpn *vpn, /* Add L3 VNI RTs and RMAC for non IPv6 link-local if * using L3 VNI for type-2 routes also. */ - add_l3_ecomm = bgp_evpn_route_add_l3_ecomm_ok( - vpn, &evp, - (attr.es_flags & ATTR_ES_IS_LOCAL) ? &attr.esi : NULL); + add_l3_ecomm = + bgp_evpn_route_add_l3_ecomm_ok(vpn, &evp, + CHECK_FLAG(attr.es_flags, + ATTR_ES_IS_LOCAL) + ? 
&attr.esi + : NULL); if (bgp->evpn_info) macvrf_soo = bgp->evpn_info->soo; @@ -3095,23 +3099,23 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf, attr.mp_nexthop_len = IPV6_MAX_BYTELEN; } else { attr.nexthop = bre->gw_ip.ipaddr_v4; - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)); } } else { if (afi == AFI_IP6) evpn_convert_nexthop_to_ipv6(&attr); else { attr.nexthop = attr.mp_nexthop_global_in; - attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP); + SET_FLAG(attr.flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)); } } bgp_evpn_es_vrf_use_nhg(bgp_vrf, &parent_pi->attr->esi, &use_l3nhg, &is_l3nhg_active, NULL); if (use_l3nhg) - attr.es_flags |= ATTR_ES_L3_NHG_USE; + SET_FLAG(attr.es_flags, ATTR_ES_L3_NHG_USE); if (is_l3nhg_active) - attr.es_flags |= ATTR_ES_L3_NHG_ACTIVE; + SET_FLAG(attr.es_flags, ATTR_ES_L3_NHG_ACTIVE); /* Check if route entry is already present. */ for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) @@ -3275,8 +3279,7 @@ static int install_evpn_route_entry_in_vni_common( if (BGP_DEBUG(evpn_mh, EVPN_MH_RT)) zlog_debug("VNI %d path %pFX chg to %s es", vpn->vni, &pi->net->rn->p, - new_local_es ? "local" - : "non-local"); + new_local_es ? "local" : "non-local"); bgp_path_info_set_flag(dest, pi, BGP_PATH_ATTR_CHANGED); } @@ -3639,7 +3642,7 @@ static int is_route_matching_for_vrf(struct bgp *bgp_vrf, assert(attr); /* Route should have valid RT to be even considered. */ - if (!(attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) + if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) return 0; ecom = bgp_attr_get_ecommunity(attr); @@ -3706,7 +3709,7 @@ static int is_route_matching_for_vni(struct bgp *bgp, struct bgpevpn *vpn, assert(attr); /* Route should have valid RT to be even considered. */ - if (!(attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) + if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) return 0; ecom = bgp_attr_get_ecommunity(attr); @@ -4208,7 +4211,7 @@ static int bgp_evpn_install_uninstall_table(struct bgp *bgp, afi_t afi, return 0; /* If we don't have Route Target, nothing much to do. */ - if (!(attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) + if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) return 0; /* EAD prefix in the global table doesn't include the VTEP-IP so @@ -4738,9 +4741,9 @@ static int process_type2_route(struct peer *peer, afi_t afi, safi_t safi, STREAM_GET(&attr->esi, pkt, sizeof(esi_t)); if (bgp_evpn_is_esi_local_and_non_bypass(&attr->esi)) - attr->es_flags |= ATTR_ES_IS_LOCAL; + SET_FLAG(attr->es_flags, ATTR_ES_IS_LOCAL); else - attr->es_flags &= ~ATTR_ES_IS_LOCAL; + UNSET_FLAG(attr->es_flags, ATTR_ES_IS_LOCAL); } else { STREAM_FORWARD_GETP(pkt, sizeof(esi_t)); } @@ -4842,8 +4845,7 @@ static int process_type3_route(struct peer *peer, afi_t afi, safi_t safi, * Note: We just simply ignore the values as it is not clear if * doing anything else is better. 
*/ - if (attr && - (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL))) { + if (attr && CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL))) { enum pta_type pmsi_tnl_type = bgp_attr_get_pmsi_tnl_type(attr); if (pmsi_tnl_type != PMSI_TNLTYPE_INGR_REPL @@ -5445,7 +5447,7 @@ void evpn_rt_delete_auto(struct bgp *bgp, vni_t vni, struct list *rtl, struct ecommunity_val eval; if (bgp->advertise_autort_rfc8365) - vni |= EVPN_AUTORT_VXLAN; + SET_FLAG(vni, EVPN_AUTORT_VXLAN); encode_route_target_as((bgp->as & 0xFFFF), vni, &eval, true); @@ -7723,18 +7725,18 @@ static void bgp_evpn_remote_ip_process_nexthops(struct bgpevpn *vpn, * MAC/IP route or SVI or tenant vrf being added to EVI. * Set nexthop as valid only if it is already L3 reachable */ - if (resolve && bnc->flags & BGP_NEXTHOP_EVPN_INCOMPLETE) { - bnc->flags &= ~BGP_NEXTHOP_EVPN_INCOMPLETE; - bnc->flags |= BGP_NEXTHOP_VALID; - bnc->change_flags |= BGP_NEXTHOP_MACIP_CHANGED; + if (resolve && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE)) { + UNSET_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE); + SET_FLAG(bnc->flags, BGP_NEXTHOP_VALID); + SET_FLAG(bnc->change_flags, BGP_NEXTHOP_MACIP_CHANGED); evaluate_paths(bnc); } /* MAC/IP route or SVI or tenant vrf being deleted from EVI */ - if (!resolve && bnc->flags & BGP_NEXTHOP_VALID) { - bnc->flags &= ~BGP_NEXTHOP_VALID; - bnc->flags |= BGP_NEXTHOP_EVPN_INCOMPLETE; - bnc->change_flags |= BGP_NEXTHOP_MACIP_CHANGED; + if (!resolve && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_VALID)) { + UNSET_FLAG(bnc->flags, BGP_NEXTHOP_VALID); + SET_FLAG(bnc->flags, BGP_NEXTHOP_EVPN_INCOMPLETE); + SET_FLAG(bnc->change_flags, BGP_NEXTHOP_MACIP_CHANGED); evaluate_paths(bnc); } } diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index c28cdb4a65..0a5e6a773b 100644 --- a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -2561,7 +2561,7 @@ static void evpn_show_route_vni_multicast(struct vty *vty, struct bgp *bgp, /* Prefix and num paths displayed once per prefix. */ route_vty_out_detail_header(vty, bgp, dest, bgp_dest_get_prefix(dest), - NULL, afi, safi, json, false); + NULL, afi, safi, json, false, true); /* Display each path for this prefix. */ for (pi = bgp_dest_get_bgp_path_info(dest); pi; pi = pi->next) { @@ -2663,7 +2663,7 @@ static void evpn_show_route_vni_macip(struct vty *vty, struct bgp *bgp, /* Prefix and num paths displayed once per prefix. */ route_vty_out_detail_header(vty, bgp, dest, (struct prefix *)&p, NULL, - afi, safi, json, false); + afi, safi, json, false, true); evp = (const struct prefix_evpn *)bgp_dest_get_prefix(dest); @@ -2798,7 +2798,7 @@ static void evpn_show_route_rd_macip(struct vty *vty, struct bgp *bgp, /* Prefix and num paths displayed once per prefix. */ route_vty_out_detail_header(vty, bgp, dest, bgp_dest_get_prefix(dest), - prd, afi, safi, json, false); + prd, afi, safi, json, false, false); if (json) json_paths = json_object_new_array(); @@ -2905,9 +2905,10 @@ static void evpn_show_route_rd(struct vty *vty, struct bgp *bgp, } /* Prefix and num paths displayed once per prefix. */ - route_vty_out_detail_header( - vty, bgp, dest, bgp_dest_get_prefix(dest), prd, - afi, safi, json_prefix, false); + route_vty_out_detail_header(vty, bgp, dest, + bgp_dest_get_prefix(dest), + prd, afi, safi, json_prefix, + false, false); prefix_cnt++; } @@ -3042,9 +3043,10 @@ static void evpn_show_route_rd_all_macip(struct vty *vty, struct bgp *bgp, p->prefixlen); } else /* Prefix and num paths displayed once per prefix. 
*/ - route_vty_out_detail_header( - vty, bgp, dest, p, (struct prefix_rd *)rd_destp, - AFI_L2VPN, SAFI_EVPN, json_prefix, false); + route_vty_out_detail_header(vty, bgp, dest, p, + (struct prefix_rd *)rd_destp, + AFI_L2VPN, SAFI_EVPN, + json_prefix, false, false); /* For EVPN, the prefix is displayed for each path (to * fit in with code that already exists). @@ -3197,11 +3199,14 @@ static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type, /* Prefix and num paths displayed once per prefix. */ if (detail) - route_vty_out_detail_header( - vty, bgp, dest, - bgp_dest_get_prefix(dest), - (struct prefix_rd *)rd_destp, AFI_L2VPN, - SAFI_EVPN, json_prefix, false); + route_vty_out_detail_header(vty, bgp, dest, + bgp_dest_get_prefix( + dest), + (struct prefix_rd *) + rd_destp, + AFI_L2VPN, SAFI_EVPN, + json_prefix, false, + false); /* For EVPN, the prefix is displayed for each path (to * fit in diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 42ba54ab7b..cdd9b7ae4d 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -696,9 +696,8 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi) attr = *pi->attr; bgp_attr_add_llgr_community(&attr); pi->attr = bgp_attr_intern(&attr); - bgp_recalculate_afi_safi_bestpaths( - peer->bgp, afi, safi); - + bgp_process(peer->bgp, rm, pi, afi, + safi); break; } } @@ -724,9 +723,7 @@ static void bgp_set_llgr_stale(struct peer *peer, afi_t afi, safi_t safi) attr = *pi->attr; bgp_attr_add_llgr_community(&attr); pi->attr = bgp_attr_intern(&attr); - bgp_recalculate_afi_safi_bestpaths(peer->bgp, - afi, safi); - + bgp_process(peer->bgp, dest, pi, afi, safi); break; } } @@ -1802,18 +1799,14 @@ bgp_connect_fail(struct peer_connection *connection) */ static void bgp_connect_in_progress_update_connection(struct peer *peer) { - if (bgp_getsockname(peer) < 0) { - if (!peer->su_remote && - !BGP_CONNECTION_SU_UNSPEC(peer->connection)) { - /* if connect initiated, then dest port and dest addresses are well known */ - peer->su_remote = sockunion_dup(&peer->connection->su); - if (sockunion_family(peer->su_remote) == AF_INET) - peer->su_remote->sin.sin_port = - htons(peer->port); - else if (sockunion_family(peer->su_remote) == AF_INET6) - peer->su_remote->sin6.sin6_port = - htons(peer->port); - } + bgp_updatesockname(peer); + if (!peer->su_remote && !BGP_CONNECTION_SU_UNSPEC(peer->connection)) { + /* if connect initiated, then dest port and dest addresses are well known */ + peer->su_remote = sockunion_dup(&peer->connection->su); + if (sockunion_family(peer->su_remote) == AF_INET) + peer->su_remote->sin.sin_port = htons(peer->port); + else if (sockunion_family(peer->su_remote) == AF_INET6) + peer->su_remote->sin6.sin6_port = htons(peer->port); } } diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index f7ca51e146..f5dbb4aa58 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -304,7 +304,7 @@ void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi) if (vpn_leak_to_vpn_active(bgp, afi, NULL, false)) { ifp = if_get_vrf_loopback(bgp->vrf_id); - if (ifp && if_is_vrf(ifp) && if_is_up(ifp)) + if (ifp && if_is_up(ifp)) label = bgp->vpn_policy[afi].tovpn_label; } @@ -385,6 +385,18 @@ void vpn_leak_zebra_vrf_sid_update_per_af(struct bgp *bgp, afi_t afi) if (!vrf) return; + if (bgp->vpn_policy[afi].tovpn_sid_locator) { + ctx.block_len = + bgp->vpn_policy[afi].tovpn_sid_locator->block_bits_length; + ctx.node_len = + bgp->vpn_policy[afi].tovpn_sid_locator->node_bits_length; + ctx.function_len = + bgp->vpn_policy[afi] + 
.tovpn_sid_locator->function_bits_length; + ctx.argument_len = + bgp->vpn_policy[afi] + .tovpn_sid_locator->argument_bits_length; + } ctx.table = vrf->data.l.table_id; act = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4 : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; @@ -436,6 +448,12 @@ void vpn_leak_zebra_vrf_sid_update_per_vrf(struct bgp *bgp) if (!vrf) return; + if (bgp->tovpn_sid_locator) { + ctx.block_len = bgp->tovpn_sid_locator->block_bits_length; + ctx.node_len = bgp->tovpn_sid_locator->node_bits_length; + ctx.function_len = bgp->tovpn_sid_locator->function_bits_length; + ctx.argument_len = bgp->tovpn_sid_locator->argument_bits_length; + } ctx.table = vrf->data.l.table_id; act = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; zclient_send_localsid(zclient, tovpn_sid, bgp->vrf_id, act, &ctx); @@ -474,6 +492,8 @@ void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi) void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi) { int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); + struct srv6_sid_ctx ctx = {}; + struct seg6local_context seg6localctx = {}; if (bgp->vrf_id == VRF_UNKNOWN) { if (debug) @@ -486,12 +506,30 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi) zlog_debug("%s: deleting sid for vrf %s afi (id=%d)", __func__, bgp->name_pretty, bgp->vrf_id); + if (bgp->vpn_policy[afi].tovpn_sid_locator) { + seg6localctx.block_len = + bgp->vpn_policy[afi].tovpn_sid_locator->block_bits_length; + seg6localctx.node_len = + bgp->vpn_policy[afi].tovpn_sid_locator->node_bits_length; + seg6localctx.function_len = + bgp->vpn_policy[afi] + .tovpn_sid_locator->function_bits_length; + seg6localctx.argument_len = + bgp->vpn_policy[afi] + .tovpn_sid_locator->argument_bits_length; + } zclient_send_localsid(zclient, - bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent, - bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, NULL); + bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent, + bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, + &seg6localctx); XFREE(MTYPE_BGP_SRV6_SID, bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent); bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent = NULL; + + ctx.vrf_id = bgp->vrf_id; + ctx.behavior = afi == AFI_IP ? 
ZEBRA_SEG6_LOCAL_ACTION_END_DT4 + : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + bgp_zebra_release_srv6_sid(&ctx); } /* @@ -501,6 +539,8 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi) void vpn_leak_zebra_vrf_sid_withdraw_per_vrf(struct bgp *bgp) { int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); + struct srv6_sid_ctx ctx = {}; + struct seg6local_context seg6localctx = {}; if (bgp->vrf_id == VRF_UNKNOWN) { if (debug) @@ -514,11 +554,24 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_vrf(struct bgp *bgp) zlog_debug("%s: deleting sid for vrf %s (id=%d)", __func__, bgp->name_pretty, bgp->vrf_id); + if (bgp->tovpn_sid_locator) { + seg6localctx.block_len = + bgp->tovpn_sid_locator->block_bits_length; + seg6localctx.node_len = bgp->tovpn_sid_locator->node_bits_length; + seg6localctx.function_len = + bgp->tovpn_sid_locator->function_bits_length; + seg6localctx.argument_len = + bgp->tovpn_sid_locator->argument_bits_length; + } zclient_send_localsid(zclient, bgp->tovpn_zebra_vrf_sid_last_sent, bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, - NULL); + &seg6localctx); XFREE(MTYPE_BGP_SRV6_SID, bgp->tovpn_zebra_vrf_sid_last_sent); bgp->tovpn_zebra_vrf_sid_last_sent = NULL; + + ctx.vrf_id = bgp->vrf_id; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + bgp_zebra_release_srv6_sid(&ctx); } /* @@ -595,8 +648,8 @@ int vpn_leak_label_callback( return 0; } -static void sid_register(struct bgp *bgp, const struct in6_addr *sid, - const char *locator_name) +void sid_register(struct bgp *bgp, const struct in6_addr *sid, + const char *locator_name) { struct bgp_srv6_function *func; func = XCALLOC(MTYPE_BGP_SRV6_FUNCTION, @@ -635,108 +688,97 @@ static bool sid_exist(struct bgp *bgp, const struct in6_addr *sid) return false; } -/* - * This function generates a new SID based on bgp->srv6_locator_chunks and - * index. The locator and generated SID are stored in arguments sid_locator - * and sid, respectively. +/** + * Return the SRv6 SID value obtained by composing the LOCATOR and FUNCTION. 
* - * if index != 0: try to allocate as index-mode - * else: try to allocate as auto-mode + * @param sid_value SRv6 SID value returned + * @param locator Parent locator of the SRv6 SID + * @param sid_func Function part of the SID + * @return True if success, False otherwise */ -static uint32_t alloc_new_sid(struct bgp *bgp, uint32_t index, - struct srv6_locator_chunk *sid_locator_chunk, - struct in6_addr *sid) +static bool srv6_sid_compose(struct in6_addr *sid_value, + struct srv6_locator *locator, uint32_t sid_func) { int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); - struct listnode *node; - struct srv6_locator_chunk *chunk; - bool alloced = false; int label = 0; uint8_t offset = 0; uint8_t func_len = 0, shift_len = 0; - uint32_t index_max = 0; + uint32_t sid_func_max = 0; - if (!bgp || !sid_locator_chunk || !sid) + if (!locator || !sid_value) return false; - for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) { - if (chunk->function_bits_length > - BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH) { - if (debug) - zlog_debug( - "%s: invalid SRv6 Locator chunk (%pFX): Function Length must be less or equal to %d", - __func__, &chunk->prefix, - BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH); - continue; - } + if (locator->function_bits_length > + BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH) { + if (debug) + zlog_debug("%s: invalid SRv6 Locator (%pFX): Function Length must be less or equal to %d", + __func__, &locator->prefix, + BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH); + return false; + } - index_max = (1 << chunk->function_bits_length) - 1; + /* Max value that can be encoded in the Function part of the SID */ + sid_func_max = (1 << locator->function_bits_length) - 1; - if (index > index_max) { - if (debug) - zlog_debug( - "%s: skipped SRv6 Locator chunk (%pFX): Function Length is too short to support specified index (%u)", - __func__, &chunk->prefix, index); - continue; - } + if (sid_func > sid_func_max) { + if (debug) + zlog_debug("%s: invalid SRv6 Locator (%pFX): Function Length is too short to support specified function (%u)", + __func__, &locator->prefix, sid_func); + return false; + } - *sid = chunk->prefix.prefix; - *sid_locator_chunk = *chunk; - offset = chunk->block_bits_length + chunk->node_bits_length; - func_len = chunk->function_bits_length; - shift_len = BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH - func_len; + /** + * Let's build the SID value. + * sid_value = LOC:FUNC:: + */ - if (index != 0) { - label = index << shift_len; - if (label < MPLS_LABEL_UNRESERVED_MIN) { - if (debug) - zlog_debug( - "%s: skipped to allocate SRv6 SID (%pFX): Label (%u) is too small to use", - __func__, &chunk->prefix, - label); - continue; - } + /* First, we put the locator (LOC) in the most significant bits of sid_value */ + *sid_value = locator->prefix.prefix; - transpose_sid(sid, label, offset, func_len); - if (sid_exist(bgp, sid)) - continue; - alloced = true; - break; - } + /* + * Then, we compute the offset at which we have to place the function (FUNC). + * FUNC will be placed immediately after LOC, i.e. 
at block_bits_length + node_bits_length + */ + offset = locator->block_bits_length + locator->node_bits_length; - for (uint32_t i = 1; i < index_max; i++) { - label = i << shift_len; - if (label < MPLS_LABEL_UNRESERVED_MIN) { - if (debug) - zlog_debug( - "%s: skipped to allocate SRv6 SID (%pFX): Label (%u) is too small to use", - __func__, &chunk->prefix, - label); - continue; - } - transpose_sid(sid, label, offset, func_len); - if (sid_exist(bgp, sid)) - continue; - alloced = true; - break; - } + /* + * The FUNC part of the SID is advertised in the label field of SRv6 Service TLV. + * (see SID Transposition Scheme, RFC 9252 section #4). + * Therefore, we need to encode the FUNC in the most significant bits of the + * 20-bit label. + */ + func_len = locator->function_bits_length; + shift_len = BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH - func_len; + + label = sid_func << shift_len; + if (label < MPLS_LABEL_UNRESERVED_MIN) { + if (debug) + zlog_debug("%s: skipped to allocate SRv6 SID (%pFX): Label (%u) is too small to use", + __func__, &locator->prefix, label); + return false; } - if (!alloced) - return 0; + if (sid_exist(bgp_get_default(), sid_value)) { + zlog_warn("%s: skipped to allocate SRv6 SID (%pFX): SID %pI6 already in use", + __func__, &locator->prefix, sid_value); + return false; + } - sid_register(bgp, sid, bgp->srv6_locator_name); - return label; + /* Finally, we put the FUNC in sid_value at the computed offset */ + transpose_sid(sid_value, label, offset, func_len); + + return true; } void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi) { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct srv6_locator_chunk *tovpn_sid_locator; - struct in6_addr *tovpn_sid; - uint32_t tovpn_sid_index = 0, tovpn_sid_transpose_label; + struct in6_addr tovpn_sid = {}; + uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; + uint32_t sid_func; if (debug) zlog_debug("%s: try to allocate new SID for vrf %s: afi %s", @@ -748,11 +790,18 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, /* * skip when bgp vpn instance ins't allocated - * or srv6 locator chunk isn't allocated + * or srv6 locator isn't allocated */ - if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks) + if (!bgp_vpn || !bgp_vpn->srv6_locator) return; + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf SRv6 SID", + __func__, bgp_vrf->name_pretty); + return; + } + tovpn_sid_index = bgp_vrf->vpn_policy[afi].tovpn_sid_index; tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_SID_AUTO); @@ -768,40 +817,34 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, return; } - tovpn_sid_locator = srv6_locator_chunk_alloc(); - tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); - - tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, - tovpn_sid_locator, tovpn_sid); + if (!tovpn_sid_auto) { + if (!srv6_sid_compose(&tovpn_sid, bgp_vpn->srv6_locator, + tovpn_sid_index)) { + zlog_err("%s: failed to compose sid for vrf %s: afi %s", + __func__, bgp_vrf->name_pretty, afi2str(afi)); + return; + } + } - if (tovpn_sid_transpose_label == 0) { - if (debug) - zlog_debug( - "%s: not allocated new sid for vrf %s: afi %s", - __func__, bgp_vrf->name_pretty, afi2str(afi)); - srv6_locator_chunk_free(&tovpn_sid_locator); - XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid); + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = afi == AFI_IP ? 
ZEBRA_SEG6_LOCAL_ACTION_END_DT4 + : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + if (!bgp_zebra_request_srv6_sid(&ctx, &tovpn_sid, + bgp_vpn->srv6_locator_name, &sid_func)) { + zlog_err("%s: failed to request sid for vrf %s: afi %s", + __func__, bgp_vrf->name_pretty, afi2str(afi)); return; } - - if (debug) - zlog_debug("%s: new sid %pI6 allocated for vrf %s: afi %s", - __func__, tovpn_sid, bgp_vrf->name_pretty, - afi2str(afi)); - - bgp_vrf->vpn_policy[afi].tovpn_sid = tovpn_sid; - bgp_vrf->vpn_policy[afi].tovpn_sid_locator = tovpn_sid_locator; - bgp_vrf->vpn_policy[afi].tovpn_sid_transpose_label = - tovpn_sid_transpose_label; } void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct srv6_locator_chunk *tovpn_sid_locator; - struct in6_addr *tovpn_sid; - uint32_t tovpn_sid_index = 0, tovpn_sid_transpose_label; + struct in6_addr tovpn_sid = {}; + uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; + uint32_t sid_func; if (debug) zlog_debug("%s: try to allocate new SID for vrf %s", __func__, @@ -813,10 +856,17 @@ void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) /* * skip when bgp vpn instance ins't allocated - * or srv6 locator chunk isn't allocated + * or srv6 locator isn't allocated */ - if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks) + if (!bgp_vpn || !bgp_vpn->srv6_locator) + return; + + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf SRv6 SID", + __func__, bgp_vrf->name_pretty); return; + } tovpn_sid_index = bgp_vrf->tovpn_sid_index; tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vrf_flags, BGP_VRF_TOVPN_SID_AUTO); @@ -832,28 +882,23 @@ void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) return; } - tovpn_sid_locator = srv6_locator_chunk_alloc(); - tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); - - tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, - tovpn_sid_locator, tovpn_sid); + if (!tovpn_sid_auto) { + if (!srv6_sid_compose(&tovpn_sid, bgp_vpn->srv6_locator, + bgp_vrf->tovpn_sid_index)) { + zlog_err("%s: failed to compose new sid for vrf %s", + __func__, bgp_vrf->name_pretty); + return; + } + } - if (tovpn_sid_transpose_label == 0) { - if (debug) - zlog_debug("%s: not allocated new sid for vrf %s", - __func__, bgp_vrf->name_pretty); - srv6_locator_chunk_free(&tovpn_sid_locator); - XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid); + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + if (!bgp_zebra_request_srv6_sid(&ctx, &tovpn_sid, + bgp_vpn->srv6_locator_name, &sid_func)) { + zlog_err("%s: failed to request new sid for vrf %s", __func__, + bgp_vrf->name_pretty); return; } - - if (debug) - zlog_debug("%s: new sid %pI6 allocated for vrf %s", __func__, - tovpn_sid, bgp_vrf->name_pretty); - - bgp_vrf->tovpn_sid = tovpn_sid; - bgp_vrf->tovpn_sid_locator = tovpn_sid_locator; - bgp_vrf->tovpn_sid_transpose_label = tovpn_sid_transpose_label; } void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi) @@ -876,6 +921,7 @@ void delete_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; if (debug) zlog_debug("%s: try to remove SID for vrf %s: afi %s", __func__, @@ -889,9 +935,22 @@ void delete_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, if 
(tovpn_sid_index != 0 || tovpn_sid_auto) return; - srv6_locator_chunk_free(&bgp_vrf->vpn_policy[afi].tovpn_sid_locator); + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf label", + __func__, bgp_vrf->name_pretty); + return; + } + + srv6_locator_free(bgp_vrf->vpn_policy[afi].tovpn_sid_locator); + bgp_vrf->vpn_policy[afi].tovpn_sid_locator = NULL; if (bgp_vrf->vpn_policy[afi].tovpn_sid) { + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4 + : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + bgp_zebra_release_srv6_sid(&ctx); + sid_unregister(bgp_vpn, bgp_vrf->vpn_policy[afi].tovpn_sid); XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->vpn_policy[afi].tovpn_sid); } @@ -903,6 +962,7 @@ void delete_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; if (debug) zlog_debug("%s: try to remove SID for vrf %s", __func__, @@ -916,9 +976,21 @@ void delete_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) if (tovpn_sid_index != 0 || tovpn_sid_auto) return; - srv6_locator_chunk_free(&bgp_vrf->tovpn_sid_locator); + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf label", + __func__, bgp_vrf->name_pretty); + return; + } + + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; if (bgp_vrf->tovpn_sid) { + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + bgp_zebra_release_srv6_sid(&ctx); + sid_unregister(bgp_vpn, bgp_vrf->tovpn_sid); XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->tovpn_sid); } @@ -1763,8 +1835,9 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */ /* Set SID for SRv6 VPN */ if (from_bgp->vpn_policy[afi].tovpn_sid_locator) { - struct srv6_locator_chunk *locator = + struct srv6_locator *locator = from_bgp->vpn_policy[afi].tovpn_sid_locator; + encode_label( from_bgp->vpn_policy[afi].tovpn_sid_transpose_label, &label); @@ -1805,8 +1878,8 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */ .tovpn_sid_locator->prefix.prefix, sizeof(struct in6_addr)); } else if (from_bgp->tovpn_sid_locator) { - struct srv6_locator_chunk *locator = - from_bgp->tovpn_sid_locator; + struct srv6_locator *locator = from_bgp->tovpn_sid_locator; + encode_label(from_bgp->tovpn_sid_transpose_label, &label); static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN, diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index 92a9fba887..39fed66781 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -419,6 +419,8 @@ struct bgp_mplsvpn_nh_label_bind_cache *bgp_mplsvpn_nh_label_bind_find( struct bgp_mplsvpn_nh_label_bind_cache_head *tree, struct prefix *p, mpls_label_t orig_label); void bgp_mplsvpn_nexthop_init(void); +extern void sid_register(struct bgp *bgp, const struct in6_addr *sid, + const char *locator_name); extern void sid_unregister(struct bgp *bgp, const struct in6_addr *sid); #endif /* _QUAGGA_BGP_MPLSVPN_H */ diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c index e09dbc22af..de57d91806 100644 --- a/bgpd/bgp_network.c +++ b/bgpd/bgp_network.c @@ -861,8 +861,7 @@ int bgp_connect(struct peer_connection *connection) htons(peer->port), ifindex); } -/* After TCP connection is established. Get local address and port. 
*/ -int bgp_getsockname(struct peer *peer) +void bgp_updatesockname(struct peer *peer) { if (peer->su_local) { sockunion_free(peer->su_local); @@ -876,6 +875,12 @@ int bgp_getsockname(struct peer *peer) peer->su_local = sockunion_getsockname(peer->connection->fd); peer->su_remote = sockunion_getpeername(peer->connection->fd); +} + +/* After TCP connection is established. Get local address and port. */ +int bgp_getsockname(struct peer *peer) +{ + bgp_updatesockname(peer); if (!bgp_zebra_nexthop_set(peer->su_local, peer->su_remote, &peer->nexthop, peer)) { diff --git a/bgpd/bgp_network.h b/bgpd/bgp_network.h index 7a0b3cc67d..ceb6b6f002 100644 --- a/bgpd/bgp_network.h +++ b/bgpd/bgp_network.h @@ -23,6 +23,7 @@ extern void bgp_close_vrf_socket(struct bgp *bgp); extern void bgp_close(void); extern int bgp_connect(struct peer_connection *connection); extern int bgp_getsockname(struct peer *peer); +extern void bgp_updatesockname(struct peer *peer); extern int bgp_md5_set_prefix(struct bgp *bgp, struct prefix *p, const char *password); diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h index 430c8f17e8..0280960da8 100644 --- a/bgpd/bgp_nexthop.h +++ b/bgpd/bgp_nexthop.h @@ -66,6 +66,7 @@ struct bgp_nexthop_cache { #define BGP_STATIC_ROUTE (1 << 4) #define BGP_STATIC_ROUTE_EXACT_MATCH (1 << 5) #define BGP_NEXTHOP_LABELED_VALID (1 << 6) +#define BGP_NEXTHOP_ULTIMATE (1 << 7) /* * This flag is added for EVPN gateway IP nexthops. diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index 0259da06dd..c89ccc9792 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -405,12 +405,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, peer); } else { if (BGP_DEBUG(nht, NHT)) - zlog_debug( - "Found existing bnc %pFX(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p", - &bnc->prefix, bnc->ifindex_ipv6_ll, - bnc->bgp->name_pretty, bnc->flags, - bnc->ifindex_ipv6_ll, bnc->path_count, - bnc->nht_info); + zlog_debug("Found existing bnc %pFX(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p, resolved prefix %pFX", + &bnc->prefix, bnc->ifindex_ipv6_ll, + bnc->bgp->name_pretty, bnc->flags, + bnc->ifindex_ipv6_ll, bnc->path_count, + bnc->nht_info, &bnc->resolved_prefix); } if (pi && is_route_parent_evpn(pi)) @@ -485,6 +484,8 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, bnc->metric; else if (bpi_ultimate->extra) bpi_ultimate->extra->igpmetric = 0; + + SET_FLAG(bnc->flags, BGP_NEXTHOP_ULTIMATE); } else if (peer) { /* * Let's not accidentally save the peer data for a peer @@ -505,6 +506,10 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, return 1; else if (safi == SAFI_UNICAST && pi && pi->sub_type == BGP_ROUTE_IMPORTED && + CHECK_FLAG(bnc->flags, BGP_NEXTHOP_ULTIMATE)) + return bgp_isvalid_nexthop(bnc); + else if (safi == SAFI_UNICAST && pi && + pi->sub_type == BGP_ROUTE_IMPORTED && BGP_PATH_INFO_NUM_LABELS(pi) && !bnc->is_evpn_gwip_nexthop) return bgp_isvalid_nexthop_for_l3vpn(bnc, pi); else if (safi == SAFI_MPLS_VPN && pi && diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c index 945076709c..6451c7cf38 100644 --- a/bgpd/bgp_open.c +++ b/bgpd/bgp_open.c @@ -1374,7 +1374,7 @@ int bgp_open_option_parse(struct peer *peer, uint16_t length, * Check that we can read the opt_type and fetch it */ if (STREAM_READABLE(s) < 1) { - zlog_info("%s Option length error", peer->host); + zlog_err("%s Option length error", peer->host); bgp_notify_send(peer->connection, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_MALFORMED_ATTR); return -1; @@ -1387,7 +1387,7 @@ int 
bgp_open_option_parse(struct peer *peer, uint16_t length, */ if (BGP_OPEN_EXT_OPT_PARAMS_CAPABLE(peer)) { if (STREAM_READABLE(s) < 2) { - zlog_info("%s Option length error", peer->host); + zlog_err("%s Option length error", peer->host); bgp_notify_send(peer->connection, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_MALFORMED_ATTR); @@ -1397,7 +1397,7 @@ int bgp_open_option_parse(struct peer *peer, uint16_t length, opt_length = stream_getw(s); } else { if (STREAM_READABLE(s) < 1) { - zlog_info("%s Option length error", peer->host); + zlog_err("%s Option length error", peer->host); bgp_notify_send(peer->connection, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_MALFORMED_ATTR); @@ -1409,8 +1409,8 @@ int bgp_open_option_parse(struct peer *peer, uint16_t length, /* Option length check. */ if (STREAM_READABLE(s) < opt_length) { - zlog_info("%s Option length error (%d)", peer->host, - opt_length); + zlog_err("%s Option length error (%d)", peer->host, + opt_length); bgp_notify_send(peer->connection, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_MALFORMED_ATTR); return -1; diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index 2a2c9bdba9..62be7ffbf7 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -1116,10 +1116,10 @@ void bgp_route_refresh_send(struct peer *peer, afi_t afi, safi_t safi, s = stream_new(peer->max_packet_size); /* Make BGP update packet. */ - if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_RCV)) - bgp_packet_set_marker(s, BGP_MSG_ROUTE_REFRESH_NEW); - else - bgp_packet_set_marker(s, BGP_MSG_ROUTE_REFRESH_OLD); + if (!CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_RCV)) + return; + + bgp_packet_set_marker(s, BGP_MSG_ROUTE_REFRESH_NEW); /* Encode Route Refresh message. */ stream_putw(s, pkt_afi); @@ -2702,6 +2702,19 @@ static int bgp_notify_receive(struct peer_connection *connection, inner.subcode == BGP_NOTIFY_OPEN_UNSUP_PARAM) UNSET_FLAG(peer->sflags, PEER_STATUS_CAPABILITY_OPEN); + /* Resend the next OPEN message with a global AS number if we received + * a `Bad Peer AS` notification. This is only valid if `dual-as` is + * configured. + */ + if (inner.code == BGP_NOTIFY_OPEN_ERR && + inner.subcode == BGP_NOTIFY_OPEN_BAD_PEER_AS && + CHECK_FLAG(peer->flags, PEER_FLAG_DUAL_AS)) { + if (peer->change_local_as != peer->bgp->as) + peer->change_local_as = peer->bgp->as; + else + peer->change_local_as = peer->local_as; + } + /* If Graceful-Restart N-bit (Notification) is exchanged, * and it's not a Hard Reset, let's retain the routes. 
*/ diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c index 43682de413..ec5b50a08f 100644 --- a/bgpd/bgp_pbr.c +++ b/bgpd/bgp_pbr.c @@ -775,14 +775,12 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, } api_action = &api->actions[action_count - 1]; - if ((ecom_eval->val[1] == - (char)ECOMMUNITY_REDIRECT_VRF) && - (ecom_eval->val[0] == - (char)ECOMMUNITY_ENCODE_TRANS_EXP || + if ((ecom_eval->val[1] == ECOMMUNITY_REDIRECT_VRF) && + (ecom_eval->val[0] == ECOMMUNITY_ENCODE_TRANS_EXP || ecom_eval->val[0] == - (char)ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 || + ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 || ecom_eval->val[0] == - (char)ECOMMUNITY_EXTENDED_COMMUNITY_PART_3)) { + ECOMMUNITY_EXTENDED_COMMUNITY_PART_3)) { struct ecommunity *eckey = ecommunity_new(); struct ecommunity_val ecom_copy; @@ -800,9 +798,9 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, eckey); ecommunity_free(&eckey); } else if ((ecom_eval->val[0] == - (char)ECOMMUNITY_ENCODE_REDIRECT_IP_NH) && + ECOMMUNITY_ENCODE_REDIRECT_IP_NH) && (ecom_eval->val[1] == - (char)ECOMMUNITY_REDIRECT_IP_NH)) { + ECOMMUNITY_REDIRECT_IP_NH)) { /* in case the 2 ecom present, * do not overwrite * draft-ietf-idr-flowspec-redirect @@ -861,10 +859,9 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, = ecom_eval->val[7]; api_action_redirect_ip = api_action; } - } else if ((ecom_eval->val[0] == - (char)ECOMMUNITY_ENCODE_IP) && + } else if ((ecom_eval->val[0] == ECOMMUNITY_ENCODE_IP) && (ecom_eval->val[1] == - (char)ECOMMUNITY_FLOWSPEC_REDIRECT_IPV4)) { + ECOMMUNITY_FLOWSPEC_REDIRECT_IPV4)) { /* in case the 2 ecom present, * overwrite simpson draft * update redirect ip fields @@ -888,7 +885,7 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, } } else { if (ecom_eval->val[0] != - (char)ECOMMUNITY_ENCODE_TRANS_EXP) + ECOMMUNITY_ENCODE_TRANS_EXP) continue; ret = ecommunity_fill_pbr_action(ecom_eval, api_action, @@ -920,9 +917,9 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, } api_action = &api->actions[action_count - 1]; if ((ipv6_ecom_eval->val[1] == - (char)ECOMMUNITY_FLOWSPEC_REDIRECT_IPV6) && + ECOMMUNITY_FLOWSPEC_REDIRECT_IPV6) && (ipv6_ecom_eval->val[0] == - (char)ECOMMUNITY_ENCODE_TRANS_EXP)) { + ECOMMUNITY_ENCODE_TRANS_EXP)) { struct ecommunity *eckey = ecommunity_new(); struct ecommunity_val_ipv6 ecom_copy; diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 7cc51331a5..f4118952fd 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -3760,7 +3760,8 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest, if (old_select || new_select) { bgp_bump_version(dest); - if (!bgp->t_rmap_def_originate_eval) + if (!bgp->t_rmap_def_originate_eval && + bgp->rmap_def_originate_eval_timer) event_add_timer( bm->master, update_group_refresh_default_originate_route_map, @@ -4014,8 +4015,9 @@ static struct bgp_process_queue *bgp_processq_alloc(struct bgp *bgp) return pqnode; } -void bgp_process(struct bgp *bgp, struct bgp_dest *dest, - struct bgp_path_info *pi, afi_t afi, safi_t safi) +static void bgp_process_internal(struct bgp *bgp, struct bgp_dest *dest, + struct bgp_path_info *pi, afi_t afi, + safi_t safi, bool early_process) { #define ARBITRARY_PROCESS_QLEN 10000 struct work_queue *wq = bgp->process_queue; @@ -4078,9 +4080,8 @@ void bgp_process(struct bgp *bgp, struct bgp_dest *dest, struct work_queue_item *item = work_queue_last_item(wq); pqnode = item->data; - if (CHECK_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER) - || pqnode->bgp != bgp - || pqnode->queued >= 
ARBITRARY_PROCESS_QLEN) + if (CHECK_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER) || + (pqnode->queued >= ARBITRARY_PROCESS_QLEN && !early_process)) pqnode = bgp_processq_alloc(bgp); else pqnode_reuse = 1; @@ -4094,7 +4095,10 @@ void bgp_process(struct bgp *bgp, struct bgp_dest *dest, /* can't be enqueued twice */ assert(STAILQ_NEXT(dest, pq) == NULL); - STAILQ_INSERT_TAIL(&pqnode->pqueue, dest, pq); + if (early_process) + STAILQ_INSERT_HEAD(&pqnode->pqueue, dest, pq); + else + STAILQ_INSERT_TAIL(&pqnode->pqueue, dest, pq); pqnode->queued++; if (!pqnode_reuse) @@ -4103,6 +4107,18 @@ void bgp_process(struct bgp *bgp, struct bgp_dest *dest, return; } +void bgp_process(struct bgp *bgp, struct bgp_dest *dest, + struct bgp_path_info *pi, afi_t afi, safi_t safi) +{ + bgp_process_internal(bgp, dest, pi, afi, safi, false); +} + +void bgp_process_early(struct bgp *bgp, struct bgp_dest *dest, + struct bgp_path_info *pi, afi_t afi, safi_t safi) +{ + bgp_process_internal(bgp, dest, pi, afi, safi, true); +} + void bgp_add_eoiu_mark(struct bgp *bgp) { struct bgp_process_queue *pqnode; @@ -4637,7 +4653,22 @@ void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, if (aspath_get_last_as(attr->aspath) == bgp->as) do_loop_check = 0; - if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT)) + /* When using bgp ipv4 labeled session, the local prefix is + * received by a peer, and finds out that the proposed prefix + * and its next-hop are the same. To avoid a route loop locally, + * no nexthop entry is referenced for that prefix, and the route + * will not be selected. + * + * As it has been done for ipv4-unicast, apply the following fix + * for labeled address families: when the received peer is + * a route reflector, the prefix has to be selected, even if the + * route can not be installed locally. 
+ */ + if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT) || + (safi == SAFI_UNICAST && !peer->afc[afi][safi] && + peer->afc[afi][SAFI_LABELED_UNICAST] && + CHECK_FLAG(peer->af_flags[afi][SAFI_LABELED_UNICAST], + PEER_FLAG_REFLECTOR_CLIENT))) bgp_nht_param_prefix = NULL; else bgp_nht_param_prefix = p; @@ -11921,10 +11952,9 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa if (!use_json) route_vty_out_detail_header( vty, bgp, dest, - bgp_dest_get_prefix( - dest), + bgp_dest_get_prefix(dest), prd, table->afi, safi, - NULL, false); + NULL, false, false); route_vty_out_detail( vty, bgp, dest, dest_p, pi, @@ -11997,10 +12027,12 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t sa prd = bgp_rd_from_dest(dest, safi); - route_vty_out_detail_header( - vty, bgp, dest, - bgp_dest_get_prefix(dest), prd, - table->afi, safi, json_paths, true); + route_vty_out_detail_header(vty, bgp, dest, + bgp_dest_get_prefix( + dest), + prd, table->afi, + safi, json_paths, + true, false); vty_out(vty, "\"paths\": "); json_detail_header_used = true; @@ -12206,7 +12238,7 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, struct bgp_dest *dest, const struct prefix *p, const struct prefix_rd *prd, afi_t afi, safi_t safi, json_object *json, - bool incremental_print) + bool incremental_print, bool local_table) { struct bgp_path_info *pi; struct peer *peer; @@ -12424,8 +12456,14 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, json_object_object_add(json, "advertisedTo", json_adv_to); } else { - if (!json && first) - vty_out(vty, " Not advertised to any peer"); + if (!json && first) { + if (!local_table) + vty_out(vty, + " Not advertised to any peer"); + else + vty_out(vty, + " Local BGP table not advertised"); + } vty_out(vty, "\n"); } } @@ -12464,10 +12502,10 @@ static void bgp_show_path_info(const struct prefix_rd *pfx_rd, } if (header) { - route_vty_out_detail_header( - vty, bgp, bgp_node, - bgp_dest_get_prefix(bgp_node), pfx_rd, AFI_IP, - safi, json_header, false); + route_vty_out_detail_header(vty, bgp, bgp_node, + bgp_dest_get_prefix(bgp_node), + pfx_rd, AFI_IP, safi, + json_header, false, false); header = 0; } (*display)++; @@ -12937,7 +12975,7 @@ DEFUN (show_ip_bgp_l2vpn_evpn_statistics, struct json_object *json_afi_safi = NULL, *json = NULL; bgp_vty_find_and_parse_afi_safi_bgp(vty, argv, argc, &idx, &afi, &safi, - &bgp, false); + &bgp, uj); if (!idx) return CMD_WARNING; @@ -12975,7 +13013,7 @@ DEFUN(show_ip_bgp_afi_safi_statistics, show_ip_bgp_afi_safi_statistics_cmd, struct json_object *json_afi_safi = NULL, *json = NULL; bgp_vty_find_and_parse_afi_safi_bgp(vty, argv, argc, &idx, &afi, &safi, - &bgp, false); + &bgp, uj); if (!idx) return CMD_WARNING; @@ -13638,6 +13676,8 @@ enum bgp_stats { BGP_STATS_ASPATH_MAXSIZE, BGP_STATS_ASPATH_TOTSIZE, BGP_STATS_ASN_HIGHEST, + BGP_STATS_REDISTRIBUTED, + BGP_STATS_LOCAL_AGGREGATES, BGP_STATS_MAX, }; @@ -13667,6 +13707,8 @@ static const char *table_stats_strs[][2] = { [BGP_STATS_ASPATH_TOTSIZE] = {"Average AS-Path size (bytes)", "averageAsPathSizeBytes"}, [BGP_STATS_ASN_HIGHEST] = {"Highest public ASN", "highestPublicAsn"}, + [BGP_STATS_REDISTRIBUTED] = {"Redistributed routes", "totalRedistributed"}, + [BGP_STATS_LOCAL_AGGREGATES] = {"Local aggregates", "totalLocalAggregates"}, [BGP_STATS_MAX] = {NULL, NULL} }; @@ -13716,6 +13758,15 @@ static void bgp_table_stats_rn(struct bgp_dest *dest, struct bgp_dest *top, ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))) 
ts->counts[BGP_STATS_AGGREGATES]++; + if (pi->peer == ts->table->bgp->peer_self) { + if (pi->sub_type == BGP_ROUTE_REDISTRIBUTE) + ts->counts[BGP_STATS_REDISTRIBUTED]++; + + if ((pi->type == ZEBRA_ROUTE_BGP) && + (pi->sub_type == BGP_ROUTE_AGGREGATE)) + ts->counts[BGP_STATS_LOCAL_AGGREGATES]++; + } + /* as-path stats */ if (pi->attr->aspath) { unsigned int hops = aspath_count_hops(pi->attr->aspath); diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index b1c8356301..b6df241181 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -804,10 +804,20 @@ extern void bgp_withdraw(struct peer *peer, const struct prefix *p, int sub_type, struct prefix_rd *prd, mpls_label_t *label, uint8_t num_labels); -/* for bgp_nexthop and bgp_damp */ +/* + * Add a route to be processed for bgp bestpath through the bgp + * workqueue. This route is added to the end of all other routes + * queued for processing + * + * bgp_process_early adds the route for processing at the beginning + * of the current queue for processing. + */ extern void bgp_process(struct bgp *bgp, struct bgp_dest *dest, struct bgp_path_info *pi, afi_t afi, safi_t safi); +extern void bgp_process_early(struct bgp *bgp, struct bgp_dest *dest, + struct bgp_path_info *pi, afi_t afi, safi_t safi); + /* * Add an end-of-initial-update marker to the process queue. This is just a * queue element with NULL bgp node. @@ -903,7 +913,8 @@ extern void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, const struct prefix *p, const struct prefix_rd *prd, afi_t afi, safi_t safi, json_object *json, - bool incremental_print); + bool incremental_print, + bool local_table); extern void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn, const struct prefix *p, struct bgp_path_info *path, afi_t afi, diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index d0b65a7426..950bb8907f 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -1992,10 +1992,9 @@ route_set_ip_nexthop(void *rule, const struct prefix *prefix, void *object) SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_NEXTHOP_UNCHANGED); } else if (rins->peer_address) { - if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN) - || CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IMPORT)) - && peer->su_remote - && sockunion_family(peer->su_remote) == AF_INET) { + if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)) && + peer->su_remote && + sockunion_family(peer->su_remote) == AF_INET) { path->attr->nexthop.s_addr = sockunion2ip(peer->su_remote); path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP); @@ -2358,7 +2357,7 @@ static void route_aspath_exclude_free(void *rule) if (ase->exclude_aspath_acl) { acl = ase->exclude_aspath_acl; as_list_list_del(&acl->exclude_rule, ase); - } else { + } else if (ase->exclude_aspath_acl_name) { /* no ref to acl, this aspath exclude is orphan */ as_exclude_remove_orphan(ase); } @@ -3950,8 +3949,7 @@ route_set_ipv6_nexthop_prefer_global(void *rule, const struct prefix *prefix, path = object; peer = path->peer; - if (CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN) - || CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IMPORT)) { + if (CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)) { /* Set next hop preference to global */ SET_FLAG(path->attr->nh_flags, BGP_ATTR_NH_MP_PREFER_GLOBAL); SET_FLAG(path->attr->rmap_change_flags, @@ -4077,10 +4075,8 @@ route_set_ipv6_nexthop_peer(void *rule, const struct prefix *pfx, void *object) path = object; peer = path->peer; - if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN) - || CHECK_FLAG(peer->rmap_type, 
PEER_RMAP_TYPE_IMPORT)) - && peer->su_remote - && sockunion_family(peer->su_remote) == AF_INET6) { + if ((CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_IN)) && + peer->su_remote && sockunion_family(peer->su_remote) == AF_INET6) { peer_address = peer->su_remote->sin6.sin6_addr; /* Set next hop value and length in attribute. */ if (IN6_IS_ADDR_LINKLOCAL(&peer_address)) { @@ -4095,7 +4091,6 @@ route_set_ipv6_nexthop_peer(void *rule, const struct prefix *pfx, void *object) path->attr->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; } - } else if (CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_OUT)) { /* The next hop value will be set as part of packet * rewrite. diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c index f9cbf24031..347c5d02a1 100644 --- a/bgpd/bgp_rpki.c +++ b/bgpd/bgp_rpki.c @@ -1920,81 +1920,6 @@ DEFUN (no_rpki_retry_interval, return CMD_SUCCESS; } -#if CONFDATE > 20240916 -CPP_NOTICE("Remove rpki_cache_cmd") -#endif -DEFPY(rpki_cache, rpki_cache_cmd, - "rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY [KNOWN_HOSTS_PATH]> [source <A.B.C.D>$bindaddr] preference (1-255)", - RPKI_OUTPUT_STRING - "Install a cache server to current group\n" - "IP address of cache server\n" - "Hostname of cache server\n" - "TCP port number\n" - "SSH port number\n" - "SSH user name\n" - "Path to own SSH private key\n" - "Path to the known hosts file\n" - "Configure source IP address of RPKI connection\n" - "Define a Source IP Address\n" - "Preference of the cache server\n" - "Preference value\n") -{ - int return_value; - struct listnode *cache_node; - struct cache *current_cache; - struct rpki_vrf *rpki_vrf; - bool init; - - if (vty->node == RPKI_VRF_NODE) - rpki_vrf = VTY_GET_CONTEXT_SUB(rpki_vrf); - else - rpki_vrf = VTY_GET_CONTEXT(rpki_vrf); - - if (!rpki_vrf) - return CMD_WARNING_CONFIG_FAILED; - - if (!rpki_vrf || !rpki_vrf->cache_list) - return CMD_WARNING; - - init = !!list_isempty(rpki_vrf->cache_list); - - for (ALL_LIST_ELEMENTS_RO(rpki_vrf->cache_list, cache_node, - current_cache)) { - if (current_cache->preference == preference) { - vty_out(vty, - "Cache with preference %ld is already configured\n", - preference); - return CMD_WARNING; - } - } - - // use ssh connection - if (ssh_uname) { -#if defined(FOUND_SSH) - return_value = add_ssh_cache(rpki_vrf, cache, sshport, ssh_uname, - ssh_privkey, known_hosts_path, - preference, bindaddr_str); -#else - return_value = SUCCESS; - vty_out(vty, - "ssh sockets are not supported. Please recompile rtrlib and frr with ssh support. 
If you want to use it\n"); -#endif - } else { // use tcp connection - return_value = add_tcp_cache(rpki_vrf, cache, tcpport, - preference, bindaddr_str); - } - - if (return_value == ERROR) { - vty_out(vty, "Could not create new rpki cache\n"); - return CMD_WARNING; - } - - if (init) - start(rpki_vrf); - - return CMD_SUCCESS; -} - DEFPY(rpki_cache_tcp, rpki_cache_tcp_cmd, "rpki cache tcp <A.B.C.D|WORD>$cache TCPPORT [source <A.B.C.D>$bindaddr] preference (1-255)", RPKI_OUTPUT_STRING @@ -2820,7 +2745,6 @@ static void install_cli_commands(void) /* Install rpki cache commands */ install_element(RPKI_NODE, &rpki_cache_tcp_cmd); install_element(RPKI_NODE, &rpki_cache_ssh_cmd); - install_element(RPKI_NODE, &rpki_cache_cmd); install_element(RPKI_NODE, &no_rpki_cache_cmd); /* RPKI_VRF_NODE commands */ @@ -2844,7 +2768,6 @@ static void install_cli_commands(void) /* Install rpki cache commands */ install_element(RPKI_VRF_NODE, &rpki_cache_tcp_cmd); install_element(RPKI_VRF_NODE, &rpki_cache_ssh_cmd); - install_element(RPKI_VRF_NODE, &rpki_cache_cmd); install_element(RPKI_VRF_NODE, &no_rpki_cache_cmd); /* Install show commands */ diff --git a/bgpd/bgp_script.h b/bgpd/bgp_script.h index f2f47e940d..9feb550135 100644 --- a/bgpd/bgp_script.h +++ b/bgpd/bgp_script.h @@ -7,7 +7,6 @@ #define __BGP_SCRIPT__ #include <zebra.h> -#include "bgpd.h" #ifdef HAVE_SCRIPTING @@ -18,6 +17,10 @@ */ void bgp_script_init(void); +/* Forward references */ +struct peer; +struct attr; + void lua_pushpeer(lua_State *L, const struct peer *peer); void lua_pushattr(lua_State *L, const struct attr *attr); diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index b717793a45..90c43b938f 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -783,8 +783,11 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg) json_updgrp, "replaceLocalAs", CHECK_FLAG(updgrp->conf->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS)); + json_object_boolean_add(json_updgrp, "dualAs", + CHECK_FLAG(updgrp->conf->flags, + PEER_FLAG_DUAL_AS)); } else { - vty_out(vty, " Local AS %u%s%s\n", + vty_out(vty, " Local AS %u%s%s%s\n", updgrp->conf->change_local_as, CHECK_FLAG(updgrp->conf->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND) @@ -793,6 +796,10 @@ static int update_group_show_walkcb(struct update_group *updgrp, void *arg) CHECK_FLAG(updgrp->conf->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS) ? " replace-as" + : "", + CHECK_FLAG(updgrp->conf->flags, + PEER_FLAG_DUAL_AS) + ? 
" dual-as" : ""); } } @@ -2016,6 +2023,8 @@ int update_group_adjust_soloness(struct peer *peer, int set) struct peer_group *group; struct listnode *node, *nnode; + peer_flag_set(peer, PEER_FLAG_LONESOUL); + if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer_lonesoul_or_not(peer, set); if (peer_established(peer->connection)) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index c9c7b80496..a3b23fb2d5 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -302,18 +302,11 @@ static const char *get_afi_safi_json_str(afi_t afi, safi_t safi) /* unset srv6 locator */ static int bgp_srv6_locator_unset(struct bgp *bgp) { - int ret; struct listnode *node, *nnode; struct srv6_locator_chunk *chunk; struct bgp_srv6_function *func; struct bgp *bgp_vrf; - /* release chunk notification via ZAPI */ - ret = bgp_zebra_srv6_manager_release_locator_chunk( - bgp->srv6_locator_name); - if (ret < 0) - return -1; - /* refresh chunks */ for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) { listnode_delete(bgp->srv6_locator_chunks, chunk); @@ -352,20 +345,28 @@ static int bgp_srv6_locator_unset(struct bgp *bgp) continue; /* refresh vpnv4 tovpn_sid_locator */ - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + srv6_locator_free(bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = NULL; /* refresh vpnv6 tovpn_sid_locator */ - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + srv6_locator_free( + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = NULL; /* refresh per-vrf tovpn_sid_locator */ - srv6_locator_chunk_free(&bgp_vrf->tovpn_sid_locator); + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; } /* clear locator name */ memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name)); + /* clear SRv6 locator */ + if (bgp->srv6_locator) { + srv6_locator_free(bgp->srv6_locator); + bgp->srv6_locator = NULL; + } + return 0; } @@ -5450,7 +5451,7 @@ DEFUN (neighbor_local_as, return CMD_WARNING_CONFIG_FAILED; } - ret = peer_local_as_set(peer, as, 0, 0, argv[idx_number]->arg); + ret = peer_local_as_set(peer, as, 0, 0, 0, argv[idx_number]->arg); return bgp_vty_return(vty, ret); } @@ -5479,19 +5480,20 @@ DEFUN (neighbor_local_as_no_prepend, return CMD_WARNING_CONFIG_FAILED; } - ret = peer_local_as_set(peer, as, 1, 0, argv[idx_number]->arg); + ret = peer_local_as_set(peer, as, 1, 0, 0, argv[idx_number]->arg); return bgp_vty_return(vty, ret); } -DEFUN (neighbor_local_as_no_prepend_replace_as, +DEFPY (neighbor_local_as_no_prepend_replace_as, neighbor_local_as_no_prepend_replace_as_cmd, - "neighbor <A.B.C.D|X:X::X:X|WORD> local-as ASNUM no-prepend replace-as", + "neighbor <A.B.C.D|X:X::X:X|WORD> local-as ASNUM no-prepend replace-as [dual-as$dual_as]", NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "Specify a local-as number\n" "AS number expressed in dotted or plain format used as local AS\n" "Do not prepend local-as to updates from ebgp peers\n" - "Do not prepend local-as to updates from ibgp peers\n") + "Do not prepend local-as to updates from ibgp peers\n" + "Allow peering with a global AS number or local-as number\n") { int idx_peer = 1; int idx_number = 3; @@ -5509,20 +5511,21 @@ DEFUN (neighbor_local_as_no_prepend_replace_as, return CMD_WARNING_CONFIG_FAILED; } - ret = peer_local_as_set(peer, as, 1, 1, argv[idx_number]->arg); + ret = peer_local_as_set(peer, as, 1, 1, dual_as, argv[idx_number]->arg); return bgp_vty_return(vty, ret); } 
DEFUN (no_neighbor_local_as, no_neighbor_local_as_cmd, - "no neighbor <A.B.C.D|X:X::X:X|WORD> local-as [ASNUM [no-prepend [replace-as]]]", + "no neighbor <A.B.C.D|X:X::X:X|WORD> local-as [ASNUM [no-prepend [replace-as] [dual-as]]]", NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "Specify a local-as number\n" "AS number expressed in dotted or plain format used as local AS\n" "Do not prepend local-as to updates from ebgp peers\n" - "Do not prepend local-as to updates from ibgp peers\n") + "Do not prepend local-as to updates from ibgp peers\n" + "Allow peering with a global AS number or local-as number\n") { int idx_peer = 2; struct peer *peer; @@ -8416,7 +8419,7 @@ DEFPY (bgp_condadv_period, DEFPY (bgp_def_originate_eval, bgp_def_originate_eval_cmd, - "[no$no] bgp default-originate timer (0-3600)$timer", + "[no$no] bgp default-originate timer (0-65535)$timer", NO_STR BGP_STR "Control default-originate\n" @@ -8425,8 +8428,7 @@ DEFPY (bgp_def_originate_eval, { VTY_DECLVAR_CONTEXT(bgp, bgp); - bgp->rmap_def_originate_eval_timer = - no ? RMAP_DEFAULT_ORIGINATE_EVAL_TIMER : timer; + bgp->rmap_def_originate_eval_timer = no ? 0 : timer; if (bgp->t_rmap_def_originate_eval) EVENT_OFF(bgp->t_rmap_def_originate_eval); @@ -10878,7 +10880,7 @@ DEFPY (bgp_srv6_locator, snprintf(bgp->srv6_locator_name, sizeof(bgp->srv6_locator_name), "%s", name); - ret = bgp_zebra_srv6_manager_get_locator_chunk(name); + ret = bgp_zebra_srv6_manager_get_locator(name); if (ret < 0) return CMD_WARNING_CONFIG_FAILED; @@ -10929,6 +10931,17 @@ DEFPY (show_bgp_srv6, return CMD_SUCCESS; vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name); + if (bgp->srv6_locator) { + vty_out(vty, " prefix: %pFX\n", &bgp->srv6_locator->prefix); + vty_out(vty, " block-length: %d\n", + bgp->srv6_locator->block_bits_length); + vty_out(vty, " node-length: %d\n", + bgp->srv6_locator->node_bits_length); + vty_out(vty, " func-length: %d\n", + bgp->srv6_locator->function_bits_length); + vty_out(vty, " arg-length: %d\n", + bgp->srv6_locator->argument_bits_length); + } vty_out(vty, "locator_chunks:\n"); for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) { vty_out(vty, "- %pFX\n", &chunk->prefix); @@ -14040,6 +14053,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, if (CHECK_FLAG(p->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS)) json_object_boolean_true_add(json_neigh, "localAsReplaceAs"); + + json_object_boolean_add(json_neigh, "localAsReplaceAsDualAs", + !!CHECK_FLAG(p->flags, + PEER_FLAG_DUAL_AS)); } else { if (p->as_type == AS_SPECIFIED || CHECK_FLAG(p->as_type, AS_AUTO) || @@ -14054,13 +14071,15 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, vty_out(vty, ASN_FORMAT(bgp->asnotation), p->change_local_as ? &p->change_local_as : &p->local_as); - vty_out(vty, "%s%s, ", + vty_out(vty, "%s%s%s, ", CHECK_FLAG(p->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND) ? " no-prepend" : "", CHECK_FLAG(p->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS) ? " replace-as" - : ""); + : "", + CHECK_FLAG(p->flags, PEER_FLAG_DUAL_AS) ? 
" dual-as" + : ""); } /* peer type internal or confed-internal */ if ((p->as == p->local_as) || (CHECK_FLAG(p->as_type, AS_INTERNAL))) { @@ -17072,8 +17091,13 @@ static int bgp_show_one_peer_group(struct vty *vty, struct peer_group *group, vty_out(vty, "\nBGP peer-group %s\n", group->name); } - if ((group->bgp->as == conf->as) || - CHECK_FLAG(conf->as_type, AS_INTERNAL)) { + if (CHECK_FLAG(conf->as_type, AS_AUTO)) { + if (json) + json_object_string_add(json_peer_group, "type", "auto"); + else + vty_out(vty, " Peer-group type is auto\n"); + } else if ((group->bgp->as == conf->as) || + CHECK_FLAG(conf->as_type, AS_INTERNAL)) { if (json) json_object_string_add(json_peer_group, "type", "internal"); @@ -18647,6 +18671,8 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp, vty_out(vty, " no-prepend"); if (peergroup_flag_check(peer, PEER_FLAG_LOCAL_AS_REPLACE_AS)) vty_out(vty, " replace-as"); + if (peergroup_flag_check(peer, PEER_FLAG_DUAL_AS)) + vty_out(vty, " dual-as"); vty_out(vty, "\n"); } @@ -18678,11 +18704,8 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp, peer->password); /* neighbor solo */ - if (CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL)) { - if (!peer_group_active(peer)) { - vty_out(vty, " neighbor %s solo\n", addr); - } - } + if (peergroup_flag_check(peer, PEER_FLAG_LONESOUL)) + vty_out(vty, " neighbor %s solo\n", addr); /* BGP port */ if (peer->port != BGP_PORT_DEFAULT) { @@ -19778,8 +19801,9 @@ int bgp_config_write(struct vty *vty) bgp->condition_check_period); /* default-originate timer configuration */ - if (bgp->rmap_def_originate_eval_timer != - RMAP_DEFAULT_ORIGINATE_EVAL_TIMER) + if (bgp->rmap_def_originate_eval_timer && + bgp->rmap_def_originate_eval_timer != + RMAP_DEFAULT_ORIGINATE_EVAL_TIMER) vty_out(vty, " bgp default-originate timer %u\n", bgp->rmap_def_originate_eval_timer); diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 6e2efabf8f..bffa5a0e6b 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -542,7 +542,7 @@ static int zebra_read_route(ZAPI_CALLBACK_ARGS) /* Now perform the add/update. */ bgp_redistribute_add(bgp, &api.prefix, &nexthop, ifindex, - nhtype, bhtype, api.distance, api.metric, + nhtype, api.distance, bhtype, api.metric, api.type, api.instance, api.tag); } else { bgp_redistribute_delete(bgp, &api.prefix, api.type, @@ -3379,11 +3379,278 @@ static int bgp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS) return 0; } +/** + * Internal function to process an SRv6 locator + * + * @param locator The locator to be processed + */ +static int bgp_zebra_process_srv6_locator_internal(struct srv6_locator *locator) +{ + struct bgp *bgp = bgp_get_default(); + + if (!bgp || !bgp->srv6_enabled || !locator) + return -1; + + /* + * Check if the main BGP instance is configured to use the received + * locator + */ + if (strcmp(bgp->srv6_locator_name, locator->name) != 0) { + zlog_err("%s: SRv6 Locator name unmatch %s:%s", __func__, + bgp->srv6_locator_name, locator->name); + return 0; + } + + zlog_info("%s: Received SRv6 locator %s %pFX, loc-block-len=%u, loc-node-len=%u func-len=%u, arg-len=%u", + __func__, locator->name, &locator->prefix, + locator->block_bits_length, locator->node_bits_length, + locator->function_bits_length, locator->argument_bits_length); + + /* Store the locator in the main BGP instance */ + bgp->srv6_locator = srv6_locator_alloc(locator->name); + srv6_locator_copy(bgp->srv6_locator, locator); + + /* + * Process VPN-to-VRF and VRF-to-VPN leaks to advertise new locator + * and SIDs. 
+ */ + vpn_leak_postchange_all(); + + return 0; +} + +static int bgp_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) +{ + struct bgp *bgp = bgp_get_default(); + struct srv6_locator *locator; + struct srv6_sid_ctx ctx; + struct in6_addr sid_addr; + enum zapi_srv6_sid_notify note; + struct bgp *bgp_vrf; + struct vrf *vrf; + struct listnode *node, *nnode; + char buf[256]; + struct in6_addr *tovpn_sid; + struct prefix_ipv6 tmp_prefix; + uint32_t sid_func; + bool found = false; + + if (!bgp || !bgp->srv6_enabled) + return -1; + + if (!bgp->srv6_locator) { + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: ignoring SRv6 SID notify: locator not set", + __func__); + return -1; + } + + /* Decode the received notification message */ + if (!zapi_srv6_sid_notify_decode(zclient->ibuf, &ctx, &sid_addr, + &sid_func, NULL, ¬e, NULL)) { + zlog_err("%s : error in msg decode", __func__); + return -1; + } + + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: received SRv6 SID notify: ctx %s sid_value %pI6 %s", + __func__, srv6_sid_ctx2str(buf, sizeof(buf), &ctx), + &sid_addr, zapi_srv6_sid_notify2str(note)); + + /* Get the BGP instance for which the SID has been requested, if any */ + for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp_vrf)) { + vrf = vrf_lookup_by_id(bgp_vrf->vrf_id); + if (!vrf) + continue; + + if (vrf->vrf_id == ctx.vrf_id) { + found = true; + break; + } + } + + if (!found) { + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: ignoring SRv6 SID notify: No VRF suitable for received SID ctx %s sid_value %pI6", + __func__, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx), + &sid_addr); + return -1; + } + + /* Handle notification */ + switch (note) { + case ZAPI_SRV6_SID_ALLOCATED: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("SRv6 SID %pI6 %s : ALLOCATED", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Verify that the received SID belongs to the configured locator */ + tmp_prefix.family = AF_INET6; + tmp_prefix.prefixlen = IPV6_MAX_BITLEN; + tmp_prefix.prefix = sid_addr; + + if (!prefix_match((struct prefix *)&bgp->srv6_locator->prefix, + (struct prefix *)&tmp_prefix)) + return -1; + + /* Get label */ + uint8_t func_len = bgp->srv6_locator->function_bits_length; + uint8_t shift_len = BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH - + func_len; + + int label = sid_func << shift_len; + + /* Un-export VPN to VRF routes */ + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp, + bgp_vrf); + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp, + bgp_vrf); + + locator = srv6_locator_alloc(bgp->srv6_locator_name); + srv6_locator_copy(locator, bgp->srv6_locator); + + /* Store SID, locator, and label */ + tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); + *tovpn_sid = sid_addr; + if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT6) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + srv6_locator_free( + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid = tovpn_sid; + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = locator; + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_transpose_label = + label; + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT4) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + srv6_locator_free( + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid = tovpn_sid; + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = 
locator; + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_transpose_label = + label; + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT46) { + XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->tovpn_sid); + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + sid_unregister(bgp, bgp_vrf->tovpn_sid); + + bgp_vrf->tovpn_sid = tovpn_sid; + bgp_vrf->tovpn_sid_locator = locator; + bgp_vrf->tovpn_sid_transpose_label = label; + } else { + srv6_locator_free(locator); + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("Unsupported behavior. Not assigned SRv6 SID: %s %pI6", + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx), + &sid_addr); + return -1; + } + + /* Register the new SID */ + sid_register(bgp, tovpn_sid, bgp->srv6_locator_name); + + /* Export VPN to VRF routes */ + vpn_leak_postchange_all(); + + break; + case ZAPI_SRV6_SID_RELEASED: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("SRv6 SID %pI6 %s: RELEASED", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Un-export VPN to VRF routes */ + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp, + bgp_vrf); + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp, + bgp_vrf); + + /* Remove SID, locator, and label */ + if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT6) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + if (bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator) { + srv6_locator_free(bgp->vpn_policy[AFI_IP6] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = + NULL; + } + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_transpose_label = + 0; + + /* Unregister the SID */ + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT4) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + if (bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator) { + srv6_locator_free(bgp->vpn_policy[AFI_IP] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = + NULL; + } + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_transpose_label = + 0; + + /* Unregister the SID */ + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT46) { + XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->tovpn_sid); + if (bgp_vrf->tovpn_sid_locator) { + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; + } + bgp_vrf->tovpn_sid_transpose_label = 0; + + /* Unregister the SID */ + sid_unregister(bgp, bgp_vrf->tovpn_sid); + } else { + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("Unsupported behavior. 
Not assigned SRv6 SID: %s %pI6", + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx), + &sid_addr); + return -1; + } + + /* Export VPN to VRF routes*/ + vpn_leak_postchange_all(); + break; + case ZAPI_SRV6_SID_FAIL_ALLOC: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("SRv6 SID %pI6 %s: Failed to allocate", + &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + case ZAPI_SRV6_SID_FAIL_RELEASE: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: SRv6 SID %pI6 %s failure to release", + __func__, &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + } + + return 0; +} + static int bgp_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) { struct srv6_locator loc = {}; struct bgp *bgp = bgp_get_default(); - const char *loc_name = bgp->srv6_locator_name; if (!bgp || !bgp->srv6_enabled) return 0; @@ -3391,10 +3658,7 @@ static int bgp_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) return -1; - if (bgp_zebra_srv6_manager_get_locator_chunk(loc_name) < 0) - return -1; - - return 0; + return bgp_zebra_process_srv6_locator_internal(&loc); } static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) @@ -3402,7 +3666,8 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) struct srv6_locator loc = {}; struct bgp *bgp = bgp_get_default(); struct listnode *node, *nnode; - struct srv6_locator_chunk *chunk, *tovpn_sid_locator; + struct srv6_locator_chunk *chunk; + struct srv6_locator *tovpn_sid_locator; struct bgp_srv6_function *func; struct bgp *bgp_vrf; struct in6_addr *tovpn_sid; @@ -3414,6 +3679,12 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) return -1; + // clear SRv6 locator + if (bgp->srv6_locator) { + srv6_locator_free(bgp->srv6_locator); + bgp->srv6_locator = NULL; + } + // refresh chunks for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) if (prefix_match((struct prefix *)&loc.prefix, @@ -3490,10 +3761,12 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) tmp_prefi.prefixlen = IPV6_MAX_BITLEN; tmp_prefi.prefix = tovpn_sid_locator->prefix.prefix; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP] - .tovpn_sid_locator); + (struct prefix *)&tmp_prefi)) { + srv6_locator_free(bgp_vrf->vpn_policy[AFI_IP] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = + NULL; + } } /* refresh vpnv6 tovpn_sid_locator */ @@ -3504,10 +3777,12 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) tmp_prefi.prefixlen = IPV6_MAX_BITLEN; tmp_prefi.prefix = tovpn_sid_locator->prefix.prefix; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP6] - .tovpn_sid_locator); + (struct prefix *)&tmp_prefi)) { + srv6_locator_free(bgp_vrf->vpn_policy[AFI_IP6] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = + NULL; + } } /* refresh per-vrf tovpn_sid_locator */ @@ -3517,9 +3792,10 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) tmp_prefi.prefixlen = IPV6_MAX_BITLEN; tmp_prefi.prefix = tovpn_sid_locator->prefix.prefix; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) - srv6_locator_chunk_free( - &bgp_vrf->tovpn_sid_locator); + (struct prefix 
*)&tmp_prefi)) { + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; + } } } @@ -3556,6 +3832,7 @@ static zclient_handler *const bgp_handlers[] = { [ZEBRA_SRV6_LOCATOR_DELETE] = bgp_zebra_process_srv6_locator_delete, [ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK] = bgp_zebra_process_srv6_locator_chunk, + [ZEBRA_SRV6_SID_NOTIFY] = bgp_zebra_srv6_sid_notify, }; static int bgp_if_new_hook(struct interface *ifp) @@ -3583,14 +3860,17 @@ void bgp_if_init(void) hook_register_prio(if_del, 0, bgp_if_delete_hook); } -static void bgp_start_label_manager(struct event *start) +static bool bgp_zebra_label_manager_ready(void) { - bgp_zebra_label_manager_connect(); + return (zclient_sync->sock > 0); } -static bool bgp_zebra_label_manager_ready(void) +static void bgp_start_label_manager(struct event *start) { - return (zclient_sync->sock > 0); + if (!bgp_zebra_label_manager_ready() && + !bgp_zebra_label_manager_connect()) + event_add_timer(bm->master, bgp_start_label_manager, NULL, 1, + &bm->t_bgp_start_label_manager); } static bool bgp_zebra_label_manager_connect(void) @@ -4090,6 +4370,89 @@ int bgp_zebra_srv6_manager_release_locator_chunk(const char *name) return srv6_manager_release_locator_chunk(zclient, name); } +/** + * Ask the SRv6 Manager (zebra) about a specific locator + * + * @param name Locator name + * @return 0 on success, -1 otherwise + */ +int bgp_zebra_srv6_manager_get_locator(const char *name) +{ + if (!name) + return -1; + + /* + * Send the Get Locator request to the SRv6 Manager and return the + * result + */ + return srv6_manager_get_locator(zclient, name); +} + +/** + * Ask the SRv6 Manager (zebra) to allocate a SID. + * + * Optionally, it is possible to provide an IPv6 address (sid_value parameter). + * + * When sid_value is provided, the SRv6 Manager allocates the requested SID + * address, if the request can be satisfied (explicit allocation). + * + * When sid_value is not provided, the SRv6 Manager allocates any available SID + * from the provided locator (dynamic allocation). + * + * @param ctx Context to be associated with the request SID + * @param sid_value IPv6 address to be associated with the requested SID (optional) + * @param locator_name Name of the locator from which the SID must be allocated + * @param sid_func SID Function allocated by the SRv6 Manager. + */ +bool bgp_zebra_request_srv6_sid(const struct srv6_sid_ctx *ctx, + struct in6_addr *sid_value, + const char *locator_name, uint32_t *sid_func) +{ + int ret; + + if (!ctx || !locator_name) + return false; + + /* + * Send the Get SRv6 SID request to the SRv6 Manager and check the + * result + */ + ret = srv6_manager_get_sid(zclient, ctx, sid_value, locator_name, + sid_func); + if (ret < 0) { + zlog_warn("%s: error getting SRv6 SID!", __func__); + return false; + } + + return true; +} + +/** + * Ask the SRv6 Manager (zebra) to release a previously allocated SID. + * + * This function is used to tell the SRv6 Manager that BGP no longer intends + * to use the SID. 
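 *
 * (Illustrative pairing, mirroring the per-VRF handling earlier in this
 *  diff: the same ctx that requested the SID is later used to release it,
 *  e.g.
 *      ctx.vrf_id = bgp_vrf->vrf_id;
 *      ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46;
 *      bgp_zebra_request_srv6_sid(&ctx, &tovpn_sid,
 *                                 bgp_vpn->srv6_locator_name, &sid_func);
 *      ...
 *      bgp_zebra_release_srv6_sid(&ctx); )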
+ * + * @param ctx Context to be associated with the SID to be released + */ +void bgp_zebra_release_srv6_sid(const struct srv6_sid_ctx *ctx) +{ + int ret; + + if (!ctx) + return; + + /* + * Send the Release SRv6 SID request to the SRv6 Manager and check the + * result + */ + ret = srv6_manager_release_sid(zclient, ctx); + if (ret < 0) { + zlog_warn("%s: error releasing SRv6 SID!", __func__); + return; + } +} + void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, ifindex_t ifindex, vrf_id_t vrf_id, enum lsp_types_t ltype, struct prefix *p, diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index 55a4185bde..8deecba747 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -117,6 +117,13 @@ extern int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi, extern int bgp_zebra_stale_timer_update(struct bgp *bgp); extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name); extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name); +extern int bgp_zebra_srv6_manager_get_locator(const char *name); +extern bool bgp_zebra_request_srv6_sid(const struct srv6_sid_ctx *ctx, + struct in6_addr *sid_value, + const char *locator_name, + uint32_t *sid_func); +extern void bgp_zebra_release_srv6_sid(const struct srv6_sid_ctx *ctx); + extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, ifindex_t index, vrf_id_t vrfid, enum lsp_types_t ltype, diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index a88de651f5..8fe8f244da 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -1255,7 +1255,6 @@ static void peer_free(struct peer *peer) EVENT_OFF(peer->t_revalidate_all[afi][safi]); assert(!peer->connection->t_write); assert(!peer->connection->t_read); - event_cancel_event_ready(bm->master, peer->connection); /* Free connected nexthop, if present */ if (CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE) @@ -1497,9 +1496,11 @@ static void bgp_srv6_init(struct bgp *bgp) static void bgp_srv6_cleanup(struct bgp *bgp) { for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) { - if (bgp->vpn_policy[afi].tovpn_sid_locator != NULL) - srv6_locator_chunk_free( - &bgp->vpn_policy[afi].tovpn_sid_locator); + if (bgp->vpn_policy[afi].tovpn_sid_locator != NULL) { + srv6_locator_free( + bgp->vpn_policy[afi].tovpn_sid_locator); + bgp->vpn_policy[afi].tovpn_sid_locator = NULL; + } if (bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent != NULL) XFREE(MTYPE_BGP_SRV6_SID, bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent); @@ -1510,8 +1511,10 @@ static void bgp_srv6_cleanup(struct bgp *bgp) } } - if (bgp->tovpn_sid_locator != NULL) - srv6_locator_chunk_free(&bgp->tovpn_sid_locator); + if (bgp->tovpn_sid_locator != NULL) { + srv6_locator_free(bgp->tovpn_sid_locator); + bgp->tovpn_sid_locator = NULL; + } if (bgp->tovpn_zebra_vrf_sid_last_sent != NULL) XFREE(MTYPE_BGP_SRV6_SID, bgp->tovpn_zebra_vrf_sid_last_sent); if (bgp->tovpn_sid != NULL) { @@ -1523,6 +1526,9 @@ static void bgp_srv6_cleanup(struct bgp *bgp) list_delete(&bgp->srv6_locator_chunks); if (bgp->srv6_functions) list_delete(&bgp->srv6_functions); + + srv6_locator_free(bgp->srv6_locator); + bgp->srv6_locator = NULL; } /* Allocate new peer object, implicitely locked. 
*/ @@ -3520,7 +3526,7 @@ static struct bgp *bgp_create(as_t *as, const char *name, bgp_addpath_init_bgp_data(&bgp->tx_addpath); bgp->fast_convergence = false; bgp->llgr_stale_time = BGP_DEFAULT_LLGR_STALE_TIME; - bgp->rmap_def_originate_eval_timer = RMAP_DEFAULT_ORIGINATE_EVAL_TIMER; + bgp->rmap_def_originate_eval_timer = 0; #ifdef ENABLE_BGP_VNC if (inst_type != BGP_INSTANCE_TYPE_VRF) { @@ -4689,6 +4695,7 @@ static const struct peer_flag_action peer_flag_action_list[] = { {PEER_FLAG_LOCAL_AS, 0, peer_change_reset}, {PEER_FLAG_LOCAL_AS_NO_PREPEND, 0, peer_change_reset}, {PEER_FLAG_LOCAL_AS_REPLACE_AS, 0, peer_change_reset}, + {PEER_FLAG_DUAL_AS, 0, peer_change_reset}, {PEER_FLAG_UPDATE_SOURCE, 0, peer_change_none}, {PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE, 0, peer_change_none}, {PEER_FLAG_EXTENDED_OPT_PARAMS, 0, peer_change_reset}, @@ -4701,6 +4708,7 @@ static const struct peer_flag_action peer_flag_action_list[] = { {PEER_FLAG_CAPABILITY_FQDN, 0, peer_change_none}, {PEER_FLAG_AS_LOOP_DETECTION, 0, peer_change_none}, {PEER_FLAG_EXTENDED_LINK_BANDWIDTH, 0, peer_change_none}, + {PEER_FLAG_LONESOUL, 0, peer_change_reset_out}, {0, 0, 0}}; static const struct peer_flag_action peer_af_flag_action_list[] = { @@ -5783,6 +5791,10 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi, subgrp = peer_subgroup(peer, afi, safi); if (rmap) { + if (!peer->bgp->rmap_def_originate_eval_timer) + peer->bgp->rmap_def_originate_eval_timer = + RMAP_DEFAULT_ORIGINATE_EVAL_TIMER; + if (!peer->default_rmap[afi][safi].name || strcmp(rmap, peer->default_rmap[afi][safi].name) != 0) { struct route_map *map = NULL; @@ -5865,6 +5877,10 @@ int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi, if (rmap) { struct route_map *map = NULL; + if (!member->bgp->rmap_def_originate_eval_timer) + member->bgp->rmap_def_originate_eval_timer = + RMAP_DEFAULT_ORIGINATE_EVAL_TIMER; + if (member->default_rmap[afi][safi].name) { map = route_map_lookup_by_name( member->default_rmap[afi][safi].name); @@ -6630,9 +6646,9 @@ int peer_allowas_in_unset(struct peer *peer, afi_t afi, safi_t safi) } int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend, - bool replace_as, const char *as_str) + bool replace_as, bool dual_as, const char *as_str) { - bool old_no_prepend, old_replace_as; + bool old_no_prepend, old_replace_as, old_dual_as; struct bgp *bgp = peer->bgp; struct peer *member; struct listnode *node, *nnode; @@ -6645,14 +6661,16 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend, !!CHECK_FLAG(peer->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND); old_replace_as = !!CHECK_FLAG(peer->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS); + old_dual_as = !!CHECK_FLAG(peer->flags, PEER_FLAG_DUAL_AS); /* Set flag and configuration on peer. 
*/ peer_flag_set(peer, PEER_FLAG_LOCAL_AS); peer_flag_modify(peer, PEER_FLAG_LOCAL_AS_NO_PREPEND, no_prepend); peer_flag_modify(peer, PEER_FLAG_LOCAL_AS_REPLACE_AS, replace_as); + peer_flag_modify(peer, PEER_FLAG_DUAL_AS, dual_as); - if (peer->change_local_as == as && old_no_prepend == no_prepend - && old_replace_as == replace_as) + if (peer->change_local_as == as && old_no_prepend == no_prepend && + old_replace_as == replace_as && old_dual_as == dual_as) return 0; peer->change_local_as = as; if (as_str) { @@ -6681,10 +6699,11 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend, PEER_FLAG_LOCAL_AS_NO_PREPEND); old_replace_as = CHECK_FLAG(member->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS); - if (member->change_local_as == as - && CHECK_FLAG(member->flags, PEER_FLAG_LOCAL_AS) - && old_no_prepend == no_prepend - && old_replace_as == replace_as) + old_dual_as = !!CHECK_FLAG(member->flags, PEER_FLAG_DUAL_AS); + if (member->change_local_as == as && + CHECK_FLAG(member->flags, PEER_FLAG_LOCAL_AS) && + old_no_prepend == no_prepend && + old_replace_as == replace_as && old_dual_as == dual_as) continue; /* Set flag and configuration on peer-group member. */ @@ -6693,6 +6712,7 @@ int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend, no_prepend); COND_FLAG(member->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS, replace_as); + COND_FLAG(member->flags, PEER_FLAG_DUAL_AS, dual_as); member->change_local_as = as; if (as_str) member->change_local_as_pretty = XSTRDUP(MTYPE_BGP_NAME, @@ -6715,12 +6735,14 @@ int peer_local_as_unset(struct peer *peer) peer_flag_inherit(peer, PEER_FLAG_LOCAL_AS); peer_flag_inherit(peer, PEER_FLAG_LOCAL_AS_NO_PREPEND); peer_flag_inherit(peer, PEER_FLAG_LOCAL_AS_REPLACE_AS); + peer_flag_inherit(peer, PEER_FLAG_DUAL_AS); PEER_ATTR_INHERIT(peer, peer->group, change_local_as); } else { /* Otherwise remove flag and configuration from peer. 
*/ peer_flag_unset(peer, PEER_FLAG_LOCAL_AS); peer_flag_unset(peer, PEER_FLAG_LOCAL_AS_NO_PREPEND); peer_flag_unset(peer, PEER_FLAG_LOCAL_AS_REPLACE_AS); + peer_flag_unset(peer, PEER_FLAG_DUAL_AS); peer->change_local_as = 0; XFREE(MTYPE_BGP_NAME, peer->change_local_as_pretty); } @@ -6752,6 +6774,7 @@ int peer_local_as_unset(struct peer *peer) UNSET_FLAG(member->flags, PEER_FLAG_LOCAL_AS); UNSET_FLAG(member->flags, PEER_FLAG_LOCAL_AS_NO_PREPEND); UNSET_FLAG(member->flags, PEER_FLAG_LOCAL_AS_REPLACE_AS); + UNSET_FLAG(member->flags, PEER_FLAG_DUAL_AS); member->change_local_as = 0; XFREE(MTYPE_BGP_NAME, member->change_local_as_pretty); member->last_reset = PEER_DOWN_LOCAL_AS_CHANGE; diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 7f1b82d9c7..795e4fbc58 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -270,7 +270,7 @@ struct vpn_policy { */ uint32_t tovpn_sid_index; /* unset => set to 0 */ struct in6_addr *tovpn_sid; - struct srv6_locator_chunk *tovpn_sid_locator; + struct srv6_locator *tovpn_sid_locator; uint32_t tovpn_sid_transpose_label; struct in6_addr *tovpn_zebra_vrf_sid_last_sent; }; @@ -836,11 +836,12 @@ struct bgp { /* BGP VPN SRv6 backend */ bool srv6_enabled; char srv6_locator_name[SRV6_LOCNAME_SIZE]; + struct srv6_locator *srv6_locator; struct list *srv6_locator_chunks; struct list *srv6_functions; uint32_t tovpn_sid_index; /* unset => set to 0 */ struct in6_addr *tovpn_sid; - struct srv6_locator_chunk *tovpn_sid_locator; + struct srv6_locator *tovpn_sid_locator; uint32_t tovpn_sid_transpose_label; struct in6_addr *tovpn_zebra_vrf_sid_last_sent; @@ -1506,6 +1507,7 @@ struct peer { #define PEER_FLAG_CAPABILITY_FQDN (1ULL << 37) /* fqdn capability */ #define PEER_FLAG_AS_LOOP_DETECTION (1ULL << 38) /* as path loop detection */ #define PEER_FLAG_EXTENDED_LINK_BANDWIDTH (1ULL << 39) +#define PEER_FLAG_DUAL_AS (1ULL << 40) /* *GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART @@ -1827,16 +1829,13 @@ struct peer { struct stream *last_reset_cause; /* The kind of route-map Flags.*/ - uint16_t rmap_type; + uint8_t rmap_type; #define PEER_RMAP_TYPE_IN (1U << 0) /* neighbor route-map in */ #define PEER_RMAP_TYPE_OUT (1U << 1) /* neighbor route-map out */ #define PEER_RMAP_TYPE_NETWORK (1U << 2) /* network route-map */ #define PEER_RMAP_TYPE_REDISTRIBUTE (1U << 3) /* redistribute route-map */ #define PEER_RMAP_TYPE_DEFAULT (1U << 4) /* default-originate route-map */ -#define PEER_RMAP_TYPE_NOSET (1U << 5) /* not allow to set commands */ -#define PEER_RMAP_TYPE_IMPORT (1U << 6) /* neighbor route-map import */ -#define PEER_RMAP_TYPE_EXPORT (1U << 7) /* neighbor route-map export */ -#define PEER_RMAP_TYPE_AGGREGATE (1U << 8) /* aggregate-address route-map */ +#define PEER_RMAP_TYPE_AGGREGATE (1U << 5) /* aggregate-address route-map */ /** Peer overwrite configuration. 
*/ struct bfd_session_config { @@ -2445,7 +2444,7 @@ extern int peer_allowas_in_set(struct peer *, afi_t, safi_t, int, int); extern int peer_allowas_in_unset(struct peer *, afi_t, safi_t); extern int peer_local_as_set(struct peer *peer, as_t as, bool no_prepend, - bool replace_as, const char *as_str); + bool replace_as, bool dual_as, const char *as_str); extern int peer_local_as_unset(struct peer *); extern int peer_prefix_list_set(struct peer *, afi_t, safi_t, int, diff --git a/doc/developer/bgpd.rst b/doc/developer/bgpd.rst index a35fa614f4..f5263ff31d 100644 --- a/doc/developer/bgpd.rst +++ b/doc/developer/bgpd.rst @@ -9,3 +9,4 @@ BGPD next-hop-tracking bgp-typecodes + bmp diff --git a/doc/developer/mgmtd-dev.rst b/doc/developer/mgmtd-dev.rst index b979af06fa..4c56cadb28 100644 --- a/doc/developer/mgmtd-dev.rst +++ b/doc/developer/mgmtd-dev.rst @@ -147,7 +147,7 @@ Front-End Interface: - change route_map_init() to route_map_init_new(false) and remove from VTYSH_ROUTE_MAP_CONFIG (leave in VTYSH_ROUTE_MAP_SHOW). - remove vrf_cmd_init(NULL) => remove from VTYSH_INTERFACE_SUBSET - ... + Back-End Interface: diff --git a/doc/developer/northbound/yang-tools.rst b/doc/developer/northbound/yang-tools.rst index fb5a287245..91a767dce7 100644 --- a/doc/developer/northbound/yang-tools.rst +++ b/doc/developer/northbound/yang-tools.rst @@ -87,7 +87,7 @@ Generate skeleton instance data: * XML: - .. code:: sh +.. code:: sh $ pyang -p <yang-search-path> \ -f sample-xml-skeleton --sample-xml-skeleton-defaults \ @@ -95,7 +95,7 @@ Generate skeleton instance data: * JSON: - .. code:: sh +.. code:: sh $ pyang -p <yang-search-path> \ -f jsonxsl module.yang -o module.xsl diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst index 72de7b3bc9..6209749636 100644 --- a/doc/developer/topotests.rst +++ b/doc/developer/topotests.rst @@ -37,7 +37,7 @@ Installing Topotest Requirements tshark \ valgrind python3 -m pip install wheel - python3 -m pip install 'pytest>=6.2.4' 'pytest-xdist>=2.3.0' + python3 -m pip install 'pytest>=8.3.2' 'pytest-asyncio>=0.24.0' 'pytest-xdist>=3.6.1' python3 -m pip install 'scapy>=2.4.5' python3 -m pip install xmltodict python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311 @@ -731,8 +731,8 @@ packages. Code coverage can automatically be gathered for any topotest run. To support this FRR must first be compiled with the ``--enable-gcov`` configure option. -This will cause *.gnco files to be created during the build. When topotests are -run the statistics are generated and stored in *.gcda files. Topotest +This will cause \*.gnco files to be created during the build. When topotests are +run the statistics are generated and stored in \*.gcda files. Topotest infrastructure will gather these files, capture the information into a ``coverage.info`` ``lcov`` file and also report the coverage summary. @@ -741,7 +741,7 @@ If you build your FRR in a directory outside of the FRR source directory you will also need to pass the ``--cov-frr-build-dir`` argument specifying the build directory location. -During the topotest run the *.gcda files are generated into a ``gcda`` +During the topotest run the \*.gcda files are generated into a ``gcda`` sub-directory of the top-level run directory (i.e., normally ``/tmp/topotests/gcda``). 
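As an illustrative aside (not part of the patch above), assuming the standard ``lcov``/``genhtml`` tooling already referenced in this section, an HTML report could be rendered from the collected ``coverage.info`` file roughly as follows; the output directory name is a placeholder:

.. code:: sh

   # sketch: render an HTML coverage report from the lcov data file
   genhtml coverage.info --output-directory coverage-html
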
These files will then be copied at the end of the topotest run into the FRR build directory where the ``gcov`` and ``lcov`` @@ -756,7 +756,7 @@ The ``coverage.info`` file can then be used to generate coverage reports or file markup (e.g., using the ``genhtml`` utility) or enable markup within your IDE/editor if supported (e.g., the emacs ``cov-mode`` package) -NOTE: the *.gcda files in ``/tmp/topotests/gcda`` are cumulative so if you do +NOTE: the \*.gcda files in ``/tmp/topotests/gcda`` are cumulative so if you do not remove them they will aggregate data across multiple topotest runs. How to reproduce failed Tests diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst index 50bcb2976e..5e22c4cb72 100644 --- a/doc/developer/workflow.rst +++ b/doc/developer/workflow.rst @@ -6,9 +6,10 @@ Process & Workflow .. highlight:: none -FRR is a large project developed by many different groups. This section -documents standards for code style & quality, commit messages, pull requests -and best practices that all contributors are asked to follow. +FRR is a large project developed by many different groups. This +section documents standards for code style & quality, commit messages, +pull requests (PRs) and best practices that all contributors are asked +to follow. This chapter is "descriptive/post-factual" in that it documents pratices that are in use; it is not "definitive/pre-factual" in prescribing practices. This @@ -241,7 +242,7 @@ discontinued. The LTS branch duties are the following ones: - organise meetings on a (bi-)weekly or monthly basis, the handling of issues - and pull requested relative to that branch. When time permits, this may be done + and pull requests relative to that branch. When time permits, this may be done during the regularly scheduled FRR meeting. - ensure the stability of the branch, by using and eventually adapting the @@ -324,11 +325,17 @@ relevant to your work. Submitting Patches and Enhancements =================================== -FRR accepts patches using GitHub pull requests. - -The base branch for new contributions and non-critical bug fixes should be -``master``. Please ensure your pull request is based on this branch when you -submit it. +FRR accepts patches using GitHub pull requests (PRs). The typical FRR +developer will maintain a fork of the FRR project in GitHub; see the +GitHub documentation for help setting up an account and creating a +fork repository. Keep the ``master`` branch of your fork up-to-date +with the FRR version. Create a dev branch in your fork and commit your +work there. When ready, create a pull-request between your dev branch +in your fork and the main FRR repository in GitHub. + +The base branch for new contributions and non-critical bug fixes +should be ``master``. Please ensure your pull request targets this +branch when you submit it. Code submitted by pull request will be automatically tested by one or more CI systems. Once the automated tests succeed, other developers will review your diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index aa62d274f0..438c60a3f6 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1818,7 +1818,7 @@ Configuring Peers Since sent prefix count is managed by update-groups, this option creates a separate update-group for outgoing updates. -.. clicmd:: neighbor PEER local-as AS-NUMBER [no-prepend] [replace-as] +.. clicmd:: neighbor PEER local-as AS-NUMBER [no-prepend [replace-as [dual-as]]] Specify an alternate AS for this BGP process when interacting with the specified peer. 
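As a hedged illustration of the extended ``local-as`` syntax shown above (a sketch, not part of the patch itself), a minimal eBGP configuration using the new ``dual-as`` keyword might look like this; the AS numbers and peer address are placeholders:

.. code-block:: frr

   router bgp 65001
    neighbor 192.0.2.1 remote-as 65002
    neighbor 192.0.2.1 local-as 65100 no-prepend replace-as dual-as

Per the command syntax, ``dual-as`` is only accepted together with ``no-prepend`` and ``replace-as``.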
With no modifiers, the specified local-as is prepended to @@ -1834,6 +1834,10 @@ Configuring Peers Note that replace-as can only be specified if no-prepend is. + The ``dual-as`` keyword is used to configure the neighbor to establish a peering + session using the real autonomous-system number (``router bgp ASN``) or by using + the autonomous system number configured with the ``local-as``. + This command is only allowed for eBGP peers. .. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> as-override @@ -1972,12 +1976,14 @@ Configuring Peers and will not be displayed as part of a `show run`. The no form of the command turns off this ability. -.. clicmd:: bgp default-originate timer (0-3600) +.. clicmd:: bgp default-originate timer (0-65535) Set the period to rerun the default-originate route-map scanner process. The default is 5 seconds. With a full routing table, it might be useful to increase this setting to avoid scanning the whole BGP table aggressively. + Setting to 0 turns off the scanning at all. + .. clicmd:: bgp default ipv4-unicast This command allows the user to specify that the IPv4 Unicast address @@ -2191,8 +2197,7 @@ and will share updates. .. clicmd:: neighbor PEER solo This command is used to indicate that routes advertised by the peer - should not be reflected back to the peer. This command only is only - meaningful when there is a single peer defined in the peer-group. + should not be reflected back to the peer. .. clicmd:: show [ip] bgp peer-group [json] diff --git a/doc/user/pim.rst b/doc/user/pim.rst index 5701560bd6..372fdd73ae 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -83,6 +83,41 @@ PIM Routers cannot see data flowing in better than 30 second chunks. This command is vrf aware, to configure for a vrf, specify the vrf in the router pim block. +.. clicmd:: bsr candidate-bsr [priority (0-255)] [source [address A.B.C.D] | [interface INTERFACE] | [loopback] | [any]] + + Configure the router to advertise itself as a candidate PIM-SM BSR. The candidate + with the highest priority becomes the BSR for the domain (high wins). When priority is the + same for more than one candidate BSR, the candidate with the highest IP address + becomes the BSR of the domain. The address can be configured explicitly + via ``address``, or be selecting an interface name using ``interface``. + If ``any`` is configured the highest address from any interface will be selected. + By default, the highest loopback address is selected, which can also be + configured via ``loopback`` + +.. clicmd:: bsr candidate-rp [interval] + + Configure the router to advertise itself as a candidate PIM-SM RP at the + specified ``interval`` in seconds. + + +.. clicmd:: bsr candidate-rp group A.B.C.D/M + + Configure the multicast group prefix that this candidate RP advertises itself for. + This command can be repeated for all desired groups that need to be added to the + candidate RP advertisement. + +.. clicmd:: bsr candidate-rp [priority (0-255)] [source [address A.B.C.D] | [interface INTERFACE] | [loopback] | [any]] + + Configure the router to advertise itself as a candidate PIM-SM RP. ``interval`` + can be used to configure the interval in seconds to send these advertisements. + The candidate with the lowest priority becomes the RP for the domain (low wins). + When priority is the same for more than one candidate RP, the candidate with + the highest IP address becomes the BSR of the domain. The address can be + configured explicitly via ``address``, or be selecting an interface name + using ``interface``. 
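To make the candidate BSR/RP syntax above more concrete (a sketch only, not part of the patch; the addresses, priorities and group range are placeholders, and the placement under ``router pim`` is assumed from the surrounding documentation):

.. code-block:: frr

   router pim
    bsr candidate-bsr priority 200 source address 192.0.2.10
    bsr candidate-rp priority 10 source address 192.0.2.10
    bsr candidate-rp group 239.1.0.0/16
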
If ``any`` is configured the highest address from any + interface will be selected.By default, the highest loopback address is + selected, which can also be configured via ``loopback``. + .. clicmd:: register-accept-list PLIST When pim receives a register packet the source of the packet will be compared @@ -611,11 +646,28 @@ cause great confusion. Display PIM MLAG (multi-chassis link aggregation) session status and control message statistics. -.. clicmd:: show ip pim bsr +.. clicmd:: show ip pim bsr [vrf NAME] [json] Display current bsr, its uptime and last received bsm age. -.. clicmd:: show ip pim bsrp-info [vrf NAME] [json] +.. clicmd:: show ip pim bsr candidate-bsr [vrf NAME] [json] + + Display information about the candidate BSR state on this router. + +.. clicmd:: show ip pim bsr candidate-rp [vrf NAME] [json] + + Display information about the candidate RP state on this router. + +.. clicmd:: show ip pim bsr candidate-rp-database [vrf NAME] [json] + + Display the current list of candidate RPs received by this router. + +.. clicmd:: show ip pim bsr groups [vrf NAME] [json] + + Display the current list of multicast group mapping received by + this router from candidate RPs. + +.. clicmd:: show ip pim bsr rp-info [vrf NAME] [json] Display group-to-rp mappings received from E-BSR. diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst index edf7a82015..e4e28d71ab 100644 --- a/doc/user/pimv6.rst +++ b/doc/user/pimv6.rst @@ -80,6 +80,29 @@ PIMv6 Router cannot see data flowing in better than 30 second chunks. This command is vrf aware, to configure for a vrf, specify the vrf in the router pim6 block. +.. clicmd:: bsr candidate-bsr [priority (0-255)] [source [address X:X::X:X] | [interface INTERFACE] | [loopback] | [any]] + + Configure the router to advertise itself as a candidate PIM-SM BSR. The candidate + with the highest priority becomes the BSR for the domain (high wins). When priority is the + same for more than one candidate BSR, the candidate with the highest IP address + becomes the BSR of the domain. The address can be configured explicitly + via ``address``, or be selecting an interface name using ``interface``. + If ``any`` is configured the highest address from any interface will be selected. + By default, the highest loopback address is selected, which can also be + configured via ``loopback`` + +.. clicmd:: bsr candidate-rp [interval (1-4294967295) ] [priority (0-255)] [source [address X:X::X:X] | [interface INTERFACE] | [loopback] | [any]] + + Configure the router to advertise itself as a candidate PIM-SM RP. ``interval`` + can be used to configure the interval in seconds to send these advertisements. + The candidate with the lowest priority becomes the RP for the domain (low wins). + When priority is the same for more than one candidate RP, the candidate with + the highest IP address becomes the BSR of the domain. The address can be + configured explicitly via ``address``, or be selecting an interface name + using ``interface``. If ``any`` is configured the highest address from any + interface will be selected.By default, the highest loopback address is + selected, which can also be configured via ``loopback``. + .. clicmd:: spt-switchover infinity-and-beyond [prefix-list PLIST] On the last hop router if it is desired to not switch over to the SPT tree @@ -391,11 +414,28 @@ General multicast routing state Display total number of S,G mroutes and number of S,G mroutes installed into the kernel for all vrfs. -.. clicmd:: show ipv6 pim bsr +.. 
clicmd:: show ipv6 pim bsr [vrf NAME] [json] Display current bsr, its uptime and last received bsm age. -.. clicmd:: show ipv6 pim bsrp-info [vrf NAME] [json] +.. clicmd:: show ipv6 pim bsr candidate-bsr [vrf NAME] [json] + + Display information about the candidate BSR state on this router. + +.. clicmd:: show ipv6 pim bsr candidate-rp [vrf NAME] [json] + + Display information about the candidate RP state on this router. + +.. clicmd:: show ipv6 pim bsr candidate-rp-database [vrf NAME] [json] + + Display the current list of candidate RPs received by this router. + +.. clicmd:: show ipv6 pim bsr groups [vrf NAME] [json] + + Display the current list of multicast group mapping received by + this router from candidate RPs. + +.. clicmd:: show ipv6 pim bsr rp-info [vrf NAME] [json] Display group-to-rp mappings received from E-BSR. diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst index 900d2fd343..06a19a6139 100644 --- a/doc/user/zebra.rst +++ b/doc/user/zebra.rst @@ -815,6 +815,16 @@ Allocated label chunks table can be dumped using the command range is configured, static label requests that match that range are not accepted. +FEC nexthop entry resolution over MPLS networks +----------------------------------------------- + +The LSP associated with a BGP labeled route is normally restricted to +directly-connected nexthops. If connected nexthops are not available, +the LSP entry will not be installed. This command permits the use of +recursive resolution for LSPs, similar to that available for IP routes. + +.. clicmd:: mpls fec nexthop-resolution + .. _zebra-srv6: Segment-Routing IPv6 @@ -1637,7 +1647,11 @@ zebra Terminal Mode Commands option as that nexthop groups are per namespace in linux. If you specify singleton you would like to see the singleton nexthop groups that do have an afi. [type] allows you to filter those - only coming from a specific NHG type (protocol). + only coming from a specific NHG type (protocol). A nexthop group + that has `Initial Delay`, means that this nexthop group entry + was not installed because no-one was using it at that point and + Zebra can delay installing this route until it is used by something + else. .. clicmd:: show <ip|ipv6> zebra route dump [<vrf> VRFNAME] diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c index 2b19cbba84..e6cc794bae 100644 --- a/isisd/isis_cli.c +++ b/isisd/isis_cli.c @@ -2065,6 +2065,12 @@ void cli_show_isis_srv6_locator(struct vty *vty, const struct lyd_node *dnode, vty_out(vty, " locator %s\n", yang_dnode_get_string(dnode, NULL)); } +void cli_show_isis_srv6_locator_end(struct vty *vty, + const struct lyd_node *dnode) +{ + vty_out(vty, " exit\n"); +} + /* * XPath: /frr-isisd:isis/instance/segment-routing-srv6/enabled */ diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c index 391d42fba1..d588af314c 100644 --- a/isisd/isis_lsp.c +++ b/isisd/isis_lsp.c @@ -711,10 +711,6 @@ void lsp_print_common(struct isis_lsp *lsp, struct vty *vty, struct json_object } } -#if CONFDATE > 20240916 -CPP_NOTICE("Remove JSON in '-' format") -#endif - void lsp_print_json(struct isis_lsp *lsp, struct json_object *json, char dynhost, struct isis *isis) { @@ -728,19 +724,11 @@ void lsp_print_json(struct isis_lsp *lsp, struct json_object *json, own_json = json_object_new_object(); json_object_object_add(json, "lsp", own_json); json_object_string_add(own_json, "id", LSPid); -#if CONFDATE > 20240916 - CPP_NOTICE("remove own key") -#endif json_object_string_add(own_json, "own", lsp->own_lsp ? 
"*" : " "); if (lsp->own_lsp) json_object_boolean_add(own_json, "ownLSP", true); - json_object_int_add(json, "pdu-len", lsp->hdr.pdu_len); json_object_int_add(json, "pduLen", lsp->hdr.pdu_len); snprintfrr(buf, sizeof(buf), "0x%08x", lsp->hdr.seqno); -#if CONFDATE > 20240916 - CPP_NOTICE("remove seq-number key") -#endif - json_object_string_add(json, "seq-number", buf); json_object_string_add(json, "seqNumber", buf); snprintfrr(buf, sizeof(buf), "0x%04hx", lsp->hdr.checksum); json_object_string_add(json, "chksum", buf); @@ -751,11 +739,6 @@ void lsp_print_json(struct isis_lsp *lsp, struct json_object *json, } else { json_object_int_add(json, "holdtime", lsp->hdr.rem_lifetime); } -#if CONFDATE > 20240916 - CPP_NOTICE("remove att-p-ol key") -#endif - json_object_string_add( - json, "att-p-ol", lsp_bits2string(lsp->hdr.lsp_bits, b, sizeof(b))); json_object_string_add(json, "attPOl", lsp_bits2string(lsp->hdr.lsp_bits, b, sizeof(b))); } diff --git a/isisd/isis_mt.c b/isisd/isis_mt.c index d04a24dc46..65ba395ffc 100644 --- a/isisd/isis_mt.c +++ b/isisd/isis_mt.c @@ -226,7 +226,8 @@ struct isis_area_mt_setting **area_mt_settings(struct isis_area *area, count++; if (count > size) { - rv = XREALLOC(MTYPE_TMP, rv, count * sizeof(*rv)); + rv = XREALLOC(MTYPE_MT_AREA_SETTING, rv, + count * sizeof(*rv)); size = count; } rv[count - 1] = setting; @@ -341,7 +342,8 @@ circuit_mt_settings(struct isis_circuit *circuit, unsigned int *mt_count) count++; if (count > size) { - rv = XREALLOC(MTYPE_TMP, rv, count * sizeof(*rv)); + rv = XREALLOC(MTYPE_MT_AREA_SETTING, rv, + count * sizeof(*rv)); size = count; } rv[count - 1] = setting; @@ -376,8 +378,8 @@ bool tlvs_to_adj_mt_set(struct isis_tlvs *tlvs, bool v4_usable, bool v6_usable, old_mt_count = adj->mt_count; if (old_mt_count) { - old_mt_set = - XCALLOC(MTYPE_TMP, old_mt_count * sizeof(*old_mt_set)); + old_mt_set = XCALLOC(MTYPE_MT_AREA_SETTING, + old_mt_count * sizeof(*old_mt_set)); memcpy(old_mt_set, adj->mt_set, old_mt_count * sizeof(*old_mt_set)); } @@ -436,7 +438,7 @@ bool tlvs_to_adj_mt_set(struct isis_tlvs *tlvs, bool v4_usable, bool v6_usable, changed = true; if (old_mt_count) - XFREE(MTYPE_TMP, old_mt_set); + XFREE(MTYPE_MT_AREA_SETTING, old_mt_set); return changed; } @@ -464,7 +466,7 @@ static void mt_set_add(uint16_t **mt_set, unsigned int *size, } if (*index >= *size) { - *mt_set = XREALLOC(MTYPE_TMP, *mt_set, + *mt_set = XREALLOC(MTYPE_MT_AREA_SETTING, *mt_set, sizeof(**mt_set) * ((*index) + 1)); *size = (*index) + 1; } diff --git a/isisd/isis_nb.c b/isisd/isis_nb.c index 16cafa2ff0..8608d2b9bd 100644 --- a/isisd/isis_nb.c +++ b/isisd/isis_nb.c @@ -873,6 +873,7 @@ const struct frr_yang_module_info frr_isisd_info = { .modify = isis_instance_segment_routing_srv6_locator_modify, .destroy = isis_instance_segment_routing_srv6_locator_destroy, .cli_show = cli_show_isis_srv6_locator, + .cli_show_end = cli_show_isis_srv6_locator_end, }, }, { diff --git a/isisd/isis_nb.h b/isisd/isis_nb.h index c04a006a2e..1bf95e3db3 100644 --- a/isisd/isis_nb.h +++ b/isisd/isis_nb.h @@ -332,6 +332,8 @@ int isis_instance_segment_routing_srv6_locator_destroy( struct nb_cb_destroy_args *args); void cli_show_isis_srv6_locator(struct vty *vty, const struct lyd_node *dnode, bool show_defaults); +void cli_show_isis_srv6_locator_end(struct vty *vty, + const struct lyd_node *dnode); int isis_instance_segment_routing_srv6_msd_node_msd_max_segs_left_modify( struct nb_cb_modify_args *args); int isis_instance_segment_routing_srv6_msd_node_msd_max_end_pop_modify( diff --git 
a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index 2b47d5cbeb..7286a692f5 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -2890,29 +2890,22 @@ int isis_instance_flex_algo_destroy(struct nb_cb_destroy_args *args) struct isis_area *area; uint32_t algorithm; + if (args->event != NB_EV_APPLY) + return NB_OK; + algorithm = yang_dnode_get_uint32(args->dnode, "flex-algo"); area = nb_running_get_entry(args->dnode, NULL, true); - switch (args->event) { - case NB_EV_APPLY: - for (ALL_LIST_ELEMENTS(area->flex_algos->flex_algos, node, - nnode, fa)) { - if (fa->algorithm == algorithm) - flex_algo_free(area->flex_algos, fa); - } - if (list_isempty(area->flex_algos->flex_algos)) { - for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, - circuit)) - isis_link_params_update_asla(circuit, - circuit->interface); - } - lsp_regenerate_schedule(area, area->is_type, 0); - break; - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; + for (ALL_LIST_ELEMENTS(area->flex_algos->flex_algos, node, nnode, fa)) { + if (fa->algorithm == algorithm) + flex_algo_free(area->flex_algos, fa); + } + if (list_isempty(area->flex_algos->flex_algos)) { + for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) + isis_link_params_update_asla(circuit, + circuit->interface); } + lsp_regenerate_schedule(area, area->is_type, 0); return NB_OK; } @@ -2960,26 +2953,22 @@ int isis_instance_flex_algo_advertise_definition_destroy( struct flex_algo *fa; uint32_t algorithm; + + if (args->event != NB_EV_APPLY) + return NB_OK; + area = nb_running_get_entry(args->dnode, NULL, true); algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); - switch (args->event) { - case NB_EV_APPLY: - fa = flex_algo_lookup(area->flex_algos, algorithm); - if (!fa) { - snprintf(args->errmsg, args->errmsg_len, - "flex-algo object not found"); - return NB_ERR_RESOURCE; - } - fa->advertise_definition = false; - lsp_regenerate_schedule(area, area->is_type, 0); - break; - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - case NB_EV_ABORT: - break; + fa = flex_algo_lookup(area->flex_algos, algorithm); + if (!fa) { + snprintf(args->errmsg, args->errmsg_len, + "flex-algo object not found"); + return NB_ERR_RESOURCE; } + fa->advertise_definition = false; + lsp_regenerate_schedule(area, area->is_type, 0); return NB_OK; } @@ -2987,27 +2976,23 @@ int isis_instance_flex_algo_advertise_definition_destroy( static int isis_instance_flex_algo_affinity_set(struct nb_cb_create_args *args, int type) { - struct affinity_map *map; + char xpathr[XPATH_MAXLEN]; + struct lyd_node *dnode; struct isis_area *area; struct admin_group *ag; + uint16_t bit_position; struct flex_algo *fa; uint32_t algorithm; const char *val; - algorithm = yang_dnode_get_uint32(args->dnode, "../../flex-algo"); - area = nb_running_get_entry(args->dnode, NULL, true); val = yang_dnode_get_string(args->dnode, "."); switch (args->event) { case NB_EV_VALIDATE: - fa = flex_algo_lookup(area->flex_algos, algorithm); - if (!fa) { - snprintf(args->errmsg, args->errmsg_len, - "flex-algo object not found"); - return NB_ERR_RESOURCE; - } - map = affinity_map_get(val); - if (!map) { + snprintf(xpathr, sizeof(xpathr), + "/frr-affinity-map:lib/affinity-maps/affinity-map[name='%s']/value", + val); + if (!yang_dnode_get(args->dnode, xpathr)) { snprintf(args->errmsg, args->errmsg_len, "affinity map %s isn't found", val); return NB_ERR_VALIDATION; @@ -3017,14 +3002,20 @@ static int isis_instance_flex_algo_affinity_set(struct nb_cb_create_args *args, case NB_EV_ABORT: break; case 
NB_EV_APPLY: + algorithm = yang_dnode_get_uint32(args->dnode, + "../../flex-algo"); + area = nb_running_get_entry(args->dnode, NULL, true); fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, "flex-algo object not found"); return NB_ERR_RESOURCE; } - map = affinity_map_get(val); - if (!map) { + snprintf(xpathr, sizeof(xpathr), + "/frr-affinity-map:lib/affinity-maps/affinity-map[name='%s']/value", + val); + dnode = yang_dnode_get(args->dnode, xpathr); + if (!dnode) { snprintf(args->errmsg, args->errmsg_len, "affinity map %s isn't found", val); return NB_ERR_RESOURCE; @@ -3038,7 +3029,8 @@ static int isis_instance_flex_algo_affinity_set(struct nb_cb_create_args *args, else break; - admin_group_set(ag, map->bit_position); + bit_position = yang_dnode_get_uint16(dnode, NULL); + admin_group_set(ag, bit_position); lsp_regenerate_schedule(area, area->is_type, 0); break; } @@ -3057,18 +3049,10 @@ isis_instance_flex_algo_affinity_unset(struct nb_cb_destroy_args *args, uint32_t algorithm; const char *val; - algorithm = yang_dnode_get_uint32(args->dnode, "../../flex-algo"); - area = nb_running_get_entry(args->dnode, NULL, true); val = yang_dnode_get_string(args->dnode, "."); switch (args->event) { case NB_EV_VALIDATE: - fa = flex_algo_lookup(area->flex_algos, algorithm); - if (!fa) { - snprintf(args->errmsg, args->errmsg_len, - "flex-algo object not found"); - return NB_ERR_RESOURCE; - } map = affinity_map_get(val); if (!map) { snprintf(args->errmsg, args->errmsg_len, @@ -3080,6 +3064,9 @@ isis_instance_flex_algo_affinity_unset(struct nb_cb_destroy_args *args, case NB_EV_ABORT: break; case NB_EV_APPLY: + algorithm = yang_dnode_get_uint32(args->dnode, + "../../flex-algo"); + area = nb_running_get_entry(args->dnode, NULL, true); fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3169,19 +3156,16 @@ int isis_instance_flex_algo_affinity_exclude_any_destroy( int isis_instance_flex_algo_prefix_metric_create(struct nb_cb_create_args *args) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; - area_tag = yang_dnode_get_string(args->dnode, "../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3204,19 +3188,17 @@ int isis_instance_flex_algo_prefix_metric_destroy( struct nb_cb_destroy_args *args) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; - area_tag = yang_dnode_get_string(args->dnode, "../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; + fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3239,19 +3221,17 @@ static int isis_instance_flex_algo_dplane_set(struct nb_cb_create_args *args, int type) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; - area_tag = yang_dnode_get_string(args->dnode, 
"../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; + fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3281,19 +3261,17 @@ static int isis_instance_flex_algo_dplane_unset(struct nb_cb_destroy_args *args, int type) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; - area_tag = yang_dnode_get_string(args->dnode, "../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; + fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3363,21 +3341,19 @@ int isis_instance_flex_algo_dplane_ip_destroy(struct nb_cb_destroy_args *args) int isis_instance_flex_algo_metric_type_modify(struct nb_cb_modify_args *args) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; enum flex_algo_metric_type metric_type; - area_tag = yang_dnode_get_string(args->dnode, "../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); metric_type = yang_dnode_get_enum(args->dnode, NULL); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; + fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3403,21 +3379,19 @@ int isis_instance_flex_algo_metric_type_modify(struct nb_cb_modify_args *args) int isis_instance_flex_algo_priority_modify(struct nb_cb_modify_args *args) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; uint32_t priority; - area_tag = yang_dnode_get_string(args->dnode, "../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); priority = yang_dnode_get_uint32(args->dnode, NULL); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; + fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, args->errmsg_len, @@ -3439,21 +3413,19 @@ int isis_instance_flex_algo_priority_modify(struct nb_cb_modify_args *args) int isis_instance_flex_algo_priority_destroy(struct nb_cb_destroy_args *args) { struct isis_area *area; - const char *area_tag; struct flex_algo *fa; uint32_t algorithm; uint32_t priority = FLEX_ALGO_PRIO_DEFAULT; - area_tag = yang_dnode_get_string(args->dnode, "../../../area-tag"); - area = isis_area_lookup(area_tag, VRF_DEFAULT); - if (!area) - return NB_ERR_RESOURCE; - algorithm = yang_dnode_get_uint32(args->dnode, "../flex-algo"); priority = yang_dnode_get_uint32(args->dnode, NULL); switch (args->event) { case NB_EV_APPLY: + area = nb_running_get_entry(args->dnode, NULL, true); + if (!area) + return NB_ERR_RESOURCE; + fa = flex_algo_lookup(area->flex_algos, algorithm); if (!fa) { snprintf(args->errmsg, 
args->errmsg_len, diff --git a/isisd/isis_nb_state.c b/isisd/isis_nb_state.c index b7c33ed27b..da61bcced3 100644 --- a/isisd/isis_nb_state.c +++ b/isisd/isis_nb_state.c @@ -98,6 +98,8 @@ const void *lib_interface_state_isis_adjacencies_adjacency_get_next( * adjacencies list. */ list = circuit->u.bc.adjdb[ISIS_LEVEL2 - 1]; + if (!list) + break; adj_next = listnode_head(list); } break; diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c index 86302076f8..8fc0f144b2 100644 --- a/isisd/isis_spf.c +++ b/isisd/isis_spf.c @@ -1469,14 +1469,13 @@ static void spf_adj_list_parse_tlv(struct isis_spftree *spftree, sadj->metric = metric; if (oldmetric) SET_FLAG(flags, F_ISIS_SPF_ADJ_OLDMETRIC); + if ((oldmetric && sadj->metric == ISIS_NARROW_METRIC_INFINITY) || + (!oldmetric && sadj->metric == ISIS_WIDE_METRIC_INFINITY)) + SET_FLAG(flags, F_ISIS_SPF_ADJ_METRIC_INFINITY); sadj->lsp = lsp; sadj->subtlvs = subtlvs; sadj->flags = flags; - if ((oldmetric && metric == ISIS_NARROW_METRIC_INFINITY) - || (!oldmetric && metric == ISIS_WIDE_METRIC_INFINITY)) - SET_FLAG(flags, F_ISIS_SPF_ADJ_METRIC_INFINITY); - /* Set real adjacency. */ if (!CHECK_FLAG(spftree->flags, F_SPFTREE_NO_ADJACENCIES) && !LSP_PSEUDO_ID(id)) { @@ -2337,7 +2336,7 @@ static void isis_print_paths(struct vty *vty, struct isis_vertex_queue *queue, if (json == NULL) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } else *json = ttable_json_with_json_text( tt, "ssdsss", @@ -3016,7 +3015,7 @@ void isis_print_routes(struct vty *vty, struct isis_spftree *spftree, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } else if (json) { *json = ttable_json_with_json_text( tt, prefix_sid ? "sdssdsdd" : "sdsss", @@ -3458,7 +3457,7 @@ static void isis_print_frr_summary(struct vty *vty, /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } diff --git a/isisd/isis_sr.c b/isisd/isis_sr.c index f783038006..71e0f56e03 100644 --- a/isisd/isis_sr.c +++ b/isisd/isis_sr.c @@ -1064,7 +1064,7 @@ static void show_node(struct vty *vty, struct isis_area *area, int level, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } ttable_del(tt); } diff --git a/isisd/isis_srv6.c b/isisd/isis_srv6.c index b5974b1a62..2348bd043a 100644 --- a/isisd/isis_srv6.c +++ b/isisd/isis_srv6.c @@ -591,7 +591,7 @@ static void show_node(struct vty *vty, struct isis_area *area, int level) table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } ttable_del(tt); } diff --git a/isisd/isis_te.c b/isisd/isis_te.c index a550d59387..c9af39ce5a 100644 --- a/isisd/isis_te.c +++ b/isisd/isis_te.c @@ -331,7 +331,7 @@ void isis_link_params_update(struct isis_circuit *circuit, return; /* Sanity Check */ - if ((ifp == NULL) || (circuit->state != C_STATE_UP)) + if (ifp == NULL) return; te_debug("ISIS-TE(%s): Update circuit parameters for interface %s", diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c index c7f45b2469..b5caf396c1 100644 --- a/isisd/isis_tlvs.c +++ b/isisd/isis_tlvs.c @@ -565,10 +565,6 @@ static void format_item_asla_subtlvs(struct isis_asla_subtlvs *asla, asla->use_bw); } -#if CONFDATE > 20240916 -CPP_NOTICE("Remove JSON in '-' format") -#endif - /* mtid parameter is used to manage multi-topology i.e. 
IPv4 / IPv6 */ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, struct sbuf *buf, struct json_object *json, @@ -585,10 +581,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { snprintfrr(aux_buf, sizeof(aux_buf), "0x%x", exts->adm_group); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "adm-group", aux_buf); json_object_string_add(json, "admGroup", aux_buf); } else { sbuf_push(buf, indent, "Admin Group: 0x%08x\n", @@ -639,13 +631,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, } if (IS_SUBTLV(exts, EXT_LLRI)) { if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_int_add(json, "link-local-id", - exts->local_llri); - json_object_int_add(json, "link-remote-id", - exts->remote_llri); json_object_int_add(json, "linkLocalId", exts->local_llri); json_object_int_add(json, "linkRemoteId", @@ -661,10 +646,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { inet_ntop(AF_INET, &exts->local_addr, aux_buf, sizeof(aux_buf)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "local-iface-ip", aux_buf); json_object_string_add(json, "localIfaceIp", aux_buf); } else sbuf_push(buf, indent, @@ -675,11 +656,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { inet_ntop(AF_INET, &exts->neigh_addr, aux_buf, sizeof(aux_buf)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "remote-iface-ip", - aux_buf); json_object_string_add(json, "remoteIfaceIp", aux_buf); } else sbuf_push(buf, indent, @@ -690,11 +666,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { inet_ntop(AF_INET6, &exts->local_addr6, aux_buf, sizeof(aux_buf)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "local-iface-ipv6", - aux_buf); json_object_string_add(json, "localIfaceIpv6", aux_buf); } else sbuf_push(buf, indent, @@ -705,11 +676,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { inet_ntop(AF_INET6, &exts->neigh_addr6, aux_buf, sizeof(aux_buf)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "remote-iface-ipv6", - aux_buf); json_object_string_add(json, "remoteIfaceIpv6", aux_buf); } else sbuf_push(buf, indent, @@ -720,11 +686,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { snprintfrr(aux_buf, sizeof(aux_buf), "%g", exts->max_bw); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "max-bandwith-bytes-sec", - aux_buf); json_object_string_add(json, "maxBandwithBytesSec", aux_buf); } else @@ -736,11 +697,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { snprintfrr(aux_buf, sizeof(aux_buf), "%g", exts->max_rsv_bw); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add( - json, "max-res-bandwith-bytes-sec", aux_buf); json_object_string_add(json, "maxResBandwithBytesSec", aux_buf); } else @@ -763,22 +719,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, json_object_string_add(unrsv_json, cnt_buf, aux_buf); } - -#if CONFDATE > 20240916 - 
CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - unrsv_json = json_object_new_object(); - json_object_object_add(json, "unrsv-bandwith-bytes-sec", - unrsv_json); - for (int j = 0; j < MAX_CLASS_TYPE; j += 1) { - snprintfrr(cnt_buf, sizeof(cnt_buf), "%d", j); - snprintfrr(aux_buf, sizeof(aux_buf), "%g", - exts->unrsv_bw[j]); - json_object_string_add(unrsv_json, cnt_buf, - aux_buf); - } - /* end old deprecated key format */ } else { sbuf_push(buf, indent, "Unreserved Bandwidth:\n"); for (int j = 0; j < MAX_CLASS_TYPE; j += 2) { @@ -791,27 +731,18 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, } } if (IS_SUBTLV(exts, EXT_TE_METRIC)) { - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_int_add(json, "te-metric", exts->te_metric); + if (json) json_object_int_add(json, "teMetric", exts->te_metric); - } else + else sbuf_push(buf, indent, "Traffic Engineering Metric: %u\n", exts->te_metric); } if (IS_SUBTLV(exts, EXT_RMT_AS)) { - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_int_add(json, "inter-as-te-remote-as", - exts->remote_as); + if (json) json_object_int_add(json, "interAsTeRemoteAs", exts->remote_as); - } else + else sbuf_push(buf, indent, "Inter-AS TE Remote AS number: %u\n", exts->remote_as); @@ -820,11 +751,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { inet_ntop(AF_INET6, &exts->remote_ip, aux_buf, sizeof(aux_buf)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add( - json, "inter-as-te-remote-asbr-ip", aux_buf); json_object_string_add(json, "interAsTeRemoteAsbrIp", aux_buf); } else @@ -836,16 +762,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (IS_SUBTLV(exts, EXT_DELAY)) { if (json) { struct json_object *avg_json; - avg_json = json_object_new_object(); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_object_add(json, "avg-delay", avg_json); - json_object_string_add(avg_json, "delay", - IS_ANORMAL(exts->delay) - ? "Anomalous" - : "Normal"); - json_object_int_add(avg_json, "micro-sec", exts->delay); avg_json = json_object_new_object(); json_object_object_add(json, "avgDelay", avg_json); @@ -864,19 +780,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (IS_SUBTLV(exts, EXT_MM_DELAY)) { if (json) { struct json_object *avg_json; - avg_json = json_object_new_object(); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_object_add(json, "max-min-delay", avg_json); - json_object_string_add(avg_json, "delay", - IS_ANORMAL(exts->min_delay) - ? 
"Anomalous" - : "Normal"); - snprintfrr(aux_buf, sizeof(aux_buf), "%u / %u", - exts->min_delay & TE_EXT_MASK, - exts->max_delay & TE_EXT_MASK); - json_object_string_add(avg_json, "micro-sec", aux_buf); avg_json = json_object_new_object(); json_object_object_add(json, "maxMinDelay", avg_json); @@ -899,15 +802,10 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, exts->max_delay & TE_EXT_MASK); } if (IS_SUBTLV(exts, EXT_DELAY_VAR)) { - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_int_add(json, "delay-variation-micro-sec", - exts->delay_var & TE_EXT_MASK); + if (json) json_object_int_add(json, "delayVariationMicroSec", exts->delay_var & TE_EXT_MASK); - } else + else sbuf_push(buf, indent, "Delay Variation: %u (micro-sec)\n", exts->delay_var & TE_EXT_MASK); @@ -919,20 +817,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, LOSS_PRECISION)); struct json_object *link_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - link_json = json_object_new_object(); - json_object_object_add(json, "link-packet-loss", - link_json); - json_object_string_add(link_json, "loss", - IS_ANORMAL(exts->pkt_loss) - ? "Anomalous" - : "Normal"); - /* typo */ - json_object_string_add(link_json, "percentaje", - aux_buf); - link_json = json_object_new_object(); json_object_object_add(json, "linkPacketLoss", link_json); @@ -952,12 +836,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { snprintfrr(aux_buf, sizeof(aux_buf), "%g", (exts->res_bw)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, - "unidir-residual-band-bytes-sec", - aux_buf); json_object_string_add(json, "unidirResidualBandBytesSec", aux_buf); @@ -971,12 +849,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { snprintfrr(aux_buf, sizeof(aux_buf), "%g", (exts->ava_bw)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add( - json, "unidir-available-band-bytes-sec", - aux_buf); json_object_string_add(json, "unidirAvailableBandBytesSec", aux_buf); @@ -991,12 +863,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, snprintfrr(aux_buf, sizeof(aux_buf), "%g", (exts->use_bw)); json_object_string_add(json, - "unidir-utilized-band-bytes-sec", - aux_buf); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "unidirUtilizedBandBytesSec", aux_buf); } else @@ -1012,50 +878,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { struct json_object *arr_adj_json, *adj_sid_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - arr_adj_json = json_object_new_array(); - json_object_object_add(json, "adj-sid", arr_adj_json); - for (adj = (struct isis_adj_sid *)exts->adj_sid.head; - adj; adj = adj->next) { - snprintfrr(cnt_buf, sizeof(cnt_buf), "%d", - adj->sid); - adj_sid_json = json_object_new_object(); - json_object_int_add(adj_sid_json, "sid", - adj->sid); - json_object_int_add(adj_sid_json, "weight", - adj->weight); - json_object_string_add(adj_sid_json, "flag-f", - adj->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG - ? "1" - : "0"); - json_object_string_add(adj_sid_json, "flag-b", - adj->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG - ? 
"1" - : "0"); - json_object_string_add(adj_sid_json, "flag-v", - adj->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG - ? "1" - : "0"); - json_object_string_add(adj_sid_json, "flag-l", - adj->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG - ? "1" - : "0"); - json_object_string_add(adj_sid_json, "flag-s", - adj->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG - ? "1" - : "0"); - json_object_string_add(adj_sid_json, "flag-p", - adj->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG - ? "1" - : "0"); - json_object_array_add(arr_adj_json, - adj_sid_json); - } - /* end old deprecated key format */ - arr_adj_json = json_object_new_array(); json_object_object_add(json, "adjSid", arr_adj_json); for (adj = (struct isis_adj_sid *)exts->adj_sid.head; @@ -1127,57 +949,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { struct json_object *arr_adj_json, *lan_adj_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - arr_adj_json = json_object_new_array(); - json_object_object_add(json, "lan-adj-sid", - arr_adj_json); - for (lan = (struct isis_lan_adj_sid *) - exts->adj_sid.head; - lan; lan = lan->next) { - if (((mtid == ISIS_MT_IPV4_UNICAST) && - (lan->family != AF_INET)) || - ((mtid == ISIS_MT_IPV6_UNICAST) && - (lan->family != AF_INET6))) - continue; - snprintfrr(cnt_buf, sizeof(cnt_buf), "%d", - lan->sid); - lan_adj_json = json_object_new_object(); - json_object_int_add(lan_adj_json, "sid", - lan->sid); - json_object_int_add(lan_adj_json, "weight", - lan->weight); - json_object_string_add(lan_adj_json, "flag-f", - lan->flags & EXT_SUBTLV_LINK_ADJ_SID_FFLG - ? "1" - : "0"); - json_object_string_add(lan_adj_json, "flag-b", - lan->flags & EXT_SUBTLV_LINK_ADJ_SID_BFLG - ? "1" - : "0"); - json_object_string_add(lan_adj_json, "flag-v", - lan->flags & EXT_SUBTLV_LINK_ADJ_SID_VFLG - ? "1" - : "0"); - json_object_string_add(lan_adj_json, "flag-l", - lan->flags & EXT_SUBTLV_LINK_ADJ_SID_LFLG - ? "1" - : "0"); - json_object_string_add(lan_adj_json, "flag-s", - lan->flags & EXT_SUBTLV_LINK_ADJ_SID_SFLG - ? "1" - : "0"); - json_object_string_add(lan_adj_json, "flag-p", - lan->flags & EXT_SUBTLV_LINK_ADJ_SID_PFLG - ? "1" - : "0"); - json_object_array_add(arr_adj_json, - lan_adj_json); - } - /* end old deprecated key format */ - arr_adj_json = json_object_new_array(); json_object_object_add(json, "lanAdjSid", arr_adj_json); for (lan = (struct isis_lan_adj_sid *)exts->adj_sid.head; @@ -1264,57 +1035,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, if (json) { struct json_object *arr_adj_json, *srv6_endx_sid_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - arr_adj_json = json_object_new_array(); - json_object_object_add(json, "srv6-endx-sid", - arr_adj_json); - for (adj = (struct isis_srv6_endx_sid_subtlv *) - exts->srv6_endx_sid.head; - adj; adj = adj->next) { - snprintfrr(cnt_buf, sizeof(cnt_buf), "%pI6", - &adj->sid); - srv6_endx_sid_json = json_object_new_object(); - json_object_string_addf(srv6_endx_sid_json, - "sid", "%pI6", - &adj->sid); - json_object_string_add(srv6_endx_sid_json, - "algorithm", - sr_algorithm_string( - adj->algorithm)); - json_object_int_add(srv6_endx_sid_json, - "weight", adj->weight); - json_object_string_add(srv6_endx_sid_json, - "behavior", - seg6local_action2str( - adj->behavior)); - json_object_string_add(srv6_endx_sid_json, - "flag-b", - adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG - ? 
"1" - : "0"); - json_object_string_add(srv6_endx_sid_json, - "flag-s", - adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG - ? "1" - : "0"); - json_object_string_add(srv6_endx_sid_json, - "flag-p", - adj->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG - ? "1" - : "0"); - json_object_array_add(arr_adj_json, - srv6_endx_sid_json); - if (adj->subsubtlvs) - isis_format_subsubtlvs(adj->subsubtlvs, - NULL, - srv6_endx_sid_json, - indent + 4); - } - /* end old deprecated key format */ - arr_adj_json = json_object_new_array(); json_object_object_add(json, "srv6EndXSID", arr_adj_json); @@ -1390,63 +1110,6 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, struct json_object *arr_adj_json, *srv6_lan_endx_sid_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - arr_adj_json = json_object_new_array(); - json_object_object_add(json, "srv6-lan-endx-sid", - arr_adj_json); - for (lan = (struct isis_srv6_lan_endx_sid_subtlv *) - exts->srv6_lan_endx_sid.head; - lan; lan = lan->next) { - snprintfrr(cnt_buf, sizeof(cnt_buf), "%pI6", - &lan->sid); - srv6_lan_endx_sid_json = - json_object_new_object(); - json_object_string_addf(srv6_lan_endx_sid_json, - "sid", "%pI6", - &lan->sid); - json_object_int_add(srv6_lan_endx_sid_json, - "weight", lan->weight); - json_object_string_add(srv6_lan_endx_sid_json, - "algorithm", - sr_algorithm_string( - lan->algorithm)); - json_object_int_add(srv6_lan_endx_sid_json, - "weight", lan->weight); - json_object_string_add(srv6_lan_endx_sid_json, - "behavior", - seg6local_action2str( - lan->behavior)); - json_object_string_add(srv6_lan_endx_sid_json, - "flag-b", - lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_BFLG - ? "1" - : "0"); - json_object_string_add(srv6_lan_endx_sid_json, - "flag-s", - lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_SFLG - ? "1" - : "0"); - json_object_string_add(srv6_lan_endx_sid_json, - "flag-p", - lan->flags & EXT_SUBTLV_LINK_SRV6_ENDX_SID_PFLG - ? "1" - : "0"); - json_object_string_addf(srv6_lan_endx_sid_json, - "neighbor-id", "%pSY", - lan->neighbor_id); - json_object_array_add(arr_adj_json, - srv6_lan_endx_sid_json); - if (lan->subsubtlvs) - isis_format_subsubtlvs(lan->subsubtlvs, - NULL, - srv6_lan_endx_sid_json, - indent + 4); - } - /* end old deprecated key format */ - arr_adj_json = json_object_new_array(); json_object_object_add(json, "srv6LanEndxSID", arr_adj_json); @@ -1914,8 +1577,8 @@ static int unpack_item_ext_subtlv_asla(uint16_t mtid, uint8_t subtlv_len, uint8_t sabm_flag_len; /* User-defined App Identifier Bit Flags/Length */ uint8_t uabm_flag_len; - uint8_t sabm[ASLA_APP_IDENTIFIER_BIT_LENGTH] = {0}; - uint8_t uabm[ASLA_APP_IDENTIFIER_BIT_LENGTH] = {0}; + uint8_t sabm[ASLA_APP_IDENTIFIER_BIT_MAX_LENGTH] = { 0 }; + uint8_t uabm[ASLA_APP_IDENTIFIER_BIT_MAX_LENGTH] = { 0 }; uint8_t readable = subtlv_len; uint8_t subsubtlv_type; uint8_t subsubtlv_len; @@ -1946,6 +1609,15 @@ static int unpack_item_ext_subtlv_asla(uint16_t mtid, uint8_t subtlv_len, return -1; } + if ((asla->standard_apps_length > ASLA_APP_IDENTIFIER_BIT_MAX_LENGTH) || + (asla->user_def_apps_length > ASLA_APP_IDENTIFIER_BIT_MAX_LENGTH)) { + zlog_err("Standard or User-Defined Application Identifier Bit Mask Length greater than %u bytes. 
Received respectively a length of %u and %u bytes.", + ASLA_APP_IDENTIFIER_BIT_MAX_LENGTH, + asla->standard_apps_length, asla->user_def_apps_length); + stream_forward_getp(s, readable); + return -1; + } + for (int i = 0; i < asla->standard_apps_length; i++) sabm[i] = stream_getc(s); for (int i = 0; i < asla->user_def_apps_length; i++) @@ -2604,33 +2276,6 @@ static void format_item_prefix_sid(uint16_t mtid, struct isis_item *i, } json_object_int_add(sr_json, "alg", sid->algorithm); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated non boolean json") -#endif - /* old deprecated keys (no booleans) */ - json_object_string_add( - sr_json, "readvertised", - ((sid->flags & ISIS_PREFIX_SID_READVERTISED) ? "yes" - : "")); - json_object_string_add( - sr_json, "node", - ((sid->flags & ISIS_PREFIX_SID_NODE) ? "yes" : "")); - json_object_string_add(sr_json, "php", - ((sid->flags & ISIS_PREFIX_SID_NO_PHP) - ? "no-php" - : "php")); - json_object_string_add( - sr_json, "explicit-null", - ((sid->flags & ISIS_PREFIX_SID_EXPLICIT_NULL) ? "yes" - : "")); - json_object_string_add( - sr_json, "value", - ((sid->flags & ISIS_PREFIX_SID_VALUE) ? "yes" : "")); - json_object_string_add( - sr_json, "local", - ((sid->flags & ISIS_PREFIX_SID_LOCAL) ? "yes" : "")); - /* end deprecated keys (no booleans) */ - struct json_object *flags_json; flags_json = json_object_new_object(); @@ -2779,10 +2424,6 @@ static void format_subtlv_ipv6_source_prefix(struct prefix_ipv6 *p, char prefixbuf[PREFIX2STR_BUFFER]; if (json) { prefix2str(p, prefixbuf, sizeof(prefixbuf)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "ipv6-src-prefix", prefixbuf); json_object_string_add(json, "ipv6SrcPrefix", prefixbuf); } else { sbuf_push(buf, indent, "IPv6 Source Prefix: %s\n", @@ -2886,23 +2527,6 @@ static void format_subsubtlv_srv6_sid_structure( if (json) { struct json_object *sid_struct_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - sid_struct_json = json_object_new_object(); - json_object_object_add(json, "srv6-sid-structure", - sid_struct_json); - json_object_int_add(sid_struct_json, "loc-block-len", - sid_struct->loc_block_len); - json_object_int_add(sid_struct_json, "loc-node-len", - sid_struct->loc_node_len); - json_object_int_add(sid_struct_json, "func-len", - sid_struct->func_len); - json_object_int_add(sid_struct_json, "arg-len", - sid_struct->arg_len); - /* end old deprecated key format */ - sid_struct_json = json_object_new_object(); json_object_object_add(json, "srv6SidStructure", sid_struct_json); @@ -3196,26 +2820,6 @@ static void format_item_srv6_end_sid(uint16_t mtid, struct isis_item *i, if (json) { struct json_object *sid_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - sid_json = json_object_new_object(); - json_object_object_add(json, "srv6-end-sid", sid_json); - json_object_string_add(sid_json, "endpoint-behavior", - seg6local_action2str(sid->behavior)); - json_object_string_addf(sid_json, "sid-value", "%pI6", - &sid->sid); - if (sid->subsubtlvs) { - struct json_object *subtlvs_json; - subtlvs_json = json_object_new_object(); - json_object_object_add(sid_json, "subsubtlvs", - subtlvs_json); - isis_format_subsubtlvs(sid->subsubtlvs, NULL, - subtlvs_json, 0); - } - /* end old deprecated key format */ - sid_json = json_object_new_object(); json_object_object_add(json, "srv6EndSid", sid_json); 
json_object_string_add(sid_json, "endpointBehavior", @@ -3376,13 +2980,9 @@ static void format_item_area_address(uint16_t mtid, struct isis_item *i, memcpy(iso_addr.area_addr, addr->addr, ISO_ADDR_SIZE); iso_addr.addr_len = addr->len; - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_addf(json, "area-addr", "%pIS", &iso_addr); + if (json) json_object_string_addf(json, "areaAddr", "%pIS", &iso_addr); - } else + else sbuf_push(buf, indent, "Area Address: %pIS\n", &iso_addr); } @@ -3470,22 +3070,6 @@ static void format_item_oldstyle_reach(uint16_t mtid, struct isis_item *i, if (json) { struct json_object *old_json, *array_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - old_json = json_object_new_object(); - json_object_object_get_ex(json, "old-reach-style", &array_json); - if (!array_json) { - array_json = json_object_new_array(); - json_object_object_add(json, "old-reach-style", - array_json); - } - json_object_array_add(array_json, old_json); - json_object_string_add(old_json, "is-reach", sys_id); - json_object_int_add(old_json, "metric", r->metric); - /* end old deprecated key format */ - old_json = json_object_new_object(); json_object_object_get_ex(json, "oldReachStyle", &array_json); if (!array_json) { @@ -3573,13 +3157,9 @@ static void format_item_lan_neighbor(uint16_t mtid, struct isis_item *i, char sys_id[ISO_SYSID_STRLEN]; snprintfrr(sys_id, ISO_SYSID_STRLEN, "%pSY", n->mac); - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "lan-neighbor", sys_id); + if (json) json_object_string_add(json, "lanNeighbor", sys_id); - } else + else sbuf_push(buf, indent, "LAN Neighbor: %s\n", sys_id); } @@ -3651,17 +3231,6 @@ static void format_item_lsp_entry(uint16_t mtid, struct isis_item *i, if (json) { char buf[255]; struct json_object *lsp_json; - lsp_json = json_object_new_object(); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_object_add(json, "lsp-entry", lsp_json); - json_object_string_add(lsp_json, "id", sys_id); - snprintfrr(buf,sizeof(buf),"0x%08x",e->seqno); - json_object_string_add(lsp_json, "seq", buf); - snprintfrr(buf,sizeof(buf),"0x%04hx",e->checksum); - json_object_string_add(lsp_json, "chksum", buf); - json_object_int_add(lsp_json, "lifetime", e->checksum); lsp_json = json_object_new_object(); json_object_object_add(json, "lspEntry", lsp_json); @@ -3753,31 +3322,6 @@ static void format_item_extended_reach(uint16_t mtid, struct isis_item *i, if (json) { struct json_object *reach_json, *array_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - reach_json = json_object_new_object(); - json_object_object_get_ex(json, "ext-reach", &array_json); - if (!array_json) { - array_json = json_object_new_array(); - json_object_object_add(json, "ext-reach", array_json); - } - json_object_array_add(array_json, reach_json); - json_object_string_add( - reach_json, "mt-id", - (mtid == ISIS_MT_IPV4_UNICAST) ? 
"Extended" : "MT"); - json_object_string_add(reach_json, "id", sys_id); - json_object_int_add(reach_json, "metric", r->metric); - if (mtid != ISIS_MT_IPV4_UNICAST) - json_object_string_add(reach_json, "mt-name", - isis_mtid2str(mtid)); - - if (r->subtlvs) - format_item_ext_subtlvs(r->subtlvs, NULL, reach_json, - indent + 2, mtid); - /* end old deprecated key format */ - reach_json = json_object_new_object(); json_object_object_get_ex(json, "extReach", &array_json); if (!array_json) { @@ -3926,24 +3470,6 @@ static void format_item_oldstyle_ip_reach(uint16_t mtid, struct isis_item *i, if (json) { struct json_object *old_json, *array_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - old_json = json_object_new_object(); - json_object_object_get_ex(json, "old-ip-reach-style", - &array_json); - if (!array_json) { - array_json = json_object_new_array(); - json_object_object_add(json, "old-ip-reach-style", - old_json); - } - json_object_array_add(array_json, old_json); - json_object_string_add(old_json, "prefix", - prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf))); - json_object_int_add(old_json, "metric", r->metric); - /* end old deprecated key format */ - old_json = json_object_new_object(); json_object_object_get_ex(json, "oldIpReachStyle", &array_json); if (!array_json) { @@ -4049,19 +3575,6 @@ static void format_tlv_protocols_supported(struct isis_protocols_supported *p, struct json_object *protocol_json; char buf[255]; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - protocol_json = json_object_new_object(); - json_object_object_add(json, "protocols-supported", - protocol_json); - for (uint8_t i = 0; i < p->count; i++) { - snprintfrr(buf, sizeof(buf), "%d", i); - json_object_string_add(protocol_json, buf, - nlpid2str(p->protocols[i])); - } - protocol_json = json_object_new_object(); json_object_object_add(json, "supportedProtocols", protocol_json); @@ -4070,7 +3583,6 @@ static void format_tlv_protocols_supported(struct isis_protocols_supported *p, json_object_string_add(protocol_json, buf, nlpid2str(p->protocols[i])); } - /* end old deprecated key format */ } else { sbuf_push(buf, indent, "Protocols Supported: "); for (uint8_t i = 0; i < p->count; i++) { @@ -4286,13 +3798,9 @@ static void format_item_global_ipv6_address(uint16_t mtid, struct isis_item *i, char addrbuf[INET6_ADDRSTRLEN]; inet_ntop(AF_INET6, &a->addr, addrbuf, sizeof(addrbuf)); - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "global-ipv6", addrbuf); + if (json) json_object_string_add(json, "globalIpv6", addrbuf); - } else + else sbuf_push(buf, indent, "Global IPv6 Interface Address: %s\n", addrbuf); } @@ -4374,12 +3882,6 @@ static void format_item_mt_router_info(uint16_t mtid, struct isis_item *i, json_object_string_add(mt_json, "mtDescription", isis_mtid2str(mtid)); -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated non boolean format") -#endif - json_object_string_add(mt_json, "overload", info->overload?"true":"false"); - json_object_string_add(mt_json, "attached", info->attached?"true":"false"); - json_object_boolean_add(mt_json, "overloadBit", !!info->overload); json_object_boolean_add(mt_json, "attachedbit", @@ -4466,13 +3968,9 @@ static void format_tlv_te_router_id(const struct in_addr *id, struct sbuf *buf, char addrbuf[INET_ADDRSTRLEN]; inet_ntop(AF_INET, id, addrbuf, 
sizeof(addrbuf)); - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "te-router-id", addrbuf); + if (json) json_object_string_add(json, "teRouterId", addrbuf); - } else + else sbuf_push(buf, indent, "TE Router ID: %s\n", addrbuf); } @@ -4547,37 +4045,6 @@ static void format_item_extended_ip_reach(uint16_t mtid, struct isis_item *i, char prefixbuf[PREFIX2STR_BUFFER]; if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - ext_json = json_object_new_object(); - json_object_object_get_ex(json, "ext-ip-reach", &array_json); - if (!array_json) { - array_json = json_object_new_array(); - json_object_object_add(json, "ext-ip-reach", array_json); - } - json_object_array_add(array_json, ext_json); - json_object_string_add(ext_json, "mt-id", - (mtid == ISIS_MT_IPV4_UNICAST) - ? "Extended" - : "MT"); - json_object_string_add(ext_json, "ip-reach", - prefix2str(&r->prefix, prefixbuf, - sizeof(prefixbuf))); - json_object_int_add(ext_json, "ip-reach-metric", r->metric); - json_object_string_add(ext_json, "down", r->down ? "yes" : ""); - if (mtid != ISIS_MT_IPV4_UNICAST) - json_object_string_add(ext_json, "mt-name", - isis_mtid2str(mtid)); - if (r->subtlvs) { - struct json_object *subtlv_json; - subtlv_json = json_object_new_object(); - json_object_object_add(ext_json, "subtlvs", subtlv_json); - format_subtlvs(r->subtlvs, NULL, subtlv_json, 0); - } - /* end old deprecated key format */ - ext_json = json_object_new_object(); json_object_object_get_ex(json, "extIpReach", &array_json); if (!array_json) { @@ -4860,13 +4327,9 @@ static void format_tlv_te_router_id_ipv6(const struct in6_addr *id, char addrbuf[INET6_ADDRSTRLEN]; inet_ntop(AF_INET6, id, addrbuf, sizeof(addrbuf)); - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "ipv6-te-router-id", addrbuf); + if (json) json_object_string_add(json, "ipv6TeRouterId", addrbuf); - } else + else sbuf_push(buf, indent, "IPv6 TE Router ID: %s\n", addrbuf); } @@ -4944,30 +4407,6 @@ static void format_tlv_spine_leaf(const struct isis_spine_leaf *spine_leaf, if (json) { struct json_object *spine_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated format */ - spine_json = json_object_new_object(); - json_object_object_add(json, "spine-leaf-extension", - spine_json); - if (spine_leaf->has_tier) { - snprintfrr(aux_buf, sizeof(aux_buf), "%hhu", - spine_leaf->tier); - json_object_string_add( - spine_json, "tier", - (spine_leaf->tier == ISIS_TIER_UNDEFINED) - ? "undefined" - : aux_buf); - } - json_object_string_add(spine_json, "flag-leaf", - spine_leaf->is_leaf ? "yes" : ""); - json_object_string_add(spine_json, "flag-spine", - spine_leaf->is_spine ? "yes" : ""); - json_object_string_add(spine_json, "flag-backup", - spine_leaf->is_backup ? 
"yes" : ""); - /* end old deprecated format */ - spine_json = json_object_new_object(); json_object_object_add(json, "spineLeafExtension", spine_json); if (spine_leaf->has_tier) { @@ -5127,26 +4566,6 @@ format_tlv_threeway_adj(const struct isis_threeway_adj *threeway_adj, if (json) { struct json_object *three_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - three_json = json_object_new_object(); - json_object_object_add(json, "p2p-three-way-adj", three_json); - json_object_string_add( - three_json, "state-name", - isis_threeway_state_name(threeway_adj->state)); - json_object_int_add(three_json, "state", threeway_adj->state); - json_object_int_add(three_json, "ext-local-circuit-id", - threeway_adj->local_circuit_id); - if (threeway_adj->neighbor_set) { - json_object_string_add(three_json, "neigh-system-id", - sys_id); - json_object_int_add(three_json, "neigh-ext-circuit-id", - threeway_adj->neighbor_circuit_id); - } - /* end old deprecated key format */ - three_json = json_object_new_object(); json_object_object_add(json, "p2pThreeWayAdj", three_json); json_object_string_add(three_json, "stateName", @@ -5297,40 +4716,6 @@ static void format_item_ipv6_reach(uint16_t mtid, struct isis_item *i, subtlvs_json); format_subtlvs(r->subtlvs, NULL, subtlvs_json, 0); } - -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated JSON key format */ - reach_json = json_object_new_object(); - json_object_object_get_ex(json, "ipv6-reach", &array_json); - if (!array_json) { - array_json = json_object_new_array(); - json_object_object_add(json, "ipv6-reach", array_json); - } - json_object_array_add(array_json, reach_json); - json_object_string_add(reach_json, "mt-id", - (mtid == ISIS_MT_IPV4_UNICAST) ? "" - : "mt"); - json_object_string_add( - reach_json, "prefix", - prefix2str(&r->prefix, prefixbuf, sizeof(prefixbuf))); - json_object_int_add(reach_json, "metric", r->metric); - json_object_string_add(reach_json, "down", - r->down ? "yes" : ""); - json_object_string_add(reach_json, "external", - r->external ? "yes" : ""); - if (mtid != ISIS_MT_IPV4_UNICAST) - json_object_string_add(reach_json, "mt-name", - isis_mtid2str(mtid)); - if (r->subtlvs) { - struct json_object *subtlvs_json; - subtlvs_json = json_object_new_object(); - json_object_object_add(reach_json, "subtlvs", - subtlvs_json); - format_subtlvs(r->subtlvs, NULL, subtlvs_json, 0); - } - /* end deprecated key format */ } else { sbuf_push(buf, indent, "%sIPv6 Reachability: %s (Metric: %u)%s%s", @@ -5543,22 +4928,6 @@ static void format_tlv_router_cap_json(const struct isis_router_cap *router_cap, /* Router ID and Flags */ struct json_object *cap_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* deprecated JSON key format */ - cap_json = json_object_new_object(); - json_object_object_add(json, "router-capability", cap_json); - inet_ntop(AF_INET, &router_cap->router_id, addrbuf, sizeof(addrbuf)); - json_object_string_add(cap_json, "id", addrbuf); - json_object_string_add( - cap_json, "flag-d", - router_cap->flags & ISIS_ROUTER_CAP_FLAG_D ? "1" : "0"); - json_object_string_add( - cap_json, "flag-s", - router_cap->flags & ISIS_ROUTER_CAP_FLAG_S ? 
"1" : "0"); - /* end deprecated JSON key format */ - cap_json = json_object_new_object(); json_object_object_add(json, "routerCapability", cap_json); inet_ntop(AF_INET, &router_cap->router_id, addrbuf, sizeof(addrbuf)); @@ -5573,23 +4942,6 @@ static void format_tlv_router_cap_json(const struct isis_router_cap *router_cap, if (router_cap->srgb.range_size != 0) { struct json_object *gb_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* deprecated old key format */ - gb_json = json_object_new_object(); - json_object_object_add(json, "segment-routing-gb", gb_json); - json_object_string_add(gb_json, "ipv4", - IS_SR_IPV4(&router_cap->srgb) ? "1" - : "0"); - json_object_string_add(gb_json, "ipv6", - IS_SR_IPV6(&router_cap->srgb) ? "1" - : "0"); - json_object_int_add(gb_json, "global-block-base", - router_cap->srgb.lower_bound); - json_object_int_add(gb_json, "global-block-range", - router_cap->srgb.range_size); - gb_json = json_object_new_object(); json_object_object_add(json, "segmentRoutingGb", gb_json); json_object_boolean_add(gb_json, "ipv4", @@ -5606,18 +4958,6 @@ static void format_tlv_router_cap_json(const struct isis_router_cap *router_cap, if (router_cap->srlb.range_size != 0) { struct json_object *lb_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - lb_json = json_object_new_object(); - json_object_object_add(json, "segment-routing-lb", lb_json); - json_object_int_add(lb_json, "global-block-base", - router_cap->srlb.lower_bound); - json_object_int_add(lb_json, "global-block-range", - router_cap->srlb.range_size); - /* end old deprecated key format */ - lb_json = json_object_new_object(); json_object_object_add(json, "segmentRoutingLb", lb_json); json_object_int_add(lb_json, "globalBlockBase", @@ -5631,23 +4971,6 @@ static void format_tlv_router_cap_json(const struct isis_router_cap *router_cap, char buf[255]; struct json_object *alg_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - alg_json = json_object_new_object(); - json_object_object_add(json, "segment-routing-algorithm", - alg_json); - for (int i = 0; i < SR_ALGORITHM_COUNT; i++) - if (router_cap->algo[i] != SR_ALGORITHM_UNSET) { - snprintfrr(buf, sizeof(buf), "%d", i); - json_object_string_add(alg_json, buf, - router_cap->algo[i] == 0 - ? "SPF" - : "Strict SPF"); - } - /* end old deprecated key format */ - alg_json = json_object_new_object(); json_object_object_add(json, "segmentRoutingAlgorithm", alg_json); @@ -6138,16 +5461,17 @@ static int unpack_tlv_router_cap(enum isis_tlv_context context, return 0; } - if (tlvs->router_cap) - /* Multiple Router Capability found */ - rcap = tlvs->router_cap; - else { - /* Allocate router cap structure and initialize SR Algorithms */ - rcap = XCALLOC(MTYPE_ISIS_TLV, sizeof(struct isis_router_cap)); + if (!tlvs->router_cap) { + /* First Router Capability TLV. 
+ * Allocate router cap structure and initialize SR Algorithms */ + tlvs->router_cap = XCALLOC(MTYPE_ISIS_TLV, + sizeof(struct isis_router_cap)); for (int i = 0; i < SR_ALGORITHM_COUNT; i++) - rcap->algo[i] = SR_ALGORITHM_UNSET; + tlvs->router_cap->algo[i] = SR_ALGORITHM_UNSET; } + rcap = tlvs->router_cap; + /* Get Router ID and Flags */ rcap->router_id.s_addr = stream_get_ipv4(s); rcap->flags = stream_getc(s); @@ -6169,7 +5493,6 @@ static int unpack_tlv_router_cap(enum isis_tlv_context context, log, indent, "WARNING: Router Capability subTLV length too large compared to expected size\n"); stream_forward_getp(s, STREAM_READABLE(s)); - XFREE(MTYPE_ISIS_TLV, rcap); return 0; } @@ -6480,7 +5803,6 @@ static int unpack_tlv_router_cap(enum isis_tlv_context context, } subtlv_len = subtlv_len - length - 2; } - tlvs->router_cap = rcap; return 0; } @@ -6503,24 +5825,16 @@ static void format_item_auth(uint16_t mtid, struct isis_item *i, struct isis_auth *auth = (struct isis_auth *)i; char obuf[768]; - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "test-auth", "ok"); + if (json) json_object_string_add(json, "testAuth", "ok"); - } else + else sbuf_push(buf, indent, "Authentication:\n"); switch (auth->type) { case ISIS_PASSWD_TYPE_CLEARTXT: zlog_sanitize(obuf, sizeof(obuf), auth->value, auth->length); - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "auth-pass", obuf); + if (json) json_object_string_add(json, "authPass", obuf); - } else + else sbuf_push(buf, indent, " Password: %s\n", obuf); break; case ISIS_PASSWD_TYPE_HMAC_MD5: @@ -6528,23 +5842,15 @@ static void format_item_auth(uint16_t mtid, struct isis_item *i, snprintf(obuf + 2 * j, sizeof(obuf) - 2 * j, "%02hhx", auth->value[j]); } - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "auth-hmac-md5", obuf); + if (json) json_object_string_add(json, "authHmacMd5", obuf); - } else + else sbuf_push(buf, indent, " HMAC-MD5: %s\n", obuf); break; default: - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_int_add(json, "auth-unknown", auth->type); + if (json) json_object_int_add(json, "authUnknown", auth->type); - } else + else sbuf_push(buf, indent, " Unknown (%hhu)\n", auth->type); break; @@ -6660,18 +5966,6 @@ static void format_tlv_purge_originator(struct isis_purge_originator *poi, if (json) { struct json_object *purge_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old deprecated key format */ - purge_json = json_object_new_object(); - json_object_object_add(json, "purge_originator", purge_json); - - json_object_string_add(purge_json, "id", gen_id); - if (poi->sender_set) - json_object_string_add(purge_json, "rec-from", sen_id); - /* end old deprecated key format */ - purge_json = json_object_new_object(); json_object_object_add(json, "purgeOriginator", purge_json); @@ -7221,33 +6515,6 @@ static void format_item_srv6_locator(uint16_t mtid, struct isis_item *i, if (json) { struct json_object *loc_json; -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - /* old json key format */ - loc_json = json_object_new_object(); - json_object_object_add(json, "srv6-locator", loc_json); - json_object_int_add(loc_json, "mt-id", mtid); - json_object_string_addf(loc_json, "prefix", 
"%pFX", - &loc->prefix); - json_object_int_add(loc_json, "metric", loc->metric); - json_object_string_add( - loc_json, "d-flag", - CHECK_FLAG(loc->flags, ISIS_TLV_SRV6_LOCATOR_FLAG_D) - ? "yes" - : ""); - json_object_int_add(loc_json, "algorithm", loc->algorithm); - json_object_string_add(loc_json, "mt-name", - isis_mtid2str(mtid)); - if (loc->subtlvs) { - struct json_object *subtlvs_json; - subtlvs_json = json_object_new_object(); - json_object_object_add(loc_json, "subtlvs", - subtlvs_json); - format_subtlvs(loc->subtlvs, NULL, subtlvs_json, 0); - } - /* old deprecated key format */ - loc_json = json_object_new_object(); json_object_object_add(json, "srv6Locator", loc_json); json_object_int_add(loc_json, "mtId", mtid); @@ -7549,13 +6816,9 @@ static void format_tlvs(struct isis_tlvs *tlvs, struct sbuf *buf, struct json_ob &tlvs->area_addresses, buf, json, indent); if (tlvs->mt_router_info_empty) { - if (json) { -#if CONFDATE > 20240916 - CPP_NOTICE("remove deprecated key format with -") -#endif - json_object_string_add(json, "mt-router-info", "none"); + if (json) json_object_object_add(json, "mtRouterInfo", NULL); - } else + else sbuf_push(buf, indent, "MT Router Info: None\n"); } else { format_items(ISIS_CONTEXT_LSP, ISIS_TLV_MT_ROUTER_INFO, diff --git a/isisd/isis_tlvs.h b/isisd/isis_tlvs.h index 6ecd4c5f6a..c64bbf7f69 100644 --- a/isisd/isis_tlvs.h +++ b/isisd/isis_tlvs.h @@ -717,6 +717,7 @@ struct isis_ext_subtlvs { #define ISIS_SABM_FLAG_X 0x10 /* Flex-Algorithm - RFC9350 */ #define ASLA_APP_IDENTIFIER_BIT_LENGTH 1 +#define ASLA_APP_IDENTIFIER_BIT_MAX_LENGTH 8 #define ASLA_LEGACY_FLAG 0x80 #define ASLA_APPS_LENGTH_MASK 0x7f diff --git a/lib/hash.c b/lib/hash.c index df56243985..edbfeec464 100644 --- a/lib/hash.c +++ b/lib/hash.c @@ -444,7 +444,7 @@ DEFUN_NOSH(show_hash_stats, ttable_colseps(tt, 0, RIGHT, true, '|'); char *table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } else vty_out(vty, "No named hash tables to display.\n"); diff --git a/lib/libfrr.c b/lib/libfrr.c index 0a575abac6..a1982841d3 100644 --- a/lib/libfrr.c +++ b/lib/libfrr.c @@ -1125,9 +1125,12 @@ static void frr_terminal_close(int isexit) * don't redirect when stdout is set with --log stdout */ for (fd = 2; fd >= 0; fd--) - if (isatty(fd) && - (fd != STDOUT_FILENO || !logging_to_stdout)) + if (logging_to_stdout && isatty(fd) && + fd == STDOUT_FILENO) { + /* Do nothing. */ + } else { dup2(nullfd, fd); + } close(nullfd); } } @@ -1213,9 +1216,12 @@ void frr_run(struct event_loop *master) * stdout */ for (fd = 2; fd >= 0; fd--) - if (isatty(fd) && - (fd != STDOUT_FILENO || !logging_to_stdout)) + if (logging_to_stdout && isatty(fd) && + fd == STDOUT_FILENO) { + /* Do nothing. 
*/ + } else { dup2(nullfd, fd); + } close(nullfd); } diff --git a/lib/memory.c b/lib/memory.c index 8fbe5c4093..ac39516edd 100644 --- a/lib/memory.c +++ b/lib/memory.c @@ -25,6 +25,7 @@ struct memgroup **mg_insert = &mg_first; DEFINE_MGROUP(LIB, "libfrr"); DEFINE_MTYPE(LIB, TMP, "Temporary memory"); +DEFINE_MTYPE(LIB, TMP_TTABLE, "Temporary memory for TTABLE"); DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory"); static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr) diff --git a/lib/memory.h b/lib/memory.h index 65b99a5fc9..8e8c61da04 100644 --- a/lib/memory.h +++ b/lib/memory.h @@ -138,6 +138,7 @@ struct memgroup { DECLARE_MGROUP(LIB); DECLARE_MTYPE(TMP); +DECLARE_MTYPE(TMP_TTABLE); extern void *qmalloc(struct memtype *mt, size_t size) diff --git a/lib/mgmt_msg_native.h b/lib/mgmt_msg_native.h index 76a52658cd..ef03b66edc 100644 --- a/lib/mgmt_msg_native.h +++ b/lib/mgmt_msg_native.h @@ -383,11 +383,18 @@ _Static_assert(sizeof(struct mgmt_msg_edit) == /** * struct mgmt_msg_edit_reply - frontend edit reply. * - * @data: the xpath of the data node that was created. + * @changed: If true then changes in datastore resulted. + * @created: If true then object was newly created (non-existing before) + * @data: @vsplit values, second value may be zero len. + * @data: [0] the xpath of the data node that was created. + * @data: [1] Possible structured data to pass back to client (e.g., non-"error" + * yang modeled error data). */ struct mgmt_msg_edit_reply { struct mgmt_msg_header; - uint8_t resv2[8]; + uint8_t changed; + uint8_t created; + uint8_t resv2[6]; alignas(8) char data[]; }; diff --git a/lib/nexthop.c b/lib/nexthop.c index ac22e7ec84..98b05295b9 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -713,6 +713,15 @@ struct nexthop *nexthop_next(const struct nexthop *nexthop) return NULL; } +struct nexthop *nexthop_next_resolution(const struct nexthop *nexthop, + bool nexthop_resolution) +{ + if (nexthop_resolution) + return nexthop_next(nexthop); + /* no resolution attempt */ + return nexthop->next; +} + /* Return the next nexthop in the tree that is resolved and active */ struct nexthop *nexthop_next_active_resolved(const struct nexthop *nexthop) { diff --git a/lib/nexthop.h b/lib/nexthop.h index 15cfb26d82..02ea4d96f2 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -225,6 +225,8 @@ extern bool nexthop_labels_match(const struct nexthop *nh1, extern const char *nexthop2str(const struct nexthop *nexthop, char *str, int size); extern struct nexthop *nexthop_next(const struct nexthop *nexthop); +extern struct nexthop *nexthop_next_resolution(const struct nexthop *nexthop, + bool nexthop_resolution); extern struct nexthop * nexthop_next_active_resolved(const struct nexthop *nexthop); extern unsigned int nexthop_level(const struct nexthop *nexthop); diff --git a/lib/northbound.c b/lib/northbound.c index 0bc79d0277..2dae21341e 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -178,7 +178,7 @@ struct nb_node *nb_node_find(const char *path) struct nb_node **nb_nodes_find(const char *xpath) { - struct lysc_node **snodes = NULL; + const struct lysc_node **snodes = NULL; struct nb_node **nb_nodes = NULL; bool simple; LY_ERR err; @@ -816,8 +816,9 @@ int nb_candidate_edit(struct nb_config *candidate, const struct nb_node *nb_node static int nb_candidate_edit_tree_add(struct nb_config *candidate, enum nb_operation operation, LYD_FORMAT format, const char *xpath, - const char *data, char *xpath_created, - char *errmsg, size_t errmsg_len) + const char *data, bool *created, + char 
*xpath_created, char *errmsg, + size_t errmsg_len) { struct lyd_node *tree = NULL; struct lyd_node *parent = NULL; @@ -897,10 +898,18 @@ static int nb_candidate_edit_tree_add(struct nb_config *candidate, } /* check if the node already exists in candidate */ - if (operation == NB_OP_CREATE_EXCL || operation == NB_OP_REPLACE) { + if (operation == NB_OP_CREATE || operation == NB_OP_MODIFY) + existing = yang_dnode_get(candidate->dnode, xpath_created); + else if (operation == NB_OP_CREATE_EXCL || operation == NB_OP_REPLACE) { existing = yang_dnode_get(candidate->dnode, xpath_created); /* if the existing node is implicit default, ignore */ + /* Q: Is this correct for CREATE_EXCL which is supposed to error + * if the resouurce already exists? This is used by RESTCONF + * when processing the POST command, for example. RFC8040 + * doesn't say POST fails if resource exists "unless it was a + * default". + */ if (existing && (existing->flags & LYD_DEFAULT)) existing = NULL; @@ -908,7 +917,7 @@ static int nb_candidate_edit_tree_add(struct nb_config *candidate, if (operation == NB_OP_CREATE_EXCL) { snprintf(errmsg, errmsg_len, "Data already exists"); - ret = NB_ERR; + ret = NB_ERR_EXISTS; goto done; } @@ -930,7 +939,7 @@ static int nb_candidate_edit_tree_add(struct nb_config *candidate, LYD_MERGE_DESTRUCT | LYD_MERGE_WITH_FLAGS); if (err) { /* if replace failed, restore the original node */ - if (existing) { + if (existing && operation == NB_OP_REPLACE) { if (root) { /* Restoring the whole config. */ candidate->dnode = existing; @@ -954,6 +963,8 @@ static int nb_candidate_edit_tree_add(struct nb_config *candidate, ret = NB_ERR; goto done; } else { + if (!existing) + *created = true; /* * Free existing node after replace. * We're using `lyd_free_siblings` here to free the whole @@ -961,7 +972,7 @@ static int nb_candidate_edit_tree_add(struct nb_config *candidate, * siblings if it wasn't root, because the existing node * was unlinked from the tree. 
*/ - if (existing) + if (existing && operation == NB_OP_REPLACE) lyd_free_siblings(existing); tree = NULL; /* LYD_MERGE_DESTRUCT deleted the tree */ @@ -995,7 +1006,7 @@ static int nb_candidate_edit_tree_del(struct nb_config *candidate, if (!dnode || (dnode->flags & LYD_DEFAULT)) { if (operation == NB_OP_DELETE) { snprintf(errmsg, errmsg_len, "Data missing"); - return NB_ERR; + return NB_ERR_NOT_FOUND; } else return NB_OK; } @@ -1011,7 +1022,7 @@ static int nb_candidate_edit_tree_del(struct nb_config *candidate, int nb_candidate_edit_tree(struct nb_config *candidate, enum nb_operation operation, LYD_FORMAT format, - const char *xpath, const char *data, + const char *xpath, const char *data, bool *created, char *xpath_created, char *errmsg, size_t errmsg_len) { int ret = NB_ERR; @@ -1022,8 +1033,9 @@ int nb_candidate_edit_tree(struct nb_config *candidate, case NB_OP_MODIFY: case NB_OP_REPLACE: ret = nb_candidate_edit_tree_add(candidate, operation, format, - xpath, data, xpath_created, - errmsg, errmsg_len); + xpath, data, created, + xpath_created, errmsg, + errmsg_len); break; case NB_OP_DESTROY: case NB_OP_DELETE: @@ -2605,6 +2617,8 @@ const char *nb_err_name(enum nb_error error) return "no changes"; case NB_ERR_NOT_FOUND: return "element not found"; + case NB_ERR_EXISTS: + return "element already exists"; case NB_ERR_LOCKED: return "resource is locked"; case NB_ERR_VALIDATION: diff --git a/lib/northbound.h b/lib/northbound.h index da5f5be1ee..dd3fbf8f73 100644 --- a/lib/northbound.h +++ b/lib/northbound.h @@ -678,6 +678,7 @@ enum nb_error { NB_ERR, NB_ERR_NO_CHANGES, NB_ERR_NOT_FOUND, + NB_ERR_EXISTS, NB_ERR_LOCKED, NB_ERR_VALIDATION, NB_ERR_RESOURCE, @@ -1015,6 +1016,9 @@ extern int nb_candidate_edit(struct nb_config *candidate, * data * New data tree for the node. * + * created + * OUT param set accordingly if a node was created or just updated + * * xpath_created * XPath of the created node if operation is "create". * @@ -1029,9 +1033,9 @@ extern int nb_candidate_edit(struct nb_config *candidate, * - NB_ERR for other errors. 
*/ extern int nb_candidate_edit_tree(struct nb_config *candidate, - enum nb_operation operation, - LYD_FORMAT format, const char *xpath, - const char *data, char *xpath_created, + enum nb_operation operation, LYD_FORMAT format, + const char *xpath, const char *data, + bool *created, char *xpath_created, char *errmsg, size_t errmsg_len); /* @@ -1712,6 +1716,7 @@ extern void nb_terminate(void); extern void nb_oper_init(struct event_loop *loop); extern void nb_oper_terminate(void); +extern bool nb_oper_is_yang_lib_query(const char *xpath); #ifdef __cplusplus } diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c index f70bae9ed2..f9794bee3c 100644 --- a/lib/northbound_cli.c +++ b/lib/northbound_cli.c @@ -1386,7 +1386,7 @@ static int nb_cli_show_transactions(struct vty *vty) table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } else vty_out(vty, "No configuration transactions to display.\n\n"); @@ -1667,7 +1667,7 @@ DEFPY (show_yang_module, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } else vty_out(vty, "No YANG modules to display.\n\n"); @@ -1777,7 +1777,7 @@ DEFPY (show_yang_module_translator, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } else vty_out(vty, "No YANG module translators to display.\n\n"); diff --git a/lib/northbound_oper.c b/lib/northbound_oper.c index 5f38c970c7..a3ff360780 100644 --- a/lib/northbound_oper.c +++ b/lib/northbound_oper.c @@ -751,8 +751,8 @@ static const struct lysc_node *nb_op_sib_next(struct nb_op_yield_state *ys, /* * If the node info stack is shorter than the schema path then we are - * doign specific query still on the node from the schema path (should - * match) so just return NULL (i.e., don't process siblings) + * working our way down the specific query path so just return NULL + * (i.e., don't process siblings) */ if (darr_len(ys->schema_path) > darr_len(ys->node_infos)) return NULL; @@ -760,21 +760,21 @@ static const struct lysc_node *nb_op_sib_next(struct nb_op_yield_state *ys, * If sib is on top of the node info stack then * 1) it's a container node -or- * 2) it's a list node that we were walking and we've reach the last entry - * 3) if sib is a list and the list was empty we never would have + * + * If sib is a list and the list was empty we never would have * pushed sib on the stack so the top of the stack is the parent * * If the query string included this node then we do not process any * siblings as we are not walking all the parent's children just this * specified one give by the query string. 
*/ - if (sib == darr_last(ys->node_infos)->schema && - darr_len(ys->schema_path) >= darr_len(ys->node_infos)) - return NULL; - /* case (3) */ - else if (sib->nodetype == LYS_LIST && - parent == darr_last(ys->node_infos)->schema && - darr_len(ys->schema_path) > darr_len(ys->node_infos)) - return NULL; + if (darr_len(ys->schema_path) == darr_len(ys->node_infos)) { + struct nb_op_node_info *node_infos = darr_last(ys->node_infos); + + assert(node_infos); + if (sib == node_infos->schema) + return NULL; + } sib = __sib_next(yn, sib->next); if (sib) @@ -801,6 +801,7 @@ static const struct lysc_node *nb_op_sib_first(struct nb_op_yield_state *ys, { const struct lysc_node *sib = lysc_node_child(parent); const struct lysc_node *first_sib; + struct nb_op_node_info *last = darr_last(ys->node_infos); /* * NOTE: when we want to handle root level walks we will need to use @@ -817,10 +818,9 @@ static const struct lysc_node *nb_op_sib_first(struct nb_op_yield_state *ys, * base of the user query, return the next schema node from the query * string (schema_path). */ - if (darr_last(ys->node_infos) != NULL && - !CHECK_FLAG(darr_last(ys->node_infos)->schema->nodetype, - LYS_CASE | LYS_CHOICE)) - assert(darr_last(ys->node_infos)->schema == parent); + if (last != NULL && + !CHECK_FLAG(last->schema->nodetype, LYS_CASE | LYS_CHOICE)) + assert(last->schema == parent); if (darr_lasti(ys->node_infos) < ys->query_base_level) return ys->schema_path[darr_lasti(ys->node_infos) + 1]; @@ -908,9 +908,10 @@ static enum nb_error __walk(struct nb_op_yield_state *ys, bool is_resume) * Otherwise get the first child of the container we are walking, * starting with non-yielding children. */ - if (is_resume) + if (is_resume) { + assert(darr_last(ys->node_infos) != NULL); sib = darr_last(ys->node_infos)->schema; - else { + } else { /* * Start with non-yielding children first. 
* @@ -1477,7 +1478,8 @@ static void nb_op_walk_continue(struct event *thread) goto finish; /* otherwise we are at a resumable node */ - assert(darr_last(ys->node_infos)->has_lookup_next); + assert(darr_last(ys->node_infos) && + darr_last(ys->node_infos)->has_lookup_next); ret = __walk(ys, true); if (ret == NB_YIELD) { @@ -1739,6 +1741,16 @@ static enum nb_error nb_op_walk_start(struct nb_op_yield_state *ys) return __walk(ys, false); } +bool nb_oper_is_yang_lib_query(const char *xpath) +{ + const char *libstr = "/ietf-yang-library:"; + const unsigned long liblen = strlen(libstr); + + if (strncmp(libstr, xpath, liblen)) + return false; + + return strlen(xpath) > liblen; +} void *nb_oper_walk(const char *xpath, struct yang_translator *translator, uint32_t flags, bool should_batch, nb_oper_data_cb cb, diff --git a/lib/srv6.h b/lib/srv6.h index f25c5cfcaa..9a041e3d85 100644 --- a/lib/srv6.h +++ b/lib/srv6.h @@ -106,6 +106,10 @@ struct seg6local_context { struct in6_addr nh6; uint32_t table; struct seg6local_flavors_info flv; + uint8_t block_len; + uint8_t node_len; + uint8_t function_len; + uint8_t argument_len; }; struct srv6_locator { diff --git a/lib/termtable.c b/lib/termtable.c index 88cc25bf68..ce19701389 100644 --- a/lib/termtable.c +++ b/lib/termtable.c @@ -363,7 +363,7 @@ char *ttable_dump(struct ttable *tt, const char *newline) memcpy(&right[0], newline, nl_len); /* allocate print buffer */ - buf = XCALLOC(MTYPE_TMP, width * (nlines + 1) + 1); + buf = XCALLOC(MTYPE_TMP_TTABLE, width * (nlines + 1) + 1); pos = 0; if (tt->style.border.top_on) { diff --git a/lib/termtable.h b/lib/termtable.h index 0782c82abd..d284c4f376 100644 --- a/lib/termtable.h +++ b/lib/termtable.h @@ -270,7 +270,7 @@ void ttable_rowseps(struct ttable *tt, unsigned int row, * * Caller must free this string after use with * - * XFREE (MTYPE_TMP, str); + * XFREE (MTYPE_TMP_TTABLE, str); * * @param tt the table to dump * @param newline the desired newline sequence to use, null terminated. diff --git a/lib/yang.c b/lib/yang.c index 6c1aed00cc..14d5b118c6 100644 --- a/lib/yang.c +++ b/lib/yang.c @@ -286,7 +286,7 @@ void yang_snode_get_path(const struct lysc_node *snode, } LY_ERR yang_resolve_snode_xpath(struct ly_ctx *ly_ctx, const char *xpath, - struct lysc_node ***snodes, bool *simple) + const struct lysc_node ***snodes, bool *simple) { struct lysc_node *snode; struct ly_set *set; diff --git a/lib/yang.h b/lib/yang.h index 57131f478b..c4fc78b8ae 100644 --- a/lib/yang.h +++ b/lib/yang.h @@ -827,7 +827,8 @@ extern int yang_xpath_pop_node(char *xpath); * Return: a libyang error or LY_SUCCESS. 
*/ extern LY_ERR yang_resolve_snode_xpath(struct ly_ctx *ly_ctx, const char *xpath, - struct lysc_node ***snodes, bool *simple); + const struct lysc_node ***snodes, + bool *simple); /* * Libyang future functions diff --git a/mgmtd/mgmt.c b/mgmtd/mgmt.c index cfadad4829..02c54b9215 100644 --- a/mgmtd/mgmt.c +++ b/mgmtd/mgmt.c @@ -57,6 +57,9 @@ void mgmt_init(void) /* Initialize MGMTD Transaction module */ mgmt_txn_init(mm, mm->master); + /* Add yang-library module */ + yang_module_load("ietf-yang-library", NULL); + /* Initialize the MGMTD Frontend Adapter Module */ mgmt_fe_adapter_init(mm->master); diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c index 09d1910cee..32f28a5774 100644 --- a/mgmtd/mgmt_fe_adapter.c +++ b/mgmtd/mgmt_fe_adapter.c @@ -1164,7 +1164,9 @@ done: } static int fe_adapter_send_edit_reply(struct mgmt_fe_session_ctx *session, - uint64_t req_id, const char *xpath) + uint64_t req_id, bool changed, + bool created, const char *xpath, + const char *data) { struct mgmt_msg_edit_reply *msg; int ret; @@ -1173,14 +1175,19 @@ static int fe_adapter_send_edit_reply(struct mgmt_fe_session_ctx *session, MTYPE_MSG_NATIVE_EDIT_REPLY); msg->refer_id = session->session_id; msg->req_id = req_id; + msg->changed = changed; + msg->created = created; msg->code = MGMT_MSG_CODE_EDIT_REPLY; mgmt_msg_native_xpath_encode(msg, xpath); + if (data) + mgmt_msg_native_append(msg, data, strlen(data) + 1); + __dbg("Sending edit-reply from adapter %s to session-id %" PRIu64 - " req-id %" PRIu64 " len %u", - session->adapter->name, session->session_id, req_id, - mgmt_msg_native_get_msg_len(msg)); + " req-id %" PRIu64 " changed %u created %u len %u", + session->adapter->name, session->session_id, req_id, changed, + created, mgmt_msg_native_get_msg_len(msg)); ret = fe_adapter_send_native_msg(session->adapter, msg, mgmt_msg_native_get_msg_len(msg), @@ -1282,8 +1289,7 @@ static void fe_adapter_handle_get_data(struct mgmt_fe_session_ctx *session, void *__msg, size_t msg_len) { struct mgmt_msg_get_data *msg = __msg; - struct lysc_node **snodes = NULL; - char *xpath_resolved = NULL; + const struct lysc_node **snodes = NULL; uint64_t req_id = msg->req_id; Mgmtd__DatastoreId ds_id; uint64_t clients; @@ -1331,6 +1337,31 @@ static void fe_adapter_handle_get_data(struct mgmt_fe_session_ctx *session, goto done; } + /* Check for yang-library shortcut */ + if (nb_oper_is_yang_lib_query(msg->xpath)) { + struct lyd_node *ylib = NULL; + LY_ERR err; + + err = ly_ctx_get_yanglib_data(ly_native_ctx, &ylib, "%u", + ly_ctx_get_change_count( + ly_native_ctx)); + if (err) { + fe_adapter_send_error(session, req_id, false, err, + "Error getting yang-library data, session-id: %" PRIu64 + " error: %s", + session->session_id, + ly_last_errmsg()); + } else { + yang_lyd_trim_xpath(&ylib, msg->xpath); + (void)fe_adapter_send_tree_data(session, req_id, false, + msg->result_type, + wd_options, ylib, 0); + } + if (ylib) + lyd_free_all(ylib); + goto done; + } + switch (msg->datastore) { case MGMT_MSG_DATASTORE_CANDIDATE: ds_id = MGMTD_DS_CANDIDATE; @@ -1395,7 +1426,6 @@ static void fe_adapter_handle_get_data(struct mgmt_fe_session_ctx *session, } done: darr_free(snodes); - darr_free(xpath_resolved); } static void fe_adapter_handle_edit(struct mgmt_fe_session_ctx *session, @@ -1408,7 +1438,12 @@ static void fe_adapter_handle_edit(struct mgmt_fe_session_ctx *session, bool lock, commit; int ret; - if (msg->datastore != MGMT_MSG_DATASTORE_CANDIDATE) { + lock = CHECK_FLAG(msg->flags, EDIT_FLAG_IMPLICIT_LOCK); + commit = 
CHECK_FLAG(msg->flags, EDIT_FLAG_IMPLICIT_COMMIT); + + if (lock && commit && msg->datastore == MGMT_MSG_DATASTORE_RUNNING) + ; + else if (msg->datastore != MGMT_MSG_DATASTORE_CANDIDATE) { fe_adapter_send_error(session, msg->req_id, false, -EINVAL, "Unsupported datastore"); return; @@ -1429,9 +1464,6 @@ static void fe_adapter_handle_edit(struct mgmt_fe_session_ctx *session, rds_ctx = mgmt_ds_get_ctx_by_id(mm, rds_id); assert(rds_ctx); - lock = CHECK_FLAG(msg->flags, EDIT_FLAG_IMPLICIT_LOCK); - commit = CHECK_FLAG(msg->flags, EDIT_FLAG_IMPLICIT_COMMIT); - if (lock) { if (mgmt_fe_session_write_lock_ds(ds_id, ds_ctx, session)) { fe_adapter_send_error(session, msg->req_id, false, @@ -1977,8 +2009,8 @@ int mgmt_fe_adapter_send_rpc_reply(uint64_t session_id, uint64_t txn_id, int mgmt_fe_adapter_send_edit_reply(uint64_t session_id, uint64_t txn_id, uint64_t req_id, bool unlock, bool commit, - const char *xpath, int16_t error, - const char *errstr) + bool created, const char *xpath, + int16_t error, const char *errstr) { struct mgmt_fe_session_ctx *session; Mgmtd__DatastoreId ds_id, rds_id; @@ -2009,11 +2041,12 @@ int mgmt_fe_adapter_send_edit_reply(uint64_t session_id, uint64_t txn_id, } } - if (error) + if (error != 0 && error != -EALREADY) ret = fe_adapter_send_error(session, req_id, false, error, "%s", errstr); else - ret = fe_adapter_send_edit_reply(session, req_id, xpath); + ret = fe_adapter_send_edit_reply(session, req_id, created, + !error, xpath, errstr); if (session->cfg_txn_id != MGMTD_TXN_ID_NONE && !commit) mgmt_destroy_txn(&session->cfg_txn_id); diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h index 61d6cfae13..4d94e7604f 100644 --- a/mgmtd/mgmt_fe_adapter.h +++ b/mgmtd/mgmt_fe_adapter.h @@ -193,14 +193,16 @@ extern int mgmt_fe_adapter_send_rpc_reply(uint64_t session_id, uint64_t txn_id, * req_id: the req id for the edit message * unlock: implicit-lock flag was set in the request * commit: implicit-commit flag was set in the request - * xpath: the xpath of the data node that was created - * error: the error code, zero for successful request + * created: true if the node was just created + * xpath: the xpath of the data node that was created/updated + * error: >0 LY_ERR, < 0 -errno * errstr: the error string, if error is non-zero */ extern int mgmt_fe_adapter_send_edit_reply(uint64_t session_id, uint64_t txn_id, uint64_t req_id, bool unlock, - bool commit, const char *xpath, - int16_t error, const char *errstr); + bool commit, bool created, + const char *xpath, int16_t error, + const char *errstr); /** * Send an error back to the FE client using native messaging. @@ -210,7 +212,7 @@ extern int mgmt_fe_adapter_send_edit_reply(uint64_t session_id, uint64_t txn_id, * Args: * txn_id: the txn_id this error pertains to. * short_circuit_ok: True if OK to short-circuit the call. - * error: An integer error value. + * error: >0 LY_ERR, < 0 -errno * errfmt: An error format string (i.e., printfrr) * ...: args for use by the `errfmt` format string. 
* diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c index ed133243a1..ccfdd7539f 100644 --- a/mgmtd/mgmt_txn.c +++ b/mgmtd/mgmt_txn.c @@ -94,6 +94,7 @@ DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage); struct mgmt_edit_req { char xpath_created[XPATH_MAXLEN]; + bool created; bool unlock; }; @@ -742,6 +743,8 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn, .edit->unlock, true, txn->commit_cfg_req->req.commit_cfg + .edit->created, + txn->commit_cfg_req->req.commit_cfg .edit->xpath_created, success ? 0 : -1, error_if_any) != 0) { @@ -1335,7 +1338,8 @@ static int txn_get_tree_data_done(struct mgmt_txn_ctx *txn, " req_id %" PRIu64 " to requested type %u", txn->txn_id, req_id, get_tree->result_type); - (void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, ret, + (void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, + errno_from_nb_error(ret), "Error converting results of GETTREE"); } @@ -1351,7 +1355,7 @@ static int txn_rpc_done(struct mgmt_txn_ctx *txn, struct mgmt_txn_req *txn_req) EVENT_OFF(txn->rpc_timeout); if (rpc->errstr) - mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -1, + mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -EINVAL, rpc->errstr); else if (mgmt_fe_adapter_send_rpc_reply(txn->session_id, txn->txn_id, req_id, rpc->result_type, @@ -1360,7 +1364,8 @@ static int txn_rpc_done(struct mgmt_txn_ctx *txn, struct mgmt_txn_req *txn_req) " req_id %" PRIu64 " to requested type %u", txn->txn_id, req_id, rpc->result_type); - (void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -1, + (void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, + -EINVAL, "Error converting results of RPC"); } @@ -2564,8 +2569,8 @@ int mgmt_txn_send_edit(uint64_t txn_id, uint64_t req_id, assert(nb_config); ret = nb_candidate_edit_tree(nb_config, operation, request_type, xpath, - data, edit->xpath_created, errstr, - sizeof(errstr)); + data, &edit->created, edit->xpath_created, + errstr, sizeof(errstr)); if (ret) goto reply; @@ -2579,8 +2584,9 @@ int mgmt_txn_send_edit(uint64_t txn_id, uint64_t req_id, } reply: mgmt_fe_adapter_send_edit_reply(txn->session_id, txn->txn_id, req_id, - unlock, commit, edit->xpath_created, - ret ? -1 : 0, errstr); + unlock, commit, edit->created, + edit->xpath_created, + errno_from_nb_error(ret), errstr); XFREE(MTYPE_MGMTD_TXN_REQ, edit); diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h index b6ca288675..37dadc0171 100644 --- a/mgmtd/mgmt_txn.h +++ b/mgmtd/mgmt_txn.h @@ -69,6 +69,34 @@ static inline const char *mgmt_txn_type2str(enum mgmt_txn_type type) return "Unknown"; } + +static inline int16_t errno_from_nb_error(enum nb_error ret) +{ + switch (ret) { + case NB_OK: + return 0; + case NB_ERR_NO_CHANGES: + return -EALREADY; + case NB_ERR_NOT_FOUND: + return -ENOENT; + case NB_ERR_EXISTS: + return -EEXIST; + case NB_ERR_LOCKED: + return -EWOULDBLOCK; + case NB_ERR_VALIDATION: + return -EINVAL; + case NB_ERR_RESOURCE: + return -ENOMEM; + case NB_ERR: + case NB_ERR_INCONSISTENCY: + return -EINVAL; + case NB_YIELD: + default: + return -EINVAL; + } +} + + /* Initialise transaction module. 
*/ extern int mgmt_txn_init(struct mgmt_master *cm, struct event_loop *tm); diff --git a/nhrpd/nhrp_peer.c b/nhrpd/nhrp_peer.c index 3495317d4c..d2c1a8c401 100644 --- a/nhrpd/nhrp_peer.c +++ b/nhrpd/nhrp_peer.c @@ -597,6 +597,12 @@ static void nhrp_handle_resolution_req(struct nhrp_packet_parser *pp) nhrp_ext_complete(zb, ext); } break; + case NHRP_EXTENSION_AUTHENTICATION: + /* Extensions can be copied from original packet except + * authentication extension which must be regenerated + * hop by hop. + */ + break; default: if (nhrp_ext_reply(zb, hdr, ifp, ext, &payload) < 0) goto err; diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c index 22b6bdcec7..f202576960 100644 --- a/nhrpd/nhrp_vty.c +++ b/nhrpd/nhrp_vty.c @@ -295,10 +295,15 @@ DEFUN(tunnel_protection, tunnel_protection_cmd, } DEFUN(no_tunnel_protection, no_tunnel_protection_cmd, - "no tunnel protection", + "no tunnel protection [vici profile PROFILE [fallback-profile FALLBACK]]", NO_STR "NHRP/GRE integration\n" - "IPsec protection\n") + "IPsec protection\n" + "VICI (StrongSwan)\n" + "IPsec profile\n" + "IPsec profile name\n" + "Fallback IPsec profile\n" + "Fallback IPsec profile name\n") { VTY_DECLVAR_CONTEXT(interface, ifp); @@ -882,7 +887,7 @@ static void show_ip_nhrp_shortcut(struct nhrp_shortcut *s, void *pctx) char buf1[PREFIX_STRLEN], buf2[SU_ADDRSTRLEN]; struct json_object *json = NULL; - if (!ctx->count) { + if (!ctx->count && !ctx->json) { vty_out(vty, "%-8s %-24s %-24s %s\n", "Type", "Prefix", "Via", "Identity"); } diff --git a/ospfd/ospf_flood.c b/ospfd/ospf_flood.c index 2af4ae3170..e3398af74b 100644 --- a/ospfd/ospf_flood.c +++ b/ospfd/ospf_flood.c @@ -797,7 +797,7 @@ int ospf_flood_through_interface(struct ospf_interface *oi, ospf_ls_upd_send_lsa(nbr, lsa, OSPF_SEND_PACKET_DIRECT); } - } else + } else { /* If P2MP delayed reflooding is configured and the LSA was received from a neighbor on the P2MP interface, do not flood if back out on the interface. The LSA will be retransmitted @@ -815,9 +815,17 @@ int ospf_flood_through_interface(struct ospf_interface *oi, inbr ? &(inbr->router_id) : &(oi->ospf->router_id), IF_NAME(oi)); - } else - ospf_ls_upd_send_lsa(oi->nbr_self, lsa, - OSPF_SEND_PACKET_INDIRECT); + /* + * If reflooding is delayed, a delayed acknowledge + * should be sent since the LSA will not be immediately + * reflooded and interpreted as an implied + * acknowledgment by the sender. 
+ */ + return 1; + } + ospf_ls_upd_send_lsa(oi->nbr_self, lsa, + OSPF_SEND_PACKET_INDIRECT); + } return 0; } @@ -1094,8 +1102,13 @@ void ospf_ls_retransmit_add(struct ospf_neighbor *nbr, struct ospf_lsa *lsa) if (ls_rxmt_node->lsa_list_entry == ospf_lsa_list_first(&nbr->ls_rxmt_list)) rxmt_head_replaced = true; + + /* Keep SA happy */ + assert(ls_rxmt_node->lsa_list_entry != NULL); + ospf_lsa_list_del(&nbr->ls_rxmt_list, ls_rxmt_node->lsa_list_entry); + XFREE(MTYPE_OSPF_LSA_LIST, ls_rxmt_node->lsa_list_entry); ospf_lsdb_delete(&nbr->ls_rxmt, old); if (IS_DEBUG_OSPF(lsa, LSA_FLOODING)) @@ -1163,8 +1176,13 @@ void ospf_ls_retransmit_delete(struct ospf_neighbor *nbr, struct ospf_lsa *lsa) rxmt_timer_reset = false; lsa->retransmit_counter--; + + /* Keep SA happy */ + assert(ls_rxmt_node->lsa_list_entry != NULL); + ospf_lsa_list_del(&nbr->ls_rxmt_list, ls_rxmt_node->lsa_list_entry); + XFREE(MTYPE_OSPF_LSA_LIST, ls_rxmt_node->lsa_list_entry); ospf_lsdb_delete(&nbr->ls_rxmt, lsa); if (IS_DEBUG_OSPF(lsa, LSA_FLOODING)) diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c index e336435cbd..d35f0a1372 100644 --- a/ospfd/ospf_packet.c +++ b/ospfd/ospf_packet.c @@ -2806,9 +2806,7 @@ static enum ospf_read_return_enum ospf_read_helper(struct ospf *ospf) * or header area is backbone but ospf_interface is not * check for VLINK interface */ - if ((oi == NULL) - || (OSPF_IS_AREA_ID_BACKBONE(ospfh->area_id) - && !OSPF_IS_AREA_ID_BACKBONE(oi->area->area_id))) { + if (oi == NULL) { if ((oi = ospf_associate_packet_vl(ospf, ifp, iph, ospfh)) == NULL) { if (!ospf->instance && IS_DEBUG_OSPF_EVENT) @@ -2817,6 +2815,15 @@ static enum ospf_read_return_enum ospf_read_helper(struct ospf *ospf) &iph->ip_src, ifp->name); return OSPF_READ_CONTINUE; } + } else if (OSPF_IS_AREA_ID_BACKBONE(ospfh->area_id) && + !OSPF_IS_AREA_ID_BACKBONE(oi->area->area_id)) { + oi = ospf_associate_packet_vl(ospf, ifp, iph, ospfh); + if (oi == NULL) { + flog_warn(EC_OSPF_PACKET, + "interface %s: ospf_read invalid Area ID %pI4", + ifp->name, &ospfh->area_id); + return OSPF_READ_CONTINUE; + } } /* diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c index 198309c1ef..97dc578679 100644 --- a/ospfd/ospf_sr.c +++ b/ospfd/ospf_sr.c @@ -1459,7 +1459,8 @@ void ospf_sr_ri_lsa_update(struct ospf_lsa *lsa) /* Update Algorithm, SRLB and MSD if present */ if (algo != NULL) { int i; - for (i = 0; i < ntohs(algo->header.length); i++) + for (i = 0; + i < ntohs(algo->header.length) && i < ALGORITHM_COUNT; i++) srn->algo[i] = algo->value[0]; for (; i < ALGORITHM_COUNT; i++) srn->algo[i] = SR_ALGORITHM_UNSET; diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 13138914fa..7ae4ea04cb 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -13184,6 +13184,10 @@ static void ospf_vty_if_init(void) install_element(INTERFACE_NODE, &ip_ospf_hello_interval_cmd); install_element(INTERFACE_NODE, &no_ip_ospf_hello_interval_cmd); + /* "ip ospf graceful-restart" commands. */ + install_element(INTERFACE_NODE, &ip_ospf_gr_hdelay_cmd); + install_element(INTERFACE_NODE, &no_ip_ospf_gr_hdelay_cmd); + /* "ip ospf network" commands. 
*/ install_element(INTERFACE_NODE, &ip_ospf_network_cmd); install_element(INTERFACE_NODE, &no_ip_ospf_network_cmd); diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index 2c518f2c9e..c7cba1e20f 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -119,8 +119,9 @@ static int ospf_interface_address_delete(ZAPI_CALLBACK_ARGS) return 0; if (IS_DEBUG_OSPF(zebra, ZEBRA_INTERFACE)) - zlog_debug("Zebra: interface %s address delete %pFX", - c->ifp->name, c->address); + zlog_debug("Zebra: interface %s address delete %pFX vrf %s id %u", + c->ifp->name, c->address, + ospf_vrf_id_to_name(vrf_id), vrf_id); ifp = c->ifp; p = *c->address; @@ -261,9 +262,8 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p, if (ospf->gr_info.restart_in_progress) { if (IS_DEBUG_OSPF_GR) - zlog_debug( - "Zebra: Graceful Restart in progress -- not installing %pFX", - p); + zlog_debug("Zebra: Graceful Restart in progress -- not installing %pFX(%s)", + p, ospf_vrf_id_to_name(ospf->vrf_id)); return; } @@ -311,10 +311,10 @@ void ospf_zebra_add(struct ospf *ospf, struct prefix_ipv4 *p, ifp = if_lookup_by_index(path->ifindex, ospf->vrf_id); - zlog_debug( - "Zebra: Route add %pFX nexthop %pI4, ifindex=%d %s", - p, &path->nexthop, path->ifindex, - ifp ? ifp->name : " "); + zlog_debug("Zebra: Route add %pFX(%s) nexthop %pI4, ifindex=%d %s", + p, ospf_vrf_id_to_name(ospf->vrf_id), + &path->nexthop, path->ifindex, + ifp ? ifp->name : " "); } } @@ -331,9 +331,8 @@ void ospf_zebra_delete(struct ospf *ospf, struct prefix_ipv4 *p, if (ospf->gr_info.restart_in_progress) { if (IS_DEBUG_OSPF_GR) - zlog_debug( - "Zebra: Graceful Restart in progress -- not uninstalling %pFX", - p); + zlog_debug("Zebra: Graceful Restart in progress -- not uninstalling %pFX(%s)", + p, ospf_vrf_id_to_name(ospf->vrf_id)); return; } @@ -345,7 +344,8 @@ void ospf_zebra_delete(struct ospf *ospf, struct prefix_ipv4 *p, memcpy(&api.prefix, p, sizeof(*p)); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug("Zebra: Route delete %pFX", p); + zlog_debug("Zebra: Route delete %pFX(%s)", p, + ospf_vrf_id_to_name(ospf->vrf_id)); zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api); } @@ -356,9 +356,8 @@ void ospf_zebra_add_discard(struct ospf *ospf, struct prefix_ipv4 *p) if (ospf->gr_info.restart_in_progress) { if (IS_DEBUG_OSPF_GR) - zlog_debug( - "Zebra: Graceful Restart in progress -- not installing %pFX", - p); + zlog_debug("Zebra: Graceful Restart in progress -- not installing %pFX(%s)", + p, ospf_vrf_id_to_name(ospf->vrf_id)); return; } @@ -373,7 +372,8 @@ void ospf_zebra_add_discard(struct ospf *ospf, struct prefix_ipv4 *p) zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug("Zebra: Route add discard %pFX", p); + zlog_debug("Zebra: Route add discard %pFX(%s)", p, + ospf_vrf_id_to_name(ospf->vrf_id)); } void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p) @@ -382,9 +382,8 @@ void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p) if (ospf->gr_info.restart_in_progress) { if (IS_DEBUG_OSPF_GR) - zlog_debug( - "Zebra: Graceful Restart in progress -- not uninstalling %pFX", - p); + zlog_debug("Zebra: Graceful Restart in progress -- not uninstalling %pFX(%s)", + p, ospf_vrf_id_to_name(ospf->vrf_id)); return; } @@ -399,7 +398,8 @@ void ospf_zebra_delete_discard(struct ospf *ospf, struct prefix_ipv4 *p) zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug("Zebra: Route delete discard %pFX", 
p); + zlog_debug("Zebra: Route delete discard %pFX(%s)", p, + ospf_vrf_id_to_name(ospf->vrf_id)); } struct ospf_external *ospf_external_lookup(struct ospf *ospf, uint8_t type, @@ -475,8 +475,9 @@ bool ospf_external_default_routemap_apply_walk(struct ospf *ospf, if (ret && ei) { if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug("Default originate routemap permit ei: %pI4", - &ei->p.prefix); + zlog_debug("Default originate routemap permit ei: %pI4(%s)", + &ei->p.prefix, + ospf_vrf_id_to_name(ospf->vrf_id)); return true; } @@ -507,7 +508,8 @@ static void ospf_external_lsa_default_routemap_timer(struct event *thread) if (!default_ei) { /* Nothing to be done here. */ if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug("Default originate info not present"); + zlog_debug("Default originate info not present(%s)", + ospf_vrf_id_to_name(ospf->vrf_id)); return; } @@ -821,11 +823,11 @@ int ospf_redistribute_update(struct ospf *ospf, struct ospf_redist *red, ospf_external_lsa_refresh_type(ospf, type, instance, force); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "Redistribute[%s][%d]: Refresh Type[%d], Metric[%d]", - ospf_redist_string(type), instance, - metric_type(ospf, type, instance), - metric_value(ospf, type, instance)); + zlog_debug("Redistribute[%s][%d][%s]: Refresh Type[%d], Metric[%d]", + ospf_redist_string(type), instance, + ospf_vrf_id_to_name(ospf->vrf_id), + metric_type(ospf, type, instance), + metric_value(ospf, type, instance)); return CMD_SUCCESS; } @@ -842,11 +844,11 @@ int ospf_redistribute_set(struct ospf *ospf, struct ospf_redist *red, int type, instance, ospf->vrf_id); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "Redistribute[%s][%d] vrf id %u: Start Type[%d], Metric[%d]", - ospf_redist_string(type), instance, ospf->vrf_id, - metric_type(ospf, type, instance), - metric_value(ospf, type, instance)); + zlog_debug("Redistribute[%s][%d][%s]: Start Type[%d], Metric[%d]", + ospf_redist_string(type), instance, + ospf_vrf_id_to_name(ospf->vrf_id), + metric_type(ospf, type, instance), + metric_value(ospf, type, instance)); ospf_asbr_status_update(ospf, ++ospf->redistribute); @@ -863,8 +865,9 @@ int ospf_redistribute_unset(struct ospf *ospf, int type, instance, ospf->vrf_id); if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug("Redistribute[%s][%d] vrf id %u: Stop", - ospf_redist_string(type), instance, ospf->vrf_id); + zlog_debug("Redistribute[%s][%d][%s]: Stop", + ospf_redist_string(type), instance, + ospf_vrf_id_to_name(ospf->vrf_id)); /* Remove the routes from OSPF table. 
*/ ospf_redistribute_withdraw(ospf, type, instance); @@ -894,11 +897,11 @@ int ospf_redistribute_default_set(struct ospf *ospf, int originate, int mtype, if (cur_originate == originate) { /* Refresh the lsa since metric might different */ if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "Redistribute[%s]: Refresh Type[%d], Metric[%d]", - ospf_redist_string(DEFAULT_ROUTE), - metric_type(ospf, DEFAULT_ROUTE, 0), - metric_value(ospf, DEFAULT_ROUTE, 0)); + zlog_debug("Redistribute[%s][%s]: Refresh Type[%d], Metric[%d]", + ospf_redist_string(DEFAULT_ROUTE), + ospf_vrf_id_to_name(ospf->vrf_id), + metric_type(ospf, DEFAULT_ROUTE, 0), + metric_value(ospf, DEFAULT_ROUTE, 0)); ospf_external_lsa_refresh_default(ospf); return CMD_SUCCESS; @@ -939,10 +942,10 @@ int ospf_redistribute_default_set(struct ospf *ospf, int originate, int mtype, } if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug("Redistribute[DEFAULT]: %s Type[%d], Metric[%d]", - type_str, - metric_type(ospf, DEFAULT_ROUTE, 0), - metric_value(ospf, DEFAULT_ROUTE, 0)); + zlog_debug("Redistribute[DEFAULT][%s]: %s Type[%d], Metric[%d]", + ospf_vrf_id_to_name(ospf->vrf_id), type_str, + metric_type(ospf, DEFAULT_ROUTE, 0), + metric_value(ospf, DEFAULT_ROUTE, 0)); ospf_external_lsa_refresh_default(ospf); ospf_asbr_status_update(ospf, ospf->redistribute); @@ -1047,16 +1050,18 @@ static bool ospf_external_lsa_default_routemap_apply(struct ospf *ospf, } if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug("Apply default originate routemap on ei: %pI4 cmd: %d", - &ei->p.prefix, cmd); + zlog_debug("Apply default originate routemap on ei: %pI4(%s) cmd: %d", + &ei->p.prefix, ospf_vrf_id_to_name(ospf->vrf_id), + cmd); ret = ospf_external_info_apply_default_routemap(ospf, ei, default_ei); /* If deny then nothing to be done both in add and del case. */ if (!ret) { if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug("Default originte routemap deny for ei: %pI4", - &ei->p.prefix); + zlog_debug("Default originte routemap deny for ei: %pI4(%s)", + &ei->p.prefix, + ospf_vrf_id_to_name(ospf->vrf_id)); return false; } @@ -1068,12 +1073,14 @@ static bool ospf_external_lsa_default_routemap_apply(struct ospf *ospf, /* If permit and default already advertise then return. 
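 * (Clarifying note: a MaxAge default LSA is being flushed and therefore does
 * not count as already advertised; the code further below refreshes it.)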
*/ if (lsa && !IS_LSA_MAXAGE(lsa)) { if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug("Default lsa already originated"); + zlog_debug("Default lsa already originated(%s)", + ospf_vrf_id_to_name(ospf->vrf_id)); return true; } if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug("Originating/Refreshing default lsa"); + zlog_debug("Originating/Refreshing default lsa(%s)", + ospf_vrf_id_to_name(ospf->vrf_id)); if (lsa && IS_LSA_MAXAGE(lsa)) /* Refresh lsa.*/ @@ -1088,15 +1095,15 @@ static bool ospf_external_lsa_default_routemap_apply(struct ospf *ospf, /* If deny and lsa is not originated then nothing to be done.*/ if (!lsa) { if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug( - "Default lsa not originated, not flushing"); + zlog_debug("Default lsa not originated, not flushing(%s)", + ospf_vrf_id_to_name(ospf->vrf_id)); return true; } if (IS_DEBUG_OSPF_DEFAULT_INFO) - zlog_debug( - "Running default route-map again as ei: %pI4 deleted", - &ei->p.prefix); + zlog_debug("Running default route-map again as ei: %pI4(%s) deleted", + &ei->p.prefix, + ospf_vrf_id_to_name(ospf->vrf_id)); /* * if this route delete was permitted then we need to check * there are any other external info which can still trigger @@ -1142,9 +1149,10 @@ int ospf_redistribute_check(struct ospf *ospf, struct external_info *ei, if (access_list_apply(DISTRIBUTE_LIST(ospf, type), p) == FILTER_DENY) { if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "Redistribute[%s]: %pFX filtered by distribute-list.", - ospf_redist_string(type), p); + zlog_debug("Redistribute[%s]: %pFX(%s) filtered by distribute-list.", + ospf_redist_string(type), p, + ospf_vrf_id_to_name( + ospf->vrf_id)); return 0; } @@ -1165,9 +1173,9 @@ int ospf_redistribute_check(struct ospf *ospf, struct external_info *ei, if (ret == RMAP_DENYMATCH) { ei->route_map_set = save_values; if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "Redistribute[%s]: %pFX filtered by route-map.", - ospf_redist_string(type), p); + zlog_debug("Redistribute[%s]: %pFX(%s) filtered by route-map.", + ospf_redist_string(type), p, + ospf_vrf_id_to_name(ospf->vrf_id)); return 0; } @@ -1230,7 +1238,8 @@ static int ospf_zebra_gr_update(struct ospf *ospf, int command, int ospf_zebra_gr_enable(struct ospf *ospf, uint32_t stale_time) { if (IS_DEBUG_OSPF_GR) - zlog_debug("Zebra enable GR [stale time %u]", stale_time); + zlog_debug("Zebra enable GR [stale time %u] vrf %s", stale_time, + ospf_vrf_id_to_name(ospf->vrf_id)); return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_CAPABILITIES, stale_time); @@ -1239,7 +1248,8 @@ int ospf_zebra_gr_enable(struct ospf *ospf, uint32_t stale_time) int ospf_zebra_gr_disable(struct ospf *ospf) { if (IS_DEBUG_OSPF_GR) - zlog_debug("Zebra disable GR"); + zlog_debug("Zebra disable GR vrf: %s", + ospf_vrf_id_to_name(ospf->vrf_id)); return ospf_zebra_gr_update(ospf, ZEBRA_CLIENT_GR_DISABLE, 0); } @@ -1286,11 +1296,11 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS) rt_type = DEFAULT_ROUTE; if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "%s: cmd %s from client %s: vrf_id %d, p %pFX, metric %d", - __func__, zserv_command_string(cmd), - zebra_route_string(api.type), vrf_id, &api.prefix, - api.metric); + zlog_debug("%s: cmd %s from client %s: vrf %s(%u), p %pFX, metric %d", + __func__, zserv_command_string(cmd), + zebra_route_string(api.type), + ospf_vrf_id_to_name(vrf_id), vrf_id, &api.prefix, + api.metric); if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD) { /* XXX|HACK|TODO|FIXME: @@ -1343,11 +1353,12 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS) 
return 0; if (IS_DEBUG_OSPF(lsa, EXTNL_LSA_AGGR)) - zlog_debug( - "%s: Send Aggreate LSA (%pI4/%d)", - __func__, - &aggr->p.prefix, - aggr->p.prefixlen); + zlog_debug("%s: Send Aggreate LSA (%pI4/%d)(%s)", + __func__, + &aggr->p.prefix, + aggr->p.prefixlen, + ospf_vrf_id_to_name( + ospf->vrf_id)); ospf_originate_summary_lsa(ospf, aggr, ei); @@ -1402,10 +1413,11 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS) if (IS_DEBUG_OSPF( zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug( - "%s: %pI4 refreshing LSA", - __func__, - &p.prefix); + zlog_debug("%s: %pI4(%s) refreshing LSA", + __func__, + &p.prefix, + ospf_vrf_id_to_name( + ospf->vrf_id)); ospf_external_lsa_refresh( ospf, current, ei, LSA_REFRESH_FORCE, @@ -1464,7 +1476,8 @@ void ospf_zebra_import_default_route(struct ospf *ospf, bool unreg) if (zclient->sock < 0) { if (IS_DEBUG_OSPF(zebra, ZEBRA)) - zlog_debug(" Not connected to Zebra"); + zlog_debug(" Not connected to Zebra vrf: %s", + ospf_vrf_id_to_name(ospf->vrf_id)); return; } @@ -1477,14 +1490,14 @@ void ospf_zebra_import_default_route(struct ospf *ospf, bool unreg) command = ZEBRA_NEXTHOP_REGISTER; if (IS_DEBUG_OSPF(zebra, ZEBRA)) - zlog_debug("%s: sending cmd %s for %pFX (vrf %u)", __func__, + zlog_debug("%s: sending cmd %s for %pFX(%s)", __func__, zserv_command_string(command), &prefix, - ospf->vrf_id); + ospf_vrf_id_to_name(ospf->vrf_id)); if (zclient_send_rnh(zclient, command, &prefix, SAFI_UNICAST, false, true, ospf->vrf_id) == ZCLIENT_SEND_FAILURE) - flog_err(EC_LIB_ZAPI_SOCKET, "%s: zclient_send_rnh() failed", - __func__); + flog_err(EC_LIB_ZAPI_SOCKET, "%s(%s): zclient_send_rnh() failed", + __func__, ospf_vrf_id_to_name(ospf->vrf_id)); } static void ospf_zebra_import_check_update(struct vrf *vrf, struct prefix *match, @@ -1556,7 +1569,8 @@ static void ospf_distribute_list_update_timer(struct event *thread) ospf->t_distribute_update = NULL; - zlog_info("Zebra[Redistribute]: distribute-list update timer fired!"); + zlog_info("Zebra[Redistribute]: vrf: %s distribute-list update timer fired!", + ospf_vrf_id_to_name(ospf->vrf_id)); if (IS_DEBUG_OSPF_EVENT) { zlog_debug("%s: ospf distribute-list update vrf %s id %d", @@ -1607,10 +1621,12 @@ static void ospf_distribute_list_update_timer(struct event *thread) lsa, EXTNL_LSA_AGGR)) zlog_debug( - "%s: Send Aggregate LSA (%pI4/%d)", + "%s: Send Aggregate LSA (%pI4/%d)(%s)", __func__, &aggr->p.prefix, - aggr->p.prefixlen); + aggr->p.prefixlen, + ospf_vrf_id_to_name( + ospf->vrf_id)); /* Originate Aggregate * LSA diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c index 1d013b260e..7638e979a2 100644 --- a/ospfd/ospfd.c +++ b/ospfd/ospfd.c @@ -1098,6 +1098,15 @@ struct ospf_interface *add_ospf_interface(struct connected *co, oi->p2mp_delay_reflood = IF_DEF_PARAMS(co->ifp)->p2mp_delay_reflood; oi->p2mp_non_broadcast = IF_DEF_PARAMS(co->ifp)->p2mp_non_broadcast; + /* + * If a neighbor filter is configured, update the neighbor filter + * for the interface. + */ + if (OSPF_IF_PARAM_CONFIGURED(IF_DEF_PARAMS(co->ifp), nbr_filter_name)) + oi->nbr_filter = prefix_list_lookup(AFI_IP, + IF_DEF_PARAMS(co->ifp) + ->nbr_filter_name); + /* Add pseudo neighbor. */ ospf_nbr_self_reset(oi, oi->ospf->router_id); diff --git a/pathd/path_cli.c b/pathd/path_cli.c index 436883c1fd..bf8a9ea028 100644 --- a/pathd/path_cli.c +++ b/pathd/path_cli.c @@ -131,7 +131,7 @@ DEFPY(show_srte_policy, /* Dump the generated table. 
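 * (Note, inferred from the XFREE change below: ttable_dump() returns a heap
 * string that must be freed with the MTYPE it was allocated under, hence
 * MTYPE_TMP_TTABLE rather than MTYPE_TMP.)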
*/ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c index f7a4e0e481..f1ebdb554c 100644 --- a/pimd/pim6_cmd.c +++ b/pimd/pim6_cmd.c @@ -1259,6 +1259,62 @@ DEFPY (no_ipv6_pim_ucast_bsm, return pim_process_no_unicast_bsm_cmd(vty); } +DEFPY (pim6_bsr_candidate_bsr, + pim6_bsr_candidate_bsr_cmd, + "[no] bsr candidate-bsr [{priority (0-255)|source <address X:X::X:X|interface IFNAME|loopback$loopback|any$any>}]", + NO_STR + BSR_STR + "Make this router a Candidate BSR\n" + "BSR Priority (higher wins)\n" + "BSR Priority (higher wins)\n" + "Specify IP address for BSR operation\n" + "Local address to use\n" + "Local address to use\n" + "Interface to pick address from\n" + "Interface to pick address from\n" + "Pick highest loopback address (default)\n" + "Pick highest address from any interface\n") +{ + return pim_process_bsr_candidate_cmd(vty, FRR_PIM_CAND_BSR_XPATH, no, + false, any, ifname, address_str, + priority_str, NULL); +} + +DEFPY (pim6_bsr_candidate_rp, + pim6_bsr_candidate_rp_cmd, + "[no] bsr candidate-rp [{priority (0-255)|interval (1-4294967295)|source <address X:X::X:X|interface IFNAME|loopback$loopback|any$any>}]", + NO_STR + "Bootstrap Router configuration\n" + "Make this router a Candidate RP\n" + "RP Priority (lower wins)\n" + "RP Priority (lower wins)\n" + "Advertisement interval (seconds)\n" + "Advertisement interval (seconds)\n" + "Specify IP address for RP operation\n" + "Local address to use\n" + "Local address to use\n" + "Interface to pick address from\n" + "Interface to pick address from\n" + "Pick highest loopback address (default)\n" + "Pick highest address from any interface\n") +{ + return pim_process_bsr_candidate_cmd(vty, FRR_PIM_CAND_RP_XPATH, no, + true, any, ifname, address_str, + priority_str, interval_str); +} + +DEFPY (pim6_bsr_candidate_rp_group, + pim6_bsr_candidate_rp_group_cmd, + "[no] bsr candidate-rp group X:X::X:X/M", + NO_STR + "Bootstrap Router configuration\n" + "Make this router a Candidate RP\n" + "Configure groups to become candidate RP for\n" + "Multicast group prefix\n") +{ + return pim_process_bsr_crp_grp_cmd(vty, group_str, no); +} + DEFPY (pim6_ssmpingd, pim6_ssmpingd_cmd, "ssmpingd [X:X::X:X]$source", @@ -1719,6 +1775,90 @@ DEFPY (show_ipv6_pim_secondary, return pim_show_secondary_helper(vrf, vty); } +DEFPY (show_ipv6_pim_bsr_cand_bsr, + show_ipv6_pim_bsr_cand_bsr_cmd, + "show ipv6 pim bsr candidate-bsr [vrf NAME$vrfname] [json$json]", + SHOW_STR + IPV6_STR + PIM_STR + BSR_STR + "Current PIM router candidate BSR state\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + int idx = 2; + struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + return pim_show_bsr_cand_bsr(vrf, vty, !!json); +} + +DEFPY (show_ipv6_pim_bsr_cand_rp, + show_ipv6_pim_bsr_cand_rp_cmd, + "show ipv6 pim bsr candidate-rp [vrf VRF_NAME] [json$json]", + SHOW_STR + IPV6_STR + PIM_STR + BSR_STR + "Current PIM router candidate RP state\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + struct vrf *vrf = pim_cmd_lookup(vty, vrf_name); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + return pim_show_bsr_cand_rp(vrf, vty, !!json); +} + +DEFPY (show_ipv6_pim_bsr_rpdb, + show_ipv6_pim_bsr_rpdb_cmd, + "show ipv6 pim bsr candidate-rp-database [vrf VRF_NAME] [json$json]", + SHOW_STR + IPV6_STR + PIM_STR + BSR_STR + "Candidate RPs database on this router (if it is the BSR)\n" + VRF_CMD_HELP_STR + JSON_STR) +{ 
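+ /* Note: the candidate-RP database is only populated on the router that is
+  * currently the elected BSR (cf. "if it is the BSR" in the help string).
+  */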
+ struct vrf *vrf = pim_cmd_lookup(vty, vrf_name); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + struct pim_instance *pim = vrf->info; + struct bsm_scope *scope = &pim->global_scope; + + return pim_crp_db_show(vty, scope, !!json); +} + +DEFPY (show_ipv6_pim_bsr_groups, + show_ipv6_pim_bsr_groups_cmd, + "show ipv6 pim bsr groups [vrf VRF_NAME] [json$json]", + SHOW_STR + IPV6_STR + PIM_STR + "boot-strap router information\n" + "Candidate RP groups\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + struct vrf *vrf = pim_cmd_lookup(vty, vrf_name); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + struct pim_instance *pim = vrf->info; + struct bsm_scope *scope = &pim->global_scope; + + return pim_crp_groups_show(vty, scope, !!json); +} + + DEFPY (show_ipv6_pim_statistics, show_ipv6_pim_statistics_cmd, "show ipv6 pim [vrf NAME] statistics [interface WORD$word] [json$json]", @@ -2650,6 +2790,9 @@ void pim_cmd_init(void) install_element(PIM6_NODE, &no_pim6_rp_prefix_list_cmd); install_element(PIM6_NODE, &pim6_ssmpingd_cmd); install_element(PIM6_NODE, &no_pim6_ssmpingd_cmd); + install_element(PIM6_NODE, &pim6_bsr_candidate_rp_cmd); + install_element(PIM6_NODE, &pim6_bsr_candidate_rp_group_cmd); + install_element(PIM6_NODE, &pim6_bsr_candidate_bsr_cmd); install_element(CONFIG_NODE, &ipv6_mld_group_watermark_cmd); install_element(VRF_NODE, &ipv6_mld_group_watermark_cmd); @@ -2705,6 +2848,10 @@ void pim_cmd_init(void) install_element(VIEW_NODE, &show_ipv6_pim_rpf_cmd); install_element(VIEW_NODE, &show_ipv6_pim_rpf_vrf_all_cmd); install_element(VIEW_NODE, &show_ipv6_pim_secondary_cmd); + install_element(VIEW_NODE, &show_ipv6_pim_bsr_cand_bsr_cmd); + install_element(VIEW_NODE, &show_ipv6_pim_bsr_cand_rp_cmd); + install_element(VIEW_NODE, &show_ipv6_pim_bsr_rpdb_cmd); + install_element(VIEW_NODE, &show_ipv6_pim_bsr_groups_cmd); install_element(VIEW_NODE, &show_ipv6_pim_statistics_cmd); install_element(VIEW_NODE, &show_ipv6_pim_upstream_cmd); install_element(VIEW_NODE, &show_ipv6_pim_upstream_vrf_all_cmd); diff --git a/pimd/pim6_main.c b/pimd/pim6_main.c index 24443404eb..07b70ae2b3 100644 --- a/pimd/pim6_main.c +++ b/pimd/pim6_main.c @@ -103,6 +103,7 @@ static const struct frr_yang_module_info *const pim6d_yang_modules[] = { &frr_routing_info, &frr_pim_info, &frr_pim_rp_info, + &frr_pim_candidate_info, &frr_gmp_info, }; diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c index a39d182990..8ccf42d729 100644 --- a/pimd/pim6_mld.c +++ b/pimd/pim6_mld.c @@ -2537,7 +2537,7 @@ static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname, if (!js && !detail) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -3021,7 +3021,7 @@ static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj) /* Dump the generated table. 
*/ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index 2d451718a9..115aec8933 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -10,6 +10,17 @@ #include "config.h" #endif +#include <math.h> +#include <stdint.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <lib/network.h> +#include <lib/iana_afi.h> +#include <lib/sockunion.h> +#include <lib/sockopt.h> + #include "if.h" #include "pimd.h" #include "pim_iface.h" @@ -23,18 +34,32 @@ #include "pim_time.h" #include "pim_zebra.h" #include "pim_util.h" +#include "pim_sock.h" /* Functions forward declaration */ static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout); static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time); static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp, int hold_time); +static void pim_bsm_accept_any(struct bsm_scope *scope); +static void pim_cand_bsr_trigger(struct bsm_scope *scope, bool verbose); +static void pim_cand_bsr_pending(struct bsm_scope *scope); /* Memory Types */ DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info"); DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_INFO, "PIM BSR advertised RP info"); -DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_FRAG, "PIM BSM fragment"); +DEFINE_MTYPE(PIMD, PIM_BSM_FRAG, "PIM BSM fragment"); DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet"); +DEFINE_MTYPE_STATIC(PIMD, PIM_CAND_RP_GRP, "PIM Candidate RP group"); + +static int cand_rp_group_cmp(const struct cand_rp_group *a, + const struct cand_rp_group *b) +{ + return prefix_cmp(&a->p, &b->p); +} + +DECLARE_RBTREE_UNIQ(cand_rp_groups, struct cand_rp_group, item, + cand_rp_group_cmp); /* All bsm packets forwarded shall be fit within ip mtu less iphdr(max) */ #define MAX_IP_HDR_LEN 24 @@ -90,7 +115,7 @@ static void pim_bsm_frag_free(struct bsm_frag *bsfrag) XFREE(MTYPE_PIM_BSM_FRAG, bsfrag); } -static void pim_bsm_frags_free(struct bsm_scope *scope) +void pim_bsm_frags_free(struct bsm_scope *scope) { struct bsm_frag *bsfrag; @@ -140,12 +165,12 @@ static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt, return bsgrp; } +/* BS timer for NO_INFO, ACCEPT_ANY & ACCEPT_PREFERRED. + * Candidate BSR handling is separate further below + */ static void pim_on_bs_timer(struct event *t) { - struct route_node *rn; struct bsm_scope *scope; - struct bsgrp_node *bsgrp_node; - struct bsm_rpinfo *bsrp; scope = EVENT_ARG(t); EVENT_OFF(scope->bs_timer); @@ -154,7 +179,20 @@ static void pim_on_bs_timer(struct event *t) zlog_debug("%s: Bootstrap Timer expired for scope: %d", __func__, scope->sz_id); + assertf(scope->state <= ACCEPT_PREFERRED, "state=%d", scope->state); pim_nht_bsr_del(scope->pim, scope->current_bsr); + + pim_bsm_accept_any(scope); +} + +static void pim_bsm_accept_any(struct bsm_scope *scope) +{ + struct route_node *rn; + struct bsgrp_node *bsgrp_node; + struct bsm_rpinfo *bsrp; + + EVENT_OFF(scope->t_ebsr_regen_bsm); + /* Reset scope zone data */ scope->state = ACCEPT_ANY; scope->current_bsr = PIMADDR_ANY; @@ -181,6 +219,11 @@ static void pim_on_bs_timer(struct event *t) pim_bsm_rpinfos_free(bsgrp_node->partial_bsrp_list); bsgrp_node->pend_rp_cnt = 0; } + + /* we're leaving ACCEPT_PREFERRED, which doubles as C-BSR if we're + * configured to be a Candidate BSR. See if we're P-BSR now. 
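+ * ("P-BSR" is the Pending-BSR state of the RFC 5059 candidate-BSR election;
+ * see pim_cand_bsr_pending() below.)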
+ */ + pim_cand_bsr_trigger(scope, false); } static void pim_bs_timer_stop(struct bsm_scope *scope) @@ -212,36 +255,77 @@ static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout) pim_bs_timer_start(scope, bs_timeout); } +static void bsm_unicast_sock_read(struct event *t) +{ + struct bsm_scope *scope = EVENT_ARG(t); + + pim_sock_read_helper(scope->unicast_sock, scope->pim, false); + + event_add_read(router->master, bsm_unicast_sock_read, scope, + scope->unicast_sock, &scope->unicast_read); +} + void pim_bsm_proc_init(struct pim_instance *pim) { - memset(&pim->global_scope, 0, sizeof(struct bsm_scope)); - - pim->global_scope.sz_id = PIM_GBL_SZ_ID; - pim->global_scope.bsrp_table = route_table_init(); - pim->global_scope.accept_nofwd_bsm = true; - pim->global_scope.state = NO_INFO; - pim->global_scope.pim = pim; - bsm_frags_init(pim->global_scope.bsm_frags); - pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME); + struct bsm_scope *scope = &pim->global_scope; + + memset(scope, 0, sizeof(*scope)); + + scope->sz_id = PIM_GBL_SZ_ID; + scope->bsrp_table = route_table_init(); + scope->accept_nofwd_bsm = true; + scope->state = NO_INFO; + scope->pim = pim; + bsm_frags_init(scope->bsm_frags); + pim_bs_timer_start(scope, PIM_BS_TIME); + + scope->cand_rp_interval = PIM_CRP_ADV_INTERVAL; + cand_rp_groups_init(scope->cand_rp_groups); + + scope->unicast_sock = pim_socket_raw(IPPROTO_PIM); + set_nonblocking(scope->unicast_sock); + sockopt_reuseaddr(scope->unicast_sock); + + if (setsockopt_ifindex(PIM_AF, scope->unicast_sock, 1) == -1) + zlog_warn("%s: Without IP_PKTINFO, src interface can't be determined", + __func__); + + pim_socket_ip_hdr(scope->unicast_sock); + + frr_with_privs (&pimd_privs) { + vrf_bind(pim->vrf->vrf_id, scope->unicast_sock, NULL); + } + + event_add_read(router->master, bsm_unicast_sock_read, scope, + scope->unicast_sock, &scope->unicast_read); } void pim_bsm_proc_free(struct pim_instance *pim) { + struct bsm_scope *scope = &pim->global_scope; struct route_node *rn; struct bsgrp_node *bsgrp; + struct cand_rp_group *crpgrp; - pim_bs_timer_stop(&pim->global_scope); - pim_bsm_frags_free(&pim->global_scope); + EVENT_OFF(scope->unicast_read); + close(scope->unicast_sock); - for (rn = route_top(pim->global_scope.bsrp_table); rn; - rn = route_next(rn)) { + pim_bs_timer_stop(scope); + pim_bsm_frags_free(scope); + + for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) { bsgrp = rn->info; if (!bsgrp) continue; pim_free_bsgrp_data(bsgrp); } - route_table_finish(pim->global_scope.bsrp_table); + while ((crpgrp = cand_rp_groups_pop(scope->cand_rp_groups))) + XFREE(MTYPE_PIM_CAND_RP_GRP, crpgrp); + + cand_rp_groups_fini(scope->cand_rp_groups); + + route_table_finish(scope->bsrp_table); } static bool is_hold_time_elapsed(void *data) @@ -512,9 +596,6 @@ static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node) static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr, uint32_t bsr_prio) { - if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr)) - return true; - if (bsr_prio > pim->global_scope.current_bsr_prio) return true; @@ -523,6 +604,11 @@ static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr, return true; else return false; + } else if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr)) { + /* BSR config changed, lower prio now. 
local BSR check + * is handled separately in pim_bsm_update() + */ + return true; } else return false; } @@ -530,17 +616,52 @@ static bool is_preferred_bsr(struct pim_instance *pim, pim_addr bsr, static void pim_bsm_update(struct pim_instance *pim, pim_addr bsr, uint32_t bsr_prio) { - if (pim_addr_cmp(bsr, pim->global_scope.current_bsr)) { - pim_nht_bsr_del(pim, pim->global_scope.current_bsr); - pim_nht_bsr_add(pim, bsr); - - pim->global_scope.current_bsr = bsr; - pim->global_scope.current_bsr_first_ts = - pim_time_monotonic_sec(); - pim->global_scope.state = ACCEPT_PREFERRED; - } pim->global_scope.current_bsr_prio = bsr_prio; pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec(); + + if (pim->global_scope.bsr_addrsel.run && + pim->global_scope.cand_bsr_prio > bsr_prio && + pim->global_scope.state < BSR_PENDING) { + /* current BSR is now less preferred than ourselves */ + pim_cand_bsr_pending(&pim->global_scope); + return; + } + + if (!pim_addr_cmp(bsr, pim->global_scope.current_bsr)) + return; + + switch (pim->global_scope.state) { + case BSR_PENDING: + if (PIM_DEBUG_BSM) + zlog_debug("Candidate BSR dropping out of BSR election, better BSR (%u, %pPA)", + bsr_prio, &bsr); + break; + + case BSR_ELECTED: + if (PIM_DEBUG_BSM) + zlog_debug("Lost BSR status, better BSR (%u, %pPA)", + bsr_prio, &bsr); + break; + + case NO_INFO: + case ACCEPT_ANY: + case ACCEPT_PREFERRED: + break; + } + + EVENT_OFF(pim->global_scope.t_ebsr_regen_bsm); + + if (pim->global_scope.state == BSR_ELECTED) + pim_crp_db_clear(&pim->global_scope); + else + pim_nht_bsr_del(pim, pim->global_scope.current_bsr); + pim_nht_bsr_add(pim, bsr); + + pim->global_scope.current_bsr = bsr; + pim->global_scope.current_bsr_first_ts = pim_time_monotonic_sec(); + pim->global_scope.state = ACCEPT_PREFERRED; + + pim_cand_rp_trigger(&pim->global_scope); } void pim_bsm_clear(struct pim_instance *pim) @@ -555,7 +676,12 @@ void pim_bsm_clear(struct pim_instance *pim) struct rp_info *rp_info; bool upstream_updated = false; - pim_nht_bsr_del(pim, pim->global_scope.current_bsr); + EVENT_OFF(pim->global_scope.t_ebsr_regen_bsm); + + if (pim->global_scope.state == BSR_ELECTED) + pim_crp_db_clear(&pim->global_scope); + else + pim_nht_bsr_del(pim, pim->global_scope.current_bsr); /* Reset scope zone data */ pim->global_scope.accept_nofwd_bsm = false; @@ -1116,8 +1242,8 @@ static void pim_update_pending_rp_cnt(struct bsm_scope *sz, } /* Parsing BSR packet and adding to partial list of corresponding bsgrp node */ -static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, - int buflen, uint16_t bsm_frag_tag) +bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, + int buflen, uint16_t bsm_frag_tag) { struct bsmmsg_grpinfo grpinfo; struct bsmmsg_rpinfo rpinfo; @@ -1338,35 +1464,6 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf, } } - /* Drop if bsr is not preferred bsr */ - if (!is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) { - if (PIM_DEBUG_BSM) - zlog_debug("%s : Received a non-preferred BSM", - __func__); - pim->bsm_dropped++; - return -1; - } - - if (no_fwd) { - /* only accept no-forward BSM if quick refresh on startup */ - if ((pim->global_scope.accept_nofwd_bsm) - || (frag_tag == pim->global_scope.bsm_frag_tag)) { - pim->global_scope.accept_nofwd_bsm = false; - } else { - if (PIM_DEBUG_BSM) - zlog_debug( - "%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false", - __func__, &bsr_addr); - pim->bsm_dropped++; - pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++; - return -1; - } - 
} - - /* BSM packet is seen, so resetting accept_nofwd_bsm to false */ - if (pim->global_scope.accept_nofwd_bsm) - pim->global_scope.accept_nofwd_bsm = false; - if (!pim_addr_cmp(sg->grp, qpim_all_pim_routers_addr)) { /* Multicast BSMs are only accepted if source interface & IP * match RPF towards the BSR's IP address, or they have @@ -1403,6 +1500,57 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf, return -1; } + /* when the BSR restarts, it can get its own BSR advertisement thrown + * back at it, and without this we'll go into ACCEPT_PREFERRED with + * ourselves as the BSR when we should be in BSR_ELECTED. + */ + if (if_address_is_local(&bshdr->bsr_addr.addr, PIM_AF, + pim->vrf->vrf_id)) { + if (PIM_DEBUG_BSM) + zlog_debug("%s : Dropping BSM from ourselves", __func__); + pim->bsm_dropped++; + return -1; + } + + /* Drop if bsr is not preferred bsr */ + if (!is_preferred_bsr(pim, bsr_addr, bshdr->bsr_prio)) { + if (pim->global_scope.state == BSR_PENDING && !no_fwd) { + /* in P-BSR state, non-preferred BSMs are forwarded, but + * content is ignored. + */ + if (PIM_DEBUG_BSM) + zlog_debug("%s : Forwarding non-preferred BSM during Pending-BSR state", + __func__); + + pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz); + return -1; + } + if (PIM_DEBUG_BSM) + zlog_debug("%s : Received a non-preferred BSM", + __func__); + pim->bsm_dropped++; + return -1; + } + + if (no_fwd) { + /* only accept no-forward BSM if quick refresh on startup */ + if ((pim->global_scope.accept_nofwd_bsm) || + (frag_tag == pim->global_scope.bsm_frag_tag)) { + pim->global_scope.accept_nofwd_bsm = false; + } else { + if (PIM_DEBUG_BSM) + zlog_debug("%s : nofwd_bsm received on %pPAs when accpt_nofwd_bsm false", + __func__, &bsr_addr); + pim->bsm_dropped++; + pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++; + return -1; + } + } + + /* BSM packet is seen, so resetting accept_nofwd_bsm to false */ + if (pim->global_scope.accept_nofwd_bsm) + pim->global_scope.accept_nofwd_bsm = false; + if (empty_bsm) { if (PIM_DEBUG_BSM) zlog_debug("%s : Empty Pref BSM received", __func__); @@ -1413,9 +1561,8 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf, (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN), (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN), frag_tag)) { - if (PIM_DEBUG_BSM) { - zlog_debug("%s, Parsing BSM failed.", __func__); - } + zlog_warn("BSM from %pPA failed to parse", + (pim_addr *)&bshdr->bsr_addr.addr); pim->bsm_dropped++; return -1; } @@ -1452,7 +1599,594 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf, return 0; } -void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc) +static void pim_elec_bsr_timer(struct event *t) +{ + struct bsm_scope *scope = EVENT_ARG(t); + struct bsm_frag *frag; + struct bsm_hdr *hdr; + + assert(scope->state == BSR_ELECTED); + + scope->bsm_frag_tag++; + frag = bsm_frags_first(scope->bsm_frags); + assert(frag); + + hdr = (struct bsm_hdr *)(frag->data + PIM_MSG_HEADER_LEN); + hdr->frag_tag = htons(scope->bsm_frag_tag); + + unsigned int timer = PIM_BS_TIME; + + if (scope->changed_bsm_trigger) { + if (PIM_DEBUG_BSM) + zlog_debug("Sending triggered BSM"); + scope->changed_bsm_trigger--; + timer = 5; + } else { + if (PIM_DEBUG_BSM) + zlog_debug("Sending scheduled BSM"); + pim_bsm_sent(scope); + } + + pim_bsm_fwd_whole_sz(scope->pim, frag->data, frag->size, scope->sz_id); + scope->current_bsr_last_ts = pim_time_monotonic_sec(); + + event_add_timer(router->master, pim_elec_bsr_timer, scope, timer, + 
&scope->bs_timer); +} + +void pim_bsm_changed(struct bsm_scope *scope) +{ + struct event t; + + EVENT_OFF(scope->bs_timer); + scope->changed_bsm_trigger = 2; + + t.arg = scope; + pim_elec_bsr_timer(&t); +} + +static void pim_cand_bsr_pending_expire(struct event *t) +{ + struct bsm_scope *scope = EVENT_ARG(t); + + assertf(scope->state == BSR_PENDING, "state=%d", scope->state); + assertf(pim_addr_is_any(scope->current_bsr), "current_bsr=%pPA", + &scope->current_bsr); + + if (PIM_DEBUG_BSM) + zlog_debug("Elected BSR, wait expired without preferable BSMs"); + + scope->state = BSR_ELECTED; + scope->current_bsr_prio = scope->cand_bsr_prio; + scope->current_bsr = scope->bsr_addrsel.run_addr; + + scope->bsm_frag_tag = frr_weak_random(); + scope->current_bsr_first_ts = pim_time_monotonic_sec(); + + pim_cand_rp_trigger(scope); + pim_bsm_generate(scope); +} + +#if PIM_IPV == 6 +static float bsr_addr_delay(pim_addr best, pim_addr local) +{ + unsigned int pos; + uint32_t best_4b, local_4b; + float delay_log; + + for (pos = 0; pos < 12; pos++) { + if (best.s6_addr[pos] != local.s6_addr[pos]) + break; + } + + memcpy(&best_4b, &best.s6_addr[pos], 4); + memcpy(&local_4b, &local.s6_addr[pos], 4); + + delay_log = log2(1 + ntohl(best_4b) - ntohl(local_4b)); + delay_log += (12 - pos) * 8; + return delay_log / 64.; +} +#endif + +static void pim_cand_bsr_pending(struct bsm_scope *scope) +{ + unsigned int bs_rand_override; + uint8_t best_prio; + pim_addr best_addr; + float prio_delay, addr_delay; + + EVENT_OFF(scope->bs_timer); + EVENT_OFF(scope->t_ebsr_regen_bsm); + scope->state = BSR_PENDING; + + best_prio = MAX(scope->cand_bsr_prio, scope->current_bsr_prio); + best_addr = pim_addr_cmp(scope->bsr_addrsel.run_addr, + scope->current_bsr) > 0 + ? scope->bsr_addrsel.run_addr + : scope->current_bsr; + + /* RFC5059 sec.5 */ +#if PIM_IPV == 4 + if (scope->cand_bsr_prio == best_prio) { + prio_delay = 0.; /* log2(1) = 0 */ + addr_delay = log2(1 + ntohl(best_addr.s_addr) - + ntohl(scope->bsr_addrsel.run_addr.s_addr)) / + 16.; + } else { + prio_delay = 2. * log2(1 + best_prio - scope->cand_bsr_prio); + addr_delay = 2 - (ntohl(scope->bsr_addrsel.run_addr.s_addr) / + (float)(1 << 31)); + } +#else + if (scope->cand_bsr_prio == best_prio) { + prio_delay = 0.; /* log2(1) = 0 */ + addr_delay = bsr_addr_delay(best_addr, + scope->bsr_addrsel.run_addr); + } else { + prio_delay = 2. 
* log2(1 + best_prio - scope->cand_bsr_prio); + addr_delay = 2 - + (ntohl(scope->bsr_addrsel.run_addr.s6_addr32[0]) / + (float)(1 << 31)); + } +#endif + + bs_rand_override = 5000 + (int)((prio_delay + addr_delay) * 1000.); + + if (PIM_DEBUG_BSM) + zlog_debug("Pending-BSR (%u, %pPA), waiting %ums", + scope->cand_bsr_prio, &scope->bsr_addrsel.run_addr, + bs_rand_override); + + event_add_timer_msec(router->master, pim_cand_bsr_pending_expire, scope, + bs_rand_override, &scope->bs_timer); +} + +static inline pim_addr if_highest_addr(pim_addr cur, struct interface *ifp) +{ + struct connected *connected; + + frr_each (if_connected, ifp->connected, connected) { + pim_addr conn_addr; + + if (connected->address->family != PIM_AF) + continue; + + conn_addr = pim_addr_from_prefix(connected->address); + /* highest address */ + if (pim_addr_cmp(conn_addr, cur) > 0) + cur = conn_addr; + } + return cur; +} + +static void cand_addrsel_clear(struct cand_addrsel *asel) +{ + asel->run = false; + asel->run_addr = PIMADDR_ANY; +} + +/* returns whether address or active changed */ +static bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf) +{ + bool is_any = false, prev_run = asel->run; + struct interface *ifp = NULL; + pim_addr new_addr = PIMADDR_ANY; + + if (!asel->cfg_enable) + goto out_disable; + + switch (asel->cfg_mode) { + case CAND_ADDR_EXPLICIT: + new_addr = asel->cfg_addr; + ifp = if_lookup_address_local(&asel->cfg_addr, PIM_AF, + vrf->vrf_id); + break; + + case CAND_ADDR_IFACE: + ifp = if_lookup_by_name_vrf(asel->cfg_ifname, vrf); + + if (ifp) + new_addr = if_highest_addr(PIMADDR_ANY, ifp); + break; + + case CAND_ADDR_ANY: + is_any = true; + /* fallthru */ + case CAND_ADDR_LO: + FOR_ALL_INTERFACES (vrf, ifp) { + if (!if_is_up(ifp)) + continue; + if (is_any || if_is_loopback(ifp) || if_is_vrf(ifp)) + new_addr = if_highest_addr(new_addr, ifp); + } + break; + } + + if (ifp && !if_is_up(ifp)) + goto out_disable; + + if (pim_addr_is_any(new_addr)) + goto out_disable; + + /* nothing changed re. address (don't care about interface changes) */ + if (asel->run && !pim_addr_cmp(asel->run_addr, new_addr)) + return !prev_run; + + asel->run = true; + asel->run_addr = new_addr; + return true; + +out_disable: + asel->run = false; + asel->run_addr = PIMADDR_ANY; + + return prev_run; +} + +static void pim_cand_bsr_stop(struct bsm_scope *scope, bool verbose) +{ + cand_addrsel_clear(&scope->bsr_addrsel); + + switch (scope->state) { + case NO_INFO: + case ACCEPT_ANY: + case ACCEPT_PREFERRED: + return; + case BSR_PENDING: + case BSR_ELECTED: + break; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate BSR ceasing operation"); + + EVENT_OFF(scope->t_ebsr_regen_bsm); + EVENT_OFF(scope->bs_timer); + pim_crp_db_clear(scope); + pim_bsm_accept_any(scope); +} + +static void pim_cand_bsr_trigger(struct bsm_scope *scope, bool verbose) +{ + /* this is called on all state changes even if we aren't configured + * to be C-BSR at all. 
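+ * If we are an active candidate and no better BSR is currently known, we
+ * drop into Pending-BSR via pim_cand_bsr_pending() at the end.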
+ */ + if (!scope->bsr_addrsel.run) + return; + + if (scope->current_bsr_prio > scope->cand_bsr_prio) { + assert(scope->state == ACCEPT_PREFERRED); + if (!verbose) + return; + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate BSR: known better BSR %pPA (higher priority %u > %u)", + &scope->current_bsr, scope->current_bsr_prio, + scope->cand_bsr_prio); + return; + } else if (scope->current_bsr_prio == scope->cand_bsr_prio && + pim_addr_cmp(scope->current_bsr, + scope->bsr_addrsel.run_addr) > 0) { + assert(scope->state == ACCEPT_PREFERRED); + if (!verbose) + return; + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate BSR: known better BSR %pPA (higher address > %pPA)", + &scope->current_bsr, + &scope->bsr_addrsel.run_addr); + return; + } + + if (!pim_addr_cmp(scope->current_bsr, scope->bsr_addrsel.run_addr)) + return; + + pim_cand_bsr_pending(scope); +} + +void pim_cand_bsr_apply(struct bsm_scope *scope) +{ + if (!cand_addrsel_update(&scope->bsr_addrsel, scope->pim->vrf)) + return; + + if (!scope->bsr_addrsel.run) { + pim_cand_bsr_stop(scope, true); + return; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate BSR: %pPA, priority %u", + &scope->bsr_addrsel.run_addr, scope->cand_bsr_prio); + + pim_cand_bsr_trigger(scope, true); +} + +static void pim_cand_rp_adv_stop_maybe(struct bsm_scope *scope) +{ + /* actual check whether stop should be sent - covers address + * changes as well as run_addr = 0.0.0.0 (C-RP shutdown) + */ + if (pim_addr_is_any(scope->cand_rp_prev_addr) || + !pim_addr_cmp(scope->cand_rp_prev_addr, + scope->cand_rp_addrsel.run_addr)) + return; + + switch (scope->state) { + case ACCEPT_PREFERRED: + case BSR_ELECTED: + break; + + case NO_INFO: + case ACCEPT_ANY: + case BSR_PENDING: + default: + return; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate-RP (-, %pPA) deregistering self to %pPA", + &scope->cand_rp_prev_addr, &scope->current_bsr); + + struct cand_rp_msg *msg; + uint8_t buf[PIM_MSG_HEADER_LEN + sizeof(*msg) + sizeof(pim_encoded_group)]; + + msg = (struct cand_rp_msg *)(&buf[PIM_MSG_HEADER_LEN]); + msg->prefix_cnt = 0; + msg->rp_prio = 255; + msg->rp_holdtime = 0; + msg->rp_addr.family = PIM_IANA_AFI; + msg->rp_addr.reserved = 0; + msg->rp_addr.addr = scope->cand_rp_prev_addr; + + pim_msg_build_header(PIMADDR_ANY, scope->current_bsr, buf, sizeof(buf), + PIM_MSG_TYPE_CANDIDATE, false); + + if (pim_msg_send(scope->unicast_sock, PIMADDR_ANY, scope->current_bsr, + buf, sizeof(buf), NULL)) { + zlog_warn("failed to send Cand-RP message: %m"); + } + + scope->cand_rp_prev_addr = PIMADDR_ANY; +} + +static void pim_cand_rp_adv(struct event *t) +{ + struct bsm_scope *scope = EVENT_ARG(t); + int next_msec; + + pim_cand_rp_adv_stop_maybe(scope); + + if (!scope->cand_rp_addrsel.run) { + scope->cand_rp_adv_trigger = 0; + return; + } + + switch (scope->state) { + case ACCEPT_PREFERRED: + case BSR_ELECTED: + break; + + case ACCEPT_ANY: + case BSR_PENDING: + case NO_INFO: + default: + /* state change will retrigger */ + scope->cand_rp_adv_trigger = 0; + + zlog_warn("Candidate-RP advertisement not sent in state %d", + scope->state); + return; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate-RP (%u, %pPA) advertising %zu groups to %pPA", + scope->cand_rp_prio, &scope->cand_rp_addrsel.run_addr, + cand_rp_groups_count(scope->cand_rp_groups), + &scope->current_bsr); + + struct cand_rp_group *grp; + struct cand_rp_msg *msg; + uint8_t buf[PIM_MSG_HEADER_LEN + sizeof(*msg) + + sizeof(pim_encoded_group) * + cand_rp_groups_count(scope->cand_rp_groups)]; + size_t i = 0; + + + msg = (struct cand_rp_msg 
*)(&buf[PIM_MSG_HEADER_LEN]); + msg->prefix_cnt = cand_rp_groups_count(scope->cand_rp_groups); + msg->rp_prio = scope->cand_rp_prio; + msg->rp_holdtime = + htons(MAX(151, (scope->cand_rp_interval * 5 + 1) / 2)); + msg->rp_addr.family = PIM_IANA_AFI; + msg->rp_addr.reserved = 0; + msg->rp_addr.addr = scope->cand_rp_addrsel.run_addr; + + frr_each (cand_rp_groups, scope->cand_rp_groups, grp) { + memset(&msg->groups[i], 0, sizeof(msg->groups[i])); + + msg->groups[i].family = PIM_IANA_AFI; + msg->groups[i].mask = grp->p.prefixlen; + msg->groups[i].addr = grp->p.prefix; + i++; + } + + scope->cand_rp_prev_addr = scope->cand_rp_addrsel.run_addr; + + pim_msg_build_header(scope->cand_rp_addrsel.run_addr, scope->current_bsr, + buf, sizeof(buf), PIM_MSG_TYPE_CANDIDATE, false); + + if (pim_msg_send(scope->unicast_sock, scope->cand_rp_addrsel.run_addr, + scope->current_bsr, buf, sizeof(buf), NULL)) { + zlog_warn("failed to send Cand-RP message: %m"); + } + + /* -1s...+1s */ + next_msec = (frr_weak_random() & 2047) - 1024; + + if (scope->cand_rp_adv_trigger) { + scope->cand_rp_adv_trigger--; + next_msec += 2000; + } else + next_msec += scope->cand_rp_interval * 1000; + + event_add_timer_msec(router->master, pim_cand_rp_adv, scope, next_msec, + &scope->cand_rp_adv_timer); +} + +void pim_cand_rp_trigger(struct bsm_scope *scope) +{ + if (scope->cand_rp_adv_trigger && scope->cand_rp_addrsel.run) { + scope->cand_rp_adv_trigger = PIM_CRP_ADV_TRIGCOUNT; + + /* already scheduled to send triggered advertisements, don't + * reschedule so burst changes don't result in an advertisement + * burst + */ + return; + } + + EVENT_OFF(scope->cand_rp_adv_timer); + + if (!scope->cand_rp_addrsel.run) + return; + + scope->cand_rp_adv_trigger = PIM_CRP_ADV_TRIGCOUNT; + + struct event t; + + t.arg = scope; + pim_cand_rp_adv(&t); +} + +void pim_cand_rp_apply(struct bsm_scope *scope) { - /* stub for Candidate-RP */ + if (!cand_addrsel_update(&scope->cand_rp_addrsel, scope->pim->vrf)) + return; + + if (!scope->cand_rp_addrsel.run) { + if (PIM_DEBUG_BSM) + zlog_debug("Candidate RP ceasing operation"); + + cand_addrsel_clear(&scope->cand_rp_addrsel); + EVENT_OFF(scope->cand_rp_adv_timer); + pim_cand_rp_adv_stop_maybe(scope); + scope->cand_rp_adv_trigger = 0; + return; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate RP: %pPA, priority %u", + &scope->cand_rp_addrsel.run_addr, + scope->cand_rp_prio); + + pim_cand_rp_trigger(scope); +} + +void pim_cand_rp_grp_add(struct bsm_scope *scope, const prefix_pim *p) +{ + struct cand_rp_group *grp, ref; + + ref.p = *p; + grp = cand_rp_groups_find(scope->cand_rp_groups, &ref); + if (grp) + return; + + grp = XCALLOC(MTYPE_PIM_CAND_RP_GRP, sizeof(*grp)); + grp->p = *p; + cand_rp_groups_add(scope->cand_rp_groups, grp); + + pim_cand_rp_trigger(scope); +} + +void pim_cand_rp_grp_del(struct bsm_scope *scope, const prefix_pim *p) +{ + struct cand_rp_group *grp, ref; + + ref.p = *p; + grp = cand_rp_groups_find(scope->cand_rp_groups, &ref); + if (!grp) + return; + + cand_rp_groups_del(scope->cand_rp_groups, grp); + XFREE(MTYPE_PIM_CAND_RP_GRP, grp); + + pim_cand_rp_trigger(scope); +} + +static struct event *t_cand_addrs_reapply; + +static void pim_cand_addrs_reapply(struct event *t) +{ + struct vrf *vrf; + + RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) { + struct pim_instance *pi = vrf->info; + + if (!pi) + continue; + + /* these call cand_addrsel_update() and apply changes */ + pim_cand_bsr_apply(&pi->global_scope); + pim_cand_rp_apply(&pi->global_scope); + } +} + +void pim_cand_addrs_changed(void) +{ + 
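+ /* Coalesce bursts of zebra interface/address events: (re)arm a short timer
+  * and reapply candidate BSR/RP address selection once it fires.
+  */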
EVENT_OFF(t_cand_addrs_reapply); + event_add_timer_msec(router->master, pim_cand_addrs_reapply, NULL, 1, + &t_cand_addrs_reapply); +} + +static void cand_addrsel_config_write(struct vty *vty, + struct cand_addrsel *addrsel) +{ + switch (addrsel->cfg_mode) { + case CAND_ADDR_LO: + break; + case CAND_ADDR_ANY: + vty_out(vty, " source any"); + break; + case CAND_ADDR_IFACE: + vty_out(vty, " source interface %s", addrsel->cfg_ifname); + break; + case CAND_ADDR_EXPLICIT: + vty_out(vty, " source address %pPA", &addrsel->cfg_addr); + break; + } +} + +int pim_cand_config_write(struct pim_instance *pim, struct vty *vty) +{ + struct bsm_scope *scope = &pim->global_scope; + int ret = 0; + + if (scope->cand_rp_addrsel.cfg_enable) { + vty_out(vty, " bsr candidate-rp"); + if (scope->cand_rp_prio != 192) + vty_out(vty, " priority %u", scope->cand_rp_prio); + if (scope->cand_rp_interval != PIM_CRP_ADV_INTERVAL) + vty_out(vty, " interval %u", scope->cand_rp_interval); + cand_addrsel_config_write(vty, &scope->cand_rp_addrsel); + vty_out(vty, "\n"); + ret++; + + struct cand_rp_group *group; + + frr_each (cand_rp_groups, scope->cand_rp_groups, group) { + vty_out(vty, " bsr candidate-rp group %pFX\n", + &group->p); + ret++; + } + } + + if (scope->bsr_addrsel.cfg_enable) { + vty_out(vty, " bsr candidate-bsr"); + if (scope->cand_bsr_prio != 64) + vty_out(vty, " priority %u", scope->cand_bsr_prio); + cand_addrsel_config_write(vty, &scope->bsr_addrsel); + vty_out(vty, "\n"); + ret++; + } + return ret; } diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h index fb09e3b1cc..1eacc1be57 100644 --- a/pimd/pim_bsm.h +++ b/pimd/pim_bsm.h @@ -21,6 +21,13 @@ #define PIM_BS_TIME 60 /* RFC 5059 - Sec 5 */ #define PIM_BSR_DEFAULT_TIMEOUT 130 /* RFC 5059 - Sec 5 */ +/* number of times to include rp-count = 0 ranges */ +#define PIM_BSR_DEAD_COUNT 3 + +#define PIM_CRP_ADV_TRIGCOUNT 3 +#define PIM_CRP_ADV_INTERVAL 60 +#define PIM_CRP_HOLDTIME 150 + /* These structures are only encoded IPv4 specific */ #define PIM_BSM_HDR_LEN sizeof(struct bsm_hdr) #define PIM_BSM_GRP_LEN sizeof(struct bsmmsg_grpinfo) @@ -33,19 +40,61 @@ * ============== */ -/* Non candidate BSR states */ -enum ncbsr_state { +/* BSR states + * + * Candidate BSR starts at BSR_PENDING, moves to AP or E depending on + * loss/win. Will never go into AA (because in that case it'd become BSR + * itself.) + * + * Non-Candidate BSR starts at NO_INFO, moves to AP & AA depending on + * a BSR being available or not. 
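+ * (Abbreviations: AP = ACCEPT_PREFERRED, AA = ACCEPT_ANY, E = BSR_ELECTED
+ * in the enum below.)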
+ */ +enum bsr_state { NO_INFO = 0, ACCEPT_ANY, - ACCEPT_PREFERRED + ACCEPT_PREFERRED, /* = same as C-BSR if candidate */ + BSR_PENDING, + BSR_ELECTED, +}; + +enum cand_addr { + CAND_ADDR_LO = 0, + CAND_ADDR_ANY, + CAND_ADDR_IFACE, + CAND_ADDR_EXPLICIT, }; +/* used separately for Cand-RP and Cand-BSR */ +struct cand_addrsel { + bool cfg_enable; + enum cand_addr cfg_mode : 8; + + /* only valid for mode==CAND_ADDR_IFACE */ + char cfg_ifname[IFNAMSIZ]; + /* only valid for mode==CAND_ADDR_EXPLICIT */ + pim_addr cfg_addr; + + /* running state updated based on above on zebra events */ + pim_addr run_addr; + bool run; +}; + + PREDECL_DLIST(bsm_frags); +PREDECL_RBTREE_UNIQ(cand_rp_groups); + +/* n*m "table" accessed both by-RP and by-group */ +PREDECL_RBTREE_UNIQ(bsr_crp_rps); +PREDECL_RBTREE_UNIQ(bsr_crp_groups); + +PREDECL_RBTREE_UNIQ(bsr_crp_rp_groups); +PREDECL_RBTREE_UNIQ(bsr_crp_group_rps); /* BSM scope - bsm processing is per scope */ struct bsm_scope { int sz_id; /* scope zone id */ - enum ncbsr_state state; /* non candidate BSR state */ + enum bsr_state state; /* BSR state */ + bool accept_nofwd_bsm; /* no fwd bsm accepted for scope */ pim_addr current_bsr; /* current elected BSR for the sz */ uint32_t current_bsr_prio; /* current BSR priority */ @@ -60,6 +109,93 @@ struct bsm_scope { struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */ struct event *bs_timer; /* Boot strap timer */ + + /* Candidate BSR config */ + struct cand_addrsel bsr_addrsel; + uint8_t cand_bsr_prio; + + /* Candidate BSR state */ + uint8_t current_cand_bsr_prio; + /* if nothing changed from Cand-RP data we received, less work... */ + bool elec_rp_data_changed; + + /* data that the E-BSR keeps - not to be confused with Candidate-RP + * stuff below. These two here are the info about all the Cand-RPs + * that we as a BSR received information for in Cand-RP-adv packets. + */ + struct bsr_crp_rps_head ebsr_rps[1]; + struct bsr_crp_groups_head ebsr_groups[1]; + + /* set if we have any group ranges where we're currently advertising + * rp-count = 0 (includes both ranges without any RPs as well as + * ranges with only NHT-unreachable RPs) + */ + bool ebsr_have_dead_pending; + unsigned int changed_bsm_trigger; + + struct event *t_ebsr_regen_bsm; + + /* Candidate RP config */ + struct cand_addrsel cand_rp_addrsel; + uint8_t cand_rp_prio; + unsigned int cand_rp_interval; /* default: PIM_CRP_ADV_INTERVAL=60 */ + /* holdtime is not configurable, always 2.5 * interval. */ + struct cand_rp_groups_head cand_rp_groups[1]; + + /* Candidate RP state */ + int unicast_sock; + struct event *unicast_read; + struct event *cand_rp_adv_timer; + unsigned int cand_rp_adv_trigger; /* # trigg. 
C-RP-Adv left to send */ + + /* for sending holdtime=0 zap */ + pim_addr cand_rp_prev_addr; +}; + +struct cand_rp_group { + struct cand_rp_groups_item item; + + prefix_pim p; +}; + +struct bsr_crp_group { + struct bsr_crp_groups_item item; + + prefix_pim range; + struct bsr_crp_group_rps_head rps[1]; + + size_t n_selected; + bool deleted_selected : 1; + + /* number of times we've advertised this range with rp-count = 0 */ + unsigned int dead_count; +}; + +struct bsr_crp_rp { + struct bsr_crp_rps_item item; + + pim_addr addr; + struct bsr_crp_rp_groups_head groups[1]; + + struct bsm_scope *scope; + struct event *t_hold; + time_t seen_first; + time_t seen_last; + + uint16_t holdtime; + uint8_t prio; + bool nht_ok; +}; + +/* "n * m" RP<->Group tie-in */ +struct bsr_crp_item { + struct bsr_crp_rp_groups_item r_g_item; + struct bsr_crp_group_rps_item g_r_item; + + struct bsr_crp_group *group; + struct bsr_crp_rp *rp; + + bool selected : 1; }; /* BSM packet (= fragment) - this is stored as list in bsm_frags inside scope @@ -200,6 +336,14 @@ struct bsmmsg_rpinfo { uint8_t reserved; } __attribute__((packed)); +struct cand_rp_msg { + uint8_t prefix_cnt; + uint8_t rp_prio; + uint16_t rp_holdtime; + pim_encoded_unicast rp_addr; + pim_encoded_group groups[0]; +} __attribute__((packed)); + /* API */ void pim_bsm_proc_init(struct pim_instance *pim); void pim_bsm_proc_free(struct pim_instance *pim); @@ -210,4 +354,39 @@ int pim_bsm_process(struct interface *ifp, pim_sgaddr *sg, uint8_t *buf, bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp); struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope, struct prefix *grp); + +void pim_bsm_generate(struct bsm_scope *scope); +void pim_bsm_changed(struct bsm_scope *scope); +void pim_bsm_sent(struct bsm_scope *scope); +void pim_bsm_frags_free(struct bsm_scope *scope); + +bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf, + int buflen, uint16_t bsm_frag_tag); + +void pim_cand_bsr_apply(struct bsm_scope *scope); +void pim_cand_rp_apply(struct bsm_scope *scope); +void pim_cand_rp_trigger(struct bsm_scope *scope); +void pim_cand_rp_grp_add(struct bsm_scope *scope, const prefix_pim *p); +void pim_cand_rp_grp_del(struct bsm_scope *scope, const prefix_pim *p); + +void pim_cand_addrs_changed(void); + +int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf, + uint32_t buf_size); + +struct pim_nexthop_cache; +void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc); + +void pim_crp_db_clear(struct bsm_scope *scope); +int pim_crp_db_show(struct vty *vty, struct bsm_scope *scope, bool json); +int pim_crp_groups_show(struct vty *vty, struct bsm_scope *scope, bool json); + +int pim_cand_config_write(struct pim_instance *pim, struct vty *vty); + +DECLARE_MTYPE(PIM_BSM_FRAG); + +DECLARE_MTYPE(PIM_BSM_FRAG); + +DECLARE_MTYPE(PIM_BSM_FRAG); + #endif diff --git a/pimd/pim_bsr_rpdb.c b/pimd/pim_bsr_rpdb.c new file mode 100644 index 0000000000..3ec9f99cd1 --- /dev/null +++ b/pimd/pim_bsr_rpdb.c @@ -0,0 +1,634 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* PIM RP database for BSR operation + * Copyright (C) 2021 David Lamparter for NetDEF, Inc. 
+ */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include <math.h> +#include <stdint.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <netinet/in.h> +#include <lib/network.h> +#include <lib/iana_afi.h> +#include <lib/sockunion.h> + +#include "if.h" +#include "pimd.h" +#include "pim_iface.h" +#include "pim_instance.h" +#include "pim_rpf.h" +#include "pim_hello.h" +#include "pim_pim.h" +#include "pim_nht.h" +#include "pim_bsm.h" +#include "pim_time.h" + +/* safety limits to prevent DoS/memory exhaustion attacks against the BSR + * + * The BSR is more susceptible than other PIM protocol operation because + * Candidate-RP messages are unicast to the BSR without any 2-way interaction + * and can thus be spoofed blindly(!) from anywhere in the internet. + * + * Everything else is on-link, multicast, or requires an adjacency - much + * harder to mess with. + */ + +/* total number of RPs we keep information for */ +static size_t bsr_max_rps = 1024; + +DEFINE_MTYPE_STATIC(PIMD, PIM_BSR_CRP, "PIM BSR C-RP"); +DEFINE_MTYPE_STATIC(PIMD, PIM_BSR_GROUP, "PIM BSR range"); +DEFINE_MTYPE_STATIC(PIMD, PIM_BSR_ITEM, "PIM BSR C-RP range item"); + +static int rp_cmp(const struct bsr_crp_rp *a, const struct bsr_crp_rp *b) +{ + return pim_addr_cmp(a->addr, b->addr); +} + +DECLARE_RBTREE_UNIQ(bsr_crp_rps, struct bsr_crp_rp, item, rp_cmp); + +static int group_cmp(const struct bsr_crp_group *a, + const struct bsr_crp_group *b) +{ + return prefix_cmp(&a->range, &b->range); +} + +DECLARE_RBTREE_UNIQ(bsr_crp_groups, struct bsr_crp_group, item, group_cmp); + +static int r_g_cmp(const struct bsr_crp_item *a, const struct bsr_crp_item *b) +{ + return prefix_cmp(&a->group->range, &b->group->range); +} + +DECLARE_RBTREE_UNIQ(bsr_crp_rp_groups, struct bsr_crp_item, r_g_item, r_g_cmp); + +static int g_r_cmp(const struct bsr_crp_item *a, const struct bsr_crp_item *b) +{ + const struct bsr_crp_rp *rp_a = a->rp, *rp_b = b->rp; + + /* NHT-failed RPs last */ + if (rp_a->nht_ok > rp_b->nht_ok) + return -1; + if (rp_a->nht_ok < rp_b->nht_ok) + return 1; + + /* This function determines BSR policy in what subset of the received + * RP candidates to advertise. 
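+ * (Ordering implemented below: NHT-reachable RPs first, then lower priority
+ * value, then longest-known C-RP, then lowest address.)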
The BSR is free to make its choices + * any way it deems useful + */ + + /* lower numeric values are better */ + if (rp_a->prio < rp_b->prio) + return -1; + if (rp_a->prio > rp_b->prio) + return 1; + + /* prefer older RP for less churn */ + if (rp_a->seen_first < rp_b->seen_first) + return -1; + if (rp_a->seen_first > rp_b->seen_first) + return 1; + + return pim_addr_cmp(rp_a->addr, rp_b->addr); +} + +DECLARE_RBTREE_UNIQ(bsr_crp_group_rps, struct bsr_crp_item, g_r_item, g_r_cmp); + +void pim_bsm_generate(struct bsm_scope *scope) +{ + struct bsm_frag *frag; + struct bsm_hdr *hdr; + bool have_dead = false; + + assertf(scope->state == BSR_ELECTED, "state=%d", scope->state); + + pim_bsm_frags_free(scope); + + struct bsr_crp_group *group; + struct bsr_crp_item *item; + struct bsr_crp_rp *rp; + size_t n_groups = 0, n_rps = 0; + + frr_each (bsr_crp_groups, scope->ebsr_groups, group) { + if (group->n_selected == 0) { + if (group->dead_count >= PIM_BSR_DEAD_COUNT) + continue; + + have_dead = true; + } else + group->dead_count = 0; + + n_groups++; + n_rps += group->n_selected; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Generating BSM (%zu ranges, %zu RPs)", n_groups, n_rps); + + size_t datalen = PIM_MSG_HEADER_LEN + sizeof(*hdr) + + n_groups * sizeof(struct bsmmsg_grpinfo) + + n_rps * sizeof(struct bsmmsg_rpinfo); + + frag = XCALLOC(MTYPE_PIM_BSM_FRAG, sizeof(*frag) + datalen); + + uint8_t *pos = frag->data + PIM_MSG_HEADER_LEN; + uint8_t *end = frag->data + datalen; + + hdr = (struct bsm_hdr *)pos; + pos += sizeof(*hdr); + assert(pos <= end); + + /* TODO: make BSR hashmasklen configurable */ +#if PIM_IPV == 6 + hdr->hm_len = 126; +#else + hdr->hm_len = 30; +#endif + hdr->bsr_prio = scope->current_bsr_prio; + hdr->bsr_addr.family = PIM_IANA_AFI; + hdr->bsr_addr.reserved = 0; + hdr->bsr_addr.addr = scope->bsr_addrsel.run_addr; + + frr_each (bsr_crp_groups, scope->ebsr_groups, group) { + if (group->n_selected == 0 && + group->dead_count >= PIM_BSR_DEAD_COUNT) + continue; + + struct bsmmsg_grpinfo *gi = (struct bsmmsg_grpinfo *)pos; + + pos += sizeof(*gi); + assert(pos <= end); + + gi->group.family = PIM_MSG_ADDRESS_FAMILY; + gi->group.mask = group->range.prefixlen; + gi->group.addr = group->range.prefix; + + size_t n_added = 0; + + frr_each (bsr_crp_group_rps, group->rps, item) { + if (!item->selected) + break; + + struct bsmmsg_rpinfo *ri = (struct bsmmsg_rpinfo *)pos; + + pos += sizeof(*ri); + assert(pos <= end); + + rp = item->rp; + ri->rpaddr.family = PIM_MSG_ADDRESS_FAMILY; + ri->rpaddr.addr = rp->addr; + ri->rp_holdtime = htons(rp->holdtime); + ri->rp_pri = rp->prio; + + n_added++; + } + + gi->rp_count = group->n_selected; + gi->frag_rp_count = n_added; + assert(n_added == group->n_selected); + } + + assertf(pos == end, "end-pos=%td", end - pos); + frag->size = datalen; + + bsm_frags_add_head(scope->bsm_frags, frag); + + scope->ebsr_have_dead_pending = have_dead; + + /* + * The BSR itself doesn't receive (no loopback) the BSM msgs advertising + * the rps. Install the rps directly for the local BSR node. 
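Before the freshly built fragment is fed back through pim_bsm_parse_install_g2rp() below, so the elected BSR learns its own group-to-RP mappings, it has been laid out with an exact size calculation and a pos/end walk that asserts every byte is accounted for. A compressed standalone sketch of that packing pattern; the packed structs here are invented stand-ins, not the real bsm_hdr / bsmmsg_grpinfo / bsmmsg_rpinfo wire encodings:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_hdr { uint16_t frag_tag; uint8_t hm_len; uint8_t prio; } __attribute__((packed));
struct demo_grp { uint32_t group; uint8_t masklen; uint8_t rp_count; } __attribute__((packed));
struct demo_rp  { uint32_t addr; uint16_t holdtime; uint8_t prio; } __attribute__((packed));

int main(void)
{
        size_t n_groups = 2, n_rps = 3;     /* e.g. 2 ranges, 3 selected RPs total */
        size_t rps_per_group[2] = { 2, 1 }; /* must add up to n_rps */

        /* size the buffer exactly, as pim_bsm_generate() does with datalen */
        size_t datalen = sizeof(struct demo_hdr) +
                         n_groups * sizeof(struct demo_grp) +
                         n_rps * sizeof(struct demo_rp);

        uint8_t *buf = calloc(1, datalen);
        uint8_t *pos = buf, *end = buf + datalen;

        /* the real code casts pos to the struct and fills it before advancing */
        pos += sizeof(struct demo_hdr);
        assert(pos <= end);

        for (size_t g = 0; g < n_groups; g++) {
                pos += sizeof(struct demo_grp);
                assert(pos <= end);
                for (size_t r = 0; r < rps_per_group[g]; r++) {
                        pos += sizeof(struct demo_rp);
                        assert(pos <= end);
                }
        }

        /* same invariant as the assertf(pos == end, ...) above */
        assert(pos == end);
        printf("packed %zu bytes\n", datalen);
        free(buf);
        return 0;
}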
+ */ + pim_bsm_parse_install_g2rp(scope, ((uint8_t *) hdr) + PIM_BSM_HDR_LEN, + datalen - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN, scope->bsm_frag_tag); + + pim_bsm_changed(scope); +} + +static void pim_bsm_generate_timer(struct event *t) +{ + struct bsm_scope *scope = EVENT_ARG(t); + + pim_bsm_generate(scope); +} + +static void pim_bsm_generate_sched(struct bsm_scope *scope) +{ + assertf(scope->state == BSR_ELECTED, "state=%d", scope->state); + + if (scope->t_ebsr_regen_bsm) + return; + + event_add_timer(router->master, pim_bsm_generate_timer, scope, 1, + &scope->t_ebsr_regen_bsm); +} + +void pim_bsm_sent(struct bsm_scope *scope) +{ + struct bsr_crp_group *group; + bool have_dead = false, changed = false; + + if (!scope->ebsr_have_dead_pending) + return; + + frr_each_safe (bsr_crp_groups, scope->ebsr_groups, group) { + if (group->n_selected != 0) + continue; + + if (group->dead_count < PIM_BSR_DEAD_COUNT) { + group->dead_count++; + have_dead = true; + continue; + } + + changed = true; + + if (bsr_crp_group_rps_count(group->rps)) + /* have RPs, but none selected */ + continue; + + /* no reason to keep this range anymore */ + bsr_crp_groups_del(scope->ebsr_groups, group); + bsr_crp_group_rps_fini(group->rps); + XFREE(MTYPE_PIM_BSR_GROUP, group); + continue; + } + + scope->ebsr_have_dead_pending = have_dead; + if (changed) + pim_bsm_generate_sched(scope); +} + +static void bsr_crp_reselect(struct bsm_scope *scope, + struct bsr_crp_group *group) +{ + bool changed = false; + struct bsr_crp_item *item; + size_t n_selected = 0; + + frr_each (bsr_crp_group_rps, group->rps, item) { + bool select = false; + + /* hardcode best 2 RPs for now */ + if (item->rp->nht_ok && n_selected < 2) { + select = true; + n_selected++; + } + + if (item->selected != select) { + changed = true; + item->selected = select; + } + } + + changed |= group->deleted_selected; + group->deleted_selected = false; + group->n_selected = n_selected; + + if (changed) + pim_bsm_generate_sched(scope); + + scope->elec_rp_data_changed |= changed; +} + +/* changing rp->nht_ok or rp->prio affects the sort order in group->rp + * lists, so need a delete & re-add if either changes + */ +static void pim_crp_nht_prio_change(struct bsr_crp_rp *rp, bool nht_ok, + uint8_t prio) +{ + struct bsr_crp_item *item; + + frr_each (bsr_crp_rp_groups, rp->groups, item) + bsr_crp_group_rps_del(item->group->rps, item); + + rp->prio = prio; + rp->nht_ok = nht_ok; + + frr_each (bsr_crp_rp_groups, rp->groups, item) { + bsr_crp_group_rps_add(item->group->rps, item); + bsr_crp_reselect(rp->scope, item->group); + } +} + +static struct bsr_crp_group *group_get(struct bsm_scope *scope, + prefix_pim *range) +{ + struct bsr_crp_group *group, ref; + + ref.range = *range; + group = bsr_crp_groups_find(scope->ebsr_groups, &ref); + if (!group) { + group = XCALLOC(MTYPE_PIM_BSR_GROUP, sizeof(*group)); + group->range = *range; + bsr_crp_group_rps_init(group->rps); + bsr_crp_groups_add(scope->ebsr_groups, group); + } + return group; +} + +static void pim_crp_update(struct bsr_crp_rp *rp, struct cand_rp_msg *msg, + size_t ngroups) +{ + struct bsr_crp_rp_groups_head oldgroups[1]; + struct bsr_crp_item *item, itemref; + struct bsr_crp_group *group, groupref; + + //struct bsm_scope *scope = rp->scope; + + bsr_crp_rp_groups_init(oldgroups); + bsr_crp_rp_groups_swap_all(rp->groups, oldgroups); + + itemref.rp = rp; + itemref.group = &groupref; + + assert(msg || ngroups == 0); + + for (size_t i = 0; i < ngroups; i++) { + if (msg->groups[i].family != PIM_MSG_ADDRESS_FAMILY) + continue; 
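The rest of this loop, below, discards BIDIR ranges and, on IPv4, anything that is not a plausible multicast range: the prefix must be at least a /4 and the address must fall in the class D/E space that IPV4_CLASS_DE() matches. The prefix half of that filter as a small standalone check (simplified, not the FRR macro itself):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* a usable IPv4 group range: at least a /4, inside 224.0.0.0/3 (class D/E) */
static bool demo_group_range_ok(const char *prefix, unsigned int prefixlen)
{
        struct in_addr a;

        if (inet_pton(AF_INET, prefix, &a) != 1)
                return false;
        if (prefixlen < 4 || prefixlen > 32)
                return false;
        return (ntohl(a.s_addr) & 0xE0000000) == 0xE0000000;
}

int main(void)
{
        printf("239.1.0.0/16 -> %d\n", demo_group_range_ok("239.1.0.0", 16));
        printf("10.0.0.0/8   -> %d\n", demo_group_range_ok("10.0.0.0", 8));
        printf("224.0.0.0/3  -> %d\n", demo_group_range_ok("224.0.0.0", 3));
        return 0;
}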
+ if (msg->groups[i].bidir) + continue; + + prefix_pim pfx; + + pfx.family = PIM_AF; + pfx.prefixlen = msg->groups[i].mask; + pfx.prefix = msg->groups[i].addr; + +#if PIM_IPV == 4 + if (pfx.prefixlen < 4) + continue; + if (!IPV4_CLASS_DE(ntohl(pfx.prefix.s_addr))) + continue; +#endif + + apply_mask(&pfx); + + groupref.range = pfx; + item = bsr_crp_rp_groups_find(oldgroups, &itemref); + + if (item) { + bsr_crp_rp_groups_del(oldgroups, item); + bsr_crp_rp_groups_add(rp->groups, item); + continue; + } + + group = group_get(rp->scope, &pfx); + + item = XCALLOC(MTYPE_PIM_BSR_ITEM, sizeof(*item)); + item->rp = rp; + item->group = group; + + bsr_crp_group_rps_add(group->rps, item); + bsr_crp_rp_groups_add(rp->groups, item); + + bsr_crp_reselect(rp->scope, group); + } + + while ((item = bsr_crp_rp_groups_pop(oldgroups))) { + group = item->group; + if (item->selected) + group->deleted_selected = true; + + bsr_crp_group_rps_del(group->rps, item); + XFREE(MTYPE_PIM_BSR_ITEM, item); + + bsr_crp_reselect(rp->scope, group); + } + bsr_crp_rp_groups_fini(oldgroups); + + if (msg && msg->rp_prio != rp->prio) + pim_crp_nht_prio_change(rp, rp->nht_ok, msg->rp_prio); +} + +void pim_crp_nht_update(struct pim_instance *pim, struct pim_nexthop_cache *pnc) +{ + struct bsm_scope *scope = &pim->global_scope; + struct bsr_crp_rp *rp, ref; + bool ok; + + ref.addr = pnc->rpf.rpf_addr; + rp = bsr_crp_rps_find(scope->ebsr_rps, &ref); + assertf(rp, "addr=%pPA", &ref.addr); + + ok = CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID); + if (ok == rp->nht_ok) + return; + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate-RP %pPA NHT %s", &rp->addr, ok ? "UP" : "DOWN"); + pim_crp_nht_prio_change(rp, ok, rp->prio); +} + +static void pim_crp_free(struct pim_instance *pim, struct bsr_crp_rp *rp) +{ + EVENT_OFF(rp->t_hold); + pim_nht_candrp_del(pim, rp->addr); + bsr_crp_rp_groups_fini(rp->groups); + + XFREE(MTYPE_PIM_BSR_CRP, rp); +} + +static void pim_crp_expire(struct event *t) +{ + struct bsr_crp_rp *rp = EVENT_ARG(t); + struct pim_instance *pim = rp->scope->pim; + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate-RP %pPA holdtime expired", &rp->addr); + + pim_crp_update(rp, NULL, 0); + + bsr_crp_rps_del(rp->scope->ebsr_rps, rp); + pim_crp_free(pim, rp); +} + +int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf, + uint32_t buf_size) +{ + struct pim_interface *pim_ifp = NULL; + struct pim_instance *pim; + struct bsm_scope *scope; + + pim_ifp = ifp->info; + if (!pim_ifp) { + if (PIM_DEBUG_BSM) + zlog_debug("%s: multicast not enabled on interface %s", + __func__, ifp->name); + return -1; + } + + //pim_ifp->pim_ifstat_bsm_rx++; + pim = pim_ifp->pim; + //pim->bsm_rcvd++; + + if (!pim_ifp->bsm_enable) { + zlog_warn("%s: BSM not enabled on interface %s", __func__, + ifp->name); + //pim_ifp->pim_ifstat_bsm_cfg_miss++; + //pim->bsm_dropped++; + return -1; + } + + if (buf_size < (PIM_MSG_HEADER_LEN + sizeof(struct cand_rp_msg))) { + if (PIM_DEBUG_BSM) + zlog_debug("%s: received buffer length of %d which is too small to properly decode", + __func__, buf_size); + return -1; + } + + scope = &pim->global_scope; + + if (scope->state < BSR_PENDING) { + if (PIM_DEBUG_BSM) + zlog_debug("received Candidate-RP message from %pPA while not BSR", + &src_dst->src); + return -1; + } + + size_t remain = buf_size; + struct cand_rp_msg *crp_hdr; + + buf += PIM_MSG_HEADER_LEN; + remain -= PIM_MSG_HEADER_LEN; + + crp_hdr = (struct cand_rp_msg *)buf; + buf += sizeof(*crp_hdr); + remain -= sizeof(*crp_hdr); + + size_t ngroups = crp_hdr->prefix_cnt; + + 
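The length check immediately below is what keeps a spoofed prefix_cnt from walking off the end of the received packet: the untrusted count is multiplied by the per-group encoding size and compared against the bytes actually remaining. The same validation in standalone form, with fixed-layout stand-ins for struct cand_rp_msg and the encoded group (the real wire encodings differ):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the wire structures */
struct demo_group { uint8_t family, bidir, mask; uint8_t addr[4]; } __attribute__((packed));
struct demo_crp_hdr {
        uint8_t prefix_cnt;
        uint8_t rp_prio;
        uint16_t rp_holdtime;
        uint8_t rp_addr[4];
        struct demo_group groups[];
} __attribute__((packed));

/* 0 on success, -1 if the advertised group count overruns the buffer */
static int demo_crp_validate(const uint8_t *buf, size_t len)
{
        const struct demo_crp_hdr *hdr;
        size_t remain, ngroups;

        if (len < sizeof(struct demo_crp_hdr))
                return -1;

        hdr = (const struct demo_crp_hdr *)buf;
        remain = len - sizeof(*hdr);
        ngroups = hdr->prefix_cnt;

        /* same shape as the check below: untrusted count * element size */
        if (remain < ngroups * sizeof(struct demo_group))
                return -1;
        return 0;
}

int main(void)
{
        uint8_t pkt[64] = { 0 };
        size_t len = sizeof(struct demo_crp_hdr) + 2 * sizeof(struct demo_group);

        pkt[0] = 5; /* claims 5 groups, but only 2 fit in the buffer */
        printf("truncated: %d\n", demo_crp_validate(pkt, len));
        pkt[0] = 2;
        printf("valid:     %d\n", demo_crp_validate(pkt, len));
        return 0;
}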
if (remain < ngroups * sizeof(struct pim_encoded_group_ipv4)) { + if (PIM_DEBUG_BSM) + zlog_debug("truncated Candidate-RP advertisement for RP %pPA from %pPA (too short for %zu groups)", + (pim_addr *)&crp_hdr->rp_addr.addr, + &src_dst->src, ngroups); + return -1; + } + + if (PIM_DEBUG_BSM) + zlog_debug("Candidate-RP: %pPA, prio=%u (from %pPA, %zu groups)", + (pim_addr *)&crp_hdr->rp_addr.addr, crp_hdr->rp_prio, + &src_dst->src, ngroups); + + + struct bsr_crp_rp *rp, ref; + + ref.addr = crp_hdr->rp_addr.addr; + rp = bsr_crp_rps_find(scope->ebsr_rps, &ref); + + if (!rp) { + if (bsr_crp_rps_count(scope->ebsr_rps) >= bsr_max_rps) { + zlog_err("BSR: number of tracked Candidate RPs (%zu) exceeds DoS-protection limit (%zu), dropping advertisement for RP %pPA (packet source %pPA)", + bsr_crp_rps_count(scope->ebsr_rps), + bsr_max_rps, (pim_addr *)&crp_hdr->rp_addr.addr, + &src_dst->src); + return -1; + } + + if (PIM_DEBUG_BSM) + zlog_debug("new Candidate-RP: %pPA (from %pPA)", + (pim_addr *)&crp_hdr->rp_addr.addr, + &src_dst->src); + + rp = XCALLOC(MTYPE_PIM_BSR_CRP, sizeof(*rp)); + rp->scope = scope; + rp->addr = crp_hdr->rp_addr.addr; + rp->prio = 255; + bsr_crp_rp_groups_init(rp->groups); + rp->seen_first = monotime(NULL); + + bsr_crp_rps_add(scope->ebsr_rps, rp); + rp->nht_ok = pim_nht_candrp_add(pim, rp->addr); + } + + rp->seen_last = monotime(NULL); + rp->holdtime = ntohs(crp_hdr->rp_holdtime); + + EVENT_OFF(rp->t_hold); + event_add_timer(router->master, pim_crp_expire, rp, + ntohs(crp_hdr->rp_holdtime), &rp->t_hold); + + pim_crp_update(rp, crp_hdr, ngroups); + return 0; +} + +void pim_crp_db_clear(struct bsm_scope *scope) +{ + struct bsr_crp_rp *rp; + struct bsr_crp_group *group; + struct bsr_crp_item *item; + + while ((rp = bsr_crp_rps_pop(scope->ebsr_rps))) { + while ((item = bsr_crp_rp_groups_pop(rp->groups))) { + group = item->group; + + if (item->selected) + group->deleted_selected = true; + + bsr_crp_group_rps_del(group->rps, item); + XFREE(MTYPE_PIM_BSR_ITEM, item); + } + pim_crp_free(scope->pim, rp); + } + + while ((group = bsr_crp_groups_pop(scope->ebsr_groups))) { + assertf(!bsr_crp_group_rps_count(group->rps), + "range=%pFX rp_count=%zu", &group->range, + bsr_crp_group_rps_count(group->rps)); + + bsr_crp_group_rps_fini(group->rps); + XFREE(MTYPE_PIM_BSR_GROUP, group); + } +} + +int pim_crp_db_show(struct vty *vty, struct bsm_scope *scope, bool json) +{ + struct bsr_crp_rp *rp; + struct bsr_crp_item *item; + + vty_out(vty, "RP/Group NHT Prio Uptime Hold\n"); + + frr_each (bsr_crp_rps, scope->ebsr_rps, rp) { + vty_out(vty, "%-15pPA %4s %4u %8ld %4lu\n", &rp->addr, + rp->nht_ok ? "UP" : "DOWN", rp->prio, + (long)(monotime(NULL) - rp->seen_first), + event_timer_remain_second(rp->t_hold)); + + frr_each (bsr_crp_rp_groups, rp->groups, item) + vty_out(vty, "%c %-18pFX\n", item->selected ? '>' : ' ', + &item->group->range); + } + + return CMD_SUCCESS; +} + +int pim_crp_groups_show(struct vty *vty, struct bsm_scope *scope, bool json) +{ + struct bsr_crp_group *group; + struct bsr_crp_item *item; + + if (scope->ebsr_have_dead_pending) + vty_out(vty, "have_dead_pending\n"); + + frr_each (bsr_crp_groups, scope->ebsr_groups, group) { + vty_out(vty, "%c %pFX", group->n_selected ? '^' : '!', + &group->range); + if (group->n_selected == 0) + vty_out(vty, " (dead %u)", group->dead_count); + + vty_out(vty, "\n"); + + frr_each (bsr_crp_group_rps, group->rps, item) + vty_out(vty, "%c %pPA\n", item->selected ? 
'>' : ' ', + &item->rp->addr); + } + + return CMD_SUCCESS; +} diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 633c46966e..d71b174487 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -66,27 +66,6 @@ static struct cmd_node debug_node = { .config_write = pim_debug_config_write, }; -static struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[], - const int argc, int *idx, bool uj) -{ - struct vrf *vrf; - - if (argv_find(argv, argc, "NAME", idx)) - vrf = vrf_lookup_by_name(argv[*idx]->arg); - else - vrf = vrf_lookup_by_id(VRF_DEFAULT); - - if (!vrf) { - if (uj) - vty_json_empty(vty, NULL); - else - vty_out(vty, "Specified VRF: %s does not exist\n", - argv[*idx]->arg); - } - - return vrf; -} - static void pim_show_assert_helper(struct vty *vty, struct pim_interface *pim_ifp, struct pim_ifchannel *ch, time_t now) @@ -2864,7 +2843,7 @@ DEFPY (show_ip_pim_bsm_db, return pim_show_bsm_db_helper(vrf, vty, !!json); } -DEFPY (show_ip_pim_bsrp, +DEFPY_HIDDEN (show_ip_pim_bsrp, show_ip_pim_bsrp_cmd, "show ip pim bsrp-info [vrf NAME] [json$json]", SHOW_STR @@ -2877,6 +2856,109 @@ DEFPY (show_ip_pim_bsrp, return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json); } +DEFPY (show_ip_pim_bsr_rpinfo, + show_ip_pim_bsr_rpinfo_cmd, + "show ip pim bsr rp-info [vrf NAME] [json$json]", + SHOW_STR + IP_STR + PIM_STR + BSR_STR + "PIM cached group-rp mappings information received from BSR\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + return pim_show_group_rp_mappings_info_helper(vrf, vty, !!json); +} + +DEFPY (show_ip_pim_bsr_cand_bsr, + show_ip_pim_bsr_cand_bsr_cmd, + "show ip pim bsr candidate-bsr [vrf NAME$vrfname] [json$json]", + SHOW_STR + IP_STR + PIM_STR + BSR_STR + "Current PIM router candidate BSR state\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + int idx = 2; + struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + return pim_show_bsr_cand_bsr(vrf, vty, !!json); +} + + +DEFPY (show_ip_pim_bsr_cand_rp, + show_ip_pim_bsr_cand_rp_cmd, + "show ip pim bsr candidate-rp [vrf NAME$vrfname] [json$json]", + SHOW_STR + IP_STR + PIM_STR + BSR_STR + "Current PIM router candidate RP state\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + int idx = 2; + struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, !!json); + + + if (!vrf || !vrf->info) + return CMD_WARNING; + + + return pim_show_bsr_cand_rp(vrf, vty, !!json); +} + +DEFPY (show_ip_pim_bsr_rpdb, + show_ip_pim_bsr_rpdb_cmd, + "show ip pim bsr candidate-rp-database [vrf NAME$vrfname] [json$json]", + SHOW_STR + IP_STR + PIM_STR + BSR_STR + "Candidate RPs database on this router (if it is the BSR)\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + int idx = 2; + struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + struct pim_instance *pim = vrf->info; + struct bsm_scope *scope = &pim->global_scope; + + return pim_crp_db_show(vty, scope, !!json); +} + +DEFPY (show_ip_pim_bsr_groups, + show_ip_pim_bsr_groups_cmd, + "show ip pim bsr groups [vrf NAME$vrfname] [json$json]", + SHOW_STR + IP_STR + PIM_STR + "boot-strap router information\n" + "Candidate RP groups\n" + VRF_CMD_HELP_STR + JSON_STR) +{ + int idx = 2; + struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx, false); + + if (!vrf || !vrf->info) + return CMD_WARNING; + + struct pim_instance *pim = vrf->info; + struct bsm_scope *scope = &pim->global_scope; + + return pim_crp_groups_show(vty, scope, !!json); +} + DEFPY (show_ip_pim_statistics, show_ip_pim_statistics_cmd, "show ip 
pim [vrf NAME] statistics [interface WORD$word] [json$json]", @@ -4376,6 +4458,62 @@ DEFPY_ATTR(no_ip_pim_rp_prefix_list, return ret; } +DEFPY (pim_bsr_candidate_bsr, + pim_bsr_candidate_bsr_cmd, + "[no] bsr candidate-bsr [{priority (0-255)|source <address A.B.C.D|interface IFNAME|loopback$loopback|any$any>}]", + NO_STR + BSR_STR + "Make this router a Candidate BSR\n" + "BSR Priority (higher wins)\n" + "BSR Priority (higher wins)\n" + "Specify IP address for BSR operation\n" + "Local address to use\n" + "Local address to use\n" + "Interface to pick address from\n" + "Interface to pick address from\n" + "Pick highest loopback address (default)\n" + "Pick highest address from any interface\n") +{ + return pim_process_bsr_candidate_cmd(vty, FRR_PIM_CAND_BSR_XPATH, no, + false, any, ifname, address_str, + priority_str, NULL); +} + +DEFPY (pim_bsr_candidate_rp, + pim_bsr_candidate_rp_cmd, + "[no] bsr candidate-rp [{priority (0-255)|interval (1-4294967295)|source <address A.B.C.D|interface IFNAME|loopback$loopback|any$any>}]", + NO_STR + BSR_STR + "Make this router a Candidate RP\n" + "RP Priority (lower wins)\n" + "RP Priority (lower wins)\n" + "Advertisement interval (seconds)\n" + "Advertisement interval (seconds)\n" + "Specify IP address for RP operation\n" + "Local address to use\n" + "Local address to use\n" + "Interface to pick address from\n" + "Interface to pick address from\n" + "Pick highest loopback address (default)\n" + "Pick highest address from any interface\n") +{ + return pim_process_bsr_candidate_cmd(vty, FRR_PIM_CAND_RP_XPATH, no, + true, any, ifname, address_str, + priority_str, interval_str); +} + +DEFPY (pim_bsr_candidate_rp_group, + pim_bsr_candidate_rp_group_cmd, + "[no] bsr candidate-rp group A.B.C.D/M", + NO_STR + BSR_STR + "Make this router a Candidate RP\n" + "Configure groups to become candidate RP for (At least one group must be configured)\n" + "Multicast group prefix\n") +{ + return pim_process_bsr_crp_grp_cmd(vty, group_str, no); +} + DEFPY (pim_ssm_prefix_list, pim_ssm_prefix_list_cmd, "ssm prefix-list PREFIXLIST4_NAME$plist", @@ -8550,6 +8688,10 @@ void pim_cmd_init(void) install_element(PIM_NODE, &no_pim_msdp_mesh_group_source_cmd); install_element(PIM_NODE, &no_pim_msdp_mesh_group_cmd); + install_element(PIM_NODE, &pim_bsr_candidate_rp_cmd); + install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd); + install_element(PIM_NODE, &pim_bsr_candidate_bsr_cmd); + install_element(INTERFACE_NODE, &interface_ip_igmp_cmd); install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd); install_element(INTERFACE_NODE, &interface_ip_igmp_join_cmd); @@ -8670,6 +8812,11 @@ void pim_cmd_init(void) install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd); install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd); install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd); + install_element(VIEW_NODE, &show_ip_pim_bsr_rpinfo_cmd); + install_element(VIEW_NODE, &show_ip_pim_bsr_cand_bsr_cmd); + install_element(VIEW_NODE, &show_ip_pim_bsr_cand_rp_cmd); + install_element(VIEW_NODE, &show_ip_pim_bsr_rpdb_cmd); + install_element(VIEW_NODE, &show_ip_pim_bsr_groups_cmd); install_element(VIEW_NODE, &show_ip_pim_statistics_cmd); install_element(VIEW_NODE, &show_ip_msdp_peer_detail_cmd); install_element(VIEW_NODE, &show_ip_msdp_peer_detail_vrf_all_cmd); diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index d1368ff1ff..2f46fcac9b 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -874,7 +874,7 @@ void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object 
*json) if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -926,7 +926,7 @@ void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty) /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } @@ -1180,7 +1180,7 @@ void pim_show_state(struct pim_instance *pim, struct vty *vty, #else table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); #endif } @@ -1413,7 +1413,7 @@ void pim_show_upstream(struct pim_instance *pim, struct vty *vty, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -1490,7 +1490,7 @@ void pim_show_join_desired(struct pim_instance *pim, struct vty *vty, bool uj) /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -1569,7 +1569,7 @@ void pim_show_upstream_rpf(struct pim_instance *pim, struct vty *vty, bool uj) /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -1751,7 +1751,7 @@ void pim_show_join(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -1831,7 +1831,7 @@ void pim_show_jp_agg_list(struct pim_instance *pim, struct vty *vty) /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } @@ -1974,7 +1974,7 @@ void pim_show_membership(struct pim_instance *pim, struct vty *vty, bool uj) /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -2079,7 +2079,7 @@ void pim_show_channel(struct pim_instance *pim, struct vty *vty, bool uj) /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -2262,7 +2262,7 @@ void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag, /* Dump the generated table. */ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } @@ -2734,7 +2734,7 @@ static int pim_print_vty_pnc_cache_walkcb(struct hash_bucket *bucket, void *arg) /* Dump the generated table. 
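The repeated XFREE() retagging in this file, MTYPE_TMP to MTYPE_TMP_TTABLE, follows FRR's rule that a buffer must be freed under the same memory type it was allocated with; the matching re-tag of ttable_dump()'s allocation presumably happens in lib/ elsewhere in this series and is not visible in these hunks. A toy model of that per-type accounting, with assumed names, not FRR's actual allocator:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* toy per-tag counters, loosely modelled on FRR's MTYPE statistics */
enum demo_mtype { DEMO_MTYPE_TMP, DEMO_MTYPE_TMP_TTABLE, DEMO_MTYPE_MAX };
static long demo_alloc_count[DEMO_MTYPE_MAX];

static void *demo_malloc(enum demo_mtype mt, size_t size)
{
        demo_alloc_count[mt]++;
        return malloc(size);
}

static void demo_free(enum demo_mtype mt, void *ptr)
{
        /* freeing under the wrong tag would corrupt the per-type counters,
         * which is exactly what the retagged XFREE() calls keep consistent */
        assert(demo_alloc_count[mt] > 0);
        demo_alloc_count[mt]--;
        free(ptr);
}

int main(void)
{
        char *table = demo_malloc(DEMO_MTYPE_TMP_TTABLE, 128);

        demo_free(DEMO_MTYPE_TMP_TTABLE, table); /* must match the alloc tag */

        printf("TMP=%ld TMP_TTABLE=%ld\n", demo_alloc_count[DEMO_MTYPE_TMP],
               demo_alloc_count[DEMO_MTYPE_TMP_TTABLE]);
        return 0;
}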
*/ table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); return CMD_SUCCESS; @@ -3214,7 +3214,7 @@ void pim_show_neighbors(struct pim_instance *pim, struct vty *vty, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -3389,6 +3389,55 @@ int pim_process_no_unicast_bsm_cmd(struct vty *vty) FRR_PIM_AF_XPATH_VAL); } +/* helper for bsr/rp candidate commands*/ +int pim_process_bsr_candidate_cmd(struct vty *vty, const char *cand_str, + bool no, bool is_rp, bool any, + const char *ifname, const char *addr, + const char *prio, const char *interval) +{ + if (no) + nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); + else { + nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL); + + if (any) + nb_cli_enqueue_change(vty, "./if-any", NB_OP_CREATE, + NULL); + else if (ifname) + nb_cli_enqueue_change(vty, "./interface", NB_OP_CREATE, + ifname); + else if (addr) + nb_cli_enqueue_change(vty, "./address", NB_OP_CREATE, + addr); + else + nb_cli_enqueue_change(vty, "./if-loopback", + NB_OP_CREATE, NULL); + + if (prio) + nb_cli_enqueue_change(vty, + (is_rp ? "./rp-priority" + : "./bsr-priority"), + NB_OP_MODIFY, prio); + + /* only valid for rp candidate case*/ + if (is_rp && interval) + nb_cli_enqueue_change(vty, "./advertisement-interval", + NB_OP_MODIFY, interval); + } + + return nb_cli_apply_changes(vty, "%s", cand_str); +} + +int pim_process_bsr_crp_grp_cmd(struct vty *vty, const char *grp, bool no) +{ + if (no) + nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, grp); + else + nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, grp); + + return nb_cli_apply_changes(vty, "%s/group-list", FRR_PIM_CAND_RP_XPATH); +} + static void show_scan_oil_stats(struct pim_instance *pim, struct vty *vty, time_t now) { @@ -3500,7 +3549,7 @@ void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -3939,7 +3988,7 @@ void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -4022,7 +4071,7 @@ void show_mroute_count(struct pim_instance *pim, struct vty *vty, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } } @@ -4158,6 +4207,27 @@ struct vrf *pim_cmd_lookup(struct vty *vty, const char *name) return vrf; } +struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[], + const int argc, int *idx, bool uj) +{ + struct vrf *vrf; + + if (argv_find(argv, argc, "NAME", idx)) + vrf = vrf_lookup_by_name(argv[*idx]->arg); + else + vrf = vrf_lookup_by_id(VRF_DEFAULT); + + if (!vrf) { + if (uj) + vty_json_empty(vty, NULL); + else + vty_out(vty, "Specified VRF: %s does not exist\n", + argv[*idx]->arg); + } + + return vrf; +} + void clear_mroute(struct pim_instance *pim) { struct pim_upstream *up; @@ -5188,6 +5258,12 @@ void pim_show_bsr(struct pim_instance *pim, struct vty *vty, bool uj) case ACCEPT_PREFERRED: strlcpy(bsr_state, "ACCEPT_PREFERRED", sizeof(bsr_state)); break; + case BSR_PENDING: + strlcpy(bsr_state, "BSR_PENDING", sizeof(bsr_state)); + break; + case BSR_ELECTED: + strlcpy(bsr_state, 
"BSR_ELECTED", sizeof(bsr_state)); + break; default: strlcpy(bsr_state, "", sizeof(bsr_state)); } @@ -5207,7 +5283,7 @@ void pim_show_bsr(struct pim_instance *pim, struct vty *vty, bool uj) } else { - vty_out(vty, "PIMv2 Bootstrap information\n"); + vty_out(vty, "PIMv2 Bootstrap Router information\n"); vty_out(vty, "Current preferred BSR address: %pPA\n", &pim->global_scope.current_bsr); vty_out(vty, @@ -5325,7 +5401,7 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); tt = NULL; } @@ -5379,7 +5455,7 @@ static void pim_show_group_rp_mappings_info(struct pim_instance *pim, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } if (!bsm_rpinfos_count(bsgrp->partial_bsrp_list) && !uj) @@ -5416,6 +5492,98 @@ int pim_show_group_rp_mappings_info_helper(const char *vrf, struct vty *vty, return CMD_SUCCESS; } +int pim_show_bsr_cand_rp(const struct vrf *vrf, struct vty *vty, bool uj) +{ + struct pim_instance *pim; + struct bsm_scope *scope; + json_object *jsondata = NULL; + + if (!vrf || !vrf->info) + return CMD_WARNING; + + pim = (struct pim_instance *)vrf->info; + scope = &pim->global_scope; + + if (!scope->cand_rp_addrsel.run) { + if (!!uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, + "This router is not currently operating as Candidate RP\n"); + return CMD_SUCCESS; + } + + if (!!uj) { + jsondata = json_object_new_object(); + json_object_string_addf(jsondata, "address", "%pPA", + &scope->cand_rp_addrsel.run_addr); + json_object_int_add(jsondata, "priority", scope->cand_rp_prio); + json_object_int_add(jsondata, "nextAdvertisementMsec", + event_timer_remain_msec( + scope->cand_rp_adv_timer)); + + vty_json(vty, jsondata); + return CMD_SUCCESS; + } + + vty_out(vty, "Candidate-RP\nAddress: %pPA\nPriority: %u\n\n", + &scope->cand_rp_addrsel.run_addr, scope->cand_rp_prio); + vty_out(vty, "Next adv.: %lu msec\n", + event_timer_remain_msec(scope->cand_rp_adv_timer)); + + + return CMD_SUCCESS; +} + +int pim_show_bsr_cand_bsr(const struct vrf *vrf, struct vty *vty, bool uj) +{ + struct pim_instance *pim; + struct bsm_scope *scope; + json_object *jsondata = NULL; + + if (!vrf || !vrf->info) + return CMD_WARNING; + + pim = (struct pim_instance *)vrf->info; + scope = &pim->global_scope; + + if (!scope->bsr_addrsel.cfg_enable) { + if (!!uj) + vty_out(vty, "{}\n"); + else + vty_out(vty, + "This router is not currently operating as Candidate BSR\n"); + return CMD_SUCCESS; + } + + if (uj) { + char buf[INET_ADDRSTRLEN]; + + jsondata = json_object_new_object(); + inet_ntop(AF_INET, &scope->bsr_addrsel.run_addr, buf, + sizeof(buf)); + json_object_string_add(jsondata, "address", buf); + json_object_int_add(jsondata, "priority", scope->cand_bsr_prio); + json_object_boolean_add(jsondata, "elected", + pim->global_scope.state == BSR_ELECTED); + + vty_json(vty, jsondata); + return CMD_SUCCESS; + } + + vty_out(vty, + "Candidate-BSR\nAddress: %pPA\nPriority: %u\nElected: %s\n", + &scope->bsr_addrsel.run_addr, scope->cand_bsr_prio, + (pim->global_scope.state == BSR_ELECTED) ? 
" Yes" : " No"); + + if (!pim_addr_cmp(scope->bsr_addrsel.run_addr, PIMADDR_ANY)) + vty_out(vty, + "\nThis router is not currently operating as Candidate BSR\n" + "Configure a BSR address to enable this feature\n\n"); + + return CMD_SUCCESS; +} + /* Display the bsm database details */ static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj) { diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h index da2e44be58..339074b204 100644 --- a/pimd/pim_cmd_common.h +++ b/pimd/pim_cmd_common.h @@ -7,6 +7,8 @@ #ifndef PIM_CMD_COMMON_H #define PIM_CMD_COMMON_H +#define BSR_STR "Bootstrap Router configuration\n" + struct pim_upstream; struct pim_instance; @@ -53,6 +55,13 @@ int pim_process_bsm_cmd(struct vty *vty); int pim_process_no_bsm_cmd(struct vty *vty); int pim_process_unicast_bsm_cmd(struct vty *vty); int pim_process_no_unicast_bsm_cmd(struct vty *vty); + +int pim_process_bsr_candidate_cmd(struct vty *vty, const char *cand_str, + bool no, bool is_rp, bool any, + const char *ifname, const char *addr, + const char *prio, const char *interval); +int pim_process_bsr_crp_grp_cmd(struct vty *vty, const char *grp, bool no); + void json_object_pim_upstream_add(json_object *json, struct pim_upstream *up); void pim_show_rpf(struct pim_instance *pim, struct vty *vty, json_object *json); void pim_show_neighbors_secondary(struct pim_instance *pim, struct vty *vty); @@ -131,6 +140,8 @@ void show_mroute_summary(struct pim_instance *pim, struct vty *vty, json_object *json); int clear_ip_mroute_count_command(struct vty *vty, const char *name); struct vrf *pim_cmd_lookup(struct vty *vty, const char *name); +struct vrf *pim_cmd_lookup_vrf(struct vty *vty, struct cmd_token *argv[], + const int argc, int *idx, bool uj); void clear_mroute(struct pim_instance *pim); void clear_pim_statistics(struct pim_instance *pim); int clear_pim_interface_traffic(const char *vrf, struct vty *vty); @@ -182,6 +193,8 @@ int pim_show_interface_traffic_helper(const char *vrf, const char *if_name, void clear_pim_interfaces(struct pim_instance *pim); void pim_show_bsr(struct pim_instance *pim, struct vty *vty, bool uj); int pim_show_bsr_helper(const char *vrf, struct vty *vty, bool uj); +int pim_show_bsr_cand_bsr(const struct vrf *vrf, struct vty *vty, bool uj); +int pim_show_bsr_cand_rp(const struct vrf *vrf, struct vty *vty, bool uj); int pim_router_config_write(struct vty *vty); /* diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index 45a2435ae5..125d35ac46 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -1844,6 +1844,8 @@ static int pim_ifp_up(struct interface *ifp) } } } + + pim_cand_addrs_changed(); return 0; } @@ -1880,6 +1882,7 @@ static int pim_ifp_down(struct interface *ifp) pim_ifstat_reset(ifp); } + pim_cand_addrs_changed(); return 0; } diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index a9eec9a9d2..9a697c9209 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -253,6 +253,7 @@ void pim_vrf_terminate(void) if (!pim) continue; + pim_crp_db_clear(&pim->global_scope); pim_ssmpingd_destroy(pim); pim_instance_terminate(pim); diff --git a/pimd/pim_main.c b/pimd/pim_main.c index 8f2ce0bed3..f88aca719e 100644 --- a/pimd/pim_main.c +++ b/pimd/pim_main.c @@ -68,6 +68,7 @@ static const struct frr_yang_module_info *const pimd_yang_modules[] = { &frr_routing_info, &frr_pim_info, &frr_pim_rp_info, + &frr_pim_candidate_info, &frr_gmp_info, }; diff --git a/pimd/pim_msg.h b/pimd/pim_msg.h index 56923b7ec1..1f916af881 100644 --- a/pimd/pim_msg.h +++ b/pimd/pim_msg.h @@ -148,6 +148,7 @@ 
struct pim_encoded_source_ipv6 { typedef struct pim_encoded_ipv4_unicast pim_encoded_unicast; typedef struct pim_encoded_group_ipv4 pim_encoded_group; typedef struct pim_encoded_source_ipv4 pim_encoded_source; +#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV4 typedef struct ip ipv_hdr; #define IPV_SRC(ip_hdr) ((ip_hdr))->ip_src #define IPV_DST(ip_hdr) ((ip_hdr))->ip_dst @@ -156,6 +157,7 @@ typedef struct ip ipv_hdr; typedef struct pim_encoded_ipv6_unicast pim_encoded_unicast; typedef struct pim_encoded_group_ipv6 pim_encoded_group; typedef struct pim_encoded_source_ipv6 pim_encoded_source; +#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV6 typedef struct ip6_hdr ipv_hdr; #define IPV_SRC(ip_hdr) ((ip_hdr))->ip6_src #define IPV_DST(ip_hdr) ((ip_hdr))->ip6_dst diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index c154c18afa..0e8aa48f9d 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -385,6 +385,112 @@ const struct frr_yang_module_info frr_pim_rp_info = { } }; +const struct frr_yang_module_info frr_pim_candidate_info = { + .name = "frr-pim-candidate", + .nodes = { + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/bsr-priority", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_priority_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/address", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/interface", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/if-loopback", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-bsr/if-any", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy, + } + }, + + /* Candidate-RP */ + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/rp-priority", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_priority_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/advertisement-interval", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_adv_interval_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/group-list", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/address", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/interface", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/if-loopback", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-candidate:candidate-rp/if-any", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy, + } + }, + { + .xpath = NULL, + }, + } +}; + /* clang-format off */ const struct frr_yang_module_info frr_gmp_info = { .name = "frr-gmp", diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index fc4c11cea9..55883ad29a 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -9,6 +9,7 @@ extern const struct frr_yang_module_info frr_pim_info; extern const struct frr_yang_module_info frr_pim_rp_info; +extern const struct frr_yang_module_info frr_pim_candidate_info; extern const struct frr_yang_module_info frr_gmp_info; /* frr-pim prototypes*/ @@ -159,6 +160,40 @@ int 
routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_static_rp_rp_list_prefix_list_destroy( struct nb_cb_destroy_args *args); +/* frr-cand-bsr */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_destroy( + struct nb_cb_destroy_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_priority_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy( + struct nb_cb_destroy_args *args); + +/* frr-candidate */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_destroy( + struct nb_cb_destroy_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_priority_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_adv_interval_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_destroy( + struct nb_cb_destroy_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy( + struct nb_cb_destroy_args *args); + /* frr-gmp prototypes*/ int lib_interface_gmp_address_family_create( struct nb_cb_create_args *args); @@ -204,6 +239,9 @@ int routing_control_plane_protocols_name_validate( #define FRR_PIM_AF_XPATH_VAL "frr-routing:ipv6" #endif +#define FRR_PIM_CAND_RP_XPATH "./frr-pim-candidate:candidate-rp" +#define FRR_PIM_CAND_BSR_XPATH "./frr-pim-candidate:candidate-bsr" + #define FRR_PIM_VRF_XPATH \ "/frr-routing:routing/control-plane-protocols/" \ "control-plane-protocol[type='%s'][name='%s'][vrf='%s']/" \ diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 037bfea786..0c7481c27a 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -2058,6 +2058,10 @@ int lib_interface_pim_address_family_bsm_modify(struct nb_cb_modify_args *args) case NB_EV_APPLY: ifp = nb_running_get_entry(args->dnode, NULL, true); pim_ifp = ifp->info; + if (!pim_ifp) { + pim_ifp = pim_if_new(ifp, false, true, false, false); + ifp->info = pim_ifp; + } pim_ifp->bsm_enable = yang_dnode_get_bool(args->dnode, NULL); break; @@ -2083,6 +2087,10 @@ int lib_interface_pim_address_family_unicast_bsm_modify( case NB_EV_APPLY: ifp = 
nb_running_get_entry(args->dnode, NULL, true); pim_ifp = ifp->info; + if (!pim_ifp) { + pim_ifp = pim_if_new(ifp, false, true, false, false); + ifp->info = pim_ifp; + } pim_ifp->ucast_bsm_accept = yang_dnode_get_bool(args->dnode, NULL); @@ -2671,6 +2679,391 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp return NB_OK; } +static void yang_addrsel(struct cand_addrsel *addrsel, + const struct lyd_node *node) +{ + memset(addrsel->cfg_ifname, 0, sizeof(addrsel->cfg_ifname)); + addrsel->cfg_addr = PIMADDR_ANY; + + if (yang_dnode_exists(node, "if-any")) { + addrsel->cfg_mode = CAND_ADDR_ANY; + } else if (yang_dnode_exists(node, "address")) { + addrsel->cfg_mode = CAND_ADDR_EXPLICIT; + yang_dnode_get_pimaddr(&addrsel->cfg_addr, node, "address"); + } else if (yang_dnode_exists(node, "interface")) { + addrsel->cfg_mode = CAND_ADDR_IFACE; + strlcpy(addrsel->cfg_ifname, + yang_dnode_get_string(node, "interface"), + sizeof(addrsel->cfg_ifname)); + } else if (yang_dnode_exists(node, "if-loopback")) { + addrsel->cfg_mode = CAND_ADDR_LO; + } +} + +static int candidate_bsr_addrsel(struct bsm_scope *scope, + const struct lyd_node *cand_bsr_node) +{ + yang_addrsel(&scope->bsr_addrsel, cand_bsr_node); + pim_cand_bsr_apply(scope); + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create( + struct nb_cb_create_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + scope->bsr_addrsel.cfg_enable = true; + scope->cand_bsr_prio = yang_dnode_get_uint8(args->dnode, + "bsr-priority"); + + candidate_bsr_addrsel(scope, args->dnode); + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_destroy( + struct nb_cb_destroy_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + scope->bsr_addrsel.cfg_enable = false; + + pim_cand_bsr_apply(scope); + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_priority_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + scope->cand_bsr_prio = yang_dnode_get_uint8(args->dnode, NULL); + + /* FIXME: force prio update */ + candidate_bsr_addrsel(scope, args->dnode); + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_create( + struct nb_cb_create_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + const struct lyd_node *cand_bsr_node; + + cand_bsr_node = yang_dnode_get_parent(args->dnode, "candidate-bsr"); + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case 
NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + return candidate_bsr_addrsel(scope, cand_bsr_node); + } + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + const struct lyd_node *cand_bsr_node; + + cand_bsr_node = yang_dnode_get_parent(args->dnode, "candidate-bsr"); + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + return candidate_bsr_addrsel(scope, cand_bsr_node); + } + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_addrsel_destroy( + struct nb_cb_destroy_args *args) +{ + /* nothing to do here, we'll get a CREATE for something else */ + return NB_OK; +} + +static int candidate_rp_addrsel(struct bsm_scope *scope, + const struct lyd_node *cand_rp_node) +{ + yang_addrsel(&scope->cand_rp_addrsel, cand_rp_node); + pim_cand_rp_apply(scope); + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_create( + struct nb_cb_create_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + scope->cand_rp_addrsel.cfg_enable = true; + scope->cand_rp_prio = yang_dnode_get_uint8(args->dnode, + "rp-priority"); + scope->cand_rp_interval = + yang_dnode_get_uint32(args->dnode, + "advertisement-interval"); + + candidate_rp_addrsel(scope, args->dnode); + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_destroy( + struct nb_cb_destroy_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + scope->cand_rp_addrsel.cfg_enable = false; + + pim_cand_rp_apply(scope); + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_priority_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + scope->cand_rp_prio = yang_dnode_get_uint8(args->dnode, NULL); + + pim_cand_rp_trigger(scope); + break; + } + + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_adv_interval_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = 
vrf->info; + scope = &pim->global_scope; + + scope->cand_rp_interval = yang_dnode_get_uint32(args->dnode, + NULL); + + pim_cand_rp_trigger(scope); + break; + } + + return NB_OK; +} + +#if PIM_IPV == 4 +#define yang_dnode_get_pim_p yang_dnode_get_ipv4p +#else +#define yang_dnode_get_pim_p yang_dnode_get_ipv6p +#endif + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_create( + struct nb_cb_create_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + prefix_pim p; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + yang_dnode_get_pim_p(&p, args->dnode, "."); + pim_cand_rp_grp_add(scope, &p); + break; + } + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_group_list_destroy( + struct nb_cb_destroy_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + prefix_pim p; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + yang_dnode_get_pim_p(&p, args->dnode, "."); + pim_cand_rp_grp_del(scope, &p); + break; + } + return NB_OK; +} + +static int candidate_rp_addrsel_common(enum nb_event event, + const struct lyd_node *dnode) +{ + struct vrf *vrf; + struct pim_instance *pim; + struct bsm_scope *scope; + + dnode = lyd_parent(dnode); + + switch (event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(dnode, NULL, true); + pim = vrf->info; + scope = &pim->global_scope; + + candidate_rp_addrsel(scope, dnode); + break; + } + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_create( + struct nb_cb_create_args *args) +{ + return candidate_rp_addrsel_common(args->event, args->dnode); +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_modify( + struct nb_cb_modify_args *args) +{ + return candidate_rp_addrsel_common(args->event, args->dnode); +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_rp_addrsel_destroy( + struct nb_cb_destroy_args *args) +{ + /* nothing to do here - we'll get a create or modify event too */ + return NB_OK; +} + /* * XPath: /frr-interface:lib/interface/frr-gmp:gmp/address-family */ diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c index 57dcff3b47..030b933e09 100644 --- a/pimd/pim_nht.c +++ b/pimd/pim_nht.c @@ -343,7 +343,8 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, if (!nbr) continue; - return znh->ifindex == src_ifp->ifindex; + return znh->ifindex == src_ifp->ifindex && + (!pim_addr_cmp(znh->nexthop_addr, src_ip)); } return false; } @@ -404,13 +405,12 @@ bool pim_nht_bsr_rpf_check(struct pim_instance *pim, pim_addr bsr_addr, return true; /* MRIB (IGP) may be pointing at a router where PIM is down */ - nbr = pim_neighbor_find(ifp, nhaddr, true); - if (!nbr) continue; - return nh->ifindex == src_ifp->ifindex; + return nh->ifindex == src_ifp->ifindex && + (!pim_addr_cmp(nhaddr, src_ip)); } return false; } diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c index 6a7e8924f2..a41bbacea7 100644 --- 
a/pimd/pim_pim.c +++ b/pimd/pim_pim.c @@ -13,7 +13,6 @@ #include "network.h" #include "pimd.h" -#include "pim_instance.h" #include "pim_pim.h" #include "pim_time.h" #include "pim_iface.h" @@ -139,7 +138,7 @@ static bool pim_pkt_dst_addr_ok(enum pim_msg_type type, pim_addr addr) } int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len, - pim_sgaddr sg) + pim_sgaddr sg, bool is_mcast) { struct iovec iov[2], *iovp = iov; #if PIM_IPV == 4 @@ -274,6 +273,22 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len, return -1; } + if (!is_mcast) { + if (header->type == PIM_MSG_TYPE_CANDIDATE) { + if (PIM_DEBUG_PIM_PACKETS) + zlog_debug("%s %s: Candidate RP PIM message from %pPA on %s", + __FILE__, __func__, &sg.src, + ifp->name); + + return pim_crp_process(ifp, &sg, pim_msg, pim_msg_len); + } + + if (PIM_DEBUG_PIM_PACKETS) + zlog_debug( + "ignoring link traffic on BSR unicast socket"); + return -1; + } + switch (header->type) { case PIM_MSG_TYPE_HELLO: return pim_hello_recv(ifp, sg.src, pim_msg + PIM_MSG_HEADER_LEN, @@ -322,6 +337,13 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len, return pim_bsm_process(ifp, &sg, pim_msg, pim_msg_len, no_fwd); break; + case PIM_MSG_TYPE_CANDIDATE: + /* return pim_crp_process(ifp, &sg, pim_msg, pim_msg_len); */ + if (PIM_DEBUG_PIM_PACKETS) + zlog_debug( + "ignoring Candidate-RP packet on multicast socket"); + return 0; + default: if (PIM_DEBUG_PIM_PACKETS) { zlog_debug( @@ -332,13 +354,9 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len, } } -static void pim_sock_read_on(struct interface *ifp); - -static void pim_sock_read(struct event *t) +int pim_sock_read_helper(int fd, struct pim_instance *pim, bool is_mcast) { - struct interface *ifp, *orig_ifp; - struct pim_interface *pim_ifp; - int fd; + struct interface *ifp = NULL; struct sockaddr_storage from; struct sockaddr_storage to; socklen_t fromlen = sizeof(from); @@ -346,16 +364,9 @@ static void pim_sock_read(struct event *t) uint8_t buf[PIM_PIM_BUFSIZE_READ]; int len; ifindex_t ifindex = -1; - int result = -1; /* defaults to bad */ - static long long count = 0; - int cont = 1; - - orig_ifp = ifp = EVENT_ARG(t); - fd = EVENT_FD(t); - - pim_ifp = ifp->info; + int i; - while (cont) { + for (i = 0; i < router->packet_process; i++) { pim_sgaddr sg; len = pim_socket_recvfromto(fd, buf, sizeof(buf), &from, @@ -369,7 +380,7 @@ static void pim_sock_read(struct event *t) if (PIM_DEBUG_PIM_PACKETS) zlog_debug("Received errno: %d %s", errno, safe_strerror(errno)); - goto done; + return -1; } /* @@ -378,14 +389,21 @@ static void pim_sock_read(struct event *t) * the right ifindex, so just use it. We know * it's the right interface because we bind to it */ - ifp = if_lookup_by_index(ifindex, pim_ifp->pim->vrf->vrf_id); - if (!ifp || !ifp->info) { + if (pim != NULL) + ifp = if_lookup_by_index(ifindex, pim->vrf->vrf_id); + + /* + * unicast BSM pkts (C-RP) may arrive on non pim interfaces + * mcast pkts are only expected in pim interfaces + */ + if (!ifp || (is_mcast && !ifp->info)) { if (PIM_DEBUG_PIM_PACKETS) - zlog_debug( - "%s: Received incoming pim packet on interface(%s:%d) not yet configured for pim", - __func__, ifp ? ifp->name : "Unknown", - ifindex); - goto done; + zlog_debug("%s: Received incoming pim packet on interface(%s:%d)%s", + __func__, + ifp ? ifp->name : "Unknown", ifindex, + is_mcast ? 
" not yet configured for pim" + : ""); + return -1; } #if PIM_IPV == 4 sg.src = ((struct sockaddr_in *)&from)->sin_addr; @@ -395,27 +413,34 @@ static void pim_sock_read(struct event *t) sg.grp = ((struct sockaddr_in6 *)&to)->sin6_addr; #endif - int fail = pim_pim_packet(ifp, buf, len, sg); + int fail = pim_pim_packet(ifp, buf, len, sg, is_mcast); if (fail) { if (PIM_DEBUG_PIM_PACKETS) zlog_debug("%s: pim_pim_packet() return=%d", __func__, fail); - goto done; + return -1; } - - count++; - if (count % router->packet_process == 0) - cont = 0; } + return 0; +} + +static void pim_sock_read_on(struct interface *ifp); - result = 0; /* good */ +static void pim_sock_read(struct event *t) +{ + struct interface *ifp; + struct pim_interface *pim_ifp; + int fd; + + ifp = EVENT_ARG(t); + fd = EVENT_FD(t); -done: - pim_sock_read_on(orig_ifp); + pim_ifp = ifp->info; - if (result) { + if (pim_sock_read_helper(fd, pim_ifp->pim, true) == 0) ++pim_ifp->pim_ifstat_hello_recvfail; - } + + pim_sock_read_on(ifp); } static void pim_sock_read_on(struct interface *ifp) diff --git a/pimd/pim_pim.h b/pimd/pim_pim.h index 35e693013a..39b27bceda 100644 --- a/pimd/pim_pim.h +++ b/pimd/pim_pim.h @@ -10,6 +10,7 @@ #include <zebra.h> #include "if.h" +#include "pim_instance.h" #define PIM_PIM_BUFSIZE_READ (20000) #define PIM_PIM_BUFSIZE_WRITE (20000) @@ -42,10 +43,12 @@ void pim_hello_restart_now(struct interface *ifp); void pim_hello_restart_triggered(struct interface *ifp); int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len, - pim_sgaddr sg); + pim_sgaddr sg, bool is_mcast); int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg, int pim_msg_size, struct interface *ifp); int pim_hello_send(struct interface *ifp, uint16_t holdtime); + +int pim_sock_read_helper(int fd, struct pim_instance *pim, bool is_mcast); #endif /* PIM_PIM_H */ diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c index 0f8940bb16..a2ddc82164 100644 --- a/pimd/pim_rp.c +++ b/pimd/pim_rp.c @@ -1272,7 +1272,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct prefix *range, if (!json) { table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } else { if (prev_rp_info && json_rp_rows) diff --git a/pimd/pim_sock.h b/pimd/pim_sock.h index 04ab864744..1cf01b31d6 100644 --- a/pimd/pim_sock.h +++ b/pimd/pim_sock.h @@ -26,6 +26,7 @@ struct pim_instance; int pim_socket_bind(int fd, struct interface *ifp); void pim_socket_ip_hdr(int fd); +int pim_setsockopt_packetinfo(int fd); int pim_socket_raw(int protocol); int pim_socket_mcast(int protocol, pim_addr ifaddr, struct interface *ifp, uint8_t loop); diff --git a/pimd/pim_tlv.c b/pimd/pim_tlv.c index c463fa227c..dd1bf2c059 100644 --- a/pimd/pim_tlv.c +++ b/pimd/pim_tlv.c @@ -19,12 +19,6 @@ #include "pim_iface.h" #include "pim_addr.h" -#if PIM_IPV == 4 -#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV4 -#else -#define PIM_MSG_ADDRESS_FAMILY PIM_MSG_ADDRESS_FAMILY_IPV6 -#endif - uint8_t *pim_tlv_append_uint16(uint8_t *buf, const uint8_t *buf_pastend, uint16_t option_type, uint16_t option_value) { diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index 9cf4bb3e87..e5324dd873 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -182,6 +182,7 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty) } writes += pim_rp_config_write(pim, vty); + writes += pim_cand_config_write(pim, vty); if (pim->vrf->vrf_id == VRF_DEFAULT) { if (router->register_suppress_time diff --git 
a/pimd/pim_zebra.c b/pimd/pim_zebra.c index e25eafc28e..ce4d85a2c8 100644 --- a/pimd/pim_zebra.c +++ b/pimd/pim_zebra.c @@ -157,6 +157,8 @@ static int pim_zebra_if_address_add(ZAPI_CALLBACK_ARGS) pim_if_addr_add_all(ifp); } } + + pim_cand_addrs_changed(); return 0; } @@ -205,6 +207,8 @@ static int pim_zebra_if_address_del(ZAPI_CALLBACK_ARGS) } connected_free(&c); + + pim_cand_addrs_changed(); return 0; } diff --git a/pimd/subdir.am b/pimd/subdir.am index 1e787a3525..48f1e3b724 100644 --- a/pimd/subdir.am +++ b/pimd/subdir.am @@ -17,6 +17,7 @@ pim_common = \ pimd/pim_assert.c \ pimd/pim_bfd.c \ pimd/pim_bsm.c \ + pimd/pim_bsr_rpdb.c \ pimd/pim_cmd_common.c \ pimd/pim_errors.c \ pimd/pim_hello.c \ @@ -76,6 +77,7 @@ pimd_pimd_SOURCES = \ nodist_pimd_pimd_SOURCES = \ yang/frr-pim.yang.c \ yang/frr-pim-rp.yang.c \ + yang/frr-pim-candidate.yang.c \ yang/frr-gmp.yang.c \ # end @@ -89,6 +91,7 @@ pimd_pim6d_SOURCES = \ nodist_pimd_pim6d_SOURCES = \ yang/frr-pim.yang.c \ yang/frr-pim-rp.yang.c \ + yang/frr-pim-candidate.yang.c \ yang/frr-gmp.yang.c \ # end @@ -160,12 +163,12 @@ clippy_scan += \ # end pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4 -pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP) +pimd_pimd_LDADD = lib/libfrr.la $(LIBCAP) -lm if PIM6D sbin_PROGRAMS += pimd/pim6d pimd_pim6d_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=6 -pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP) +pimd_pim6d_LDADD = lib/libfrr.la $(LIBCAP) -lm endif pimd_test_igmpv3_join_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4 diff --git a/tests/isisd/test_fuzz_isis_tlv_tests.h.gz b/tests/isisd/test_fuzz_isis_tlv_tests.h.gz Binary files differindex 195a7dd8c1..05e9f723a1 100644 --- a/tests/isisd/test_fuzz_isis_tlv_tests.h.gz +++ b/tests/isisd/test_fuzz_isis_tlv_tests.h.gz diff --git a/tests/lib/test_ttable.c b/tests/lib/test_ttable.c index 562ddf9d66..7ac0e3516b 100644 --- a/tests/lib/test_ttable.c +++ b/tests/lib/test_ttable.c @@ -20,7 +20,7 @@ int main(int argc, char **argv) assert(tt->nrows == 1); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* add new row with 1 column, assert that it is not added */ assert(ttable_add_row(tt, "%s", "Garbage") == NULL); @@ -28,7 +28,7 @@ int main(int argc, char **argv) assert(tt->nrows == 1); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* add new row, assert that it is added */ assert(ttable_add_row(tt, "%s|%s|%s", "a", "b", "c")); @@ -36,7 +36,7 @@ int main(int argc, char **argv) assert(tt->nrows == 2); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* add empty row, assert that it is added */ assert(ttable_add_row(tt, "||")); @@ -44,7 +44,7 @@ int main(int argc, char **argv) assert(tt->nrows == 3); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* delete 1st row, assert that it is removed */ ttable_del_row(tt, 0); @@ -52,7 +52,7 @@ int main(int argc, char **argv) assert(tt->nrows == 2); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* delete last row, assert that it is removed */ ttable_del_row(tt, 0); @@ -60,7 +60,7 @@ int main(int argc, char **argv) assert(tt->nrows == 1); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* delete the remaining row, check dumping an 
empty table */ ttable_del_row(tt, 0); @@ -68,7 +68,7 @@ int main(int argc, char **argv) assert(tt->nrows == 0); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* add new row */ ttable_add_row(tt, "%s|%s||%s|%9d", "slick", "black", "triple", 1337); @@ -76,7 +76,7 @@ int main(int argc, char **argv) assert(tt->nrows == 1); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* add bigger row */ ttable_add_row(tt, "%s|%s||%s|%s", @@ -86,7 +86,7 @@ int main(int argc, char **argv) assert(tt->nrows == 2); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* insert new row at beginning */ ttable_insert_row(tt, 0, "%s|%s||%d|%lf", "converting", "vegetarians", @@ -95,7 +95,7 @@ int main(int argc, char **argv) assert(tt->nrows == 3); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* insert new row at end */ ttable_insert_row(tt, tt->nrows - 1, "%s|%s||%d|%ld", "converting", @@ -104,7 +104,7 @@ int main(int argc, char **argv) assert(tt->nrows == 4); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* insert new row at middle */ ttable_insert_row(tt, 1, "%s|%s||%s|%ld", "she", "pioneer", "aki", 1l); @@ -112,7 +112,7 @@ int main(int argc, char **argv) assert(tt->nrows == 5); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* set alignment */ ttable_align(tt, 0, 1, 2, 2, LEFT); @@ -120,14 +120,14 @@ int main(int argc, char **argv) assert(tt->nrows == 5); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_align(tt, 0, 1, 5, 1, RIGHT); assert(tt->ncols == 5); assert(tt->nrows == 5); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* set padding */ ttable_pad(tt, 0, 1, 1, 1, RIGHT, 2); @@ -135,14 +135,14 @@ int main(int argc, char **argv) assert(tt->nrows == 5); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_pad(tt, 0, 0, 5, 4, LEFT, 2); assert(tt->ncols == 5); assert(tt->nrows == 5); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* restyle */ tt->style.cell.border.bottom_on = false; @@ -156,13 +156,13 @@ int main(int argc, char **argv) ttable_rowseps(tt, 1, TOP, true, '-'); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* column separators for leftmost column */ ttable_colseps(tt, 0, RIGHT, true, '|'); table = ttable_dump(tt, "\n"); fprintf(stdout, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); /* delete table */ ttable_del(tt); diff --git a/tests/topotests/all_protocol_startup/r1/ip_nht.ref b/tests/topotests/all_protocol_startup/r1/ip_nht.ref index 3592f29b54..2b4363b69e 100644 --- a/tests/topotests/all_protocol_startup/r1/ip_nht.ref +++ b/tests/topotests/all_protocol_startup/r1/ip_nht.ref @@ -1,35 +1,35 @@ VRF default: Resolve via default: on 1.1.1.1 - resolved via static + resolved via static, prefix 1.1.1.1/32 is directly connected, r1-eth1 
(vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.2 - resolved via static + resolved via static, prefix 1.1.1.2/32 is directly connected, r1-eth2 (vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.3 - resolved via static + resolved via static, prefix 1.1.1.3/32 is directly connected, r1-eth3 (vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.4 - resolved via static + resolved via static, prefix 1.1.1.4/32 is directly connected, r1-eth4 (vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.5 - resolved via static + resolved via static, prefix 1.1.1.5/32 is directly connected, r1-eth5 (vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.6 - resolved via static + resolved via static, prefix 1.1.1.6/32 is directly connected, r1-eth6 (vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.7 - resolved via static + resolved via static, prefix 1.1.1.7/32 is directly connected, r1-eth7 (vrf default), weight 1 Client list: pbr(fd XX) 1.1.1.8 - resolved via static + resolved via static, prefix 1.1.1.8/32 is directly connected, r1-eth8 (vrf default), weight 1 Client list: pbr(fd XX) 2.2.2.1 @@ -54,19 +54,19 @@ VRF default: unresolved Client list: pbr(fd XX) 192.168.0.2 - resolved via connected + resolved via connected, prefix 192.168.0.0/24 is directly connected, r1-eth0 (vrf default), weight 1 Client list: static(fd XX) 192.168.0.4 - resolved via connected + resolved via connected, prefix 192.168.0.0/24 is directly connected, r1-eth0 (vrf default), weight 1 Client list: static(fd XX) 192.168.7.10 - resolved via connected + resolved via connected, prefix 192.168.7.0/26 is directly connected, r1-eth7 (vrf default), weight 1 Client list: bgp(fd XX) 192.168.7.20(Connected) - resolved via connected + resolved via connected, prefix 192.168.7.0/26 is directly connected, r1-eth7 (vrf default), weight 1 Client list: bgp(fd XX) 192.168.161.4 diff --git a/tests/topotests/all_protocol_startup/r1/ipv6_nht.ref b/tests/topotests/all_protocol_startup/r1/ipv6_nht.ref index 7b71761185..3f03d6fe93 100644 --- a/tests/topotests/all_protocol_startup/r1/ipv6_nht.ref +++ b/tests/topotests/all_protocol_startup/r1/ipv6_nht.ref @@ -1,15 +1,15 @@ VRF default: Resolve via default: on fc00::2 - resolved via connected + resolved via connected, prefix fc00::/64 is directly connected, r1-eth0 (vrf default), weight 1 Client list: static(fd XX) fc00:0:0:8::1000 - resolved via connected + resolved via connected, prefix fc00:0:0:8::/64 is directly connected, r1-eth8 (vrf default), weight 1 Client list: bgp(fd XX) fc00:0:0:8::2000(Connected) - resolved via connected + resolved via connected, prefix fc00:0:0:8::/64 is directly connected, r1-eth8 (vrf default), weight 1 Client list: bgp(fd XX) diff --git a/tests/topotests/bgp_dual_as/__init__.py b/tests/topotests/bgp_dual_as/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_dual_as/__init__.py diff --git a/tests/topotests/bgp_dual_as/r1/frr.conf b/tests/topotests/bgp_dual_as/r1/frr.conf new file mode 100644 index 0000000000..9dcfe05d69 --- /dev/null +++ b/tests/topotests/bgp_dual_as/r1/frr.conf @@ -0,0 +1,11 @@ +! +interface r1-eth0 + ip address 10.0.0.1/24 +! +router bgp 65000 + no bgp ebgp-requires-policy + neighbor 10.0.0.2 remote-as 65002 + neighbor 10.0.0.2 local-as 65001 no-prepend replace-as dual-as + neighbor 10.0.0.2 timers 3 10 + neighbor 10.0.0.2 timers connect 1 +! 
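Note on the test pattern: the bgp_dual_as test added in the following hunks (and the other new topotests in this series) all poll a `show ... json` command and compare the result against an expected subset via `lib.topotest.json_cmp` inside `run_and_expect`. As a reading aid, here is a minimal, self-contained stand-in for that subset comparison applied to the dual-as case; `json_subset` and the sample `summary` fragment are illustrative assumptions, not the real library code, and they assume json_cmp's usual "expected is contained in actual" semantics.

```python
# Simplified stand-in for the subset matching that lib.topotest.json_cmp
# performs in the new tests (assumption: a match means every key/value in
# `expected` is present in the actual output).  Sample data is hypothetical.
import json


def json_subset(expected, actual):
    """Return True if `expected` is structurally contained in `actual`."""
    if isinstance(expected, dict):
        return isinstance(actual, dict) and all(
            k in actual and json_subset(v, actual[k]) for k, v in expected.items()
        )
    if isinstance(expected, list):
        return isinstance(actual, list) and all(
            any(json_subset(e, a) for a in actual) for e in expected
        )
    return expected == actual


# Hypothetical fragment of "show bgp ipv4 summary json" on r1 while the
# session is still negotiated against the configured local-as.
summary = json.loads(
    '{"ipv4Unicast": {"as": 65000, "peers": {"10.0.0.2": '
    '{"remoteAs": 65002, "localAs": 65001, "state": "Established"}}}}'
)

expected = {
    "ipv4Unicast": {
        "as": 65000,
        "peers": {"10.0.0.2": {"localAs": 65001, "state": "Established"}},
    }
}

assert json_subset(expected, summary)
# After r2 is reconfigured with "neighbor 10.0.0.1 remote-as 65000", the
# dual-as peer is expected to re-establish with localAs 65000 instead.
```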
diff --git a/tests/topotests/bgp_dual_as/r2/frr.conf b/tests/topotests/bgp_dual_as/r2/frr.conf new file mode 100644 index 0000000000..cf5731b601 --- /dev/null +++ b/tests/topotests/bgp_dual_as/r2/frr.conf @@ -0,0 +1,10 @@ +! +interface r2-eth0 + ip address 10.0.0.2/24 +! +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 10.0.0.1 remote-as 65001 + neighbor 10.0.0.1 timers 3 10 + neighbor 10.0.0.1 timers connect 1 +! diff --git a/tests/topotests/bgp_dual_as/test_bgp_dual_as.py b/tests/topotests/bgp_dual_as/test_bgp_dual_as.py new file mode 100644 index 0000000000..fcac9c94ec --- /dev/null +++ b/tests/topotests/bgp_dual_as/test_bgp_dual_as.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2024 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen +from lib.common_config import step + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + r1 = tgen.add_router("r1") + r2 = tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(r1) + switch.add_link(r2) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + for _, (rname, router) in enumerate(tgen.routers().items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_dual_as(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] + + def _bgp_converge_65001(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 summary json")) + expected = { + "ipv4Unicast": { + "as": 65000, + "peers": { + "10.0.0.2": { + "hostname": "r2", + "remoteAs": 65002, + "localAs": 65001, + "state": "Established", + "peerState": "OK", + } + }, + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge_65001) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't establish BGP session using local-as AS 65001" + + step("Change remote-as from r2 to use global AS 65000") + r2.vtysh_cmd( + """ + configure terminal + router bgp + neighbor 10.0.0.1 remote-as 65000 + """ + ) + + def _bgp_converge_65000(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 summary json")) + expected = { + "ipv4Unicast": { + "as": 65000, + "peers": { + "10.0.0.2": { + "hostname": "r2", + "remoteAs": 65002, + "localAs": 65000, + "state": "Established", + "peerState": "OK", + } + }, + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge_65000) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't establish BGP session using global AS 65000" + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py index 60d959fd1e..5347604250 100755 --- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py +++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/test_bgp_l3vpn_to_bgp_vrf.py @@ -84,6 +84,9 @@ def test_check_scale_up(): CliOnFail = None # For debugging, uncomment the next line # CliOnFail = 'tgen.mininet_cli' + # Skip test on 32bit platforms (limited memory) + if sys.maxsize <= 2**32: + pytest.skip("skipped because of limited memory on 32bit platforms") CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" # uncomment next line to start cli *before* script is run # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' @@ -94,6 +97,9 @@ def test_check_scale_down(): CliOnFail = None # For debugging, uncomment the next line # CliOnFail = 'tgen.mininet_cli' + # Skip test on 32bit platforms (limited memory) + if sys.maxsize <= 2**32: + pytest.skip("skipped because of limited memory on 32bit platforms") CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')" # uncomment next line to start cli *before* script is run # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')' diff --git a/tests/topotests/bgp_peer_group_solo/__init__.py b/tests/topotests/bgp_peer_group_solo/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_peer_group_solo/__init__.py diff --git a/tests/topotests/bgp_peer_group_solo/r1/frr.conf b/tests/topotests/bgp_peer_group_solo/r1/frr.conf new file mode 100644 index 0000000000..53959aa134 --- /dev/null +++ b/tests/topotests/bgp_peer_group_solo/r1/frr.conf @@ -0,0 +1,21 @@ +! +int r1-eth0 + ip address 192.168.1.1/24 +! +int r1-eth1 + ip address 192.168.14.1/24 +! +router bgp 65001 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor pg peer-group + neighbor pg remote-as external + neighbor pg solo + neighbor pg timers 1 3 + neighbor pg timers connect 1 + neighbor 192.168.1.2 peer-group pg + neighbor 192.168.1.3 peer-group pg + address-family ipv4 unicast + network 10.0.0.1/32 + exit-address-family +! diff --git a/tests/topotests/bgp_peer_group_solo/r2/frr.conf b/tests/topotests/bgp_peer_group_solo/r2/frr.conf new file mode 100644 index 0000000000..ba99827a47 --- /dev/null +++ b/tests/topotests/bgp_peer_group_solo/r2/frr.conf @@ -0,0 +1,10 @@ +! +int r2-eth0 + ip address 192.168.1.2/24 +! +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as external + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 +! diff --git a/tests/topotests/bgp_peer_group_solo/r3/frr.conf b/tests/topotests/bgp_peer_group_solo/r3/frr.conf new file mode 100644 index 0000000000..ed06170bf2 --- /dev/null +++ b/tests/topotests/bgp_peer_group_solo/r3/frr.conf @@ -0,0 +1,10 @@ +! +int r3-eth0 + ip address 192.168.1.3/24 +! +router bgp 65003 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as external + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 +! 
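Note on the solo peer-group check added next: `neighbor pg solo` is expected to keep each peer-group member in its own update-group, but the update-group IDs reported by `show bgp ipv4 unicast update-groups json` are allocated dynamically and are not deterministic, so the test flattens the output before comparing. A small sketch of that normalization step follows; the sample dictionary and the sorted iteration order are illustrative assumptions (the test itself iterates the keys unsorted).

```python
# Sketch of the normalization used by the solo peer-group test: drop the
# non-deterministic update-group IDs and compare only the group bodies.
# The sample dict is hypothetical output of the update-groups command.
sample = {
    "default": {
        "7": {"subGroup": [{"adjListCount": 1, "peers": ["192.168.1.2"]}]},
        "9": {"subGroup": [{"adjListCount": 1, "peers": ["192.168.1.3"]}]},
    }
}

actual = [sample["default"][grp_id] for grp_id in sorted(sample["default"])]

expected = [
    {"subGroup": [{"adjListCount": 1, "peers": ["192.168.1.2"]}]},
    {"subGroup": [{"adjListCount": 1, "peers": ["192.168.1.3"]}]},
]

# With "neighbor pg solo", each peer should land in its own update-group,
# so the flattened list has exactly one entry per configured peer.
assert actual == expected
```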
diff --git a/tests/topotests/bgp_peer_group_solo/test_bgp_peer_group_solo.py b/tests/topotests/bgp_peer_group_solo/test_bgp_peer_group_solo.py new file mode 100644 index 0000000000..cdbc1e02a7 --- /dev/null +++ b/tests/topotests/bgp_peer_group_solo/test_bgp_peer_group_solo.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# Copyright (c) 2024 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# + +import os +import re +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2", "r3")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_remote_as_auto(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + def _bgp_converge(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast summary json")) + expected = { + "peers": { + "192.168.1.2": { + "remoteAs": 65002, + "state": "Established", + "peerState": "OK", + }, + "192.168.1.3": { + "remoteAs": 65003, + "state": "Established", + "peerState": "OK", + }, + }, + "totalPeers": 2, + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge initial state" + + def _bgp_update_groups(): + actual = [] + output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast update-groups json")) + expected = [ + {"subGroup": [{"adjListCount": 1, "peers": ["192.168.1.2"]}]}, + {"subGroup": [{"adjListCount": 1, "peers": ["192.168.1.3"]}]}, + ] + + # update-group's number can be random and it's not deterministic, + # so we need to normalize the data a bit before checking. + # We care here about the `peers` array only actually. 
+ for updgrp in output["default"].keys(): + actual.append(output["default"][updgrp]) + + return topotest.json_cmp(actual, expected) + + test_func = functools.partial( + _bgp_update_groups, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't see separate update-groups" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_set_aspath_exclude/test_bgp_set_aspath_exclude.py b/tests/topotests/bgp_set_aspath_exclude/test_bgp_set_aspath_exclude.py index 63f1719e1d..a5232ad694 100644 --- a/tests/topotests/bgp_set_aspath_exclude/test_bgp_set_aspath_exclude.py +++ b/tests/topotests/bgp_set_aspath_exclude/test_bgp_set_aspath_exclude.py @@ -108,7 +108,7 @@ def test_bgp_set_aspath_exclude(): pytest.skip(tgen.errors) test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_1) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed overriding incoming AS-PATH with route-map" @@ -128,7 +128,6 @@ def test_bgp_set_aspath_exclude_access_list(): conf bgp as-path access-list FIRST permit ^65 route-map r2 permit 6 - no set as-path exclude as-path-access-list SECOND set as-path exclude as-path-access-list FIRST """ ) @@ -140,21 +139,20 @@ clear bgp * ) test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_2) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed change of exclude rule in route map" r1.vtysh_cmd( """ conf route-map r2 permit 6 - no set as-path exclude as-path-access-list FIRST set as-path exclude as-path-access-list SECOND """ ) # tgen.mininet_cli() test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_1) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed reverting exclude rule in route map" @@ -182,7 +180,7 @@ clear bgp * ) test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_3) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed to removing current accesslist" @@ -200,7 +198,7 @@ clear bgp * ) test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_4) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed to renegotiate with peers 2" @@ -208,7 +206,7 @@ clear bgp * """ conf route-map r2 permit 6 - no set as-path exclude as-path-access-list SECOND + set as-path exclude 65555 """ ) @@ -219,7 +217,26 @@ clear bgp * ) test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_3) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + + assert result is None, "Failed to renegotiate with peers 2" + + r1.vtysh_cmd( + """ +conf + route-map r2 permit 6 + set as-path exclude as-path-access-list NON-EXISTING + """ + ) + + r1.vtysh_cmd( + """ +clear bgp * + """ + ) + + test_func = functools.partial(bgp_converge, tgen.gears["r1"], expected_3) + _, result = 
topotest.run_and_expect(test_func, None, count=60, wait=0.5) assert result is None, "Failed to renegotiate with peers 2" diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py index 2400cd2853..bba0061858 100644 --- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py +++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py @@ -21,7 +21,7 @@ from lib import topotest from lib.topogen import Topogen, TopoRouter, get_topogen from lib.topolog import logger from lib.common_config import required_linux_kernel_version -from lib.checkping import check_ping, check_ping +from lib.checkping import check_ping pytestmark = [pytest.mark.bgpd] diff --git a/tests/topotests/bgp_vpn_import_nexthop_default_vrf/__init__.py b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/__init__.py diff --git a/tests/topotests/bgp_vpn_import_nexthop_default_vrf/r1/frr.conf b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/r1/frr.conf new file mode 100644 index 0000000000..2a2288cf05 --- /dev/null +++ b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/r1/frr.conf @@ -0,0 +1,29 @@ +! +interface r1-eth0 + ip address 192.168.179.4/24 +exit +! +router bgp 65001 + bgp router-id 192.168.179.4 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.179.5 remote-as auto +! + address-family ipv4 vpn + neighbor 192.168.179.5 activate + neighbor 192.168.179.5 next-hop-self + exit-address-family +! +router bgp 65001 vrf CUSTOMER-A + bgp router-id 192.168.0.1 + no bgp ebgp-requires-policy + no bgp network import-check +! + address-family ipv4 unicast + label vpn export auto + rd vpn export 100:1 + rt vpn both 100:1 + export vpn + import vpn + exit-address-family +! diff --git a/tests/topotests/bgp_vpn_import_nexthop_default_vrf/r2/frr.conf b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/r2/frr.conf new file mode 100644 index 0000000000..6fe07f5622 --- /dev/null +++ b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/r2/frr.conf @@ -0,0 +1,34 @@ +! +interface r2-eth0 + ip address 192.168.179.5/24 +exit +! +interface r2-eth1 + ip address 192.168.2.2/24 +exit +! +router bgp 65002 + bgp router-id 192.168.179.5 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.179.4 remote-as auto +! + address-family ipv4 vpn + neighbor 192.168.179.4 activate + neighbor 192.168.179.4 next-hop-self + exit-address-family +! +router bgp 65002 vrf CUSTOMER-A + bgp router-id 192.168.0.2 + no bgp ebgp-requires-policy + no bgp network import-check +! + address-family ipv4 unicast + redistribute connected + label vpn export auto + rd vpn export 100:1 + rt vpn both 100:1 + export vpn + import vpn + exit-address-family +! 
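Note on the polling convention used by the VPN-import test added next (and by the other new tests): `topotest.run_and_expect` retries a check function until it returns the expected value, which for `json_cmp`-based checks is `None`. The sketch below is a simplified illustration of that contract with the documented defaults (count=20, wait=3); it is not the real lib.topotest implementation, and the toy `polls` iterator only simulates a route that needs a few polls to appear.

```python
# Simplified illustration of the poll-until-converged pattern provided by
# topotest.run_and_expect: call `func` repeatedly until it returns `what`
# (None for a successful json_cmp) or the retry budget is exhausted.
import time


def run_and_expect(func, what, count=20, wait=3):
    ret = None
    for _ in range(count):
        ret = func()
        if ret == what:
            return True, ret
        time.sleep(wait)
    return False, ret


# Pretend the imported 192.168.2.0/24 route takes a couple of polls to
# show up; the third poll "converges" by returning None.
polls = iter(["missing", "missing", None])
ok, result = run_and_expect(lambda: next(polls), None, count=5, wait=0)
assert ok and result is None
```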
diff --git a/tests/topotests/bgp_vpn_import_nexthop_default_vrf/test_bgp_vpn_import_nexthop_default_vrf.py b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/test_bgp_vpn_import_nexthop_default_vrf.py new file mode 100644 index 0000000000..ccea88b211 --- /dev/null +++ b/tests/topotests/bgp_vpn_import_nexthop_default_vrf/test_bgp_vpn_import_nexthop_default_vrf.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2024 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + r1 = tgen.gears["r1"] + r2 = tgen.gears["r2"] + + r1.run("ip link add CUSTOMER-A type vrf table 1001") + r1.run("ip link set up dev CUSTOMER-A") + r1.run("ip link set r1-eth1 master CUSTOMER-A") + + r2.run("ip link add CUSTOMER-A type vrf table 1001") + r2.run("ip link set up dev CUSTOMER-A") + r2.run("ip link set r2-eth1 master CUSTOMER-A") + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_issue_12502(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + def _bgp_converge(): + output = json.loads(r1.vtysh_cmd("show bgp vrf CUSTOMER-A ipv4 unicast json")) + expected = { + "routes": { + "192.168.2.0/24": [ + { + "valid": True, + "pathFrom": "external", + "prefix": "192.168.2.0", + "prefixLen": 24, + "path": "65002", + "nhVrfName": "default", + "nexthops": [ + { + "ip": "192.168.179.5", + "hostname": "r1", + "afi": "ipv4", + "used": True, + } + ], + } + ] + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Failed to see 192.168.2.0/24 with a valid next-hop" + + def _vrf_route_imported_to_zebra(): + output = json.loads( + r1.vtysh_cmd("show ip route vrf CUSTOMER-A 192.168.2.0/24 json") + ) + expected = { + "192.168.2.0/24": [ + { + "prefix": "192.168.2.0/24", + "protocol": "bgp", + "vrfName": "CUSTOMER-A", + "selected": True, + "installed": True, + "table": 1001, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": True, + "ip": "192.168.179.5", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "vrf": "default", + "active": True, + } + ], + } + ] + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_vrf_route_imported_to_zebra) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert ( + result is None + ), "Failed to see 192.168.2.0/24 to be imported into default VRF (Zebra)" + + +if __name__ == "__main__": + args = ["-s"] + 
sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_init.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_init.json index 2769c6eb3f..cb072e3c60 100644 --- a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_init.json +++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_init.json @@ -7,6 +7,9 @@ "routes": { "10.204.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.204.0.0", "prefixLen": 24, @@ -63,6 +66,9 @@ ], "10.201.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.201.0.0", "prefixLen": 24, diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r1_vrf1.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r1_vrf1.json index 488dc4aab9..43100aad2d 100644 --- a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r1_vrf1.json +++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r1_vrf1.json @@ -7,6 +7,9 @@ "routes": { "10.204.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.204.0.0", "prefixLen": 24, @@ -63,6 +66,9 @@ ], "10.201.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.201.0.0", "prefixLen": 24, diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf2.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf2.json index b751756fce..b11b16bca2 100644 --- a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf2.json +++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf2.json @@ -7,6 +7,9 @@ "routes": { "10.204.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.204.0.0", "prefixLen": 24, @@ -63,6 +66,9 @@ ], "10.201.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.201.0.0", "prefixLen": 24, @@ -161,6 +167,9 @@ "routes": { "10.202.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.202.0.0", "prefixLen": 24, diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf3.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf3.json index 49d4066e19..643aae401d 100644 --- a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf3.json +++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vrf_all_routes_plus_r2_vrf3.json @@ -7,6 +7,9 @@ "routes": { "10.204.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.204.0.0", "prefixLen": 24, @@ -63,6 +66,9 @@ ], "10.201.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.201.0.0", "prefixLen": 24, @@ -161,6 +167,9 @@ "routes": { "10.203.0.0/24": [ { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", "pathFrom": "external", "prefix": "10.203.0.0", "prefixLen": 24, diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index a2315138cc..44536e945e 100755 --- a/tests/topotests/conftest.py +++ 
b/tests/topotests/conftest.py @@ -18,12 +18,11 @@ from pathlib import Path import lib.fixtures import pytest from lib.common_config import generate_support_bundle -from lib.micronet_compat import Mininet from lib.topogen import diagnose_env, get_topogen from lib.topolog import get_test_logdir, logger from lib.topotest import json_cmp_result from munet import cli -from munet.base import Commander, proc_error +from munet.base import BaseMunet, Commander, proc_error from munet.cleanup import cleanup_current, cleanup_previous from munet.config import ConfigOptionsProxy from munet.testing.util import pause_test @@ -32,7 +31,7 @@ from lib import topolog, topotest try: # Used by munet native tests - from munet.testing.fixtures import event_loop, unet # pylint: disable=all # noqa + from munet.testing.fixtures import unet # pylint: disable=all # noqa @pytest.fixture(scope="module") def rundir_module(pytestconfig): @@ -86,7 +85,7 @@ def pytest_addoption(parser): parser.addoption( "--cli-on-error", action="store_true", - help="Mininet cli on test failure", + help="Munet cli on test failure", ) parser.addoption( @@ -711,7 +710,7 @@ def pytest_runtest_makereport(item, call): wait_for_procs = [] # Really would like something better than using this global here. # Not all tests use topogen though so get_topogen() won't work. - for node in Mininet.g_mnet_inst.hosts.values(): + for node in BaseMunet.g_unet.hosts.values(): pause = True if is_tmux: @@ -720,13 +719,15 @@ def pytest_runtest_makereport(item, call): if not isatty else None ) - Commander.tmux_wait_gen += 1 - wait_for_channels.append(channel) + # If we don't have a tty to pause on pause for tmux windows to exit + if channel is not None: + Commander.tmux_wait_gen += 1 + wait_for_channels.append(channel) pane_info = node.run_in_window( error_cmd, new_window=win_info is None, - background=True, + background=not isatty, title="{} ({})".format(title, node.name), name=title, tmux_target=win_info, @@ -737,9 +738,13 @@ def pytest_runtest_makereport(item, call): win_info = pane_info elif is_xterm: assert isinstance(pane_info, subprocess.Popen) - wait_for_procs.append(pane_info) + # If we don't have a tty to pause on pause for xterm procs to exit + if not isatty: + wait_for_procs.append(pane_info) # Now wait on any channels + if wait_for_channels or wait_for_procs: + logger.info("Pausing for error command windows to exit") for channel in wait_for_channels: logger.debug("Waiting on TMUX channel %s", channel) commander.cmd_raises([commander.get_exec_path("tmux"), "wait", channel]) @@ -752,10 +757,10 @@ def pytest_runtest_makereport(item, call): if error and item.config.option.cli_on_error: # Really would like something better than using this global here. # Not all tests use topogen though so get_topogen() won't work. 
- if Mininet.g_mnet_inst: - cli.cli(Mininet.g_mnet_inst, title=title, background=False) + if BaseMunet.g_unet: + cli.cli(BaseMunet.g_unet, title=title, background=False) else: - logger.error("Could not launch CLI b/c no mininet exists yet") + logger.error("Could not launch CLI b/c no munet exists yet") if pause and isatty: pause_test() @@ -800,9 +805,20 @@ done""" def pytest_terminal_summary(terminalreporter, exitstatus, config): # Only run if we are the top level test runner is_xdist_worker = "PYTEST_XDIST_WORKER" in os.environ + is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no" if config.option.cov_topotest and not is_xdist_worker: coverage_finish(terminalreporter, config) + if ( + is_xdist + and not is_xdist_worker + and ( + bool(config.getoption("--pause")) + or bool(config.getoption("--pause-at-end")) + ) + ): + pause_test("pause-at-end") + # # Add common fixtures available to all tests as parameters diff --git a/tests/topotests/dump_of_bgp/r1/frr.conf b/tests/topotests/dump_of_bgp/r1/frr.conf new file mode 100644 index 0000000000..d677e2007e --- /dev/null +++ b/tests/topotests/dump_of_bgp/r1/frr.conf @@ -0,0 +1,12 @@ +! +int r1-eth0 + ip address 192.168.1.1/24 +! +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 192.168.1.2 remote-as external + neighbor 192.168.1.2 timers 1 3 + neighbor 192.168.1.2 timers connect 1 + neighbor 192.168.1.2 capability dynamic + ! +! diff --git a/tests/topotests/dump_of_bgp/r2/frr.conf b/tests/topotests/dump_of_bgp/r2/frr.conf new file mode 100644 index 0000000000..d68d13d075 --- /dev/null +++ b/tests/topotests/dump_of_bgp/r2/frr.conf @@ -0,0 +1,17 @@ +! +int lo + ip address 10.10.10.10/32 +! +int r2-eth0 + ip address 192.168.1.2/24 +! +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as external + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 + ! + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
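Note on the dump test added next: it only verifies that the configured `dump bgp all bgp_dump.file` file appears, and its closing comment openly leaves content validation for later. If that follow-up were ever wanted, one possible direction (not part of this change) is a minimal framing check of the dump, on the assumption that bgpd writes standard MRT records whose common header per RFC 6396 is a 4-byte timestamp, 2-byte type, 2-byte subtype and 4-byte length, all big-endian. The helper name below is hypothetical.

```python
# Hypothetical follow-up check (not part of this change): walk the MRT
# records in a BGP dump file and make sure the framing is self-consistent.
# Assumes the RFC 6396 common header: timestamp(4) type(2) subtype(2) length(4).
import struct


def count_mrt_records(path):
    records = 0
    with open(path, "rb") as f:
        while True:
            header = f.read(12)
            if not header:
                break
            assert len(header) == 12, "truncated MRT header"
            _ts, _type, _subtype, length = struct.unpack(">IHHI", header)
            body = f.read(length)
            assert len(body) == length, "truncated MRT record body"
            records += 1
    return records


# e.g. assert count_mrt_records("{}/r1/bgp_dump.file".format(tgen.logdir)) > 0
```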
diff --git a/tests/topotests/dump_of_bgp/test_dump_of_bgp.py b/tests/topotests/dump_of_bgp/test_dump_of_bgp.py new file mode 100644 index 0000000000..1359c57f31 --- /dev/null +++ b/tests/topotests/dump_of_bgp/test_dump_of_bgp.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# Copyright (c) 2024 by Nvidia Corporation +# Donald Sharp <sharpd@nvidia.com> + +import os +import sys +import json +import pytest +import functools +from lib.topolog import logger + +pytestmark = [pytest.mark.bgpd] + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_dump(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Test the ability for bgp to dump a file specified") + r1 = tgen.gears["r1"] + + logger.info("Converge BGP") + def _converge(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast 10.10.10.10/32 json")) + expected = { + "paths": [ + { + "valid": True, + "nexthops": [ + { + "hostname": "r2", + "accessible": True, + } + ], + } + ] + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge" + + logger.info("Dumping file") + #### + # Create a dump file + #### + r1.vtysh_cmd( + """ + configure terminal + dump bgp all bgp_dump.file + """ + ) + + def _test_dump_file_existence(): + dump_file = "{}/r1/bgp_dump.file".format(tgen.logdir) + + logger.info("Looking for {} file".format(dump_file)) + logger.info(os.path.isfile(dump_file)) + return os.path.isfile(dump_file) + + logger.info("Ensure that Log file exists") + _, result = topotest.run_and_expect(_test_dump_file_existence, True, count=30, wait = 3) + assert result is True + + # At this point all we have done is ensure that the dump file + # is generated for r1. What is correctness of the dump anyways? + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/example_munet/r1/frr.conf b/tests/topotests/example_munet/r1/frr.conf index 468bda5e01..692e4ceb47 100644 --- a/tests/topotests/example_munet/r1/frr.conf +++ b/tests/topotests/example_munet/r1/frr.conf @@ -4,4 +4,4 @@ service integrated-vtysh-config interface eth0 ip address 10.0.1.1/24 -ip route 10.0.0.0/8 blackhole +ip route 10.0.2.0/24 10.0.1.2 diff --git a/tests/topotests/example_munet/r2/frr.conf b/tests/topotests/example_munet/r2/frr.conf index 77d9892485..da3f15b801 100644 --- a/tests/topotests/example_munet/r2/frr.conf +++ b/tests/topotests/example_munet/r2/frr.conf @@ -5,6 +5,4 @@ interface eth0 ip address 10.0.1.2/24 interface eth1 - ip address 10.0.2.2/24 - -ip route 10.0.0.0/8 blackhole + ip address 10.0.2.2/24
\ No newline at end of file diff --git a/tests/topotests/example_munet/r3/frr.conf b/tests/topotests/example_munet/r3/frr.conf index e0839e6d8a..84527b34df 100644 --- a/tests/topotests/example_munet/r3/frr.conf +++ b/tests/topotests/example_munet/r3/frr.conf @@ -4,4 +4,4 @@ service integrated-vtysh-config interface eth0 ip address 10.0.2.3/24 -ip route 10.0.0.0/8 blackhole +ip route 10.0.1.0/24 10.0.2.2
\ No newline at end of file diff --git a/tests/topotests/example_munet/test_munet.py b/tests/topotests/example_munet/test_munet.py index 0d9599fa54..71052099c4 100644 --- a/tests/topotests/example_munet/test_munet.py +++ b/tests/topotests/example_munet/test_munet.py @@ -5,6 +5,22 @@ # # Copyright (c) 2023, LabN Consulting, L.L.C. # +from munet.testing.util import retry + + +@retry(retry_timeout=10) +def wait_for_route(r, p): + o = r.cmd_raises(f"ip route show {p}") + assert p in o + + async def test_native_test(unet): - o = unet.hosts["r1"].cmd_nostatus("ip addr") + r1 = unet.hosts["r1"] + o = r1.cmd_nostatus("ip addr") print(o) + + wait_for_route(r1, "10.0.2.0/24") + + r1.cmd_raises("ping -c1 10.0.1.2") + r1.cmd_raises("ping -c1 10.0.2.2") + r1.cmd_raises("ping -c1 10.0.2.3") diff --git a/tests/topotests/forwarding_on_off/r1/frr.conf b/tests/topotests/forwarding_on_off/r1/frr.conf new file mode 100644 index 0000000000..677ba8f63e --- /dev/null +++ b/tests/topotests/forwarding_on_off/r1/frr.conf @@ -0,0 +1,14 @@ +int lo + ip address 10.1.1.1/32 + ip address 10:1::1:1/128 + +int eth0 + ip address 10.1.2.1/24 + ipv6 address 10:1::2:1/120 + +ip route 10.1.1.2/32 10.1.2.2 +ip route 10.1.1.3/32 10.1.2.2 +ip route 10.1.3.0/24 10.1.2.2 +ipv6 route 10:1::1:2/128 10:1::2:2 +ipv6 route 10:1::1:3/128 10:1::2:2 +ipv6 route 10:1::3:0/90 10:1::2:2
\ No newline at end of file diff --git a/tests/topotests/forwarding_on_off/r2/frr.conf b/tests/topotests/forwarding_on_off/r2/frr.conf new file mode 100644 index 0000000000..b6da6e2231 --- /dev/null +++ b/tests/topotests/forwarding_on_off/r2/frr.conf @@ -0,0 +1,19 @@ +no ip forwarding +no ipv6 forwarding + +int lo + ip address 10.1.1.2/32 + ipv6 address 10:1::1:2/128 + +int eth0 + ip address 10.1.2.2/24 + ipv6 address 10:1::2:2/120 + +int eth1 + ip address 10.1.3.2/24 + ipv6 address 10:1::3:2/120 + +ip route 10.1.1.1/32 10.1.2.1 +ip route 10.1.1.3/32 10.1.3.3 +ipv6 route 10:1::1:1/128 10:1::2:1 +ipv6 route 10:1::1:3/128 10:1::3:3 diff --git a/tests/topotests/forwarding_on_off/r3/frr.conf b/tests/topotests/forwarding_on_off/r3/frr.conf new file mode 100644 index 0000000000..ea05f18400 --- /dev/null +++ b/tests/topotests/forwarding_on_off/r3/frr.conf @@ -0,0 +1,15 @@ +int lo + ip address 10.1.1.3/32 + ipv6 address 10:1::1:3/128 + +int eth0 + ip address 10.1.3.3/24 + ipv6 address 10:1::3:3/120 + + +ip route 10.1.1.1/32 10.1.3.2 +ip route 10.1.1.2/32 10.1.3.2 +ip route 10.1.2.0/24 10.1.3.2 +ipv6 route 10:1::1:1/128 10:1::3:2 +ipv6 route 10:1::1:2/128 10:1::3:2 +ipv6 route 10:1::2:0/120 10:1::3:2
\ No newline at end of file diff --git a/tests/topotests/forwarding_on_off/test_forwarding_on_off.py b/tests/topotests/forwarding_on_off/test_forwarding_on_off.py new file mode 100644 index 0000000000..fa0483888c --- /dev/null +++ b/tests/topotests/forwarding_on_off/test_forwarding_on_off.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC +# +# test_forwarding_on_off.py +# +# Copyright (c) 2024 by Nvidia Corporation +# Donald Sharp +# + +""" +test_forwarding_on_off.py: Test that forwarding is turned off then back on + +""" + +import ipaddress +import json +import pytest +import sys +import time + +from functools import partial +from lib import topotest +from lib.topogen import Topogen, get_topogen +from lib.topolog import logger +from lib.checkping import check_ping + +pytestmark = [ + pytest.mark.staticd, +] + + +def build_topo(tgen): + """Build the topology used by all tests below.""" + + # Create 3 routers + r1 = tgen.add_router("r1") + r2 = tgen.add_router("r2") + r3 = tgen.add_router("r3") + + # Add a link between r1 <-> r2 and r2 <-> r3 + tgen.add_link(r1, r2, ifname1="eth0", ifname2="eth0") + tgen.add_link(r2, r3, ifname1="eth1", ifname2="eth0") + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, router in router_list.items(): + router.load_frr_config("frr.conf") + + tgen.start_router() + + +def teardown_module(): + tgen = get_topogen() + tgen.stop_topology() + + +def test_no_forwarding(): + tgen = get_topogen() + r2 = tgen.gears["r2"] + + def _no_forwarding(family, status): + logger.info("Testing for: {} {}".format(family, status)) + rc, o, e = r2.net.cmd_status( + 'vtysh -c "show zebra" | grep "{}" | grep "{}"'.format(family, status) + ) + + logger.info("Output: {}".format(o)) + return rc + + test_func = partial(_no_forwarding, "v4 Forwarding", "Off") + _, result = topotest.run_and_expect(test_func, 0, count=15, wait=1) + assert result == 0 + + test_func = partial(_no_forwarding, "v6 Forwarding", "Off") + _, result = topotest.run_and_expect(test_func, 0, count=15, wait=1) + assert result == 0 + + logger.info("Sending pings that should fail") + check_ping("r1", "10.1.1.3", False, 10, 1) + check_ping("r1", "10:1::1:3", False, 10, 1) + + logger.info("Turning on Forwarding") + r2.vtysh_cmd("conf\nip forwarding\nipv6 forwarding") + + test_func = partial(_no_forwarding, "v4 Forwarding", "On") + _, result = topotest.run_and_expect(test_func, 0, count=15, wait=1) + assert result == 0 + + test_func = partial(_no_forwarding, "v6 Forwarding", "On") + _, result = topotest.run_and_expect(test_func, 0, count=15, wait=1) + assert result == 0 + + check_ping("r1", "10.1.1.3", True, 10, 1) + check_ping("r1", "10:1::1:3", True, 10, 1) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/isis_topo1/test_isis_topo1.py b/tests/topotests/isis_topo1/test_isis_topo1.py index a574f43d89..1cec2f16f0 100644 --- a/tests/topotests/isis_topo1/test_isis_topo1.py +++ b/tests/topotests/isis_topo1/test_isis_topo1.py @@ -623,7 +623,7 @@ def test_isis_hello_padding_during_adjacency_formation(): assert result is True, result -@retry(retry_timeout=5) +@retry(retry_timeout=10) def check_last_iih_packet_for_padding(router, expect_padding): logfilename = "{}/{}".format(router.gearlogdir, "isisd.log") last_hello_packet_line = None diff --git a/tests/topotests/kinds.yaml b/tests/topotests/kinds.yaml index 5f4b61d4b7..20c819c79c 100644 --- 
a/tests/topotests/kinds.yaml +++ b/tests/topotests/kinds.yaml @@ -10,9 +10,10 @@ kinds: /usr/lib/frr/frrinit.sh stop volumes: - "./%NAME%:/etc/frr" + - "%RUNDIR%/var.lib.frr:/var/lib/frr" - "%RUNDIR%/var.log.frr:/var/log/frr" - "%RUNDIR%/var.run.frr:/var/run/frr" - - "%RUNDIR%/var.lib.frr:/var/lib/frr" + - "%RUNDIR%/var.tmp.frr:/var/tmp/frr" cap-add: - SYS_ADMIN - AUDIT_WRITE diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index e856c23d36..cb71112af3 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -936,7 +936,7 @@ def generate_support_bundle(): tgen = get_topogen() if tgen is None: - logger.warn( + logger.warning( "Support bundle attempted to be generated, but topogen is not being used" ) return True @@ -1847,7 +1847,13 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75): while True: seconds_left = (retry_until - datetime.now()).total_seconds() try: - ret = func(*args, **kwargs) + try: + ret = func(*args, seconds_left=seconds_left, **kwargs) + except TypeError as error: + if "seconds_left" not in str(error): + raise + ret = func(*args, **kwargs) + logger.debug("Function returned %s", ret) negative_result = ret is False or is_string(ret) @@ -1868,7 +1874,7 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75): return saved_failure except Exception as error: - logger.info("Function raised exception: %s", str(error)) + logger.info('Function raised exception: "%s"', repr(error)) ret = error if seconds_left < 0 and saved_failure: diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py index 71e36b6229..eb3723be42 100644 --- a/tests/topotests/lib/pim.py +++ b/tests/topotests/lib/pim.py @@ -1607,7 +1607,7 @@ def verify_pim_rp_info( if type(group_addresses) is not list: group_addresses = [group_addresses] - if type(oif) is not list: + if oif is not None and type(oif) is not list: oif = [oif] for grp in group_addresses: diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index f49e30ea5f..7941e5c1d2 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -832,10 +832,10 @@ class TopoRouter(TopoGear): for daemon in self.RD: # This will not work for all daemons daemonstr = self.RD.get(daemon).rstrip("d") - if daemonstr == "pim": - grep_cmd = "grep 'ip {}' {}".format(daemonstr, source_path) + if daemonstr == "path": + grep_cmd = "grep 'candidate-path' {}".format(source_path) else: - grep_cmd = "grep 'router {}' {}".format(daemonstr, source_path) + grep_cmd = "grep -w '{}' {}".format(daemonstr, source_path) result = self.run(grep_cmd, warn=False).strip() if result: self.load_config(daemon, "") diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 5a8c2e5964..d15fefc039 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -396,6 +396,9 @@ def run_and_expect(func, what, count=20, wait=3): waiting `wait` seconds between tries. By default it tries 20 times with 3 seconds delay between tries. + Changing default count/wait values, please change them below also for + `minimum_wait`, and `minimum_count`. + Returns (True, func-return) on success or (False, func-return) on failure. @@ -414,13 +417,18 @@ def run_and_expect(func, what, count=20, wait=3): # Just a safety-check to avoid running topotests with very # small wait/count arguments. + # If too low count/wait values are defined, override them + # with the minimum values. 
+ minimum_count = 20 + minimum_wait = 3 + minimum_wait_time = 15 # The overall minimum seconds for the test to wait wait_time = wait * count - if wait_time < 5: - assert ( - wait_time >= 5 - ), "Waiting time is too small (count={}, wait={}), adjust timer values".format( - count, wait + if wait_time < minimum_wait_time: + logger.warning( + f"Waiting time is too small (count={count}, wait={wait}), using default values (count={minimum_count}, wait={minimum_wait})" ) + count = minimum_count + wait = minimum_wait logger.debug( "'{}' polling started (interval {} secs, maximum {} tries)".format( diff --git a/tests/topotests/mgmt_oper/oper.py b/tests/topotests/mgmt_oper/oper.py index 9fc504569d..f54e64ae18 100644 --- a/tests/topotests/mgmt_oper/oper.py +++ b/tests/topotests/mgmt_oper/oper.py @@ -63,7 +63,7 @@ def disable_debug(router): @retry(retry_timeout=30, initial_wait=1) -def _do_oper_test(tgen, qr): +def _do_oper_test(tgen, qr, seconds_left=None): r1 = tgen.gears["r1"].net qcmd = ( @@ -80,6 +80,8 @@ def _do_oper_test(tgen, qr): expected = open(qr[1], encoding="ascii").read() output = r1.cmd_nostatus(qcmd.format(qr[0], qr[2] if len(qr) > 2 else "")) + diag = logging.debug if seconds_left else logging.warning + try: ojson = json.loads(output) except json.decoder.JSONDecodeError as error: @@ -92,31 +94,31 @@ def _do_oper_test(tgen, qr): logging.error( "Error decoding json exp result: %s\noutput:\n%s", error, expected ) - logging.warning("FILE: {}".format(qr[1])) + diag("FILE: {}".format(qr[1])) raise if dd_json_cmp: cmpout = json_cmp(ojson, ejson, exact_match=True) if cmpout: - logging.warning( + diag( "-------DIFF---------\n%s\n---------DIFF----------", pprint.pformat(cmpout), ) else: cmpout = tt_json_cmp(ojson, ejson, exact=True) if cmpout: - logging.warning( + diag( "-------EXPECT--------\n%s\n------END-EXPECT------", json.dumps(ejson, indent=4), ) - logging.warning( + diag( "--------GOT----------\n%s\n-------END-GOT--------", json.dumps(ojson, indent=4), ) - logging.warning("----diff---\n{}".format(cmpout)) - logging.warning("Command: {}".format(qcmd.format(qr[0], qr[2] if len(qr) > 2 else ""))) - logging.warning("File: {}".format(qr[1])) - assert cmpout is None + diag("----diff---\n{}".format(cmpout)) + diag("Command: {}".format(qcmd.format(qr[0], qr[2] if len(qr) > 2 else ""))) + diag("File: {}".format(qr[1])) + return cmpout def do_oper_test(tgen, query_results): diff --git a/tests/topotests/mgmt_oper/r1/frr-yanglib.conf b/tests/topotests/mgmt_oper/r1/frr-yanglib.conf new file mode 100644 index 0000000000..f37766b158 --- /dev/null +++ b/tests/topotests/mgmt_oper/r1/frr-yanglib.conf @@ -0,0 +1,10 @@ +log timestamp precision 6 +log file frr.log + +no debug memstats-at-exit + +debug mgmt backend datastore frontend transaction + +interface r1-eth0 + ip address 1.1.1.1/24 +exit diff --git a/tests/topotests/mgmt_oper/test_yanglib.py b/tests/topotests/mgmt_oper/test_yanglib.py new file mode 100644 index 0000000000..e094ca5443 --- /dev/null +++ b/tests/topotests/mgmt_oper/test_yanglib.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC +# -*- coding: utf-8 eval: (blacken-mode 1) -*- +# +# September 17 2024, Christian Hopps <chopps@labn.net> +# +# Copyright (c) 2024, LabN Consulting, L.L.C. 
+# + +import json +import pytest +from lib.topogen import Topogen + +pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd] + + +@pytest.fixture(scope="module") +def tgen(request): + "Setup/Teardown the environment and provide tgen argument to tests" + + topodef = {"s1": ("r1",)} + + tgen = Topogen(topodef, request.module.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.items(): + router.load_frr_config("frr-yanglib.conf") + + tgen.start_router() + yield tgen + tgen.stop_topology() + + +def test_yang_lib(tgen): + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"].net + output = r1.cmd_nostatus( + "vtysh -c 'show mgmt get-data /ietf-yang-library:yang-library'" + ) + ret = json.loads(output) + loaded_modules = ret['ietf-yang-library:yang-library']['module-set'][0]['module'] + assert len(loaded_modules) > 10, "Modules missing from yang-library" diff --git a/tests/topotests/munet/cli.py b/tests/topotests/munet/cli.py index 01a7091512..d273a30ead 100644 --- a/tests/topotests/munet/cli.py +++ b/tests/topotests/munet/cli.py @@ -745,7 +745,7 @@ async def cli_client_connected(unet, background, reader, writer): await writer.drain() -async def remote_cli(unet, prompt, title, background): +async def remote_cli(unet, prompt, title, background, remote_wait=False): """Open a CLI in a new window.""" try: if not unet.cli_sockpath: @@ -756,6 +756,13 @@ async def remote_cli(unet, prompt, title, background): unet.cli_sockpath = sockpath logging.info("server created on :\n%s\n", sockpath) + if remote_wait: + wait_tmux = bool(os.getenv("TMUX", "")) + wait_x11 = not wait_tmux and bool(os.getenv("DISPLAY", "")) + else: + wait_tmux = False + wait_x11 = False + # Open a new window with a new CLI python_path = await unet.async_get_exec_path(["python3", "python"]) us = os.path.realpath(__file__) @@ -765,7 +772,32 @@ async def remote_cli(unet, prompt, title, background): if prompt: cmd += f" --prompt='{prompt}'" cmd += " " + unet.cli_sockpath - unet.run_in_window(cmd, title=title, background=False) + + channel = None + if wait_tmux: + from .base import Commander # pylint: disable=import-outside-toplevel + + channel = "{}-{}".format(os.getpid(), Commander.tmux_wait_gen) + logger.info("XXX channel is %s", channel) + # If we don't have a tty to pause on pause for tmux windows to exit + if channel is not None: + Commander.tmux_wait_gen += 1 + + pane_info = unet.run_in_window( + cmd, title=title, background=False, wait_for=channel + ) + + if wait_tmux and channel: + from .base import commander # pylint: disable=import-outside-toplevel + + logger.debug("Waiting on TMUX CLI window") + await commander.async_cmd_raises( + [commander.get_exec_path("tmux"), "wait", channel] + ) + elif wait_x11 and isinstance(pane_info, subprocess.Popen): + logger.debug("Waiting on xterm CLI process %s", pane_info) + if hasattr(asyncio, "to_thread"): + await asyncio.to_thread(pane_info.wait) # pylint: disable=no-member except Exception as error: logging.error("cli server: unexpected exception: %s", error) @@ -906,8 +938,22 @@ def cli( prompt=None, background=True, ): + # In the case of no tty a remote_cli will be used, and we want it to wait on finish + # of the spawned cli.py script, otherwise it returns back here and exits async loop + # which kills the server side CLI socket operation. 
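The comment above explains why cli() must block when it has no controlling tty: returning immediately would tear down the asyncio loop that serves the CLI socket. A self-contained sketch of that wait-on-the-spawned-window idea, assuming a generic window_cmd argv list (the real munet code instead waits on a tmux channel or the xterm process, as shown in the hunk below):

import asyncio
import subprocess
import sys

async def open_cli_window(window_cmd):
    """Spawn a CLI in a separate window; block on it only when stdin is not a tty."""
    remote_wait = not sys.stdin.isatty()
    proc = subprocess.Popen(window_cmd)
    if not remote_wait:
        # Interactive caller keeps its own loop alive; no need to wait here.
        return proc
    # Keep the event loop running until the spawned CLI exits, without blocking it.
    if hasattr(asyncio, "to_thread"):  # Python 3.9+
        await asyncio.to_thread(proc.wait)
    else:
        await asyncio.get_running_loop().run_in_executor(None, proc.wait)
    return proc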
+ remote_wait = not sys.stdin.isatty() + asyncio.run( - async_cli(unet, histfile, sockpath, force_window, title, prompt, background) + async_cli( + unet, + histfile, + sockpath, + force_window, + title, + prompt, + background, + remote_wait=remote_wait, + ) ) @@ -919,12 +965,14 @@ async def async_cli( title=None, prompt=None, background=True, + remote_wait=False, ): if prompt is None: prompt = "munet> " if force_window or not sys.stdin.isatty(): - await remote_cli(unet, prompt, title, background) + await remote_cli(unet, prompt, title, background, remote_wait) + return if not unet: logger.debug("client-cli using sockpath %s", sockpath) diff --git a/tests/topotests/munet/native.py b/tests/topotests/munet/native.py index b7c6e4a63e..e3b782396e 100644 --- a/tests/topotests/munet/native.py +++ b/tests/topotests/munet/native.py @@ -2733,7 +2733,7 @@ ff02::2\tip6-allrouters ), "format": "stdout HOST [HOST ...]", "help": "tail -f on the stdout of the qemu/cmd for this node", - "new-window": True, + "new-window": {"background": True, "ns_only": True}, }, { "name": "stderr", @@ -2743,7 +2743,7 @@ ff02::2\tip6-allrouters ), "format": "stderr HOST [HOST ...]", "help": "tail -f on the stdout of the qemu/cmd for this node", - "new-window": True, + "new-window": {"background": True, "ns_only": True}, }, ] } diff --git a/tests/topotests/munet/testing/util.py b/tests/topotests/munet/testing/util.py index a1a94bcd1b..99687c0a83 100644 --- a/tests/topotests/munet/testing/util.py +++ b/tests/topotests/munet/testing/util.py @@ -52,12 +52,13 @@ def pause_test(desc=""): asyncio.run(async_pause_test(desc)) -def retry(retry_timeout, initial_wait=0, expected=True): +def retry(retry_timeout, initial_wait=0, retry_sleep=2, expected=True): """decorator: retry while functions return is not None or raises an exception. * `retry_timeout`: Retry for at least this many seconds; after waiting initial_wait seconds * `initial_wait`: Sleeps for this many seconds before first executing function + * `retry_sleep`: The time to sleep between retries. 
* `expected`: if False then the return logic is inverted, except for exceptions, (i.e., a non None ends the retry loop, and returns that value) """ @@ -65,9 +66,8 @@ def retry(retry_timeout, initial_wait=0, expected=True): def _retry(func): @functools.wraps(func) def func_retry(*args, **kwargs): - retry_sleep = 2 - # Allow the wrapped function's args to override the fixtures + _retry_sleep = float(kwargs.pop("retry_sleep", retry_sleep)) _retry_timeout = kwargs.pop("retry_timeout", retry_timeout) _expected = kwargs.pop("expected", expected) _initial_wait = kwargs.pop("initial_wait", initial_wait) @@ -82,13 +82,21 @@ def retry(retry_timeout, initial_wait=0, expected=True): while True: seconds_left = (retry_until - datetime.datetime.now()).total_seconds() try: - ret = func(*args, **kwargs) - if _expected and ret is None: + try: + ret = func(*args, seconds_left=seconds_left, **kwargs) + except TypeError as error: + if "seconds_left" not in str(error): + raise + ret = func(*args, **kwargs) + + logging.debug("Function returned %s", ret) + + positive_result = ret is None + if _expected == positive_result: logging.debug("Function succeeds") return ret - logging.debug("Function returned %s", ret) except Exception as error: - logging.info("Function raised exception: %s", str(error)) + logging.info('Function raised exception: "%s"', error) ret = error if seconds_left < 0: @@ -99,10 +107,10 @@ def retry(retry_timeout, initial_wait=0, expected=True): logging.info( "Sleeping %ds until next retry with %.1f retry time left", - retry_sleep, + _retry_sleep, seconds_left, ) - time.sleep(retry_sleep) + time.sleep(_retry_sleep) func_retry._original = func # pylint: disable=W0212 return func_retry diff --git a/tests/topotests/nhrp_redundancy/host/frr.conf b/tests/topotests/nhrp_redundancy/host/frr.conf new file mode 100644 index 0000000000..8bb7da0ad6 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/host/frr.conf @@ -0,0 +1,4 @@ +interface host-eth0 + ip address 10.4.4.7/24 +! +ip route 0.0.0.0/0 10.4.4.4 diff --git a/tests/topotests/nhrp_redundancy/nhc1/frr.conf b/tests/topotests/nhrp_redundancy/nhc1/frr.conf new file mode 100644 index 0000000000..98e848bccf --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc1/frr.conf @@ -0,0 +1,25 @@ +ip forwarding +!debug nhrp all +interface nhc1-eth0 + ip address 192.168.2.4/24 +! +ip route 192.168.1.0/24 192.168.2.6 +interface nhc1-gre0 + ip address 172.16.1.4/32 + no link-detect + ipv6 nd suppress-ra + ip nhrp holdtime 10 + ip nhrp network-id 42 + ip nhrp registration no-unique + ip nhrp nhs dynamic nbma 192.168.1.1 + ip nhrp nhs dynamic nbma 192.168.1.2 + ip nhrp nhs dynamic nbma 192.168.1.3 + ip nhrp shortcut + tunnel source nhc1-eth0 +! +interface nhc1-eth1 + ip address 10.4.4.4/24 +! 
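Both retry decorators touched above (lib/common_config.py and munet/testing/util.py) now hand the remaining time budget to the wrapped function when it accepts a seconds_left keyword, and fall back transparently when it does not. A minimal sketch of just that calling convention, with poll_once and plain_check as hypothetical checkers (the surrounding timeout/sleep plumbing is omitted):

def call_with_optional_seconds_left(func, seconds_left, *args, **kwargs):
    """Pass seconds_left only to callables that declare it; call the rest unchanged."""
    try:
        return func(*args, seconds_left=seconds_left, **kwargs)
    except TypeError as error:
        # Only swallow the "unexpected keyword argument 'seconds_left'" case;
        # TypeErrors raised inside the function itself must still propagate.
        if "seconds_left" not in str(error):
            raise
        return func(*args, **kwargs)

def poll_once(value, seconds_left=None):
    # A seconds_left-aware checker can soften its diagnostics on non-final attempts.
    return value

def plain_check(value):
    return value

assert call_with_optional_seconds_left(poll_once, 12.5, "ok") == "ok"
assert call_with_optional_seconds_left(plain_check, 12.5, "ok") == "ok"

mgmt_oper/oper.py uses the same hook to downgrade its diff dumps from warning to debug while retry time remains, so only the final failed attempt is noisy.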
+ip route 0.0.0.0/0 172.16.1.1 50 +ip route 0.0.0.0/0 172.16.1.2 60 +ip route 0.0.0.0/0 172.16.1.3 70 diff --git a/tests/topotests/nhrp_redundancy/r4/nhrp_cache.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_cache.json index f87ebcf5fc..9e8a5c999d 100644 --- a/tests/topotests/nhrp_redundancy/r4/nhrp_cache.json +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_cache.json @@ -4,9 +4,9 @@ }, "table": [ { - "interface": "r4-gre0", + "interface": "nhc1-gre0", "type": "nhs", - "protocol": "176.16.1.2", + "protocol": "172.16.1.2", "nbma": "192.168.1.2", "claimed_nbma": "192.168.1.2", "used": false, @@ -15,9 +15,9 @@ "identity": "" }, { - "interface": "r4-gre0", + "interface": "nhc1-gre0", "type": "local", - "protocol": "176.16.1.4", + "protocol": "172.16.1.4", "nbma": "192.168.2.4", "claimed_nbma": "192.168.2.4", "used": false, @@ -26,9 +26,9 @@ "identity": "-" }, { - "interface": "r4-gre0", + "interface": "nhc1-gre0", "type": "nhs", - "protocol": "176.16.1.3", + "protocol": "172.16.1.3", "nbma": "192.168.1.3", "claimed_nbma": "192.168.1.3", "used": false, @@ -37,9 +37,9 @@ "identity": "" }, { - "interface": "r4-gre0", + "interface": "nhc1-gre0", "type": "nhs", - "protocol": "176.16.1.1", + "protocol": "172.16.1.1", "nbma": "192.168.1.1", "claimed_nbma": "192.168.1.1", "used": false, diff --git a/tests/topotests/nhrp_redundancy/nhc1/nhrp_cache_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_cache_nhs1_down.json new file mode 100644 index 0000000000..5b91f3bcfb --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_cache_nhs1_down.json @@ -0,0 +1,40 @@ +{ + "attr": { + "entriesCount": 3 + }, + "table": [ + { + "interface": "nhc1-gre0", + "type": "nhs", + "protocol": "172.16.1.2", + "nbma": "192.168.1.2", + "claimed_nbma": "192.168.1.2", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + }, + { + "interface": "nhc1-gre0", + "type": "local", + "protocol": "172.16.1.4", + "nbma": "192.168.2.4", + "claimed_nbma": "192.168.2.4", + "used": false, + "timeout": false, + "auth": false, + "identity": "-" + }, + { + "interface": "nhc1-gre0", + "type": "nhs", + "protocol": "172.16.1.3", + "nbma": "192.168.1.3", + "claimed_nbma": "192.168.1.3", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r5/nhrp_route.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route.json index 1d1c16ffb8..083675651f 100644 --- a/tests/topotests/nhrp_redundancy/r5/nhrp_route.json +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route.json @@ -1,7 +1,7 @@ { - "176.16.1.1\/32": [ + "172.16.1.1\/32": [ { - "prefix": "176.16.1.1\/32", + "prefix": "172.16.1.1\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -16,15 +16,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r5-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] } ], - "176.16.1.2\/32": [ + "172.16.1.2\/32": [ { - "prefix": "176.16.1.2\/32", + "prefix": "172.16.1.2\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -39,15 +39,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r5-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] } ], - "176.16.1.3\/32": [ + "172.16.1.3\/32": [ { - "prefix": "176.16.1.3\/32", + "prefix": "172.16.1.3\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -62,7 +62,7 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r5-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] diff --git a/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_nhs1_down.json new file mode 100644 index 0000000000..bfb468b0fb --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_nhs1_down.json @@ -0,0 +1,49 @@ +{ + "172.16.1.1\/32": null, + "172.16.1.2\/32": [ + { + "prefix": "172.16.1.2\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc1-gre0", + "active": true + } + ] + } + ], + "172.16.1.3\/32": [ + { + "prefix": "172.16.1.3\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc1-gre0", + "active": true + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r4/nhrp_route_shortcut.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_shortcut.json index f8efff2059..3a91f1baaa 100644 --- a/tests/topotests/nhrp_redundancy/r4/nhrp_route_shortcut.json +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_shortcut.json @@ -1,7 +1,7 @@ { - "5.5.5.5\/32": [ + "10.5.5.0\/24": [ { - "prefix": "5.5.5.5\/32", + "prefix": "10.5.5.0\/24", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -15,17 +15,17 @@ "nexthops": [ { "fib": true, - "ip": "176.16.1.5", + "ip": "172.16.1.5", "afi": "ipv4", - "interfaceName": "r4-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] } ], - "176.16.1.1\/32": [ + "172.16.1.1\/32": [ { - "prefix": "176.16.1.1\/32", + "prefix": "172.16.1.1\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -40,15 +40,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] } ], - "176.16.1.2\/32": [ + "172.16.1.2\/32": [ { - "prefix": "176.16.1.2\/32", + "prefix": "172.16.1.2\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -63,15 +63,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] } ], - "176.16.1.3\/32": [ + "172.16.1.3\/32": [ { - "prefix": "176.16.1.3\/32", + "prefix": "172.16.1.3\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -86,15 +86,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] } ], - "176.16.1.5\/32": [ + "172.16.1.5\/32": [ { - "prefix": "176.16.1.5\/32", + "prefix": "172.16.1.5\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -109,7 +109,7 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc1-gre0", "active": true } ] diff --git a/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_shortcut_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_shortcut_nhs1_down.json new file mode 100644 index 0000000000..0f38feb6a8 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_route_shortcut_nhs1_down.json @@ -0,0 +1,96 @@ +{ + "10.5.5.0\/24": [ + { + "prefix": "10.5.5.0\/24", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "ip": "172.16.1.5", + "afi": "ipv4", + "interfaceName": "nhc1-gre0", + "active": true + } + ] + } + ], + "172.16.1.1\/32": null, + "172.16.1.2\/32": [ + { + "prefix": "172.16.1.2\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc1-gre0", + "active": true + } + ] + } + ], + "172.16.1.3\/32": [ + { + "prefix": "172.16.1.3\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc1-gre0", + "active": true + } + ] + } + ], + "172.16.1.5\/32": [ + { + "prefix": "172.16.1.5\/32", + 
"protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc1-gre0", + "active": true + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/nhc1/nhrp_shortcut_absent.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_shortcut_absent.json new file mode 100644 index 0000000000..78563cb902 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_shortcut_absent.json @@ -0,0 +1,5 @@ +{ + "attr":{ + "entriesCount":0 + } +} diff --git a/tests/topotests/nhrp_redundancy/nhc1/nhrp_shortcut_present.json b/tests/topotests/nhrp_redundancy/nhc1/nhrp_shortcut_present.json new file mode 100644 index 0000000000..4547c59c88 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc1/nhrp_shortcut_present.json @@ -0,0 +1,9 @@ +{ + "table":[ + { + "type":"dynamic", + "prefix":"10.5.5.0/24", + "via":"172.16.1.5" + } + ] +} diff --git a/tests/topotests/nhrp_redundancy/nhc2/frr.conf b/tests/topotests/nhrp_redundancy/nhc2/frr.conf new file mode 100644 index 0000000000..818dd48251 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc2/frr.conf @@ -0,0 +1,25 @@ +ip forwarding +!debug nhrp all +interface nhc2-eth0 + ip address 192.168.2.5/24 +! +ip route 192.168.1.0/24 192.168.2.6 +interface nhc2-gre0 + ip address 172.16.1.5/32 + no link-detect + ipv6 nd suppress-ra + ip nhrp holdtime 10 + ip nhrp network-id 42 + ip nhrp nhs dynamic nbma 192.168.1.1 + ip nhrp nhs dynamic nbma 192.168.1.2 + ip nhrp nhs dynamic nbma 192.168.1.3 + ip nhrp registration no-unique + ip nhrp shortcut + tunnel source nhc2-eth0 +! +interface nhc2-eth1 + ip address 10.5.5.5/24 +! +ip route 0.0.0.0/0 172.16.1.1 50 +ip route 0.0.0.0/0 172.16.1.2 60 +ip route 0.0.0.0/0 172.16.1.3 70 diff --git a/tests/topotests/nhrp_redundancy/r5/nhrp_cache.json b/tests/topotests/nhrp_redundancy/nhc2/nhrp_cache.json index bc041c6014..8ee02a7cbf 100644 --- a/tests/topotests/nhrp_redundancy/r5/nhrp_cache.json +++ b/tests/topotests/nhrp_redundancy/nhc2/nhrp_cache.json @@ -4,9 +4,9 @@ }, "table": [ { - "interface": "r5-gre0", + "interface": "nhc2-gre0", "type": "nhs", - "protocol": "176.16.1.2", + "protocol": "172.16.1.2", "nbma": "192.168.1.2", "claimed_nbma": "192.168.1.2", "used": false, @@ -15,9 +15,9 @@ "identity": "" }, { - "interface": "r5-gre0", + "interface": "nhc2-gre0", "type": "nhs", - "protocol": "176.16.1.3", + "protocol": "172.16.1.3", "nbma": "192.168.1.3", "claimed_nbma": "192.168.1.3", "used": false, @@ -26,9 +26,9 @@ "identity": "" }, { - "interface": "r5-gre0", + "interface": "nhc2-gre0", "type": "nhs", - "protocol": "176.16.1.1", + "protocol": "172.16.1.1", "nbma": "192.168.1.1", "claimed_nbma": "192.168.1.1", "used": false, @@ -37,9 +37,9 @@ "identity": "" }, { - "interface": "r5-gre0", + "interface": "nhc2-gre0", "type": "local", - "protocol": "176.16.1.5", + "protocol": "172.16.1.5", "nbma": "192.168.2.5", "claimed_nbma": "192.168.2.5", "used": false, diff --git a/tests/topotests/nhrp_redundancy/nhc2/nhrp_cache_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhc2/nhrp_cache_nhs1_down.json new file mode 100644 index 0000000000..bb1c483718 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc2/nhrp_cache_nhs1_down.json @@ -0,0 +1,40 @@ +{ + "attr": { + "entriesCount": 3 + }, + "table": [ + { + "interface": "nhc2-gre0", + "type": "nhs", + "protocol": "172.16.1.2", + "nbma": "192.168.1.2", + "claimed_nbma": "192.168.1.2", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + }, + { + "interface": "nhc2-gre0", + "type": "nhs", + "protocol": "172.16.1.3", + "nbma": "192.168.1.3", + "claimed_nbma": "192.168.1.3", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + }, + { 
+ "interface": "nhc2-gre0", + "type": "local", + "protocol": "172.16.1.5", + "nbma": "192.168.2.5", + "claimed_nbma": "192.168.2.5", + "used": false, + "timeout": false, + "auth": false, + "identity": "-" + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r4/nhrp_route.json b/tests/topotests/nhrp_redundancy/nhc2/nhrp_route.json index 4f1faee7a7..a69c0caec3 100644 --- a/tests/topotests/nhrp_redundancy/r4/nhrp_route.json +++ b/tests/topotests/nhrp_redundancy/nhc2/nhrp_route.json @@ -1,7 +1,7 @@ { - "176.16.1.1\/32": [ + "172.16.1.1\/32": [ { - "prefix": "176.16.1.1\/32", + "prefix": "172.16.1.1\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -16,15 +16,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc2-gre0", "active": true } ] } ], - "176.16.1.2\/32": [ + "172.16.1.2\/32": [ { - "prefix": "176.16.1.2\/32", + "prefix": "172.16.1.2\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -39,15 +39,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc2-gre0", "active": true } ] } ], - "176.16.1.3\/32": [ + "172.16.1.3\/32": [ { - "prefix": "176.16.1.3\/32", + "prefix": "172.16.1.3\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -62,7 +62,7 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r4-gre0", + "interfaceName": "nhc2-gre0", "active": true } ] diff --git a/tests/topotests/nhrp_redundancy/nhc2/nhrp_route_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhc2/nhrp_route_nhs1_down.json new file mode 100644 index 0000000000..e2dd9dde23 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhc2/nhrp_route_nhs1_down.json @@ -0,0 +1,49 @@ +{ + "172.16.1.1\/32": null, + "172.16.1.2\/32": [ + { + "prefix": "172.16.1.2\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc2-gre0", + "active": true + } + ] + } + ], + "172.16.1.3\/32": [ + { + "prefix": "172.16.1.3\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhc2-gre0", + "active": true + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/nhs1/frr.conf b/tests/topotests/nhrp_redundancy/nhs1/frr.conf new file mode 100644 index 0000000000..583d014348 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs1/frr.conf @@ -0,0 +1,19 @@ +ip forwarding +!debug nhrp all +interface nhs1-eth0 + ip address 192.168.1.1/24 +! +ip route 192.168.2.0/24 192.168.1.6 +nhrp nflog-group 1 +interface nhs1-gre0 + ip address 172.16.1.1/32 + no link-detect + ipv6 nd suppress-ra + ip nhrp holdtime 10 + ip nhrp network-id 42 + ip nhrp registration no-unique + ip nhrp redirect + tunnel source nhs1-eth0 +! +ip route 10.4.4.0/24 172.16.1.4 +ip route 10.5.5.0/24 172.16.1.5 diff --git a/tests/topotests/nhrp_redundancy/r1/nhrp_cache.json b/tests/topotests/nhrp_redundancy/nhs1/nhrp_cache.json index a94dd9fecf..11d41d1b83 100644 --- a/tests/topotests/nhrp_redundancy/r1/nhrp_cache.json +++ b/tests/topotests/nhrp_redundancy/nhs1/nhrp_cache.json @@ -4,9 +4,9 @@ }, "table": [ { - "interface": "r1-gre0", + "interface": "nhs1-gre0", "type": "dynamic", - "protocol": "176.16.1.4", + "protocol": "172.16.1.4", "nbma": "192.168.2.4", "claimed_nbma": "192.168.2.4", "used": false, @@ -15,9 +15,9 @@ "identity": "" }, { - "interface": "r1-gre0", + "interface": "nhs1-gre0", "type": "local", - "protocol": "176.16.1.1", + "protocol": "172.16.1.1", "nbma": "192.168.1.1", "claimed_nbma": "192.168.1.1", "used": false, @@ -26,9 +26,9 @@ "identity": "-" }, { - "interface": "r1-gre0", + "interface": "nhs1-gre0", "type": "dynamic", - "protocol": "176.16.1.5", + "protocol": "172.16.1.5", "nbma": "192.168.2.5", "claimed_nbma": "192.168.2.5", "used": false, diff --git a/tests/topotests/nhrp_redundancy/r3/nhrp_route.json b/tests/topotests/nhrp_redundancy/nhs1/nhrp_route.json index 3d548c08fd..2574b1a5d2 100644 --- a/tests/topotests/nhrp_redundancy/r3/nhrp_route.json +++ b/tests/topotests/nhrp_redundancy/nhs1/nhrp_route.json @@ -1,7 +1,7 @@ { - "176.16.1.4\/32": [ + "172.16.1.4\/32": [ { - "prefix": "176.16.1.4\/32", + "prefix": "172.16.1.4\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -16,15 +16,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r3-gre0", + "interfaceName": "nhs1-gre0", "active": true } ] } ], - "176.16.1.5\/32": [ + "172.16.1.5\/32": [ { - "prefix": "176.16.1.5\/32", + "prefix": "172.16.1.5\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -39,7 +39,7 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r3-gre0", + "interfaceName": "nhs1-gre0", "active": true } ] diff --git a/tests/topotests/nhrp_redundancy/nhs2/frr.conf b/tests/topotests/nhrp_redundancy/nhs2/frr.conf new file mode 100644 index 0000000000..a6e0a98e6b --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs2/frr.conf @@ -0,0 +1,19 @@ +ip forwarding +!debug nhrp all +interface nhs2-eth0 + ip address 192.168.1.2/24 +! +ip route 192.168.2.0/24 192.168.1.6 +nhrp nflog-group 1 +interface nhs2-gre0 + ip address 172.16.1.2/32 + no link-detect + ipv6 nd suppress-ra + ip nhrp holdtime 10 + ip nhrp network-id 42 + ip nhrp registration no-unique + ip nhrp redirect + tunnel source nhs2-eth0 +! 
+ip route 10.4.4.0/24 172.16.1.4 +ip route 10.5.5.0/24 172.16.1.5 diff --git a/tests/topotests/nhrp_redundancy/r2/nhrp_cache.json b/tests/topotests/nhrp_redundancy/nhs2/nhrp_cache.json index 91557a1918..6343c4deb9 100644 --- a/tests/topotests/nhrp_redundancy/r2/nhrp_cache.json +++ b/tests/topotests/nhrp_redundancy/nhs2/nhrp_cache.json @@ -4,9 +4,9 @@ }, "table": [ { - "interface": "r2-gre0", + "interface": "nhs2-gre0", "type": "local", - "protocol": "176.16.1.2", + "protocol": "172.16.1.2", "nbma": "192.168.1.2", "claimed_nbma": "192.168.1.2", "used": false, @@ -15,9 +15,9 @@ "identity": "-" }, { - "interface": "r2-gre0", + "interface": "nhs2-gre0", "type": "dynamic", - "protocol": "176.16.1.4", + "protocol": "172.16.1.4", "nbma": "192.168.2.4", "claimed_nbma": "192.168.2.4", "used": false, @@ -26,9 +26,9 @@ "identity": "" }, { - "interface": "r2-gre0", + "interface": "nhs2-gre0", "type": "dynamic", - "protocol": "176.16.1.5", + "protocol": "172.16.1.5", "nbma": "192.168.2.5", "claimed_nbma": "192.168.2.5", "used": false, diff --git a/tests/topotests/nhrp_redundancy/nhs2/nhrp_cache_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhs2/nhrp_cache_nhs1_down.json new file mode 100644 index 0000000000..6343c4deb9 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs2/nhrp_cache_nhs1_down.json @@ -0,0 +1,40 @@ +{ + "attr": { + "entriesCount": 3 + }, + "table": [ + { + "interface": "nhs2-gre0", + "type": "local", + "protocol": "172.16.1.2", + "nbma": "192.168.1.2", + "claimed_nbma": "192.168.1.2", + "used": false, + "timeout": false, + "auth": false, + "identity": "-" + }, + { + "interface": "nhs2-gre0", + "type": "dynamic", + "protocol": "172.16.1.4", + "nbma": "192.168.2.4", + "claimed_nbma": "192.168.2.4", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + }, + { + "interface": "nhs2-gre0", + "type": "dynamic", + "protocol": "172.16.1.5", + "nbma": "192.168.2.5", + "claimed_nbma": "192.168.2.5", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r1/nhrp_route.json b/tests/topotests/nhrp_redundancy/nhs2/nhrp_route.json index b5f3e29e74..0ad37fc319 100644 --- a/tests/topotests/nhrp_redundancy/r1/nhrp_route.json +++ b/tests/topotests/nhrp_redundancy/nhs2/nhrp_route.json @@ -1,7 +1,7 @@ { - "176.16.1.4\/32": [ + "172.16.1.4\/32": [ { - "prefix": "176.16.1.4\/32", + "prefix": "172.16.1.4\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -16,15 +16,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r1-gre0", + "interfaceName": "nhs2-gre0", "active": true } ] } ], - "176.16.1.5\/32": [ + "172.16.1.5\/32": [ { - "prefix": "176.16.1.5\/32", + "prefix": "172.16.1.5\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -39,7 +39,7 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r1-gre0", + "interfaceName": "nhs2-gre0", "active": true } ] diff --git a/tests/topotests/nhrp_redundancy/nhs2/nhrp_route_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhs2/nhrp_route_nhs1_down.json new file mode 100644 index 0000000000..0ad37fc319 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs2/nhrp_route_nhs1_down.json @@ -0,0 +1,48 @@ +{ + "172.16.1.4\/32": [ + { + "prefix": "172.16.1.4\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhs2-gre0", + "active": true + } + ] + } + ], + "172.16.1.5\/32": [ + { + "prefix": "172.16.1.5\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhs2-gre0", + "active": true + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/nhs3/frr.conf b/tests/topotests/nhrp_redundancy/nhs3/frr.conf new file mode 100644 index 0000000000..e965baf327 --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs3/frr.conf @@ -0,0 +1,19 @@ +ip forwarding +!debug nhrp all +interface nhs3-eth0 + ip address 192.168.1.3/24 +! +ip route 192.168.2.0/24 192.168.1.6 +nhrp nflog-group 1 +interface nhs3-gre0 + ip address 172.16.1.3/32 + no link-detect + ipv6 nd suppress-ra + ip nhrp holdtime 10 + ip nhrp network-id 42 + ip nhrp registration no-unique + ip nhrp redirect + tunnel source nhs3-eth0 +! +ip route 10.4.4.0/24 172.16.1.4 +ip route 10.5.5.0/24 172.16.1.5
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r3/nhrp_cache.json b/tests/topotests/nhrp_redundancy/nhs3/nhrp_cache.json index ef3ab690bc..d911de348b 100644 --- a/tests/topotests/nhrp_redundancy/r3/nhrp_cache.json +++ b/tests/topotests/nhrp_redundancy/nhs3/nhrp_cache.json @@ -4,9 +4,9 @@ }, "table": [ { - "interface": "r3-gre0", + "interface": "nhs3-gre0", "type": "dynamic", - "protocol": "176.16.1.4", + "protocol": "172.16.1.4", "nbma": "192.168.2.4", "claimed_nbma": "192.168.2.4", "used": false, @@ -15,9 +15,9 @@ "identity": "" }, { - "interface": "r3-gre0", + "interface": "nhs3-gre0", "type": "local", - "protocol": "176.16.1.3", + "protocol": "172.16.1.3", "nbma": "192.168.1.3", "claimed_nbma": "192.168.1.3", "used": false, @@ -26,9 +26,9 @@ "identity": "-" }, { - "interface": "r3-gre0", + "interface": "nhs3-gre0", "type": "dynamic", - "protocol": "176.16.1.5", + "protocol": "172.16.1.5", "nbma": "192.168.2.5", "claimed_nbma": "192.168.2.5", "used": false, diff --git a/tests/topotests/nhrp_redundancy/nhs3/nhrp_cache_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhs3/nhrp_cache_nhs1_down.json new file mode 100644 index 0000000000..d911de348b --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs3/nhrp_cache_nhs1_down.json @@ -0,0 +1,40 @@ +{ + "attr": { + "entriesCount": 3 + }, + "table": [ + { + "interface": "nhs3-gre0", + "type": "dynamic", + "protocol": "172.16.1.4", + "nbma": "192.168.2.4", + "claimed_nbma": "192.168.2.4", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + }, + { + "interface": "nhs3-gre0", + "type": "local", + "protocol": "172.16.1.3", + "nbma": "192.168.1.3", + "claimed_nbma": "192.168.1.3", + "used": false, + "timeout": false, + "auth": false, + "identity": "-" + }, + { + "interface": "nhs3-gre0", + "type": "dynamic", + "protocol": "172.16.1.5", + "nbma": "192.168.2.5", + "claimed_nbma": "192.168.2.5", + "used": false, + "timeout": true, + "auth": false, + "identity": "" + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r2/nhrp_route.json b/tests/topotests/nhrp_redundancy/nhs3/nhrp_route.json index f1fa6e54c1..29a4f8f11b 100644 --- a/tests/topotests/nhrp_redundancy/r2/nhrp_route.json +++ b/tests/topotests/nhrp_redundancy/nhs3/nhrp_route.json @@ -1,7 +1,7 @@ { - "176.16.1.4\/32": [ + "172.16.1.4\/32": [ { - "prefix": "176.16.1.4\/32", + "prefix": "172.16.1.4\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -16,15 +16,15 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r2-gre0", + "interfaceName": "nhs3-gre0", "active": true } ] } ], - "176.16.1.5\/32": [ + "172.16.1.5\/32": [ { - "prefix": "176.16.1.5\/32", + "prefix": "172.16.1.5\/32", "protocol": "nhrp", "vrfId": 0, "vrfName": "default", @@ -39,7 +39,7 @@ { "fib": true, "directlyConnected": true, - "interfaceName": "r2-gre0", + "interfaceName": "nhs3-gre0", "active": true } ] diff --git a/tests/topotests/nhrp_redundancy/nhs3/nhrp_route_nhs1_down.json b/tests/topotests/nhrp_redundancy/nhs3/nhrp_route_nhs1_down.json new file mode 100644 index 0000000000..29a4f8f11b --- /dev/null +++ b/tests/topotests/nhrp_redundancy/nhs3/nhrp_route_nhs1_down.json @@ -0,0 +1,48 @@ +{ + "172.16.1.4\/32": [ + { + "prefix": "172.16.1.4\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhs3-gre0", + "active": true + } + ] + } + ], + "172.16.1.5\/32": [ + { + "prefix": "172.16.1.5\/32", + "protocol": "nhrp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 10, + "metric": 0, + "installed": true, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "fib": true, + "directlyConnected": true, + "interfaceName": "nhs3-gre0", + "active": true + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r1/nhrpd.conf b/tests/topotests/nhrp_redundancy/r1/nhrpd.conf deleted file mode 100644 index ad48ce3769..0000000000 --- a/tests/topotests/nhrp_redundancy/r1/nhrpd.conf +++ /dev/null @@ -1,9 +0,0 @@ -!debug nhrp all -nhrp nflog-group 1 -interface r1-gre0 - ip nhrp holdtime 10 - ip nhrp network-id 42 - ip nhrp registration no-unique - ip nhrp redirect - tunnel source r1-eth0 -exit diff --git a/tests/topotests/nhrp_redundancy/r1/zebra.conf b/tests/topotests/nhrp_redundancy/r1/zebra.conf deleted file mode 100644 index 0f11563f57..0000000000 --- a/tests/topotests/nhrp_redundancy/r1/zebra.conf +++ /dev/null @@ -1,12 +0,0 @@ -ip forwarding -interface r1-eth0 - ip address 192.168.1.1/24 -! -ip route 192.168.2.0/24 192.168.1.6 -interface r1-gre0 - ip address 176.16.1.1/32 - no link-detect - ipv6 nd suppress-ra -! -ip route 4.4.4.0/24 176.16.1.4 -ip route 5.5.5.0/24 176.16.1.5 diff --git a/tests/topotests/nhrp_redundancy/r2/nhrpd.conf b/tests/topotests/nhrp_redundancy/r2/nhrpd.conf deleted file mode 100644 index 4d63f07d1f..0000000000 --- a/tests/topotests/nhrp_redundancy/r2/nhrpd.conf +++ /dev/null @@ -1,9 +0,0 @@ -!debug nhrp all -nhrp nflog-group 1 -interface r2-gre0 - ip nhrp holdtime 10 - ip nhrp network-id 42 - ip nhrp registration no-unique - ip nhrp redirect - tunnel source r2-eth0 -exit diff --git a/tests/topotests/nhrp_redundancy/r2/zebra.conf b/tests/topotests/nhrp_redundancy/r2/zebra.conf deleted file mode 100644 index 1a9c4ff915..0000000000 --- a/tests/topotests/nhrp_redundancy/r2/zebra.conf +++ /dev/null @@ -1,12 +0,0 @@ -ip forwarding -interface r2-eth0 - ip address 192.168.1.2/24 -! -ip route 192.168.2.0/24 192.168.1.6 -interface r2-gre0 - ip address 176.16.1.2/32 - no link-detect - ipv6 nd suppress-ra -! -ip route 4.4.4.0/24 176.16.1.4 -ip route 5.5.5.0/24 176.16.1.5 diff --git a/tests/topotests/nhrp_redundancy/r3/nhrpd.conf b/tests/topotests/nhrp_redundancy/r3/nhrpd.conf deleted file mode 100644 index 87cc2161f8..0000000000 --- a/tests/topotests/nhrp_redundancy/r3/nhrpd.conf +++ /dev/null @@ -1,9 +0,0 @@ -!debug nhrp all -nhrp nflog-group 1 -interface r3-gre0 - ip nhrp holdtime 10 - ip nhrp network-id 42 - ip nhrp registration no-unique - ip nhrp redirect - tunnel source r3-eth0 -exit diff --git a/tests/topotests/nhrp_redundancy/r3/zebra.conf b/tests/topotests/nhrp_redundancy/r3/zebra.conf deleted file mode 100644 index 980cfbcaab..0000000000 --- a/tests/topotests/nhrp_redundancy/r3/zebra.conf +++ /dev/null @@ -1,12 +0,0 @@ -ip forwarding -interface r3-eth0 - ip address 192.168.1.3/24 -! -ip route 192.168.2.0/24 192.168.1.6 -interface r3-gre0 - ip address 176.16.1.3/32 - no link-detect - ipv6 nd suppress-ra -! -ip route 4.4.4.0/24 176.16.1.4 -ip route 5.5.5.0/24 176.16.1.5
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r4/nhrpd.conf b/tests/topotests/nhrp_redundancy/r4/nhrpd.conf deleted file mode 100644 index 8a52f3386e..0000000000 --- a/tests/topotests/nhrp_redundancy/r4/nhrpd.conf +++ /dev/null @@ -1,11 +0,0 @@ -!debug nhrp all -interface r4-gre0 - ip nhrp holdtime 10 - ip nhrp network-id 42 - ip nhrp registration no-unique - ip nhrp nhs dynamic nbma 192.168.1.1 - ip nhrp nhs dynamic nbma 192.168.1.2 - ip nhrp nhs dynamic nbma 192.168.1.3 - ip nhrp shortcut - tunnel source r4-eth0 -exit diff --git a/tests/topotests/nhrp_redundancy/r4/zebra.conf b/tests/topotests/nhrp_redundancy/r4/zebra.conf deleted file mode 100644 index e4a9a6f80f..0000000000 --- a/tests/topotests/nhrp_redundancy/r4/zebra.conf +++ /dev/null @@ -1,16 +0,0 @@ -ip forwarding -interface r4-eth0 - ip address 192.168.2.4/24 -! -ip route 192.168.1.0/24 192.168.2.6 -interface r4-gre0 - ip address 176.16.1.4/32 - no link-detect - ipv6 nd suppress-ra -! -interface r4-eth1 - ip address 4.4.4.4/24 -! -ip route 0.0.0.0/0 176.16.1.1 50 -ip route 0.0.0.0/0 176.16.1.2 60 -ip route 0.0.0.0/0 176.16.1.3 70
\ No newline at end of file diff --git a/tests/topotests/nhrp_redundancy/r5/nhrpd.conf b/tests/topotests/nhrp_redundancy/r5/nhrpd.conf deleted file mode 100644 index 7241ed592d..0000000000 --- a/tests/topotests/nhrp_redundancy/r5/nhrpd.conf +++ /dev/null @@ -1,11 +0,0 @@ -!debug nhrp all -interface r5-gre0 - ip nhrp holdtime 10 - ip nhrp network-id 42 - ip nhrp nhs dynamic nbma 192.168.1.1 - ip nhrp nhs dynamic nbma 192.168.1.2 - ip nhrp nhs dynamic nbma 192.168.1.3 - ip nhrp registration no-unique - ip nhrp shortcut - tunnel source r5-eth0 -exit diff --git a/tests/topotests/nhrp_redundancy/r5/zebra.conf b/tests/topotests/nhrp_redundancy/r5/zebra.conf deleted file mode 100644 index 9b1e1c0646..0000000000 --- a/tests/topotests/nhrp_redundancy/r5/zebra.conf +++ /dev/null @@ -1,16 +0,0 @@ -ip forwarding -interface r5-eth0 - ip address 192.168.2.5/24 -! -ip route 192.168.1.0/24 192.168.2.6 -interface r5-gre0 - ip address 176.16.1.5/32 - no link-detect - ipv6 nd suppress-ra -! -interface r5-eth1 - ip address 5.5.5.5/24 -! -ip route 0.0.0.0/0 176.16.1.1 50 -ip route 0.0.0.0/0 176.16.1.2 60 -ip route 0.0.0.0/0 176.16.1.3 70 diff --git a/tests/topotests/nhrp_redundancy/r7/zebra.conf b/tests/topotests/nhrp_redundancy/r7/zebra.conf deleted file mode 100644 index 5747b40956..0000000000 --- a/tests/topotests/nhrp_redundancy/r7/zebra.conf +++ /dev/null @@ -1,4 +0,0 @@ -interface r7-eth0 - ip address 4.4.4.7/24 -! -ip route 0.0.0.0/0 4.4.4.4 diff --git a/tests/topotests/nhrp_redundancy/r6/zebra.conf b/tests/topotests/nhrp_redundancy/router/frr.conf index 63a37cd5bf..c0eb19ca40 100644 --- a/tests/topotests/nhrp_redundancy/r6/zebra.conf +++ b/tests/topotests/nhrp_redundancy/router/frr.conf @@ -1,7 +1,7 @@ ip forwarding -interface r6-eth0 +interface router-eth0 ip address 192.168.1.6/24 ! 
-interface r6-eth1 +interface router-eth1 ip address 192.168.2.6/24 exit diff --git a/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.dot b/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.dot index c169436db0..e94e1d0734 100644 --- a/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.dot +++ b/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.dot @@ -16,43 +16,43 @@ graph template { label="nhrp-topo-redundant-nhs"; # Routers - r1 [ + nhs1 [ shape=doubleoctagon, label="NHS 1", fillcolor="#f08080", style=filled, ]; - r2 [ + nhs2 [ shape=doubleoctagon label="NHS 2", fillcolor="#f08080", style=filled, ]; - r3 [ + nhs3 [ shape=doubleoctagon label="NHS 3", fillcolor="#f08080", style=filled, ]; - r4 [ + nhc1 [ shape=doubleoctagon label="NHC 1", fillcolor="#f08080", style=filled, ]; - r5 [ + nhc2 [ shape=doubleoctagon label="NHC 2", fillcolor="#f08080", style=filled, ]; - r6 [ + router [ shape=doubleoctagon label="router", fillcolor="#f08080", style=filled, ]; - r7 [ + host [ shape=doubleoctagon label="host", fillcolor="#f08080", @@ -74,30 +74,30 @@ graph template { ]; sw3 [ shape=oval, - label="sw3\n4.4.4.0/24", + label="sw3\n10.4.4.0/24", fillcolor="#d0e0d0", style=filled, ]; sw4 [ shape=oval, - label="sw4\n5.5.5.0/24", + label="sw4\n10.5.5.0/24", fillcolor="#d0e0d0", style=filled, ]; # Connections - r1 -- sw1 [label="eth0"]; - r2 -- sw1 [label="eth0"]; - r3 -- sw1 [label="eth0"]; - r6 -- sw1 [label="eth0"]; + nhs1 -- sw1 [label="eth0"]; + nhs2 -- sw1 [label="eth0"]; + nhs3 -- sw1 [label="eth0"]; + router -- sw1 [label="eth0"]; - r4 -- sw2 [label="eth0"]; - r5 -- sw2 [label="eth0"]; - r6 -- sw2 [label="eth1"]; + nhc1 -- sw2 [label="eth0"]; + nhc2 -- sw2 [label="eth0"]; + router -- sw2 [label="eth1"]; - r4 -- sw3 [label="eth1"]; - r7 -- sw3 [label="eth0"]; + nhc1 -- sw3 [label="eth1"]; + host -- sw3 [label="eth0"]; - r5 -- sw4 [label="eth1"]; + nhc2 -- sw4 [label="eth1"]; } diff --git a/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.py b/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.py index ffd9abc9d4..d4cf98596c 100644 --- a/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.py +++ b/tests/topotests/nhrp_redundancy/test_nhrp_redundancy.py @@ -29,38 +29,38 @@ test_nhrp_redundancy.py: Test NHS redundancy for NHRP """ TOPOLOGY = """ -+------------+ +------------+ +------------+ -| | | | | | -| | | | | | -| NHS 1 | | NHS 2 | | NHS 3 | -| | | | | | -+-----+------+ +-----+------+ +-----+------+ - |.1 |.2 |.3 - | | | - | | 192.168.1.0/24 | -------+-------------------------------+------------------+-------------+------ - | - |.6 - GRE P2MP between all NHS and NHC +-----+------+ - 172.16.1.x/32 | | - | | - | Router | - | | - +-----+------+ - | - | - ---------+----------------+-------------+------ - | 192.168.2.0/24 | - | | - | |.4 |.5 -+------------+ | +-------+----+ +------+-----+ | -| | | | | | | | -| | +--------+ | | | | -| Host |.7 | | NHC 1 | | NHC 2 +-----+5.5.5.0/24 -| +---------+ | | | | | -+------------+ | +------------+ +------------+ | - | | - 4.4.4.0/24 ++------------+ +------------+ +------------+ +| | | | | | +| | | | | | +| NHS 1 | | NHS 2 | | NHS 3 | +| | | | | | ++-----+------+ +-----+------+ +-----+------+ + |.1 |.2 |.3 + | | | + | | 192.168.1.0/24 | +------+-------------------------------+------------------+-------------+------ + | + |.6 + GRE P2MP between all NHS and NHC +-----+------+ + 172.16.1.x/32 | | + | | + | Router | + | | + +-----+------+ + | + | + ---------+----------------+-------------+------ + | 192.168.2.0/24 | + | | + | |.4 |.5 
++------------+ | +-------+----+ +------+-----+ | +| | | | | | | | +| | +--------+ | | | | +| Host |.7 | | NHC 1 | | NHC 2 +-----+10.5.5.0/24 +| +---------+ | | | | | ++------------+ | +------------+ +------------+ | + | | + 10.4.4.0/24 """ # Save the Current Working Directory to find configuration files. @@ -76,30 +76,26 @@ def build_topo(tgen): "Build function" # Create 7 routers - for routern in range(1, 8): - tgen.add_router("r{}".format(routern)) + for rname in ["nhs1", "nhs2", "nhs3", "nhc1", "nhc2", "router", "host"]: + tgen.add_router(rname) - # Interconnect routers 1, 2, 3, 6 switch = tgen.add_switch("s1") - switch.add_link(tgen.gears["r1"]) - switch.add_link(tgen.gears["r2"]) - switch.add_link(tgen.gears["r3"]) - switch.add_link(tgen.gears["r6"]) + switch.add_link(tgen.gears["nhs1"]) + switch.add_link(tgen.gears["nhs2"]) + switch.add_link(tgen.gears["nhs3"]) + switch.add_link(tgen.gears["router"]) - # Interconnect routers 4, 5, 6 switch = tgen.add_switch("s2") - switch.add_link(tgen.gears["r4"]) - switch.add_link(tgen.gears["r5"]) - switch.add_link(tgen.gears["r6"]) + switch.add_link(tgen.gears["nhc1"]) + switch.add_link(tgen.gears["nhc2"]) + switch.add_link(tgen.gears["router"]) - # Connect router 4, 7 switch = tgen.add_switch("s3") - switch.add_link(tgen.gears["r4"]) - switch.add_link(tgen.gears["r7"]) + switch.add_link(tgen.gears["nhc1"]) + switch.add_link(tgen.gears["host"]) - # Connect router 5 switch = tgen.add_switch("s4") - switch.add_link(tgen.gears["r5"]) + switch.add_link(tgen.gears["nhc2"]) def _populate_iface(): @@ -110,7 +106,7 @@ def _populate_iface(): "echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu", "echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6", "echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6", - "iptables -A FORWARD -i {0}-gre0 -o {0}-gre0 -m hashlimit --hashlimit-upto 4/minute --hashlimit-burst 1 --hashlimit-mode srcip,dstip --hashlimit-srcmask 24 --hashlimit-dstmask 24 --hashlimit-name loglimit-0 -j NFLOG --nflog-group 1 --nflog-range 128", + "iptables -A FORWARD -i {0}-gre0 -o {0}-gre0 -m hashlimit --hashlimit-upto 4/minute --hashlimit-burst 1 --hashlimit-mode srcip,dstip --hashlimit-srcmask 24 --hashlimit-dstmask 24 --hashlimit-name loglimit-0 -j NFLOG --nflog-group 1 --nflog-size 128", ] cmds_tot = [ @@ -122,41 +118,38 @@ def _populate_iface(): ] for cmd in cmds_tot_hub: - # Router 1 - input = cmd.format("r1", "1") + input = cmd.format("nhs1", "1") logger.info("input: " + input) - output = tgen.net["r1"].cmd(input) + output = tgen.net["nhs1"].cmd(input) logger.info("output: " + output) - # Router 2 - input = cmd.format("r2", "2") + input = cmd.format("nhs2", "2") logger.info("input: " + input) - output = tgen.net["r2"].cmd(input) + output = tgen.net["nhs2"].cmd(input) logger.info("output: " + output) - # Router 3 - input = cmd.format("r3", "3") + input = cmd.format("nhs3", "3") logger.info("input: " + input) - output = tgen.net["r3"].cmd(input) + output = tgen.net["nhs3"].cmd(input) logger.info("output: " + output) for cmd in cmds_tot: - input = cmd.format("r4", "4") + input = cmd.format("nhc1", "4") logger.info("input: " + input) - output = tgen.net["r4"].cmd(input) + output = tgen.net["nhc1"].cmd(input) logger.info("output: " + output) - input = cmd.format("r5", "5") + input = cmd.format("nhc2", "5") logger.info("input: " + input) - output = tgen.net["r5"].cmd(input) + output = tgen.net["nhc2"].cmd(input) logger.info("output: " + output) def _verify_iptables(): tgen = get_topogen() # Verify iptables is installed. 
Required for shortcuts - rc, _, _ = tgen.net["r1"].cmd_status("iptables") - return False if rc == 127 else True + rc, _, _ = tgen.net["nhs1"].cmd_status("iptables -V") + return True if rc == 0 else False def setup_module(mod): @@ -174,14 +167,8 @@ def setup_module(mod): _populate_iface() for rname, router in router_list.items(): - router.load_config( - TopoRouter.RD_ZEBRA, - os.path.join(CWD, "{}/zebra.conf".format(rname)), - ) - if rname in ("r1", "r2", "r3", "r4", "r5"): - router.load_config( - TopoRouter.RD_NHRP, os.path.join(CWD, "{}/nhrpd.conf".format(rname)) - ) + logger.info("Loading router %s" % rname) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) # Initialize all routers. tgen.start_router() @@ -202,17 +189,15 @@ def test_protocols_convergence(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - logger.info("Checking NHRP cache and IPv4 routes for convergence") + logger.info("Checking NHRP cache for convergence") router_list = tgen.routers() # Check NHRP cache on servers and clients - for _, router in router_list.items(): - - json_file = "{}/{}/nhrp_cache.json".format(CWD, router.name) - if not os.path.isfile(json_file): - logger.info("skipping file {}".format(json_file)) + for rname, router in router_list.items(): + if "nh" not in rname: continue + json_file = "{}/{}/nhrp_cache.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) test_func = partial( topotest.router_json_cmp, router, "show ip nhrp cache json", expected @@ -226,13 +211,12 @@ def test_protocols_convergence(): assert result is None, assertmsg # Check NHRP IPV4 routes on servers and clients + logger.info("Checking IPv4 routes for convergence") for rname, router in router_list.items(): - - json_file = "{}/{}/nhrp_route.json".format(CWD, router.name) - if not os.path.isfile(json_file): - logger.info("skipping file {}".format(json_file)) + if "nh" not in rname: continue + json_file = "{}/{}/nhrp_route.json".format(CWD, router.name) expected = json.loads(open(json_file).read()) test_func = partial( topotest.router_json_cmp, router, "show ip route nhrp json", expected @@ -246,53 +230,53 @@ def test_protocols_convergence(): assert result is None, assertmsg # Test connectivity from 1 NHRP server to all clients - pingrouter = tgen.gears["r1"] - logger.info("Check Ping IPv4 from R1 to R4 = 176.16.1.4)") - output = pingrouter.run("ping 176.16.1.4 -f -c 1000") + nhs1 = tgen.gears["nhs1"] + logger.info("Check Ping IPv4 from nhs1 to nhc1 = 172.16.1.4)") + output = nhs1.run("ping 172.16.1.4 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R1 to R4 should be ok" + assertmsg = "expected ping IPv4 from nhs1 to nhc1 should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R1 to R4 OK") + logger.info("Check Ping IPv4 from nhs1 to nhc1 OK") - logger.info("Check Ping IPv4 from R1 to R5 = 176.16.1.5)") - output = pingrouter.run("ping 176.16.1.5 -f -c 1000") + logger.info("Check Ping IPv4 from nhs1 to nhc2 = 172.16.1.5)") + output = nhs1.run("ping 172.16.1.5 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R1 to R5 should be ok" + assertmsg = "expected ping IPv4 from nhs1 to nhc2 should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R1 to R5 OK") + logger.info("Check Ping IPv4 from nhs1 to nhc2 OK") # Test connectivity from 1 NHRP client to all servers - pingrouter = 
tgen.gears["r4"] - logger.info("Check Ping IPv4 from R4 to R1 = 176.16.1.1)") - output = pingrouter.run("ping 176.16.1.1 -f -c 1000") + nhc1 = tgen.gears["nhc1"] + logger.info("Check Ping IPv4 from nhc1 to nhs1 = 172.16.1.1)") + output = nhc1.run("ping 172.16.1.1 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R4 to R1 should be ok" + assertmsg = "expected ping IPv4 from nhc1 to nhs1 should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R4 to R1 OK") + logger.info("Check Ping IPv4 from nhc1 to nhs1 OK") - logger.info("Check Ping IPv4 from R4 to R2 = 176.16.1.2)") - output = pingrouter.run("ping 176.16.1.2 -f -c 1000") + logger.info("Check Ping IPv4 from nhc1 to nhs2 = 172.16.1.2)") + output = nhc1.run("ping 172.16.1.2 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R4 to R2 should be ok" + assertmsg = "expected ping IPv4 from nhc1 to nhs2 should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R4 to R2 OK") + logger.info("Check Ping IPv4 from nhc1 to nhs2 OK") - logger.info("Check Ping IPv4 from R4 to R3 = 176.16.1.3)") - output = pingrouter.run("ping 176.16.1.3 -f -c 1000") + logger.info("Check Ping IPv4 from nhc1 to nhs3 = 172.16.1.3)") + output = nhc1.run("ping 172.16.1.3 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R4 to R3 should be ok" + assertmsg = "expected ping IPv4 from nhc1 to nhs3 should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R4 to R3 OK") + logger.info("Check Ping IPv4 from nhc1 to nhs3 OK") @retry(retry_timeout=30, initial_wait=5) @@ -301,22 +285,21 @@ def verify_shortcut_path(): Verifying that traffic flows through shortcut path """ tgen = get_topogen() - pingrouter = tgen.gears["r7"] - logger.info("Check Ping IPv4 from R7 to R5 = 5.5.5.5") + host = tgen.gears["host"] + logger.info("Check Ping IPv4 from host to nhc2 = 10.5.5.5") - output = pingrouter.run("ping 5.5.5.5 -f -c 1000") + output = host.run("ping 10.5.5.5 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R7 to R5 should be ok" + assertmsg = "expected ping IPv4 from host to nhc2 should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R7 to R5 OK") + logger.info("Check Ping IPv4 from host to nhc2 OK") def test_redundancy_shortcut(): """ Assert that if shortcut created and then NHS goes down, there is no traffic disruption - Stop traffic and verify next time traffic started, shortcut is initiated by backup NHS """ tgen = get_topogen() if tgen.routers_have_failure(): @@ -327,84 +310,207 @@ def test_redundancy_shortcut(): logger.info("Testing NHRP shortcuts with redundant servers") - # Verify R4 nhrp routes before shortcut creation - router = tgen.gears["r4"] - json_file = "{}/{}/nhrp_route.json".format(CWD, router.name) + # Verify nhc1 nhrp routes before shortcut creation + nhc1 = tgen.gears["nhc1"] + json_file = "{}/{}/nhrp_route.json".format(CWD, nhc1.name) assertmsg = "No nhrp_route file found" assert os.path.isfile(json_file), assertmsg expected = json.loads(open(json_file).read()) test_func = partial( - topotest.router_json_cmp, router, "show ip route nhrp json", expected + topotest.router_json_cmp, nhc1, "show ip route nhrp json", expected ) _, result = topotest.run_and_expect(test_func, 
None, count=40, wait=0.5) - output = router.vtysh_cmd("show ip route nhrp") + output = nhc1.vtysh_cmd("show ip route nhrp") logger.info(output) - assertmsg = '"{}" JSON output mismatches'.format(router.name) + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) assert result is None, assertmsg # Initiate shortcut by pinging between clients - pingrouter = tgen.gears["r7"] - logger.info("Check Ping IPv4 from R7 to R5 via shortcut = 5.5.5.5") + host = tgen.gears["host"] + logger.info("Check Ping IPv4 from host to nhc2 via shortcut = 10.5.5.5") - output = pingrouter.run("ping 5.5.5.5 -f -c 1000") + output = host.run("ping 10.5.5.5 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R7 to R5 via shortcut should be ok" + assertmsg = "expected ping IPv4 from host to nhc2 via shortcut should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R7 to R5 via shortcut OK") + logger.info("Check Ping IPv4 from host to nhc2 via shortcut OK") # Now check that NHRP shortcut route installed - json_file = "{}/{}/nhrp_route_shortcut.json".format(CWD, router.name) + json_file = "{}/{}/nhrp_route_shortcut.json".format(CWD, nhc1.name) assertmsg = "No nhrp_route file found" assert os.path.isfile(json_file), assertmsg expected = json.loads(open(json_file).read()) test_func = partial( - topotest.router_json_cmp, router, "show ip route nhrp json", expected + topotest.router_json_cmp, nhc1, "show ip route nhrp json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = nhc1.vtysh_cmd("show ip route nhrp") + logger.info(output) + + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) + assert result is None, assertmsg + + json_file = "{}/{}/nhrp_shortcut_present.json".format(CWD, nhc1.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, nhc1, "show ip nhrp shortcut json", expected ) _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) - output = router.vtysh_cmd("show ip route nhrp") + output = nhc1.vtysh_cmd("show ip nhrp shortcut") logger.info(output) - assertmsg = '"{}" JSON output mismatches'.format(router.name) + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) assert result is None, assertmsg + # check the shortcut disappears because of no traffic + json_file = "{}/{}/nhrp_shortcut_absent.json".format(CWD, nhc1.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, nhc1, "show ip nhrp shortcut json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = nhc1.vtysh_cmd("show ip nhrp shortcut") + logger.info(output) + + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) + assert result is None, assertmsg + + +def test_redundancy_shortcut_backup(): + """ + Stop traffic and verify next time traffic started, shortcut is initiated by backup NHS + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + if not _verify_iptables(): + pytest.skip("iptables not installed") + + nhc1 = tgen.gears["nhc1"] + router_list = tgen.routers() + # Bring down primary GRE interface and verify shortcut is not disturbed - logger.info("Bringing down R1, primary NHRP server.") - shutdown_bringup_interface(tgen, "r1", "r1-gre0", False) + logger.info("Bringing down nhs1, primary NHRP server.") + shutdown_bringup_interface(tgen, "nhs1", "nhs1-gre0", False) + + # 
Check NHRP cache on servers and clients + for rname, router in router_list.items(): + if "nh" not in rname: + continue + if "nhs1" in rname: + continue + + json_file = "{}/{}/nhrp_cache_nhs1_down.json".format(CWD, router.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, router, "show ip nhrp cache json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = router.vtysh_cmd("show ip nhrp cache") + logger.info(output) + + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + # Check NHRP IPV4 routes on servers and clients + logger.info("Checking IPv4 routes for convergence") + for rname, router in router_list.items(): + if "nh" not in rname: + continue + if "nhs1" in rname: + continue + + json_file = "{}/{}/nhrp_route_nhs1_down.json".format(CWD, router.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, router, "show ip route nhrp json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = router.vtysh_cmd("show ip route nhrp") + logger.info(output) + + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg # Verify shortcut is still active - pingrouter = tgen.gears["r7"] - logger.info("Check Ping IPv4 from R7 to R5 via shortcut = 5.5.5.5") + host = tgen.gears["host"] + logger.info("Check Ping IPv4 from host to nhc2 via shortcut = 10.5.5.5") - output = pingrouter.run("ping 5.5.5.5 -f -c 1000") + output = host.run("ping 10.5.5.5 -f -c 1000") logger.info(output) if "1000 packets transmitted, 1000 received" not in output: - assertmsg = "expected ping IPv4 from R7 to R5 via shortcut should be ok" + assertmsg = "expected ping IPv4 from host to nhc2 via shortcut should be ok" assert 0, assertmsg else: - logger.info("Check Ping IPv4 from R7 to R5 via shortcut OK") + logger.info("Check Ping IPv4 from host to nhc2 via shortcut OK") + + # Verify shortcut is present in routing table + json_file = "{}/{}/nhrp_route_shortcut_nhs1_down.json".format(CWD, nhc1.name) + assertmsg = "No nhrp_route file found" + assert os.path.isfile(json_file), assertmsg + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, nhc1, "show ip route nhrp json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = nhc1.vtysh_cmd("show ip route nhrp") + logger.info(output) + + json_file = "{}/{}/nhrp_shortcut_present.json".format(CWD, nhc1.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, nhc1, "show ip nhrp shortcut json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = nhc1.vtysh_cmd("show ip nhrp shortcut") + logger.info(output) + + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) + assert result is None, assertmsg # Now verify shortcut is purged with lack of traffic - json_file = "{}/{}/nhrp_route.json".format(CWD, router.name) + json_file = "{}/{}/nhrp_route_nhs1_down.json".format(CWD, nhc1.name) assertmsg = "No nhrp_route file found" assert os.path.isfile(json_file), assertmsg expected = json.loads(open(json_file).read()) test_func = partial( - topotest.router_json_cmp, router, "show ip route nhrp json", expected + topotest.router_json_cmp, nhc1, "show ip route nhrp json", expected + ) + _, result = 
topotest.run_and_expect(test_func, None, count=40, wait=0.5) + + output = nhc1.vtysh_cmd("show ip route nhrp") + logger.info(output) + + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) + assert result is None, assertmsg + + json_file = "{}/{}/nhrp_shortcut_absent.json".format(CWD, nhc1.name) + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, nhc1, "show ip nhrp shortcut json", expected ) _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5) - output = router.vtysh_cmd("show ip route nhrp") + output = nhc1.vtysh_cmd("show ip nhrp shortcut") logger.info(output) - assertmsg = '"{}" JSON output mismatches'.format(router.name) + assertmsg = '"{}" JSON output mismatches'.format(nhc1.name) assert result is None, assertmsg diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt index 797bced7b8..131085a47a 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-default.txt @@ -2,7 +2,7 @@ O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX C>* 10.0.1.0/24 is directly connected, r1-eth0, weight 1, XX:XX:XX L>* 10.0.1.1/32 is directly connected, r1-eth0, weight 1, XX:XX:XX O>* 10.0.2.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX -B>* 10.0.3.0/24 [20/20] via 10.0.30.3, r1-eth2 (vrf neno), weight 1, XX:XX:XX +B>* 10.0.3.0/24 [110/20] via 10.0.30.3, r1-eth2 (vrf neno), weight 1, XX:XX:XX O>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1, weight 1, XX:XX:XX O 10.0.20.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX C>* 10.0.20.0/24 is directly connected, r1-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt index 1dc574f360..45ee1071d4 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r1/zebra-vrf-neno.txt @@ -1,7 +1,7 @@ VRF neno: O>* 10.0.3.0/24 [110/20] via 10.0.30.3, r1-eth2, weight 1, XX:XX:XX -B>* 10.0.4.0/24 [20/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX +B>* 10.0.4.0/24 [110/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX O 10.0.30.0/24 [110/10] is directly connected, r1-eth2, weight 1, XX:XX:XX C>* 10.0.30.0/24 is directly connected, r1-eth2, weight 1, XX:XX:XX L>* 10.0.30.1/32 is directly connected, r1-eth2, weight 1, XX:XX:XX -B>* 10.0.40.0/24 [20/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX +B>* 10.0.40.0/24 [110/20] via 10.0.20.2, r1-eth1 (vrf default), weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt index b5e81bc0e9..f3724bbb9f 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-default.txt @@ -4,7 +4,7 @@ O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX C>* 10.0.2.0/24 is directly connected, r2-eth0, weight 1, XX:XX:XX L>* 10.0.2.2/32 is directly connected, r2-eth0, weight 1, XX:XX:XX O>* 10.0.3.0/24 [110/20] via 10.0.20.1, r2-eth1, weight 1, XX:XX:XX -B>* 10.0.4.0/24 [20/20] via 10.0.40.4, r2-eth2 (vrf ray), weight 1, XX:XX:XX +B>* 10.0.4.0/24 [110/20] via 10.0.40.4, r2-eth2 
(vrf ray), weight 1, XX:XX:XX O 10.0.20.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX C>* 10.0.20.0/24 is directly connected, r2-eth1, weight 1, XX:XX:XX L>* 10.0.20.2/32 is directly connected, r2-eth1, weight 1, XX:XX:XX diff --git a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt index c403496ff6..0f8b12bdfa 100644 --- a/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt +++ b/tests/topotests/ospf_multi_vrf_bgp_route_leak/r2/zebra-vrf-ray.txt @@ -1,10 +1,10 @@ VRF ray: -B 10.0.1.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX +B 10.0.1.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX B 10.0.2.0/24 [20/0] is directly connected, r2-eth0 (vrf default) inactive, weight 1, XX:XX:XX -B>* 10.0.3.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX +B>* 10.0.3.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX O>* 10.0.4.0/24 [110/20] via 10.0.40.4, r2-eth2, weight 1, XX:XX:XX B 10.0.20.0/24 [20/0] is directly connected, r2-eth1 (vrf default) inactive, weight 1, XX:XX:XX -B>* 10.0.30.0/24 [20/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX +B>* 10.0.30.0/24 [110/20] via 10.0.20.1, r2-eth1 (vrf default), weight 1, XX:XX:XX O 10.0.40.0/24 [110/10] is directly connected, r2-eth2, weight 1, XX:XX:XX C>* 10.0.40.0/24 is directly connected, r2-eth2, weight 1, XX:XX:XX L>* 10.0.40.2/32 is directly connected, r2-eth2, weight 1, XX:XX:XX diff --git a/tests/topotests/pim_cand_rp_bsr/__init__.py b/tests/topotests/pim_cand_rp_bsr/__init__.py new file mode 100755 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/__init__.py diff --git a/tests/topotests/pim_cand_rp_bsr/r1/frr.conf b/tests/topotests/pim_cand_rp_bsr/r1/frr.conf new file mode 100644 index 0000000000..d0aa3d529f --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/r1/frr.conf @@ -0,0 +1,25 @@ +! +hostname r1 +password zebra +log file /tmp/r1-frr.log +! +!debug pim packet +!debug pim bsm +! +ip route 0.0.0.0/0 10.0.0.4 +! +interface r1-eth0 + ip address 10.0.0.1/24 + ip igmp + ip pim +! +interface r1-eth1 + ip address 10.0.1.1/24 + ip igmp + ip pim +! +router pim + bsr candidate-bsr priority 200 source address 10.0.0.1 +! +ip forwarding +! diff --git a/tests/topotests/pim_cand_rp_bsr/r2/frr.conf b/tests/topotests/pim_cand_rp_bsr/r2/frr.conf new file mode 100644 index 0000000000..741c839f19 --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/r2/frr.conf @@ -0,0 +1,22 @@ +! +hostname r2 +password zebra +log file /tmp/r2-frr.log +! +ip route 0.0.0.0/0 10.0.0.4 +! +interface r2-eth0 + ip address 10.0.0.2/24 + ip igmp + ip pim +! +interface r2-eth1 + ip address 10.0.2.2/24 + ip igmp + ip pim +! +router pim + bsr candidate-bsr priority 100 source address 10.0.0.2 +! +ip forwarding +! diff --git a/tests/topotests/pim_cand_rp_bsr/r3/frr.conf b/tests/topotests/pim_cand_rp_bsr/r3/frr.conf new file mode 100644 index 0000000000..bd5c8ce93f --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/r3/frr.conf @@ -0,0 +1,32 @@ +! +hostname r3 +password zebra +log file /tmp/r3-frr.log +! +!debug pim packet +!debug pim bsm +! +ip route 0.0.0.0/0 10.0.3.4 +ip route 10.0.6.0/24 10.0.3.6 +! +interface r3-eth0 + ip address 10.0.1.3/24 + ip igmp + ip pim +! +interface r3-eth1 + ip address 10.0.3.3/24 + ip igmp + ip pim +! +interface r3-eth2 + ip address 10.0.4.3/24 + ip igmp + ip pim +! 
+router pim + bsr candidate-rp group 239.0.0.0/16 + bsr candidate-rp priority 10 source address 10.0.3.3 +! +ip forwarding +! diff --git a/tests/topotests/pim_cand_rp_bsr/r4/frr.conf b/tests/topotests/pim_cand_rp_bsr/r4/frr.conf new file mode 100644 index 0000000000..825b227728 --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/r4/frr.conf @@ -0,0 +1,37 @@ +! +hostname r4 +password zebra +log file /tmp/r4-frr.log +! +ip route 10.0.1.0/24 10.0.0.1 +ip route 10.0.4.0/24 10.0.3.3 +ip route 10.0.6.0/24 10.0.3.6 +! +interface r4-eth0 + ip address 10.0.2.4/24 + ip igmp + ip pim +! +interface r4-eth1 + ip address 10.0.3.4/24 + ip igmp + ip pim +! +interface r4-eth2 + ip address 10.0.5.4/24 + ip igmp + ip pim +! +interface r4-eth3 + ip address 10.0.0.4/24 + ip igmp + ip pim +! +router pim + bsr candidate-rp group 239.0.0.0/24 + bsr candidate-rp group 239.0.0.0/16 + bsr candidate-rp group 239.0.0.0/8 + bsr candidate-rp priority 20 source address 10.0.3.4 +! +ip forwarding +! diff --git a/tests/topotests/pim_cand_rp_bsr/r5/frr.conf b/tests/topotests/pim_cand_rp_bsr/r5/frr.conf new file mode 100644 index 0000000000..c934717d08 --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/r5/frr.conf @@ -0,0 +1,17 @@ +! +hostname r5 +password zebra +log file /tmp/r5-frr.log +! +ip route 0.0.0.0/0 10.0.4.3 +! +interface r5-eth0 + ip address 10.0.4.5/24 + ip igmp + ip pim +! +interface r5-eth1 + ip address 10.0.6.5/24 +! +ip forwarding +! diff --git a/tests/topotests/pim_cand_rp_bsr/r6/frr.conf b/tests/topotests/pim_cand_rp_bsr/r6/frr.conf new file mode 100644 index 0000000000..fd9d1eb5c4 --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/r6/frr.conf @@ -0,0 +1,22 @@ +! +hostname r6 +password zebra +log file /tmp/r6-frr.log +! +ip route 0.0.0.0/0 10.0.6.6 +! +interface r6-eth0 + ip address 10.0.5.6/24 + ip igmp + ip pim +! +interface r6-eth1 + ip address 10.0.6.6/24 +! +interface r6-eth2 + ip address 10.0.3.6/24 + ip igmp + ip pim +! +ip forwarding +! diff --git a/tests/topotests/pim_cand_rp_bsr/test_pim_cand_rp_bsr.py b/tests/topotests/pim_cand_rp_bsr/test_pim_cand_rp_bsr.py new file mode 100644 index 0000000000..ce7bc9dc56 --- /dev/null +++ b/tests/topotests/pim_cand_rp_bsr/test_pim_cand_rp_bsr.py @@ -0,0 +1,324 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_pim_cand_rp_bsr.py +# +# Copyright (c) 2024 ATCorp +# Jafar Al-Gharaibeh +# + +import os +import sys +import pytest +import json +from functools import partial + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, get_topogen +from lib.topolog import logger +from lib.pim import verify_pim_rp_info +from lib.common_config import step, write_test_header, retry + +from time import sleep + +""" +test_pim_cand_rp_bsr.py: Test candidate RP/BSR functionality +""" + +TOPOLOGY = """ + Candidate RP/BSR functionality + + +---+---+ +---+---+ + | C-BSR | 10.0.0.0/24 | C-BSR | + + R1 + <--------+---------> + R2 | + |elected| .1 | .2 | | + +---+---+ | +---+---+ + .1 | | 10.0.2.0/24 | .2 + | 10.0.1.0/24 | | + .3 | +-----| .4 | .4 + +---+---+ |---->+---+---+ + | C-RP | 10.0.3.0/24 | C-RP | + + R3 + <--------+---------> + R4 | + | prio | .3 | .4 | | + +---+---+ | +---+---+ + .3 | | | .4 + |10.0.4.0/24 | 10.0.5.0/24| + .5 | | .6 | .6 + +---+---+ +---------->+---+---+ + | | | | + + R5 + <------------------> + R6 | + | | .5 .6 | | + +---+---+ 10.0.6.0/24 +---+---+ +""" + +# Save the Current Working Directory to find configuration files. 
+CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# Required to instantiate the topology builder class. +pytestmark = [pytest.mark.pimd] + + +def build_topo(tgen): + "Build function" + + # Create 6 routers + for rn in range(1, 7): + tgen.add_router("r{}".format(rn)) + + # Create 7 switches and connect routers + sw1 = tgen.add_switch("s1") + sw1.add_link(tgen.gears["r1"]) + sw1.add_link(tgen.gears["r2"]) + + sw = tgen.add_switch("s2") + sw.add_link(tgen.gears["r1"]) + sw.add_link(tgen.gears["r3"]) + + sw = tgen.add_switch("s3") + sw.add_link(tgen.gears["r2"]) + sw.add_link(tgen.gears["r4"]) + + sw3 = tgen.add_switch("s4") + sw3.add_link(tgen.gears["r3"]) + sw3.add_link(tgen.gears["r4"]) + + sw = tgen.add_switch("s5") + sw.add_link(tgen.gears["r3"]) + sw.add_link(tgen.gears["r5"]) + + sw = tgen.add_switch("s6") + sw.add_link(tgen.gears["r4"]) + sw.add_link(tgen.gears["r6"]) + + sw = tgen.add_switch("s7") + sw.add_link(tgen.gears["r5"]) + sw.add_link(tgen.gears["r6"]) + + # make the diagnoal connections + sw1.add_link(tgen.gears["r4"]) + sw3.add_link(tgen.gears["r6"]) + +def setup_module(mod): + logger.info("PIM Candidate RP/BSR:\n {}".format(TOPOLOGY)) + + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for rname, router in router_list.items(): + logger.info("Loading router %s" % rname) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + # Initialize all routers. + tgen.start_router() + for router in router_list.values(): + if router.has_version("<", "4.0"): + tgen.set_error("unsupported version") + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + tgen.stop_topology() + +def test_pim_bsr_election_r1(request): + "Test PIM BSR Election" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + r2 = tgen.gears["r2"] + # r1 should be the BSR winner because it has higher priority + expected = { + "bsr":"10.0.0.1", + "priority":200, + "state":"ACCEPT_PREFERRED", + } + + test_func = partial( + topotest.router_json_cmp, r2, "show ip pim bsr json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + + assertmsg = "r2: r1 was not elected, bsr election mismatch" + assert result is None, assertmsg + +def test_pim_bsr_cand_bsr_r1(request): + "Test PIM BSR candidate BSR" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + r2 = tgen.gears["r2"] + + # r2 is a candidate bsr with low priority: elected = False + expected = { + "address": "10.0.0.2", + "priority": 100, + "elected": False + } + test_func = partial( + topotest.router_json_cmp, r2, "show ip pim bsr candidate-bsr json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + + assertmsg = "r2: candidate bsr mismatch " + assert result is None, assertmsg + +def test_pim_bsr_cand_rp(request): + "Test PIM BSR candidate RP" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + r3 = tgen.gears["r3"] + + # r3 is a candidate rp + expected = { + "address":"10.0.3.3", + "priority":10 + } + test_func = partial( + topotest.router_json_cmp, r3, "show ip pim bsr candidate-rp 
json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + + assertmsg = "r3: bsr candidate rp mismatch" + assert result is None, assertmsg + + +def test_pim_bsr_rp_info(request): + "Test RP info state" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # At this point, all nodes, including r5 should have synced the RP state + step("Verify rp-info on r5 from BSR") + result = verify_pim_rp_info(tgen, None, "r5", "239.0.0.0/16", None, "10.0.3.3", + "BSR", False, "ipv4", True, retry_timeout = 90) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info(tgen, None, "r5", "239.0.0.0/8", None, "10.0.3.4", + "BSR", False, "ipv4", True, retry_timeout = 30) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info(tgen, None, "r5", "239.0.0.0/24", None, "10.0.3.4", + "BSR", False, "ipv4", True, retry_timeout = 30) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify rp-info on the BSR node itself r1") + result = verify_pim_rp_info(tgen, None, "r1", "239.0.0.0/16", None, "10.0.3.3", + "BSR", False, "ipv4", True, retry_timeout = 10) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info(tgen, None, "r1", "239.0.0.0/8", None, "10.0.3.4", + "BSR", False, "ipv4", True, retry_timeout = 10) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + result = verify_pim_rp_info(tgen, None, "r1", "239.0.0.0/24", None, "10.0.3.4", + "BSR", False, "ipv4", True, retry_timeout = 10) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_pim_bsr_election_fallback_r2(request): + "Test PIM BSR Election Backup" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + step("Take r1 out from BSR candidates") + r1 = tgen.gears["r1"] + r1.vtysh_cmd( + """ + configure + router pim + no bsr candidate-bsr priority 200 source address 10.0.0.1 + """) + + step("Verify r1 is no longer a BSR candidate") + expected = {} + + test_func = partial( + topotest.router_json_cmp, r1, "show ip pim bsr candidate-bsr json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=10, wait=1) + + assertmsg = "r1: failed to remove bsr candidate configuration" + assert result is None, assertmsg + + r2 = tgen.gears["r2"] + # We should fall back to r2 as the BSR + expected = { + "bsr":"10.0.0.2", + "priority":100, + "state":"BSR_ELECTED", + } + + step("Verify that we fallback to r2 as the new BSR") + + test_func = partial( + topotest.router_json_cmp, r2, "show ip pim bsr json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=180, wait=1) + + assertmsg = "r2: failed to fallback to r2 as a BSR" + assert result is None, assertmsg + + +def test_pim_bsr_rp_info_fallback(request): + "Test RP info state on r5" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + step("Take r3 out from RP candidates for group 239.0.0.0/16") + r3 = tgen.gears["r3"] + r3.vtysh_cmd( + """ + configure + router pim + no bsr candidate-rp group 
239.0.0.0/16 + """) + + step("Verify falling back to r4 as the new RP for 239.0.0.0/16") + + result = verify_pim_rp_info(tgen, None, "r5", "239.0.0.0/16", None, "10.0.3.4", + "BSR", False, "ipv4", True, retry_timeout = 30) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini index db806fed39..b234a84252 100644 --- a/tests/topotests/pytest.ini +++ b/tests/topotests/pytest.ini @@ -1,6 +1,9 @@ # Skip pytests example directory [pytest] +asyncio_mode = auto +asyncio_default_fixture_loop_scope = module + # NEEDS_EXABGP_4_2_11_FRR # asyncio_mode = auto diff --git a/tests/topotests/srv6_encap_src_addr/r1/zebra.conf b/tests/topotests/srv6_encap_src_addr/r1/zebra.conf index c570756b52..c245dd2d96 100644 --- a/tests/topotests/srv6_encap_src_addr/r1/zebra.conf +++ b/tests/topotests/srv6_encap_src_addr/r1/zebra.conf @@ -4,7 +4,6 @@ hostname r1 ! debug zebra rib detailed ! log stdout notifications -log monitor notifications log commands log file zebra.log debugging ! diff --git a/tests/topotests/srv6_sid_manager/ce1/bgpd.conf b/tests/topotests/srv6_sid_manager/ce1/bgpd.conf new file mode 100644 index 0000000000..3459796629 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce1/bgpd.conf @@ -0,0 +1,8 @@ +frr defaults traditional +! +hostname ce1 +password zebra +! +log stdout notifications +log commands +log file bgpd.log diff --git a/tests/topotests/srv6_sid_manager/ce1/ipv6_rib.json b/tests/topotests/srv6_sid_manager/ce1/ipv6_rib.json new file mode 100644 index 0000000000..a35e2b1b3d --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce1/ipv6_rib.json @@ -0,0 +1,58 @@ +{ + "::/0": [ + { + "prefix": "::/0", + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 73, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "2001:1::1", + "afi": "ipv6", + "interfaceName": "eth-rt1", + "active": true, + "weight": 1 + } + ] + } + ], + "2001:1::/64": [ + { + "prefix": "2001:1::/64", + "protocol": "connected", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-rt1", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/ce1/zebra.conf b/tests/topotests/srv6_sid_manager/ce1/zebra.conf new file mode 100644 index 0000000000..0dea0c5751 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce1/zebra.conf @@ -0,0 +1,14 @@ +log file zebra.log +! +hostname ce1 +! +interface eth-rt1 + ipv6 address 2001:1::2/64 +! +ip forwarding +ipv6 forwarding +! +ipv6 route ::/0 2001:1::1 +! +line vty +! 
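The NHRP and PIM test additions above repeat one verification idiom many times: read a JSON reference file, build a comparison closure with functools.partial over topotest.router_json_cmp, poll it with topotest.run_and_expect, and assert the result is None. A minimal sketch of that idiom factored into a single helper follows; the helper name expect_json_output is illustrative only and not part of this change, and it assumes the topotest lib package is importable exactly as in the test scripts above.

# Hypothetical helper, not part of this change: wraps the verify-with-retry
# idiom used by the new NHRP and PIM tests.
import json
from functools import partial

from lib import topotest


def expect_json_output(router, command, json_file, count=40, wait=0.5):
    """Poll `command` on `router` until its JSON output matches `json_file`."""
    expected = json.loads(open(json_file).read())
    # router_json_cmp returns None once the live vtysh JSON output contains
    # the expected data; run_and_expect keeps retrying until that happens
    # or the retry budget is exhausted.
    test_func = partial(topotest.router_json_cmp, router, command, expected)
    _, result = topotest.run_and_expect(test_func, None, count=count, wait=wait)
    assert result is None, '"{}" JSON output mismatches'.format(router.name)

With such a helper, each of the repeated blocks above collapses to a single call, for example expect_json_output(nhc1, "show ip route nhrp json", json_file).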
diff --git a/tests/topotests/srv6_sid_manager/ce2/bgpd.conf b/tests/topotests/srv6_sid_manager/ce2/bgpd.conf new file mode 100644 index 0000000000..8ed9978749 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce2/bgpd.conf @@ -0,0 +1,8 @@ +frr defaults traditional +! +hostname ce2 +password zebra +! +log stdout notifications +log commands +log file bgpd.log diff --git a/tests/topotests/srv6_sid_manager/ce2/ipv6_rib.json b/tests/topotests/srv6_sid_manager/ce2/ipv6_rib.json new file mode 100644 index 0000000000..b4594d1c57 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce2/ipv6_rib.json @@ -0,0 +1,58 @@ +{ + "::/0": [ + { + "prefix": "::/0", + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 73, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "2001:2::1", + "afi": "ipv6", + "interfaceName": "eth-rt6", + "active": true, + "weight": 1 + } + ] + } + ], + "2001:2::/64": [ + { + "prefix": "2001:2::/64", + "protocol": "connected", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-rt6", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/ce2/zebra.conf b/tests/topotests/srv6_sid_manager/ce2/zebra.conf new file mode 100644 index 0000000000..c4755f8485 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce2/zebra.conf @@ -0,0 +1,14 @@ +log file zebra.log +! +hostname ce2 +! +interface eth-rt6 + ipv6 address 2001:2::2/64 +! +ip forwarding +ipv6 forwarding +! +ipv6 route ::/0 2001:2::1 +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/ce3/bgpd.conf b/tests/topotests/srv6_sid_manager/ce3/bgpd.conf new file mode 100644 index 0000000000..a85d9701c7 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce3/bgpd.conf @@ -0,0 +1,8 @@ +frr defaults traditional +! +hostname ce3 +password zebra +! 
+log stdout notifications +log commands +log file bgpd.log diff --git a/tests/topotests/srv6_sid_manager/ce3/ipv6_rib.json b/tests/topotests/srv6_sid_manager/ce3/ipv6_rib.json new file mode 100644 index 0000000000..3a50cb0199 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce3/ipv6_rib.json @@ -0,0 +1,58 @@ +{ + "::/0": [ + { + "prefix": "::/0", + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 73, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "2001:3::1", + "afi": "ipv6", + "interfaceName": "eth-rt1", + "active": true, + "weight": 1 + } + ] + } + ], + "2001:3::/64": [ + { + "prefix": "2001:3::/64", + "protocol": "connected", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-rt1", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/ce3/zebra.conf b/tests/topotests/srv6_sid_manager/ce3/zebra.conf new file mode 100644 index 0000000000..046bcb6af8 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce3/zebra.conf @@ -0,0 +1,14 @@ +log file zebra.log +! +hostname ce3 +! +interface eth-rt1 + ipv6 address 2001:3::2/64 +! +ip forwarding +ipv6 forwarding +! +ipv6 route ::/0 2001:3::1 +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/ce4/bgpd.conf b/tests/topotests/srv6_sid_manager/ce4/bgpd.conf new file mode 100644 index 0000000000..93fb32fd1b --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce4/bgpd.conf @@ -0,0 +1,8 @@ +frr defaults traditional +! +hostname ce4 +password zebra +! +log stdout notifications +log commands +log file bgpd.log diff --git a/tests/topotests/srv6_sid_manager/ce4/ipv6_rib.json b/tests/topotests/srv6_sid_manager/ce4/ipv6_rib.json new file mode 100644 index 0000000000..f6484355ba --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce4/ipv6_rib.json @@ -0,0 +1,58 @@ +{ + "::/0": [ + { + "prefix": "::/0", + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 73, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "2001:4::1", + "afi": "ipv6", + "interfaceName": "eth-rt6", + "active": true, + "weight": 1 + } + ] + } + ], + "2001:4::/64": [ + { + "prefix": "2001:4::/64", + "protocol": "connected", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-rt6", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/ce4/zebra.conf b/tests/topotests/srv6_sid_manager/ce4/zebra.conf new file mode 100644 index 0000000000..7913d6f397 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce4/zebra.conf @@ -0,0 +1,14 @@ +log file zebra.log +! 
+hostname ce4 +! +interface eth-rt6 + ipv6 address 2001:4::2/64 +! +ip forwarding +ipv6 forwarding +! +ipv6 route ::/0 2001:4::1 +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/ce5/bgpd.conf b/tests/topotests/srv6_sid_manager/ce5/bgpd.conf new file mode 100644 index 0000000000..2ab6f2d2a7 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce5/bgpd.conf @@ -0,0 +1,8 @@ +frr defaults traditional +! +hostname ce5 +password zebra +! +log stdout notifications +log commands +log file bgpd.log diff --git a/tests/topotests/srv6_sid_manager/ce5/ipv6_rib.json b/tests/topotests/srv6_sid_manager/ce5/ipv6_rib.json new file mode 100644 index 0000000000..a88df73c5a --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce5/ipv6_rib.json @@ -0,0 +1,58 @@ +{ + "::/0": [ + { + "prefix": "::/0", + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 73, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "2001:5::1", + "afi": "ipv6", + "interfaceName": "eth-rt1", + "active": true, + "weight": 1 + } + ] + } + ], + "2001:5::/64": [ + { + "prefix": "2001:5::/64", + "protocol": "connected", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-rt1", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/ce5/zebra.conf b/tests/topotests/srv6_sid_manager/ce5/zebra.conf new file mode 100644 index 0000000000..21414ffcbc --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce5/zebra.conf @@ -0,0 +1,14 @@ +log file zebra.log +! +hostname ce5 +! +interface eth-rt1 + ipv6 address 2001:5::2/64 +! +ip forwarding +ipv6 forwarding +! +ipv6 route ::/0 2001:5::1 +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/ce6/bgpd.conf b/tests/topotests/srv6_sid_manager/ce6/bgpd.conf new file mode 100644 index 0000000000..e0b6540514 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce6/bgpd.conf @@ -0,0 +1,8 @@ +frr defaults traditional +! +hostname ce6 +password zebra +! 
+log stdout notifications +log commands +log file bgpd.log diff --git a/tests/topotests/srv6_sid_manager/ce6/ipv6_rib.json b/tests/topotests/srv6_sid_manager/ce6/ipv6_rib.json new file mode 100644 index 0000000000..ab6dfc9e13 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce6/ipv6_rib.json @@ -0,0 +1,58 @@ +{ + "::/0": [ + { + "prefix": "::/0", + "protocol": "static", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 1, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 73, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "2001:6::1", + "afi": "ipv6", + "interfaceName": "eth-rt6", + "active": true, + "weight": 1 + } + ] + } + ], + "2001:6::/64": [ + { + "prefix": "2001:6::/64", + "protocol": "connected", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-rt6", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/ce6/zebra.conf b/tests/topotests/srv6_sid_manager/ce6/zebra.conf new file mode 100644 index 0000000000..ebe8556092 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/ce6/zebra.conf @@ -0,0 +1,14 @@ +log file zebra.log +! +hostname ce6 +! +interface eth-rt6 + ipv6 address 2001:6::2/64 +! +ip forwarding +ipv6 forwarding +! +ipv6 route ::/0 2001:6::1 +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/dst/sharpd.conf b/tests/topotests/srv6_sid_manager/dst/sharpd.conf new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/dst/sharpd.conf diff --git a/tests/topotests/srv6_sid_manager/dst/zebra.conf b/tests/topotests/srv6_sid_manager/dst/zebra.conf new file mode 100644 index 0000000000..80741856cb --- /dev/null +++ b/tests/topotests/srv6_sid_manager/dst/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname dst +! +! debug zebra kernel +! debug zebra packet +! debug zebra mpls +! +interface lo + ip address 9.9.9.2/32 + ipv6 address fc00:0:9::1/128 +! +interface eth-rt6 + ip address 10.0.10.2/24 + ipv6 address 2001:db8:10::2/64 +! +ip forwarding +! +ip route 2001:db8:1::1 2001:db8:10::1 +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/rt1/bgpd.conf b/tests/topotests/srv6_sid_manager/rt1/bgpd.conf new file mode 100644 index 0000000000..20c396afde --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/bgpd.conf @@ -0,0 +1,66 @@ +frr defaults traditional +! +bgp send-extra-data zebra +! +hostname rt1 +password zebra +! +log stdout notifications +log commands +! +!debug bgp neighbor-events +!debug bgp zebra +!debug bgp vnc verbose +!debug bgp update-groups +!debug bgp updates in +!debug bgp updates out +!debug bgp vpn label +!debug bgp vpn leak-from-vrf +!debug bgp vpn leak-to-vrf +!debug bgp vpn rmap-event +! +router bgp 1 + bgp router-id 1.1.1.1 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + neighbor fc00:0:6::1 remote-as 6 + neighbor fc00:0:6::1 timers 3 10 + neighbor fc00:0:6::1 timers connect 1 + neighbor fc00:0:6::1 ttl-security hops 20 + ! + address-family ipv6 vpn + neighbor fc00:0:6::1 activate + exit-address-family + ! + segment-routing srv6 + locator loc1 + ! +! 
+router bgp 1 vrf vrf10 + bgp router-id 1.1.1.1 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + ! + address-family ipv6 unicast + sid vpn export 65024 + rd vpn export 1:10 + rt vpn both 99:99 + import vpn + export vpn + redistribute connected + exit-address-family +! +router bgp 1 vrf vrf20 + bgp router-id 1.1.1.1 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + ! + address-family ipv6 unicast + sid vpn export 65025 + rd vpn export 1:20 + rt vpn both 88:88 + import vpn + export vpn + redistribute connected + exit-address-family +! diff --git a/tests/topotests/srv6_sid_manager/rt1/isisd.conf b/tests/topotests/srv6_sid_manager/rt1/isisd.conf new file mode 100644 index 0000000000..29e1a31171 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/isisd.conf @@ -0,0 +1,35 @@ +password 1 +hostname rt1 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0001.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/srv6_sid_manager/rt1/sharpd.conf b/tests/topotests/srv6_sid_manager/rt1/sharpd.conf new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/sharpd.conf diff --git a/tests/topotests/srv6_sid_manager/rt1/show_ip_route.ref b/tests/topotests/srv6_sid_manager/rt1/show_ip_route.ref new file mode 100644 index 0000000000..590d75afbf --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/show_ip_route.ref @@ -0,0 +1,276 @@ +{ + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + 
"protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt1/show_ipv6_route.ref b/tests/topotests/srv6_sid_manager/rt1/show_ipv6_route.ref new file mode 100644 index 0000000000..cdbec3f2c1 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/show_ipv6_route.ref @@ -0,0 +1,314 @@ +{ + "fc00:0:2::1\/128":[ + { + "prefix":"fc00:0:2::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:3::1\/128":[ + { + "prefix":"fc00:0:3::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:4::1\/128":[ + { + "prefix":"fc00:0:4::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:5::1\/128":[ 
+ { + "prefix":"fc00:0:5::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:6::1\/128":[ + { + "prefix":"fc00:0:6::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:2::\/48":[ + { + "prefix":"fc00:0:2::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:3::\/48":[ + { + "prefix":"fc00:0:3::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:4::\/48":[ + { + "prefix":"fc00:0:4::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:5::\/48":[ + { + "prefix":"fc00:0:5::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:6::\/48":[ + { + "prefix":"fc00:0:6::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:1::\/48":[ + { + "prefix":"fc00:0:1::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"sr0", + "active":true, + "seg6local":{ + "action":"End" + } + } + ] + } + ], + "fc00:0:1:e000::\/64":[ + { + "prefix":"fc00:0:1:e000::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"eth-sw1", + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:1:e001::\/64":[ + { + "prefix":"fc00:0:1:e001::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"eth-sw1", + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:1:fe00::\/128":[ + { + "prefix":"fc00:0:1:fe00::\/128", + "protocol":"bgp", + "selected":true, + "destSelected":true, + "distance":20, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"vrf10", + "active":true, + "seg6local":{ + "action":"End.DT6" + } + } + ] + } + ], + "fc00:0:1:fe01::\/128":[ + { + "prefix":"fc00:0:1:fe01::\/128", + 
"protocol":"bgp", + "selected":true, + "destSelected":true, + "distance":20, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"vrf20", + "active":true, + "seg6local":{ + "action":"End.DT6" + } + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt1/show_srv6_locator_table.ref b/tests/topotests/srv6_sid_manager/rt1/show_srv6_locator_table.ref new file mode 100644 index 0000000000..c4a5d7507b --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/show_srv6_locator_table.ref @@ -0,0 +1,15 @@ +{ + "locators":[ + { + "name":"loc1", + "prefix":"fc00:0:1::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/srv6_sid_manager/rt1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..9c5901b90f --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,32 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/srv6_sid_manager/rt1/vpnv6_rib.ref b/tests/topotests/srv6_sid_manager/rt1/vpnv6_rib.ref new file mode 100644 index 0000000000..52bc589a7f --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/vpnv6_rib.ref @@ -0,0 +1,169 @@ +{ + "vrfId": 0, + "vrfName": "default", + "routerId": "1.1.1.1", + "defaultLocPrf": 100, + "localAS": 1, + "routes": { + "routeDistinguishers": { + "1:10": { + "2001:1::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:1::", + "prefixLen": 64, + "network": "2001:1::/64", + "metric": 0, + "weight": 32768, + "peerId": "(unspec)", + "path": "", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf10", + "nexthops": [ + { + "ip": "::", + "hostname": "rt1", + "afi": "ipv6", + "used": true + } + ] + } + ], + "2001:3::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:3::", + "prefixLen": 64, + "network": "2001:3::/64", + "metric": 0, + "weight": 32768, + "peerId": "(unspec)", + "path": "", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf10", + "nexthops": [ + { + "ip": "::", + "hostname": "rt1", + "afi": "ipv6", + "used": true + } + ] + } + ] + }, + "1:20": { + "2001:5::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:5::", + "prefixLen": 64, + "network": "2001:5::/64", + "metric": 0, + "weight": 32768, + "peerId": "(unspec)", + "path": "", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf20", + "nexthops": [ + { + "ip": "::", + "hostname": "rt1", + "afi": "ipv6", + "used": true + } + ] + } + ] + }, + "6:10": { + "2001:2::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + 
"prefix": "2001:2::", + "prefixLen": 64, + "network": "2001:2::/64", + "metric": 0, + "weight": 0, + "peerId": "fc00:0:6::1", + "path": "6", + "origin": "incomplete", + "nexthops": [ + { + "ip": "fc00:0:6::1", + "hostname": "rt6", + "afi": "ipv6", + "used": true + } + ] + } + ] + }, + "6:20": { + "2001:4::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:4::", + "prefixLen": 64, + "network": "2001:4::/64", + "metric": 0, + "weight": 0, + "peerId": "fc00:0:6::1", + "path": "6", + "origin": "incomplete", + "nexthops": [ + { + "ip": "fc00:0:6::1", + "hostname": "rt6", + "afi": "ipv6", + "used": true + } + ] + } + ], + "2001:6::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:6::", + "prefixLen": 64, + "network": "2001:6::/64", + "metric": 0, + "weight": 0, + "peerId": "fc00:0:6::1", + "path": "6", + "origin": "incomplete", + "nexthops": [ + { + "ip": "fc00:0:6::1", + "hostname": "rt6", + "afi": "ipv6", + "used": true + } + ] + } + ] + } + } + } +} diff --git a/tests/topotests/srv6_sid_manager/rt1/vrf10_rib.ref b/tests/topotests/srv6_sid_manager/rt1/vrf10_rib.ref new file mode 100644 index 0000000000..2aae3497f4 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/vrf10_rib.ref @@ -0,0 +1,86 @@ +{ + "2001:1::/64": [ + { + "prefix": "2001:1::/64", + "protocol": "connected", + "vrfName": "vrf10", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 10, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-ce1", + "active": true + } + ] + } + ], + "2001:2::/64": [ + { + "prefix": "2001:2::/64", + "protocol": "bgp", + "vrfName": "vrf10", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 10, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "interfaceName": "eth-sw1", + "vrf": "default", + "active": true, + "weight": 1, + "seg6": { + "segs": "fc00:0:6:fe00::" + } + } + ], + "asPath": "6" + } + ], + "2001:3::/64": [ + { + "prefix": "2001:3::/64", + "protocol": "connected", + "vrfName": "vrf10", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 10, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-ce3", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt1/vrf20_rib.ref b/tests/topotests/srv6_sid_manager/rt1/vrf20_rib.ref new file mode 100644 index 0000000000..de9e450cf6 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/vrf20_rib.ref @@ -0,0 +1,92 @@ +{ + "2001:4::/64": [ + { + "prefix": "2001:4::/64", + "protocol": "bgp", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 20, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "interfaceName": "eth-sw1", + "vrf": "default", + "active": true, 
+ "weight": 1, + "seg6": { + "segs": "fc00:0:6:fe01::" + } + } + ], + "asPath": "6" + } + ], + "2001:5::/64": [ + { + "prefix": "2001:5::/64", + "protocol": "connected", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 20, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-ce5", + "active": true + } + ] + } + ], + "2001:6::/64": [ + { + "prefix": "2001:6::/64", + "protocol": "bgp", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 20, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "interfaceName": "eth-sw1", + "vrf": "default", + "active": true, + "weight": 1, + "seg6": { + "segs": "fc00:0:6:fe01::" + } + } + ], + "asPath": "6" + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt1/zebra.conf b/tests/topotests/srv6_sid_manager/rt1/zebra.conf new file mode 100644 index 0000000000..ef7fb78eed --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt1/zebra.conf @@ -0,0 +1,37 @@ +log file zebra.log +! +hostname rt1 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ip address 1.1.1.1/32 + ipv6 address fc00:0:1::1/128 +! +interface eth-sw1 + ip address 10.0.1.1/24 + ipv6 address 2001:db8:1::1/64 +! +interface eth-ce1 vrf vrf10 + ipv6 address 2001:1::1/64 +! +interface eth-ce3 vrf vrf10 + ipv6 address 2001:3::1/64 +! +interface eth-ce5 vrf vrf20 + ipv6 address 2001:5::1/64 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:1::/48 + format usid-f3216 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/rt2/isisd.conf b/tests/topotests/srv6_sid_manager/rt2/isisd.conf new file mode 100644 index 0000000000..b095f04910 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt2/isisd.conf @@ -0,0 +1,48 @@ +hostname rt2 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt4-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt4-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0002.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! 
diff --git a/tests/topotests/srv6_sid_manager/rt2/show_ip_route.ref b/tests/topotests/srv6_sid_manager/rt2/show_ip_route.ref new file mode 100644 index 0000000000..1d4a9e9a25 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt2/show_ip_route.ref @@ -0,0 +1,320 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1" + }, + { + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + 
"afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.3", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.2.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.4", + "afi":"ipv4", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt2/show_ipv6_route.ref b/tests/topotests/srv6_sid_manager/rt2/show_ipv6_route.ref new file mode 100644 index 0000000000..fc0f1d3bcc --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt2/show_ipv6_route.ref @@ -0,0 +1,346 @@ +{ + "fc00:0:1::1\/128":[ + { + "prefix":"fc00:0:1::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:3::1\/128":[ + { + "prefix":"fc00:0:3::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:4::1\/128":[ + { + "prefix":"fc00:0:4::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "fc00:0:5::1\/128":[ + { + "prefix":"fc00:0:5::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:6::1\/128":[ + { + "prefix":"fc00:0:6::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "fc00:0:1::\/48":[ + { + "prefix":"fc00:0:1::\/48", + "protocol":"isis", + "selected":true, + 
"destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:3::\/48":[ + { + "prefix":"fc00:0:3::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:4::\/48":[ + { + "prefix":"fc00:0:4::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "fc00:0:5::\/48":[ + { + "prefix":"fc00:0:5::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:6::\/48":[ + { + "prefix":"fc00:0:6::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4-2", + "active":true + } + ] + } + ], + "fc00:0:2::\/48":[ + { + "prefix":"fc00:0:2::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"sr0", + "active":true, + "seg6local":{ + "action":"End" + } + } + ] + } + ], + "fc00:0:2:e000::\/64":[ + { + "prefix":"fc00:0:2:e000::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:2:e001::\/64":[ + { + "prefix":"fc00:0:2:e001::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:2:e002::\/64":[ + { + "prefix":"fc00:0:2:e002::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:2:e003::\/64":[ + { + "prefix":"fc00:0:2:e003::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt2/show_srv6_locator_table.ref b/tests/topotests/srv6_sid_manager/rt2/show_srv6_locator_table.ref new file mode 100644 index 0000000000..f8a5d93f3c --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt2/show_srv6_locator_table.ref @@ -0,0 +1,15 @@ +{ + "locators":[ + 
{ + "name":"loc1", + "prefix":"fc00:0:2::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt2/show_yang_interface_isis_adjacencies.ref b/tests/topotests/srv6_sid_manager/rt2/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..5e46ddf728 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt2/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,70 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt4-1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt4-2", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0001", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/srv6_sid_manager/rt2/zebra.conf b/tests/topotests/srv6_sid_manager/rt2/zebra.conf new file mode 100644 index 0000000000..32737dfcd6 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt2/zebra.conf @@ -0,0 +1,34 @@ +log file zebra.log +! +hostname rt2 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ip address 2.2.2.2/32 + ipv6 address fc00:0:2::1/128 +! +interface eth-sw1 + ip address 10.0.1.2/24 + ipv6 address 2001:db8:1::2/64 +! +interface eth-rt4-1 + ip address 10.0.2.2/24 +! +interface eth-rt4-2 + ip address 10.0.3.2/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:2::/48 + format usid-f3216 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/rt3/isisd.conf b/tests/topotests/srv6_sid_manager/rt3/isisd.conf new file mode 100644 index 0000000000..e237db2f49 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt3/isisd.conf @@ -0,0 +1,48 @@ +hostname rt3 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0003.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! 
diff --git a/tests/topotests/srv6_sid_manager/rt3/show_ip_route.ref b/tests/topotests/srv6_sid_manager/rt3/show_ip_route.ref new file mode 100644 index 0000000000..6ce5760e4f --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt3/show_ip_route.ref @@ -0,0 +1,320 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.1.1", + "afi":"ipv4", + "interfaceName":"eth-sw1" + }, + { + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1" + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1" + }, + { + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + 
"ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.1.2", + "afi":"ipv4", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.5", + "afi":"ipv4", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt3/show_ipv6_route.ref b/tests/topotests/srv6_sid_manager/rt3/show_ipv6_route.ref new file mode 100644 index 0000000000..c590fcfdbc --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt3/show_ipv6_route.ref @@ -0,0 +1,346 @@ +{ + "fc00:0:1::1\/128":[ + { + "prefix":"fc00:0:1::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:2::1\/128":[ + { + "prefix":"fc00:0:2::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:4::1\/128":[ + { + "prefix":"fc00:0:4::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "fc00:0:5::1\/128":[ + { + "prefix":"fc00:0:5::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "fc00:0:6::1\/128":[ + { + "prefix":"fc00:0:6::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "fc00:0:1::\/48":[ + { + "prefix":"fc00:0:1::\/48", + "protocol":"isis", + "selected":true, + 
"destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:2::\/48":[ + { + "prefix":"fc00:0:2::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + } + ] + } + ], + "fc00:0:4::\/48":[ + { + "prefix":"fc00:0:4::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-sw1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "fc00:0:5::\/48":[ + { + "prefix":"fc00:0:5::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "fc00:0:6::\/48":[ + { + "prefix":"fc00:0:6::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5-2", + "active":true + } + ] + } + ], + "fc00:0:3::\/48":[ + { + "prefix":"fc00:0:3::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"sr0", + "active":true, + "seg6local":{ + "action":"End" + } + } + ] + } + ], + "fc00:0:3:e000::\/64":[ + { + "prefix":"fc00:0:3:e000::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:3:e001::\/64":[ + { + "prefix":"fc00:0:3:e001::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:3:e002::\/64":[ + { + "prefix":"fc00:0:3:e002::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:3:e003::\/64":[ + { + "prefix":"fc00:0:3:e003::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt3/show_srv6_locator_table.ref b/tests/topotests/srv6_sid_manager/rt3/show_srv6_locator_table.ref new file mode 100644 index 0000000000..c62870587b --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt3/show_srv6_locator_table.ref @@ -0,0 +1,15 @@ +{ + "locators":[ + 
{ + "name":"loc1", + "prefix":"fc00:0:3::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt3/show_yang_interface_isis_adjacencies.ref b/tests/topotests/srv6_sid_manager/rt3/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..a284240d24 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt3/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,70 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt5-1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt5-2", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0001", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/srv6_sid_manager/rt3/zebra.conf b/tests/topotests/srv6_sid_manager/rt3/zebra.conf new file mode 100644 index 0000000000..73cf6b08f4 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt3/zebra.conf @@ -0,0 +1,33 @@ +log file zebra.log +! +hostname rt3 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ip address 3.3.3.3/32 + ipv6 address fc00:0:3::1/128 +! +interface eth-sw1 + ip address 10.0.1.3/24 +! +interface eth-rt5-1 + ip address 10.0.4.3/24 +! +interface eth-rt5-2 + ip address 10.0.5.3/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:3::/48 + format usid-f3216 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/rt4/isisd.conf b/tests/topotests/srv6_sid_manager/rt4/isisd.conf new file mode 100644 index 0000000000..b4c92146a1 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt4/isisd.conf @@ -0,0 +1,56 @@ +hostname rt4 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt2-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt2-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0004.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! 
diff --git a/tests/topotests/srv6_sid_manager/rt4/show_ip_route.ref b/tests/topotests/srv6_sid_manager/rt4/show_ip_route.ref new file mode 100644 index 0000000000..0f26fa5d7a --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt4/show_ip_route.ref @@ -0,0 +1,296 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-2", + "active":true + }, + { + "fib":true, + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-1" + }, + { + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.2.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "ip":"10.0.3.2", + "afi":"ipv4", + "interfaceName":"eth-rt2-2" + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"isis", + "selected":true, + 
"destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5" + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + }, + { + "fib":true, + "ip":"10.0.7.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt4/show_ipv6_route.ref b/tests/topotests/srv6_sid_manager/rt4/show_ipv6_route.ref new file mode 100644 index 0000000000..7b575f1888 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt4/show_ipv6_route.ref @@ -0,0 +1,346 @@ +{ + "fc00:0:1::1\/128":[ + { + "prefix":"fc00:0:1::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "fc00:0:2::1\/128":[ + { + "prefix":"fc00:0:2::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "fc00:0:3::1\/128":[ + { + "prefix":"fc00:0:3::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:5::1\/128":[ + { + "prefix":"fc00:0:5::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:6::1\/128":[ + { + "prefix":"fc00:0:6::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "fc00:0:1::\/48":[ + { + "prefix":"fc00:0:1::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "fc00:0:2::\/48":[ + { + "prefix":"fc00:0:2::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + 
"distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-2", + "active":true + } + ] + } + ], + "fc00:0:3::\/48":[ + { + "prefix":"fc00:0:3::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-1", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt2-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:5::\/48":[ + { + "prefix":"fc00:0:5::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:6::\/48":[ + { + "prefix":"fc00:0:6::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "fc00:0:4::\/48":[ + { + "prefix":"fc00:0:4::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"sr0", + "active":true, + "seg6local":{ + "action":"End" + } + } + ] + } + ], + "fc00:0:4:e000::\/64":[ + { + "prefix":"fc00:0:4:e000::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:4:e001::\/64":[ + { + "prefix":"fc00:0:4:e001::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:4:e002::\/64":[ + { + "prefix":"fc00:0:4:e002::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:4:e003::\/64":[ + { + "prefix":"fc00:0:4:e003::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ] +}
\ No newline at end of file diff --git a/tests/topotests/srv6_sid_manager/rt4/show_srv6_locator_table.ref b/tests/topotests/srv6_sid_manager/rt4/show_srv6_locator_table.ref new file mode 100644 index 0000000000..cb052dbbb5 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt4/show_srv6_locator_table.ref @@ -0,0 +1,15 @@ +{ + "locators":[ + { + "name":"loc1", + "prefix":"fc00:0:4::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt4/show_yang_interface_isis_adjacencies.ref b/tests/topotests/srv6_sid_manager/rt4/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..0ca7a76bd4 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt4/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,82 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt2-1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt2-2", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt5", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt6", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0006", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/srv6_sid_manager/rt4/zebra.conf b/tests/topotests/srv6_sid_manager/rt4/zebra.conf new file mode 100644 index 0000000000..266db7c53a --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt4/zebra.conf @@ -0,0 +1,36 @@ +log file zebra.log +! +hostname rt4 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ip address 4.4.4.4/32 + ipv6 address fc00:0:4::1/128 +! +interface eth-rt2-1 + ip address 10.0.2.4/24 +! +interface eth-rt2-2 + ip address 10.0.3.4/24 +! +interface eth-rt5 + ip address 10.0.6.4/24 +! +interface eth-rt6 + ip address 10.0.7.4/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:4::/48 + format usid-f3216 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/rt5/isisd.conf b/tests/topotests/srv6_sid_manager/rt5/isisd.conf new file mode 100644 index 0000000000..26f895dd82 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt5/isisd.conf @@ -0,0 +1,56 @@ +hostname rt5 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt3-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt3-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! 
+interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0005.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/srv6_sid_manager/rt5/show_ip_route.ref b/tests/topotests/srv6_sid_manager/rt5/show_ip_route.ref new file mode 100644 index 0000000000..65beaa5998 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt5/show_ip_route.ref @@ -0,0 +1,296 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-2", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-2", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "6.6.6.6\/32":[ + { + "prefix":"6.6.6.6\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-1", + "active":true + }, + { + "fib":true, + "ip":"10.0.5.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-2", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + 
"installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-1" + }, + { + "ip":"10.0.5.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-2", + "active":true + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.4.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-1", + "active":true + }, + { + "ip":"10.0.5.3", + "afi":"ipv4", + "interfaceName":"eth-rt3-2" + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.6.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.6", + "afi":"ipv4", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.6", + "afi":"ipv4", + "interfaceName":"eth-rt6" + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt5/show_ipv6_route.ref b/tests/topotests/srv6_sid_manager/rt5/show_ipv6_route.ref new file mode 100644 index 0000000000..a7b3262f86 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt5/show_ipv6_route.ref @@ -0,0 +1,346 @@ +{ + "fc00:0:1::1\/128":[ + { + "prefix":"fc00:0:1::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-1", + "active":true + } + ] + } + ], + "fc00:0:2::1\/128":[ + { + "prefix":"fc00:0:2::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-1", + "active":true + } + ] + } + ], + "fc00:0:3::1\/128":[ + { + "prefix":"fc00:0:3::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-1", + "active":true + } + ] + } + ], + "fc00:0:4::1\/128":[ + { + "prefix":"fc00:0:4::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "fc00:0:6::1\/128":[ + { + "prefix":"fc00:0:6::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + 
"interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "fc00:0:1::\/48":[ + { + "prefix":"fc00:0:1::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-1", + "active":true + } + ] + } + ], + "fc00:0:2::\/48":[ + { + "prefix":"fc00:0:2::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-1", + "active":true + } + ] + } + ], + "fc00:0:3::\/48":[ + { + "prefix":"fc00:0:3::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-2", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt3-1", + "active":true + } + ] + } + ], + "fc00:0:4::\/48":[ + { + "prefix":"fc00:0:4::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "fc00:0:6::\/48":[ + { + "prefix":"fc00:0:6::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt6", + "active":true + } + ] + } + ], + "fc00:0:5::\/48":[ + { + "prefix":"fc00:0:5::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"sr0", + "active":true, + "seg6local":{ + "action":"End" + } + } + ] + } + ], + "fc00:0:5:e000::\/64":[ + { + "prefix":"fc00:0:5:e000::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:5:e001::\/64":[ + { + "prefix":"fc00:0:5:e001::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:5:e002::\/64":[ + { + "prefix":"fc00:0:5:e002::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:5:e003::\/64":[ + { + "prefix":"fc00:0:5:e003::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt5/show_srv6_locator_table.ref b/tests/topotests/srv6_sid_manager/rt5/show_srv6_locator_table.ref new file mode 
100644 index 0000000000..ec55f24d7b --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt5/show_srv6_locator_table.ref @@ -0,0 +1,15 @@ +{ + "locators":[ + { + "name":"loc1", + "prefix":"fc00:0:5::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt5/show_yang_interface_isis_adjacencies.ref b/tests/topotests/srv6_sid_manager/rt5/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..f40b0d353d --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt5/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,82 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt3-1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt3-2", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt4", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt6", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0006", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/srv6_sid_manager/rt5/zebra.conf b/tests/topotests/srv6_sid_manager/rt5/zebra.conf new file mode 100644 index 0000000000..901103554b --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt5/zebra.conf @@ -0,0 +1,36 @@ +log file zebra.log +! +hostname rt5 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ip address 5.5.5.5/32 + ipv6 address fc00:0:5::1/128 +! +interface eth-rt3-1 + ip address 10.0.4.5/24 +! +interface eth-rt3-2 + ip address 10.0.5.5/24 +! +interface eth-rt4 + ip address 10.0.6.5/24 +! +interface eth-rt6 + ip address 10.0.8.5/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:5::/48 + format usid-f3216 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/srv6_sid_manager/rt6/bgpd.conf b/tests/topotests/srv6_sid_manager/rt6/bgpd.conf new file mode 100644 index 0000000000..c36fae7901 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/bgpd.conf @@ -0,0 +1,67 @@ +frr defaults traditional +! +bgp send-extra-data zebra +! +hostname rt6 +password zebra +! +log stdout notifications +log commands +! +!debug bgp neighbor-events +!debug bgp zebra +!debug bgp vnc verbose +!debug bgp update-groups +!debug bgp updates in +!debug bgp updates out +!debug bgp updates +!debug bgp vpn label +!debug bgp vpn leak-from-vrf +!debug bgp vpn leak-to-vrf +!!debug bgp vpn rmap-event +! +router bgp 6 + bgp router-id 6.6.6.6 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + neighbor fc00:0:1::1 remote-as 1 + neighbor fc00:0:1::1 timers 3 10 + neighbor fc00:0:1::1 timers connect 1 + neighbor fc00:0:1::1 ttl-security hops 20 + ! 
+ address-family ipv6 vpn + neighbor fc00:0:1::1 activate + exit-address-family + ! + segment-routing srv6 + locator loc1 + ! +! +router bgp 6 vrf vrf10 + bgp router-id 6.6.6.6 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + ! + address-family ipv6 unicast + sid vpn export 65024 + rd vpn export 6:10 + rt vpn both 99:99 + import vpn + export vpn + redistribute connected + exit-address-family +! +router bgp 6 vrf vrf20 + bgp router-id 6.6.6.6 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + ! + address-family ipv6 unicast + sid vpn export 65025 + rd vpn export 6:20 + rt vpn both 88:88 + import vpn + export vpn + redistribute connected + exit-address-family +! diff --git a/tests/topotests/srv6_sid_manager/rt6/isisd.conf b/tests/topotests/srv6_sid_manager/rt6/isisd.conf new file mode 100644 index 0000000000..f8816db43a --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/isisd.conf @@ -0,0 +1,42 @@ +hostname rt6 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0006.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/srv6_sid_manager/rt6/sharpd.conf b/tests/topotests/srv6_sid_manager/rt6/sharpd.conf new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/sharpd.conf diff --git a/tests/topotests/srv6_sid_manager/rt6/show_ip_route.ref b/tests/topotests/srv6_sid_manager/rt6/show_ip_route.ref new file mode 100644 index 0000000000..5fc293b6d8 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/show_ip_route.ref @@ -0,0 +1,273 @@ +{ + "1.1.1.1\/32":[ + { + "prefix":"1.1.1.1\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "2.2.2.2\/32":[ + { + "prefix":"2.2.2.2\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "3.3.3.3\/32":[ + { + "prefix":"3.3.3.3\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "4.4.4.4\/32":[ + { + "prefix":"4.4.4.4\/32", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "5.5.5.5\/32":[ + { + "prefix":"5.5.5.5\/32", + "protocol":"isis", + 
"selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.1.0\/24":[ + { + "prefix":"10.0.1.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.2.0\/24":[ + { + "prefix":"10.0.2.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.3.0\/24":[ + { + "prefix":"10.0.3.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "10.0.4.0\/24":[ + { + "prefix":"10.0.4.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.5.0\/24":[ + { + "prefix":"10.0.5.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.6.0\/24":[ + { + "prefix":"10.0.6.0\/24", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "10.0.7.0\/24":[ + { + "prefix":"10.0.7.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.7.4", + "afi":"ipv4", + "interfaceName":"eth-rt4" + } + ] + } + ], + "10.0.8.0\/24":[ + { + "prefix":"10.0.8.0\/24", + "protocol":"isis", + "distance":115, + "metric":20, + "nexthops":[ + { + "ip":"10.0.8.5", + "afi":"ipv4", + "interfaceName":"eth-rt5" + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt6/show_ipv6_route.ref b/tests/topotests/srv6_sid_manager/rt6/show_ipv6_route.ref new file mode 100644 index 0000000000..d06354b872 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/show_ipv6_route.ref @@ -0,0 +1,312 @@ +{ + "fc00:0:1::1\/128":[ + { + "prefix":"fc00:0:1::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":40, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:2::1\/128":[ + { + "prefix":"fc00:0:2::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + 
"fc00:0:3::1\/128":[ + { + "prefix":"fc00:0:3::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:4::1\/128":[ + { + "prefix":"fc00:0:4::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "fc00:0:5::1\/128":[ + { + "prefix":"fc00:0:5::1\/128", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:1::\/48":[ + { + "prefix":"fc00:0:1::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":30, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + }, + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:2::\/48":[ + { + "prefix":"fc00:0:2::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "fc00:0:3::\/48":[ + { + "prefix":"fc00:0:3::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":20, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:4::\/48":[ + { + "prefix":"fc00:0:4::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt4", + "active":true + } + ] + } + ], + "fc00:0:5::\/48":[ + { + "prefix":"fc00:0:5::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":10, + "installed":true, + "nexthops":[ + { + "fib":true, + "afi":"ipv6", + "interfaceName":"eth-rt5", + "active":true + } + ] + } + ], + "fc00:0:6::\/48":[ + { + "prefix":"fc00:0:6::\/48", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"sr0", + "active":true, + "seg6local":{ + "action":"End" + } + } + ] + } + ], + "fc00:0:6:e000::\/64":[ + { + "prefix":"fc00:0:6:e000::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:6:e001::\/64":[ + { + "prefix":"fc00:0:6:e001::\/64", + "protocol":"isis", + "selected":true, + "destSelected":true, + "distance":115, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.X" + } + } + ] + } + ], + "fc00:0:6:fe00::\/128":[ + { + "prefix":"fc00:0:6:fe00::\/128", + "protocol":"bgp", + "selected":true, + "destSelected":true, + "distance":20, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + 
"interfaceName":"vrf10", + "active":true, + "seg6local":{ + "action":"End.DT6" + } + } + ] + } + ], + "fc00:0:6:fe01::\/128":[ + { + "prefix":"fc00:0:6:fe01::\/128", + "protocol":"bgp", + "selected":true, + "destSelected":true, + "distance":20, + "metric":0, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "interfaceName":"vrf20", + "active":true, + "seg6local":{ + "action":"End.DT6" + } + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt6/show_srv6_locator_table.ref b/tests/topotests/srv6_sid_manager/rt6/show_srv6_locator_table.ref new file mode 100644 index 0000000000..abcdeddea4 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/show_srv6_locator_table.ref @@ -0,0 +1,15 @@ +{ + "locators":[ + { + "name":"loc1", + "prefix":"fc00:0:6::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt6/show_yang_interface_isis_adjacencies.ref b/tests/topotests/srv6_sid_manager/rt6/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 0000000000..8300ca0b5c --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,44 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-rt4", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0004", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + }, + { + "name": "eth-rt5", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0005", + "hold-timer": 10, + "neighbor-priority": 0, + "state": "up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/srv6_sid_manager/rt6/vpnv6_rib.ref b/tests/topotests/srv6_sid_manager/rt6/vpnv6_rib.ref new file mode 100644 index 0000000000..fe0fa24529 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/vpnv6_rib.ref @@ -0,0 +1,169 @@ +{ + "vrfId": 0, + "vrfName": "default", + "routerId": "6.6.6.6", + "defaultLocPrf": 100, + "localAS": 6, + "routes": { + "routeDistinguishers": { + "1:10": { + "2001:1::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:1::", + "prefixLen": 64, + "network": "2001:1::/64", + "metric": 0, + "weight": 0, + "peerId": "fc00:0:1::1", + "path": "1", + "origin": "incomplete", + "nexthops": [ + { + "ip": "fc00:0:1::1", + "hostname": "rt1", + "afi": "ipv6", + "used": true + } + ] + } + ], + "2001:3::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:3::", + "prefixLen": 64, + "network": "2001:3::/64", + "metric": 0, + "weight": 0, + "peerId": "fc00:0:1::1", + "path": "1", + "origin": "incomplete", + "nexthops": [ + { + "ip": "fc00:0:1::1", + "hostname": "rt1", + "afi": "ipv6", + "used": true + } + ] + } + ] + }, + "1:20": { + "2001:5::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:5::", + "prefixLen": 64, + "network": "2001:5::/64", + "metric": 0, + "weight": 0, + "peerId": "fc00:0:1::1", + "path": "1", + "origin": "incomplete", + "nexthops": [ + { + "ip": "fc00:0:1::1", + "hostname": "rt1", + "afi": "ipv6", + "used": true 
+ } + ] + } + ] + }, + "6:10": { + "2001:2::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:2::", + "prefixLen": 64, + "network": "2001:2::/64", + "metric": 0, + "weight": 32768, + "peerId": "(unspec)", + "path": "", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf10", + "nexthops": [ + { + "ip": "::", + "hostname": "rt6", + "afi": "ipv6", + "used": true + } + ] + } + ] + }, + "6:20": { + "2001:4::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:4::", + "prefixLen": 64, + "network": "2001:4::/64", + "metric": 0, + "weight": 32768, + "peerId": "(unspec)", + "path": "", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf20", + "nexthops": [ + { + "ip": "::", + "hostname": "rt6", + "afi": "ipv6", + "used": true + } + ] + } + ], + "2001:6::/64": [ + { + "valid": true, + "bestpath": true, + "selectionReason": "First path received", + "pathFrom": "external", + "prefix": "2001:6::", + "prefixLen": 64, + "network": "2001:6::/64", + "metric": 0, + "weight": 32768, + "peerId": "(unspec)", + "path": "", + "origin": "incomplete", + "announceNexthopSelf": true, + "nhVrfName": "vrf20", + "nexthops": [ + { + "ip": "::", + "hostname": "rt6", + "afi": "ipv6", + "used": true + } + ] + } + ] + } + } + } +} diff --git a/tests/topotests/srv6_sid_manager/rt6/vrf10_rib.ref b/tests/topotests/srv6_sid_manager/rt6/vrf10_rib.ref new file mode 100644 index 0000000000..87ff5a9902 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/vrf10_rib.ref @@ -0,0 +1,92 @@ +{ + "2001:1::/64": [ + { + "prefix": "2001:1::/64", + "protocol": "bgp", + "vrfName": "vrf10", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 10, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "interfaceName": "eth-rt5", + "vrf": "default", + "active": true, + "weight": 1, + "seg6": { + "segs": "fc00:0:1:fe00::" + } + } + ], + "asPath": "1" + } + ], + "2001:2::/64": [ + { + "prefix": "2001:2::/64", + "protocol": "connected", + "vrfName": "vrf10", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 10, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-ce2", + "active": true + } + ] + } + ], + "2001:3::/64": [ + { + "prefix": "2001:3::/64", + "protocol": "bgp", + "vrfName": "vrf10", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 10, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "interfaceName": "eth-rt5", + "vrf": "default", + "active": true, + "weight": 1, + "seg6": { + "segs": "fc00:0:1:fe00::" + } + } + ], + "asPath": "1" + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt6/vrf20_rib.ref b/tests/topotests/srv6_sid_manager/rt6/vrf20_rib.ref new file mode 100644 index 0000000000..95d7d4412b --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/vrf20_rib.ref @@ -0,0 +1,86 @@ +{ + "2001:4::/64": [ + { + "prefix": "2001:4::/64", + 
"protocol": "connected", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 20, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-ce4", + "active": true + } + ] + } + ], + "2001:5::/64": [ + { + "prefix": "2001:5::/64", + "protocol": "bgp", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 20, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "interfaceName": "eth-rt5", + "vrf": "default", + "active": true, + "weight": 1, + "seg6": { + "segs": "fc00:0:1:fe01::" + } + } + ], + "asPath": "1" + } + ], + "2001:6::/64": [ + { + "prefix": "2001:6::/64", + "protocol": "connected", + "vrfName": "vrf20", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "table": 20, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 1, + "internalNextHopActiveNum": 1, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "eth-ce6", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/srv6_sid_manager/rt6/zebra.conf b/tests/topotests/srv6_sid_manager/rt6/zebra.conf new file mode 100644 index 0000000000..8ac64c559e --- /dev/null +++ b/tests/topotests/srv6_sid_manager/rt6/zebra.conf @@ -0,0 +1,45 @@ +log file zebra.log +! +hostname rt6 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ip address 6.6.6.6/32 + ipv6 address fc00:0:6::1/128 +! +interface eth-rt4 + ip address 10.0.7.6/24 +! +interface eth-rt5 + ip address 10.0.8.6/24 +! +interface eth-dst + ip address 10.0.10.1/24 + ip address 2001:db8:10::1/64 +! +interface eth-ce2 vrf vrf10 + ipv6 address 2001:2::1/64 +! +interface eth-ce4 vrf vrf20 + ipv6 address 2001:4::1/64 +! +interface eth-ce6 vrf vrf20 + ipv6 address 2001:6::1/64 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:6::/48 + format usid-f3216 + ! + ! +! +ip forwarding +! +ipv6 route fc00:0:9::1/128 2001:db8:10::2 vrf vrf10 +! +line vty +! 
diff --git a/tests/topotests/srv6_sid_manager/test_srv6_sid_manager.py b/tests/topotests/srv6_sid_manager/test_srv6_sid_manager.py new file mode 100644 index 0000000000..31f22d9900 --- /dev/null +++ b/tests/topotests/srv6_sid_manager/test_srv6_sid_manager.py @@ -0,0 +1,421 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright (c) 2023 by Carmine Scarpitta <cscarpit@cisco.com> +# + +""" +test_srv6_sid_manager.py: + + +---------+ + | | + | RT1 | + | 1.1.1.1 | + | | + +---------+ + |eth-sw1 + | + | + | + +---------+ | +---------+ + | | | | | + | RT2 |eth-sw1 | eth-sw1| RT3 | + | 2.2.2.2 +----------+----------+ 3.3.3.3 | + | | 10.0.1.0/24 | | + +---------+ +---------+ + eth-rt4-1| |eth-rt4-2 eth-rt5-1| |eth-rt5-2 + | | | | + 10.0.2.0/24| |10.0.3.0/24 10.0.4.0/24| |10.0.5.0/24 + | | | | + eth-rt2-1| |eth-rt2-2 eth-rt3-1| |eth-rt3-2 + +---------+ +---------+ + | | | | + | RT4 | 10.0.6.0/24 | RT5 | + | 4.4.4.4 +---------------------+ 5.5.5.5 | + | |eth-rt5 eth-rt4| | + +---------+ +---------+ + eth-rt6| |eth-rt6 + | | + 10.0.7.0/24| |10.0.8.0/24 + | +---------+ | + | | | | + | | RT6 | | + +----------+ 6.6.6.6 +-----------+ + eth-rt4| |eth-rt5 + +---------+ + |eth-dst (.1) + | + |10.0.10.0/24 + | + |eth-rt6 (.2) + +---------+ + | | + | DST | + | 9.9.9.2 | + | | + +---------+ + +""" + +import os +import re +import sys +import json +import functools +import pytest + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.common_config import ( + required_linux_kernel_version, + create_interface_in_kernel, +) +from lib.checkping import check_ping + +pytestmark = [pytest.mark.isisd, pytest.mark.sharpd] + + +def build_topo(tgen): + """Build function""" + + # Define FRR Routers + tgen.add_router("rt1") + tgen.add_router("rt2") + tgen.add_router("rt3") + tgen.add_router("rt4") + tgen.add_router("rt5") + tgen.add_router("rt6") + tgen.add_router("dst") + tgen.add_router("ce1") + tgen.add_router("ce2") + tgen.add_router("ce3") + tgen.add_router("ce4") + tgen.add_router("ce5") + tgen.add_router("ce6") + + # Define connections + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1") + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1") + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2") + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1") + + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2") + + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") + + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4") + + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5") + + switch = 
tgen.add_switch("s9") + switch.add_link(tgen.gears["rt6"], nodeif="eth-dst") + switch.add_link(tgen.gears["dst"], nodeif="eth-rt6") + + tgen.add_link(tgen.gears["ce1"], tgen.gears["rt1"], "eth-rt1", "eth-ce1") + tgen.add_link(tgen.gears["ce2"], tgen.gears["rt6"], "eth-rt6", "eth-ce2") + tgen.add_link(tgen.gears["ce3"], tgen.gears["rt1"], "eth-rt1", "eth-ce3") + tgen.add_link(tgen.gears["ce4"], tgen.gears["rt6"], "eth-rt6", "eth-ce4") + tgen.add_link(tgen.gears["ce5"], tgen.gears["rt1"], "eth-rt1", "eth-ce5") + tgen.add_link(tgen.gears["ce6"], tgen.gears["rt6"], "eth-rt6", "eth-ce6") + + tgen.gears["rt1"].run("ip link add vrf10 type vrf table 10") + tgen.gears["rt1"].run("ip link set vrf10 up") + tgen.gears["rt1"].run("ip link add vrf20 type vrf table 20") + tgen.gears["rt1"].run("ip link set vrf20 up") + tgen.gears["rt1"].run("ip link set eth-ce1 master vrf10") + tgen.gears["rt1"].run("ip link set eth-ce3 master vrf10") + tgen.gears["rt1"].run("ip link set eth-ce5 master vrf20") + + tgen.gears["rt6"].run("ip link add vrf10 type vrf table 10") + tgen.gears["rt6"].run("ip link set vrf10 up") + tgen.gears["rt6"].run("ip link add vrf20 type vrf table 20") + tgen.gears["rt6"].run("ip link set vrf20 up") + tgen.gears["rt6"].run("ip link set eth-ce2 master vrf10") + tgen.gears["rt6"].run("ip link set eth-ce4 master vrf20") + tgen.gears["rt6"].run("ip link set eth-ce6 master vrf20") + + # Add dummy interface for SRv6 + create_interface_in_kernel( + tgen, + "rt1", + "sr0", + "2001:db8::1", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt2", + "sr0", + "2001:db8::2", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt3", + "sr0", + "2001:db8::3", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt4", + "sr0", + "2001:db8::4", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt5", + "sr0", + "2001:db8::5", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt6", + "sr0", + "2001:db8::6", + netmask="128", + create=True, + ) + + +def setup_module(mod): + """Sets up the pytest environment""" + + # Verify if kernel requirements are satisfied + result = required_linux_kernel_version("4.10") + if result is not True: + pytest.skip("Kernel requirements are not met") + + # Build the topology + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + # For all registered routers, load the zebra and isis configuration files + for rname, router in tgen.routers().items(): + router.load_config(TopoRouter.RD_ZEBRA, + os.path.join(CWD, '{}/zebra.conf'.format(rname))) + router.load_config(TopoRouter.RD_ISIS, + os.path.join(CWD, '{}/isisd.conf'.format(rname))) + router.load_config(TopoRouter.RD_BGP, + os.path.join(CWD, '{}/bgpd.conf'.format(rname))) + if (os.path.exists('{}/sharpd.conf'.format(rname))): + router.load_config(TopoRouter.RD_SHARP, + os.path.join(CWD, '{}/sharpd.conf'.format(rname))) + + # Start routers + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + + # Teardown the topology + tgen = get_topogen() + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 60 seconds. 
+ test_func = functools.partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +def check_ping6(name, dest_addr, expect_connected): + def _check(name, dest_addr, match): + tgen = get_topogen() + output = tgen.gears[name].run("ping6 {} -c 1 -w 1".format(dest_addr)) + logger.info(output) + if match not in output: + return "ping fail" + + match = "{} packet loss".format("0%" if expect_connected else "100%") + logger.info("[+] check {} {} {}".format(name, dest_addr, match)) + tgen = get_topogen() + func = functools.partial(_check, name, dest_addr, match) + success, result = topotest.run_and_expect(func, None, count=10, wait=1) + assert result is None, "Failed" + + +def open_json_file(filename): + try: + with open(filename, "r") as f: + return json.load(f) + except IOError: + assert False, "Could not read file {}".format(filename) + + +def check_rib(name, cmd, expected_file): + def _check(name, cmd, expected_file): + logger.info("polling") + tgen = get_topogen() + router = tgen.gears[name] + output = json.loads(router.vtysh_cmd(cmd)) + expected = open_json_file("{}/{}".format(CWD, expected_file)) + return topotest.json_cmp(output, expected) + + logger.info('[+] check {} "{}" {}'.format(name, cmd, expected_file)) + tgen = get_topogen() + func = functools.partial(_check, name, cmd, expected_file) + success, result = topotest.run_and_expect(func, None, count=10, wait=0.5) + assert result is None, "Failed" + + +# +# Step 1 +# +# Test initial network convergence +# +def test_isis_adjacencies(): + logger.info("Test: check IS-IS adjacencies") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "show_yang_interface_isis_adjacencies.ref", + ) + + +def test_rib_ipv4(): + logger.info("Test: verify IPv4 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ip route isis json", "show_ip_route.ref" + ) + + +def test_rib_ipv6(): + logger.info("Test: verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show ipv6 route json", "show_ipv6_route.ref" + ) + + +def test_srv6_locator(): + logger.info("Test: verify SRv6 Locator") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]: + router_compare_json_output( + rname, "show segment-routing srv6 locator json", "show_srv6_locator_table.ref" + ) + + +def test_vpn_rib(): + check_rib("rt1", "show bgp ipv6 vpn json", "rt1/vpnv6_rib.ref") + check_rib("rt6", "show bgp ipv6 vpn json", "rt6/vpnv6_rib.ref") + check_rib("rt1", "show ipv6 route vrf vrf10 json", "rt1/vrf10_rib.ref") + check_rib("rt1", "show ipv6 route vrf vrf20 json", "rt1/vrf20_rib.ref") + check_rib("rt6", "show ipv6 route vrf 
vrf10 json", "rt6/vrf10_rib.ref") + check_rib("rt6", "show ipv6 route vrf vrf20 json", "rt6/vrf20_rib.ref") + check_rib("ce1", "show ipv6 route json", "ce1/ipv6_rib.json") + check_rib("ce2", "show ipv6 route json", "ce2/ipv6_rib.json") + check_rib("ce3", "show ipv6 route json", "ce3/ipv6_rib.json") + check_rib("ce4", "show ipv6 route json", "ce4/ipv6_rib.json") + check_rib("ce5", "show ipv6 route json", "ce5/ipv6_rib.json") + check_rib("ce6", "show ipv6 route json", "ce6/ipv6_rib.json") + + +def test_ping(): + logger.info("Test: verify ping") + tgen = get_topogen() + + # Required linux kernel version for this suite to run. + result = required_linux_kernel_version("6.1") + if result is not True: + pytest.skip("Kernel requirements are not met, kernel version should be >=6.1") + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Setup encap route on rt1, decap route on rt2 + # tgen.gears["rt1"].vtysh_cmd("sharp install seg6-routes fc00:0:9::1 nexthop-seg6 2001:db8:1::2 encap fc00:0:2:6:fe00:: 1") + tgen.gears["rt1"].cmd("ip -6 r a fc00:0:9::1/128 encap seg6 mode encap segs fc00:0:2:6:fe00:: via 2001:db8:1::2") + # tgen.gears["rt6"].vtysh_cmd("sharp install seg6local-routes fc00:0:f00d:: nexthop-seg6local eth-dst End_DT6 254 1") + tgen.gears["rt6"].cmd("ip -6 r a fc00:0:9::1/128 via 2001:db8:10::2 vrf vrf10") + tgen.gears["dst"].cmd("ip -6 r a 2001:db8:1::1/128 via 2001:db8:10::1") + + # Try to ping dst from rt1 + check_ping("rt1", "fc00:0:9::1", True, 10, 1) + + +# Memory leak test template +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/zebra_fec_nexthop_resolution/__init__.py b/tests/topotests/zebra_fec_nexthop_resolution/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/__init__.py diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf new file mode 100644 index 0000000000..9d28957d99 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf @@ -0,0 +1,24 @@ +! +router bgp 65500 + bgp router-id 192.0.2.1 + neighbor 192.0.2.3 remote-as 65500 + neighbor 192.0.2.3 update-source lo + neighbor 192.0.2.7 remote-as 65500 + neighbor 192.0.2.7 ttl-security hops 10 + neighbor 192.0.2.7 disable-connected-check + neighbor 192.0.2.7 update-source lo + ! + address-family ipv4 unicast + network 192.0.2.1/32 + no neighbor 192.0.2.3 activate + neighbor 192.0.2.7 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 192.0.2.3 activate + neighbor 192.0.2.3 route-reflector-client + neighbor 192.0.2.3 next-hop-self force + exit-address-family + ! +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r1/ospfd.conf.after b/tests/topotests/zebra_fec_nexthop_resolution/r1/ospfd.conf.after new file mode 100644 index 0000000000..3bb8cf8ac5 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r1/ospfd.conf.after @@ -0,0 +1,25 @@ +log stdout +! +interface lo + ip ospf passive +exit +! +interface r1-eth0 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! 
+router ospf + ospf router-id 192.0.2.1 + network 192.0.2.1/32 area 0.0.0.0 + network 192.168.1.0/24 area 0.0.0.0 + passive-interface lo + capability opaque + mpls-te on + mpls-te router-address 192.0.2.1 + segment-routing on + segment-routing global-block 1000 10000 local-block 32000 32999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.1/32 index 11 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r1/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r1/zebra.conf new file mode 100644 index 0000000000..1522e90398 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r1/zebra.conf @@ -0,0 +1,13 @@ +interface lo + ip address 192.0.2.1/32 + mpls enable +exit +! +interface r1-eth0 + ip address 192.168.1.1/24 + mpls enable + link-params + enable + exit-link-params +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf new file mode 100644 index 0000000000..46d2c9a01d --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf @@ -0,0 +1,23 @@ +router bgp 65500 + bgp router-id 192.0.2.2 + neighbor 192.0.2.1 remote-as 65500 + neighbor 192.0.2.1 update-source lo + neighbor 192.0.2.3 remote-as 65500 + neighbor 192.0.2.3 update-source lo + ! + address-family ipv4 unicast + network 192.0.2.2/32 + no neighbor 192.0.2.1 activate + no neighbor 192.0.2.3 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 192.0.2.1 activate + neighbor 192.0.2.1 route-reflector-client + neighbor 192.0.2.1 next-hop-self force + neighbor 192.0.2.3 activate + neighbor 192.0.2.3 route-reflector-client + neighbor 192.0.2.3 next-hop-self force + exit-address-family +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r2/isisd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r2/isisd.conf new file mode 100644 index 0000000000..add181ddae --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r2/isisd.conf @@ -0,0 +1,25 @@ +! +interface lo + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +interface r2-eth1 + ip router isis 2 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +router isis 1 + is-type level-1 + net 49.0000.0007.e901.2223.00 + lsp-timers gen-interval 1 refresh-interval 900 max-lifetime 1200 + mpls-te on + mpls-te router-address 192.0.2.2 + segment-routing on + segment-routing global-block 11000 20000 local-block 36000 36999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.2/32 index 22 no-php-flag +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r2/ospfd.conf.after b/tests/topotests/zebra_fec_nexthop_resolution/r2/ospfd.conf.after new file mode 100644 index 0000000000..8b02669862 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r2/ospfd.conf.after @@ -0,0 +1,32 @@ +log stdout +! +interface lo + ip ospf network point-to-point + ip ospf passive +exit +! +interface r2-eth0 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! +interface r2-eth1 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! 
+router ospf + ospf router-id 192.0.2.2 + network 192.0.2.2/32 area 0.0.0.0 + network 192.168.1.0/24 area 0.0.0.0 + network 192.168.2.0/24 area 0.0.0.0 + passive-interface lo + capability opaque + mpls-te on + mpls-te router-address 192.0.2.2 + segment-routing on + segment-routing global-block 1000 10000 local-block 36000 36999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.2/32 index 22 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r2/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r2/zebra.conf new file mode 100644 index 0000000000..af0d1eb7fe --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r2/zebra.conf @@ -0,0 +1,16 @@ +! +interface lo + ip address 192.0.2.2/32 + mpls enable +exit +! +interface r2-eth0 + ip address 192.168.1.2/24 + mpls enable +exit +! +interface r2-eth1 + ip address 192.168.2.2/24 + mpls enable +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf new file mode 100644 index 0000000000..060777e7fe --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf @@ -0,0 +1,23 @@ +router bgp 65500 + bgp router-id 192.0.2.3 + neighbor 192.0.2.1 remote-as 65500 + neighbor 192.0.2.1 update-source lo + neighbor 192.0.2.5 remote-as 65500 + neighbor 192.0.2.5 update-source lo + ! + address-family ipv4 unicast + network 192.0.2.3/32 + no neighbor 192.0.2.1 activate + no neighbor 192.0.2.5 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 192.0.2.1 activate + neighbor 192.0.2.1 route-reflector-client + neighbor 192.0.2.1 next-hop-self force + neighbor 192.0.2.5 activate + neighbor 192.0.2.5 route-reflector-client + neighbor 192.0.2.5 next-hop-self force + exit-address-family +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r3/isisd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r3/isisd.conf new file mode 100644 index 0000000000..db6a503bb2 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r3/isisd.conf @@ -0,0 +1,25 @@ +! +interface lo + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +interface r3-eth1 + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +router isis 1 + is-type level-1 + net 49.0000.0007.e901.3333.00 + lsp-timers gen-interval 1 refresh-interval 900 max-lifetime 1200 + mpls-te on + mpls-te router-address 192.0.2.3 + segment-routing on + segment-routing global-block 11000 12000 local-block 36000 36999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.3/32 index 33 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r3/ospfd.conf.after b/tests/topotests/zebra_fec_nexthop_resolution/r3/ospfd.conf.after new file mode 100644 index 0000000000..a3f5ae54f0 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r3/ospfd.conf.after @@ -0,0 +1,26 @@ +log stdout +! +interface lo + ip ospf network point-to-point + ip ospf passive +exit +! +interface r3-eth0 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! +router ospf + ospf router-id 192.0.2.3 + network 192.0.2.3/32 area 0.0.0.0 + network 192.168.2.0/24 area 0.0.0.0 + passive-interface lo + capability opaque + mpls-te on + mpls-te router-address 192.0.2.3 + segment-routing on + segment-routing global-block 1000 10000 local-block 30000 30999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.3/32 index 33 +exit +! 
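The segment-routing stanzas in these ospfd.conf.after files follow the usual SR-MPLS rule: the label a router installs for a prefix-SID is its SRGB base plus the configured index, so with "global-block 1000 10000" and "prefix 192.0.2.3/32 index 33" above, r3's label for 192.0.2.3/32 comes out as 1033. A rough sketch of that arithmetic (illustrative only, not taken from the test):

def prefix_sid_label(srgb_base, srgb_max, index):
    """SR-MPLS prefix-SID label: SRGB base + index, which must fit in the SRGB."""
    label = srgb_base + index
    assert srgb_base <= label <= srgb_max, "index falls outside the SRGB"
    return label

print(prefix_sid_label(1000, 10000, 33))  # 1033 for 192.0.2.3/32 on this SRGB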
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r3/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r3/zebra.conf new file mode 100644 index 0000000000..b309e15afa --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r3/zebra.conf @@ -0,0 +1,19 @@ +! +interface lo + ip address 192.0.2.3/32 + mpls enable +exit +! +interface r3-eth0 + ip address 192.168.2.3/24 + mpls enable + link-params + enable + exit-link-params +exit +! +interface r3-eth1 + ip address 192.168.3.3/24 + mpls enable +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf new file mode 100644 index 0000000000..dc052da863 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf @@ -0,0 +1,24 @@ +! +router bgp 65500 + bgp router-id 192.0.2.4 + neighbor 192.0.2.1 remote-as 65500 + neighbor 192.0.2.1 ttl-security hops 10 + neighbor 192.0.2.1 disable-connected-check + neighbor 192.0.2.1 update-source lo + neighbor 192.0.2.3 remote-as 65500 + neighbor 192.0.2.3 update-source lo + ! + address-family ipv4 unicast + network 192.0.2.4/32 + neighbor 192.0.2.1 activate + no neighbor 192.0.2.3 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 192.0.2.3 activate + neighbor 192.0.2.3 route-reflector-client + neighbor 192.0.2.3 next-hop-self force + exit-address-family + ! +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r4/isisd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r4/isisd.conf new file mode 100644 index 0000000000..7096ce081e --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r4/isisd.conf @@ -0,0 +1,31 @@ +! +interface lo + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +interface r4-eth0 + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +interface r4-eth1 + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +router isis 1 + is-type level-1 + net 49.0000.0007.e901.4444.00 + lsp-timers gen-interval 1 refresh-interval 900 max-lifetime 1200 + mpls-te on + mpls-te router-address 192.0.2.4 + segment-routing on + segment-routing global-block 11000 12000 local-block 37000 37999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.4/32 index 44 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r4/ospfd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r4/ospfd.conf new file mode 100644 index 0000000000..c160049675 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r4/ospfd.conf @@ -0,0 +1,19 @@ +! +interface lo + ip ospf area 0 + ip ospf passive +exit +! +interface r4-eth0 + ip ospf area 0 +exit +! +router ospf + mpls-te on + mpls-te router-address 192.0.2.4 + segment-routing on + segment-routing global-block 21000 29000 local-block 31000 31999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.4/32 index 44 no-php-flag +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r4/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r4/zebra.conf new file mode 100644 index 0000000000..8591047906 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r4/zebra.conf @@ -0,0 +1,16 @@ +! +interface lo + ip address 192.0.2.4/32 + mpls enable +exit +! +interface r4-eth0 + ip address 192.168.3.4/24 + mpls enable +exit +! +interface r4-eth1 + ip address 192.168.4.4/24 + mpls enable +exit +! 
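Alongside the per-interface "mpls enable" lines in these zebra.conf files, the Linux kernel itself must be willing to forward MPLS; the test's setup_module (further below) arranges this per router with modprobe and /proc writes. A standalone equivalent, sketched here with sysctl and assuming the mpls_router module is available, would look roughly like:

import subprocess

def enable_kernel_mpls(max_labels=100000):
    """Mirror of the per-router MPLS prep done in setup_module below."""
    subprocess.run(["modprobe", "mpls_router"], check=True)
    subprocess.run(["sysctl", "-w", "net.mpls.platform_labels={}".format(max_labels)], check=True)
    subprocess.run(["sysctl", "-w", "net.mpls.conf.lo.input=1"], check=True)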
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf new file mode 100644 index 0000000000..1c73154e27 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf @@ -0,0 +1,23 @@ +router bgp 65500 + bgp router-id 192.0.2.5 + neighbor 192.0.2.3 remote-as 65500 + neighbor 192.0.2.3 update-source lo + neighbor 192.0.2.7 remote-as 65500 + neighbor 192.0.2.7 update-source lo + ! + address-family ipv4 unicast + network 192.0.2.5/32 + no neighbor 192.0.2.3 activate + no neighbor 192.0.2.7 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 192.0.2.3 activate + neighbor 192.0.2.3 route-reflector-client + neighbor 192.0.2.3 next-hop-self force + neighbor 192.0.2.7 activate + neighbor 192.0.2.7 route-reflector-client + neighbor 192.0.2.7 next-hop-self force + exit-address-family +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r5/isisd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r5/isisd.conf new file mode 100644 index 0000000000..959d5be29b --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r5/isisd.conf @@ -0,0 +1,26 @@ +! +interface lo + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 + isis passive +exit +! +interface r5-eth0 + ip router isis 1 + isis hello-interval 1 + isis hello-multiplier 3 +exit +! +router isis 1 + is-type level-1 + net 49.0000.0007.e901.5555.00 + lsp-timers gen-interval 1 refresh-interval 900 max-lifetime 1200 + mpls-te on + mpls-te router-address 192.0.2.5 + segment-routing on + segment-routing global-block 11000 12000 local-block 33000 33999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.5/32 index 55 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r5/ospfd.conf.after b/tests/topotests/zebra_fec_nexthop_resolution/r5/ospfd.conf.after new file mode 100644 index 0000000000..868129f890 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r5/ospfd.conf.after @@ -0,0 +1,26 @@ +log stdout +! +interface lo + ip ospf network point-to-point + ip ospf passive +exit +! +interface r5-eth1 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! +router ospf + ospf router-id 192.0.2.5 + network 192.0.2.5/32 area 0.0.0.0 + network 192.168.5.0/24 area 0.0.0.0 + passive-interface lo + capability opaque + mpls-te on + mpls-te router-address 192.0.2.5 + segment-routing on + segment-routing global-block 21000 22000 local-block 35000 35999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.5/32 index 55 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r5/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r5/zebra.conf new file mode 100644 index 0000000000..dd519e8d12 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r5/zebra.conf @@ -0,0 +1,19 @@ +! +interface lo + ip address 192.0.2.5/32 + mpls enable +exit +! +interface r5-eth0 + ip address 192.168.4.5/24 + mpls enable +exit +! +interface r5-eth1 + ip address 192.168.5.5/24 + mpls enable + link-params + enable + exit-link-params +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r6/ospfd.conf.after b/tests/topotests/zebra_fec_nexthop_resolution/r6/ospfd.conf.after new file mode 100644 index 0000000000..60c4928f77 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r6/ospfd.conf.after @@ -0,0 +1,32 @@ +log stdout +! +interface lo + ip ospf network point-to-point + ip ospf passive +exit +! 
+interface r6-eth0 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! +interface r6-eth1 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! +router ospf + ospf router-id 192.0.2.6 + segment-routing on + segment-routing global-block 21000 22000 local-block 38000 38999 + network 192.0.2.6/32 area 0.0.0.0 + network 192.168.5.0/24 area 0.0.0.0 + network 192.168.6.0/24 area 0.0.0.0 + passive-interface lo + capability opaque + mpls-te on + mpls-te router-address 192.0.2.6 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.6/32 index 66 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r6/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r6/zebra.conf new file mode 100644 index 0000000000..5e16e3e434 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r6/zebra.conf @@ -0,0 +1,22 @@ +! +interface lo + ip address 192.0.2.6/32 + mpls enable +exit +! +interface r6-eth0 + ip address 192.168.5.6/24 + mpls enable + link-params + enable + exit-link-params +exit +! +interface r6-eth1 + ip address 192.168.6.6/24 + mpls enable + link-params + enable + exit-link-params +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf new file mode 100644 index 0000000000..eeda9d9cfa --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf @@ -0,0 +1,24 @@ +! +router bgp 65500 + bgp router-id 192.0.2.7 + neighbor 192.0.2.1 remote-as 65500 + neighbor 192.0.2.1 ttl-security hops 10 + neighbor 192.0.2.1 disable-connected-check + neighbor 192.0.2.1 update-source lo + neighbor 192.0.2.5 remote-as 65500 + neighbor 192.0.2.5 update-source lo + ! + address-family ipv4 unicast + network 192.0.2.7/32 + neighbor 192.0.2.1 activate + no neighbor 192.0.2.5 activate + exit-address-family + ! + address-family ipv4 labeled-unicast + neighbor 192.0.2.5 activate + neighbor 192.0.2.5 route-reflector-client + neighbor 192.0.2.5 next-hop-self force + exit-address-family + ! +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r7/ospfd.conf.after b/tests/topotests/zebra_fec_nexthop_resolution/r7/ospfd.conf.after new file mode 100644 index 0000000000..f8e56e1217 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r7/ospfd.conf.after @@ -0,0 +1,26 @@ +log stdout +! +interface lo + ip ospf network point-to-point + ip ospf passive +exit +! +interface r7-eth0 + ip ospf network point-to-point + ip ospf hello-interval 1 +exit +! +router ospf + ospf router-id 192.0.2.7 + network 192.0.2.7/32 area 0.0.0.0 + network 192.168.6.0/24 area 0.0.0.0 + passive-interface lo + capability opaque + mpls-te on + mpls-te router-address 192.0.2.7 + segment-routing on + segment-routing global-block 21000 22000 local-block 31000 31999 + segment-routing node-msd 8 + segment-routing prefix 192.0.2.7/32 index 77 +exit +! diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r7/zebra.conf b/tests/topotests/zebra_fec_nexthop_resolution/r7/zebra.conf new file mode 100644 index 0000000000..f520225476 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/r7/zebra.conf @@ -0,0 +1,14 @@ +! +interface lo + ip address 192.0.2.7/32 + mpls enable +exit +! +interface r7-eth0 + ip address 192.168.6.7/24 + mpls enable + link-params + enable + exit-link-params +exit +! 
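The bgpd.conf for r7 above (and for its peer r1 earlier) pairs "update-source lo" and "disable-connected-check" with "ttl-security hops 10" so the loopback-to-loopback iBGP session can form across several labeled hops; with GTSM (RFC 5082) the receiver then only accepts packets whose TTL is still within that hop budget of the initial 255. A back-of-the-envelope check of the resulting TTL floor (illustrative only, not part of the test):

def gtsm_min_ttl(hops):
    """Minimum TTL accepted for 'ttl-security hops N': 255 - N + 1."""
    return 255 - hops + 1

print(gtsm_min_ttl(10))  # 246 for the r1<->r7 session configured above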
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py b/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py new file mode 100644 index 0000000000..984ff3c185 --- /dev/null +++ b/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python + +# +# Copyright 2022 6WIND S.A. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Check if fec nexthop resolution works correctly. +""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.common_config import step + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + """ + r1 ---- r2 ---- r3 ---- r4 ----- r5 ---- r6 ---- r7 + <--- ospf ----> <---- isis -----> <--- ospf ----> + """ + for routern in range(1, 8): + tgen.add_router("r{}".format(routern)) + + switch1 = tgen.add_switch("s1") + switch1.add_link(tgen.gears["r1"]) + switch1.add_link(tgen.gears["r2"]) + + switch2 = tgen.add_switch("s2") + switch2.add_link(tgen.gears["r2"]) + switch2.add_link(tgen.gears["r3"]) + + switch3 = tgen.add_switch("s3") + switch3.add_link(tgen.gears["r3"]) + switch3.add_link(tgen.gears["r4"]) + + switch4 = tgen.add_switch("s4") + switch4.add_link(tgen.gears["r4"]) + switch4.add_link(tgen.gears["r5"]) + + switch5 = tgen.add_switch("s5") + switch5.add_link(tgen.gears["r5"]) + switch5.add_link(tgen.gears["r6"]) + + switch6 = tgen.add_switch("s6") + switch6.add_link(tgen.gears["r6"]) + switch6.add_link(tgen.gears["r7"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + def _enable_mpls_misc(router): + router.run("modprobe mpls_router") + router.run("echo 100000 > /proc/sys/net/mpls/platform_labels") + router.run("echo 1 > /proc/sys/net/mpls/conf/lo/input") + + router = tgen.gears["r1"] + _enable_mpls_misc(router) + + router = tgen.gears["r2"] + _enable_mpls_misc(router) + + router = tgen.gears["r3"] + _enable_mpls_misc(router) + + router = tgen.gears["r4"] + _enable_mpls_misc(router) + + router = tgen.gears["r5"] + _enable_mpls_misc(router) + + router = tgen.gears["r6"] + _enable_mpls_misc(router) + + router = tgen.gears["r7"] + _enable_mpls_misc(router) + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + if rname in ("r1", "r3", "r5", "r7"): + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + if rname in ("r3", "r4", "r5"): + router.load_config( + TopoRouter.RD_ISIS, os.path.join(CWD, 
"{}/isisd.conf".format(rname)) + ) + if rname in ("r1", "r2", "r3", "r5", "r6", "r7"): + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +# There are some startup issued when initialising OSPF +# To avoid those issues, load the ospf configuration after zebra started +def test_zebra_fec_nexthop_resolution_finalise_ospf_config(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + topotest.sleep(2) + + tgen.net["r1"].cmd("vtysh -f {}/r1/ospfd.conf.after".format(CWD)) + tgen.net["r2"].cmd("vtysh -f {}/r2/ospfd.conf.after".format(CWD)) + tgen.net["r3"].cmd("vtysh -f {}/r3/ospfd.conf.after".format(CWD)) + tgen.net["r5"].cmd("vtysh -f {}/r5/ospfd.conf.after".format(CWD)) + tgen.net["r6"].cmd("vtysh -f {}/r6/ospfd.conf.after".format(CWD)) + tgen.net["r7"].cmd("vtysh -f {}/r7/ospfd.conf.after".format(CWD)) + + +def test_zebra_fec_nexthop_resolution_bgp(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _check_bgp_session(): + r1 = tgen.gears["r1"] + + tgen.gears["r3"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end") + tgen.gears["r3"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end") + tgen.gears["r5"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end") + tgen.gears["r5"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end") + output = json.loads(r1.vtysh_cmd("show bgp summary json")) + + if output["ipv4Unicast"]["peers"]["192.0.2.7"]["state"] == "Established": + return None + return False + + test_func1 = functools.partial(_check_bgp_session) + _, result1 = topotest.run_and_expect(test_func1, None, count=60, wait=0.5) + assert result1 is None, "Failed to verify the fec_nexthop_resolution: bgp session" + + +def test_zebra_fec_nexthop_resolution_ping(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _check_ping_launch(): + r1 = tgen.gears["r1"] + + ping_launch = "ping 192.0.2.7 -I 192.0.2.1 -c 1" + selected_lines = r1.run(ping_launch).splitlines()[-2:-1] + rtx_stats = "".join(selected_lines[0].split(",")[0:3]) + current = topotest.normalize_text(rtx_stats) + + expected_stats = "1 packets transmitted 1 received 0% packet loss" + expected = topotest.normalize_text(expected_stats) + + if current == expected: + return None + + return False + + test_func2 = functools.partial(_check_ping_launch) + _, result2 = topotest.run_and_expect(test_func2, None, count=60, wait=1) + assert result2 is None, "Failed to verify the fec_nexthop_resolution: ping" + + +def test_zebra_fec_nexthop_resolution_table(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _zebra_check_mpls_table(): + r3 = tgen.gears["r3"] + inLabel = 0 + outLabels = 0 + + """ + Retrieve inLabel from MPLS FEC table + """ + mpls_fec = r3.vtysh_cmd("show mpls fec 192.0.2.7/32") + lines = mpls_fec.split("\n") + for line in lines: + if "Label" in line: + inLabel = line.split(": ", 1)[1] + + """ + Retrieve outLabel from BGP + """ + output = json.loads(r3.vtysh_cmd("show ip route 192.0.2.7/32 json")) + + outLabels = output["192.0.2.7/32"][0]["nexthops"][1]["labels"] + + if (inLabel == 0) or (outLabels == 0): + return True + + """ + Compare expected data with real data + """ + output = json.loads(r3.vtysh_cmd("show mpls table " + str(inLabel) + " json")) + + expected = { + "inLabel": int(inLabel), + 
"installed": True, + "nexthops": [ + { + "type": "BGP", + "outLabel": outLabels[0], + "outLabelStack": outLabels, + "distance": 20, + "installed": True, + "nexthop": "192.168.3.4", + } + ], + } + return topotest.json_cmp(output, expected) + + test_func3 = functools.partial(_zebra_check_mpls_table) + _, result3 = topotest.run_and_expect(test_func3, None, count=60, wait=0.5) + assert result3 is None, "Failed to verify the fec_nexthop_resolution: mpls table" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/zebra_seg6local_route/r1/routes.json b/tests/topotests/zebra_seg6local_route/r1/routes.json index e391922566..7dcc6450fa 100644 --- a/tests/topotests/zebra_seg6local_route/r1/routes.json +++ b/tests/topotests/zebra_seg6local_route/r1/routes.json @@ -119,5 +119,78 @@ }] }], "required_kernel": "5.14" + }, + { + "in": { + "dest": "6::1", + "context": "End_DX6 2001::1" + }, + "out":[{ + "prefix":"6::1/128", + "protocol":"sharp", + "selected":true, + "destSelected":true, + "distance":150, + "metric":0, + "installed":true, + "table":254, + "nexthops":[{ + "flags":3, + "fib":true, + "active":true, + "directlyConnected":true, + "interfaceName": "dum0", + "seg6local": { "action": "End.DX6" } + }] + }] + }, + { + "in": { + "dest": "7::1", + "context": "End_DT4 10" + }, + "out":[{ + "prefix":"7::1/128", + "protocol":"sharp", + "selected":true, + "destSelected":true, + "distance":150, + "metric":0, + "installed":true, + "table":254, + "nexthops":[{ + "flags":3, + "fib":true, + "active":true, + "directlyConnected":true, + "interfaceName": "dum0", + "seg6local": { "action": "End.DT4" } + }] + }], + "required_kernel": "5.11" + }, + { + "in": { + "dest": "8::1", + "context": "End_DT6 10" + }, + "out":[{ + "prefix":"8::1/128", + "protocol":"sharp", + "selected":true, + "destSelected":true, + "distance":150, + "metric":0, + "installed":true, + "table":254, + "nexthops":[{ + "flags":3, + "fib":true, + "active":true, + "directlyConnected":true, + "interfaceName": "dum0", + "seg6local": { "action": "End.DT6" } + }] + }] } ] diff --git a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py index a90f5c9c98..59c681df48 100755 --- a/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py +++ b/tests/topotests/zebra_seg6local_route/test_zebra_seg6local_route.py @@ -42,7 +42,7 @@ def setup_module(mod): tgen = Topogen({None: "r1"}, mod.__name__) tgen.start_topology() router_list = tgen.routers() - for rname, router in tgen.routers().items(): + for rname, router in router_list.items(): router.run( "/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))) ) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index a88f6b616d..9dae348b8e 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -203,7 +203,7 @@ def get_normalized_es_id(line): """ sub_strs = ["evpn mh es-id", "evpn mh es-sys-mac"] for sub_str in sub_strs: - obj = re.match(sub_str + " (?P<esi>\S*)", line) + obj = re.match(sub_str + r" (?P<esi>\S*)", line) if obj: line = "%s %s" % (sub_str, obj.group("esi").lower()) break @@ -871,7 +871,7 @@ def bgp_delete_nbr_remote_as_line(lines_to_add): if ctx_keys[0] not in pg_dict: pg_dict[ctx_keys[0]] = dict() # find 'neighbor <pg_name> peer-group' - re_pg = re.match("neighbor (\S+) peer-group$", line) + re_pg = re.match(r"neighbor (\S+) peer-group$", line) if re_pg and re_pg.group(1) not in pg_dict[ctx_keys[0]]: 
pg_dict[ctx_keys[0]][re_pg.group(1)] = { "nbr": list(), @@ -1066,7 +1066,7 @@ def bgp_delete_move_lines(lines_to_add, lines_to_del): if ctx_keys[0] not in del_dict: del_dict[ctx_keys[0]] = dict() # find 'no neighbor <pg_name> peer-group' - re_pg = re.match("neighbor (\S+) peer-group$", line) + re_pg = re.match(r"neighbor (\S+) peer-group$", line) if re_pg and re_pg.group(1) not in del_dict[ctx_keys[0]]: del_dict[ctx_keys[0]][re_pg.group(1)] = list() found_pg_del_cmd = True diff --git a/vrrpd/vrrp_vty.c b/vrrpd/vrrp_vty.c index 985354af37..59794d9297 100644 --- a/vrrpd/vrrp_vty.c +++ b/vrrpd/vrrp_vty.c @@ -590,7 +590,7 @@ static void vrrp_show(struct vty *vty, struct vrrp_vrouter *vr) char *table = ttable_dump(tt, "\n"); vty_out(vty, "\n%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); } @@ -695,7 +695,7 @@ DEFPY_YANG(vrrp_vrid_show_summary, char *table = ttable_dump(tt, "\n"); vty_out(vty, "\n%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); ttable_del(tt); list_delete(&ll); diff --git a/yang/frr-bgp-common.yang b/yang/frr-bgp-common.yang index 2b1babdd28..9d21597304 100644 --- a/yang/frr-bgp-common.yang +++ b/yang/frr-bgp-common.yang @@ -320,6 +320,19 @@ submodule frr-bgp-common { When set to 'false' BGP instance type is regular."; } + leaf as-notation { + type enumeration { + enum "plain" { value 0; } + enum "dot" { value 1; } + enum "dot+" { value 2; } + } + description + "The as-notation type: + - plain: use plain format for all AS values + - dot: use 'AA.BB' format for AS 4 byte values. + - dot+: use 'AA.BB' format for all AS values."; + } + leaf ebgp-multihop-connected-route-check { type boolean; default "false"; diff --git a/yang/frr-bgp-route-map.yang b/yang/frr-bgp-route-map.yang index abfb14c23c..44058ab04e 100644 --- a/yang/frr-bgp-route-map.yang +++ b/yang/frr-bgp-route-map.yang @@ -847,7 +847,7 @@ identity set-extcommunity-color { } case extcommunity-nt { - when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-nt')"; + when "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-nt')"; description "Value of the ext-community"; leaf extcommunity-nt { @@ -1008,7 +1008,7 @@ identity set-extcommunity-color { } case aigp-metric { - when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:aigp-metric')"; + when "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:aigp-metric')"; leaf aigp-metric { type string; description @@ -1127,16 +1127,14 @@ identity set-extcommunity-color { case comm-list-name { when "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:comm-list-delete') or " + "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:large-comm-list-delete') or " - + "derived-from-or-self(../frr-route-map:action, -'frr-bgp-route-map:extended-comm-list-delete')"; + + "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:extended-comm-list-delete')"; leaf comm-list-name { type bgp-filter:bgp-list-name; } } case evpn-gateway-ip-ipv4 { when - "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, - 'frr-bgp-route-map:set-evpn-gateway-ip-ipv4')"; + "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:set-evpn-gateway-ip-ipv4')"; description 
"Set EVPN gateway IP overlay index IPv4"; leaf evpn-gateway-ip-ipv4 { @@ -1145,8 +1143,7 @@ identity set-extcommunity-color { } case evpn-gateway-ip-ipv6 { when - "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, - 'frr-bgp-route-map:set-evpn-gateway-ip-ipv6')"; + "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:set-evpn-gateway-ip-ipv6')"; description "Set EVPN gateway IP overlay index IPv6"; leaf evpn-gateway-ip-ipv6 { @@ -1155,8 +1152,7 @@ identity set-extcommunity-color { } case l3vpn-nexthop-encapsulation { when - "derived-from-or-self(../frr-route-map:action, - 'frr-bgp-route-map:set-l3vpn-nexthop-encapsulation')"; + "derived-from-or-self(../frr-route-map:action, 'frr-bgp-route-map:set-l3vpn-nexthop-encapsulation')"; description "Accept L3VPN traffic over other than LSP encapsulation"; leaf l3vpn-nexthop-encapsulation { diff --git a/yang/frr-pim-candidate.yang b/yang/frr-pim-candidate.yang new file mode 100644 index 0000000000..09d0a06353 --- /dev/null +++ b/yang/frr-pim-candidate.yang @@ -0,0 +1,174 @@ +module frr-pim-candidate { + yang-version "1.1"; + namespace "http://frrouting.org/yang/pim-candidate"; + + prefix frr-pim-candidate; + + import frr-interface { + prefix frr-interface; + } + + import ietf-inet-types { + prefix "inet"; + } + + import frr-routing { + prefix "frr-rt"; + } + + import frr-pim { + prefix "frr-pim"; + } + + import frr-route-types { + prefix frr-route-types; + } + + organization + "FRRouting"; + + contact + "FRR Users List: <mailto:frog@lists.frrouting.org> + FRR Development List: <mailto:dev@lists.frrouting.org>"; + + description + "The module defines a collection of YANG definitions common for + all PIM (Protocol Independent Multicast) Candidate RP & BSR + (Rendezvous Point & Bootstrap Router) operation. + + Copyright 2020 FRRouting + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."; + + revision 2021-05-04 { + description + "Initial revision."; + reference + "TBD"; + } + + /* + * Groupings + */ + grouping candidate-bsr-container { + description + "Grouping of Candidate BSR settings."; + + container candidate-bsr { + presence + "Enable router to be a Candidate BSR."; + + description + "Candidate BSR settings"; + + leaf bsr-priority { + type uint8; + default "64"; + description + "BSR priority for this router, higher values win."; + } + + choice source-address-or-interface { + description "IP address to use for BSR operation"; + default if-loopback; + leaf address { + type inet:ip-address; + } + leaf interface { + type frr-interface:interface-ref; + } + leaf if-loopback { + type empty; + } + leaf if-any { + type empty; + } + } + } // candidate-bsr + } // candidate-bsr-container + + grouping candidate-rp-container { + description + "Grouping of Candidate RP settings."; + + container candidate-rp { + presence + "Enable router to be a Candidate RP."; + + description + "Candidate RP settings"; + + leaf rp-priority { + type uint8; + default "192"; + description + "RP priority for this router, lower values win."; + } + + leaf advertisement-interval { + type uint32 { + range 1..4294967295; + } + default "60"; + description + "RP advertisement interval (seconds). 
Holdtime is 2.5 times this."; + } + + leaf-list group-list { + type frr-route-types:ip-multicast-group-prefix; + description + "List of multicast group address."; + } + + choice source-address-or-interface { + description "IP address to use for RP operation"; + default if-loopback; + leaf address { + type inet:ip-address; + } + leaf interface { + type frr-interface:interface-ref; + } + leaf if-loopback { + type empty; + } + leaf if-any { + type empty; + } + } + } + } + + /* + * Configuration data nodes + */ + augment "/frr-rt:routing/frr-rt:control-plane-protocols/" + + "frr-rt:control-plane-protocol/frr-pim:pim/" + + "frr-pim:address-family" { + description "PIM Candidate RP augmentation."; + + uses candidate-bsr-container; + uses candidate-rp-container; + } +} diff --git a/yang/frr-zebra.yang b/yang/frr-zebra.yang index 1c7d1c8ef4..f97a4cc129 100644 --- a/yang/frr-zebra.yang +++ b/yang/frr-zebra.yang @@ -2856,6 +2856,16 @@ module frr-zebra { } } + container mpls { + description + "MPLS Configuration."; + leaf fec-nexthop-resolution { + type boolean; + description + "Authorise nexthop resolution over all labeled routes."; + } + } + uses ribs; uses vrf-vni-mapping; diff --git a/yang/subdir.am b/yang/subdir.am index 71aa040878..786bd0bca6 100644 --- a/yang/subdir.am +++ b/yang/subdir.am @@ -80,6 +80,7 @@ if PIMD dist_yangmodels_DATA += yang/frr-gmp.yang dist_yangmodels_DATA += yang/frr-pim.yang dist_yangmodels_DATA += yang/frr-pim-rp.yang +dist_yangmodels_DATA += yang/frr-pim-candidate.yang endif if BGPD diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index 09080aa616..1d2f9e695f 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -381,7 +381,7 @@ DEFPY(fpm_show_status, out = ttable_dump(table, "\n"); vty_out(vty, "%s\n", out); - XFREE(MTYPE_TMP, out); + XFREE(MTYPE_TMP_TTABLE, out); ttable_del(table); } @@ -1678,6 +1678,25 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov) fnc = dplane_provider_get_data(prov); limit = dplane_provider_get_work_limit(prov); + + frr_with_mutex (&fnc->ctxqueue_mutex) { + cur_queue = dplane_ctx_queue_count(&fnc->ctxqueue); + } + + if (cur_queue >= (uint64_t)limit) { + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: Already at a limit(%" PRIu64 + ") of internal work, hold off", + __func__, cur_queue); + limit = 0; + } else if (cur_queue != 0) { + if (IS_ZEBRA_DEBUG_FPM) + zlog_debug("%s: current queue is %" PRIu64 + ", limiting to lesser amount of %" PRIu64, + __func__, cur_queue, limit - cur_queue); + limit -= cur_queue; + } + for (counter = 0; counter < limit; counter++) { ctx = dplane_provider_dequeue_in_ctx(prov); if (ctx == NULL) diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c index 3233519873..5fb908eb0d 100644 --- a/zebra/if_netlink.c +++ b/zebra/if_netlink.c @@ -1799,14 +1799,18 @@ int netlink_tunneldump_read(struct zebra_ns *zns) ret = netlink_request_tunneldump(zns, PF_BRIDGE, tmp_if->ifindex); - if (ret < 0) + if (ret < 0) { + route_unlock_node(rn); return ret; + } ret = netlink_parse_info(netlink_link_change, netlink_cmd, &dp_info, 0, true); - if (ret < 0) + if (ret < 0) { + route_unlock_node(rn); return ret; + } } return 0; diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c index d2f1db67ee..84aabc4254 100644 --- a/zebra/kernel_netlink.c +++ b/zebra/kernel_netlink.c @@ -672,21 +672,6 @@ void netlink_parse_rtattr_nested(struct rtattr **tb, int max, netlink_parse_rtattr(tb, max, RTA_DATA(rta), RTA_PAYLOAD(rta)); } -bool nl_addraw_l(struct nlmsghdr *n, unsigned int maxlen, const void *data, - unsigned int 
len) -{ - if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) { - zlog_err("ERROR message exceeded bound of %d", maxlen); - return false; - } - - memcpy(NLMSG_TAIL(n), data, len); - memset((uint8_t *)NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len); - n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len); - - return true; -} - bool nl_attr_put(struct nlmsghdr *n, unsigned int maxlen, int type, const void *data, unsigned int alen) { diff --git a/zebra/kernel_netlink.h b/zebra/kernel_netlink.h index e37bba0cf6..9db4e5789a 100644 --- a/zebra/kernel_netlink.h +++ b/zebra/kernel_netlink.h @@ -77,14 +77,6 @@ extern void netlink_parse_rtattr_flags(struct rtattr **tb, int max, unsigned short flags); extern void netlink_parse_rtattr_nested(struct rtattr **tb, int max, struct rtattr *rta); -/* - * nl_addraw_l copies raw form the netlink message buffer into netlink - * message header pointer. It ensures the aligned data buffer does not - * override past max length. - * return value is 0 if its successful - */ -extern bool nl_addraw_l(struct nlmsghdr *n, unsigned int maxlen, - const void *data, unsigned int len); extern const char *nl_msg_type_to_str(uint16_t msg_type); extern const char *nl_rtproto_to_str(uint8_t rtproto); extern const char *nl_family_to_str(uint8_t family); diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 11c1330398..2de0917a7e 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -290,6 +290,7 @@ void redistribute_delete(const struct route_node *rn, if (IS_ZEBRA_DEBUG_RIB) { uint8_t old_inst, new_inst; uint32_t table = 0; + struct vrf *vrf = vrf_lookup_by_id(vrfid); old_inst = new_inst = 0; @@ -302,8 +303,8 @@ void redistribute_delete(const struct route_node *rn, table = new_re->table; } - zlog_debug("(%u:%u):%pRN: Redist del: re %p (%u:%s), new re %p (%u:%s)", - vrfid, table, rn, old_re, old_inst, + zlog_debug("(%s:%u):%pRN: Redist del: re %p (%u:%s), new re %p (%u:%s)", + VRF_LOGNAME(vrf), table, rn, old_re, old_inst, old_re ? zebra_route_string(old_re->type) : "None", new_re, new_inst, new_re ? zebra_route_string(new_re->type) : "None"); diff --git a/zebra/rib.h b/zebra/rib.h index cd6efbfb36..4293b5f240 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -108,8 +108,8 @@ struct route_entry { uint32_t nexthop_mtu; /* Flags of this route. 
- * This flag's definition is in lib/zebra.h ZEBRA_FLAG_* and is exposed - * to clients via Zserv + * This flag's definition is in lib/zclient.h ZEBRA_FLAG_* and is + * exposed to clients via Zserv */ uint32_t flags; @@ -631,6 +631,7 @@ extern int rib_add_gr_run(afi_t afi, vrf_id_t vrf_id, uint8_t proto, uint8_t instance, time_t restart_time); extern void zebra_vty_init(void); +extern uint32_t zebra_rib_dplane_results_count(void); extern pid_t pid; diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index ddcb83cd8c..75e4396e92 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -1688,7 +1688,7 @@ static bool _netlink_route_build_singlepath(const struct prefix *p, return false; if (!nl_attr_put(nlmsg, req_size, SEG6_LOCAL_NH6, &ctx->nh6, - sizeof(struct in_addr))) + sizeof(struct in6_addr))) return false; break; case ZEBRA_SEG6_LOCAL_ACTION_END_DT6: @@ -2979,7 +2979,7 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd, if (!nl_attr_put(&req->n, buflen, SEG6_LOCAL_NH6, &ctx->nh6, - sizeof(struct in_addr))) + sizeof(struct in6_addr))) return 0; break; case SEG6_LOCAL_ACTION_END_DT6: diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index aecbba2ebc..c45c61a208 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -735,11 +735,13 @@ static int route_notify_internal(const struct route_node *rn, int type, client = zserv_find_client(type, instance); if (!client || !client->notify_owner) { - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug( - "Not Notifying Owner: %s about prefix %pRN(%u) %d vrf: %u", - zebra_route_string(type), rn, table_id, note, - vrf_id); + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(vrf_id); + + zlog_debug("Not Notifying Owner: %s about prefix %pRN(%u) %d vrf: %s", + zebra_route_string(type), rn, table_id, note, + VRF_LOGNAME(vrf)); + } return 0; } @@ -2129,8 +2131,8 @@ static void zread_route_add(ZAPI_HANDLER_ARGS) vrf_id = zvrf_id(zvrf); if (IS_ZEBRA_DEBUG_RECV) - zlog_debug("%s: p=(%u:%u)%pFX, msg flags=0x%x, flags=0x%x", - __func__, vrf_id, api.tableid, &api.prefix, + zlog_debug("%s: p=(%s:%u)%pFX, msg flags=0x%x, flags=0x%x", + __func__, zvrf_name(zvrf), api.tableid, &api.prefix, (int)api.message, api.flags); /* Allocate new route. */ diff --git a/zebra/zebra_cli.c b/zebra/zebra_cli.c index 3e03d74775..6ee0fdbb8d 100644 --- a/zebra/zebra_cli.c +++ b/zebra/zebra_cli.c @@ -2221,6 +2221,37 @@ static void lib_vrf_zebra_ipv6_resolve_via_default_cli_write( } } +DEFPY_YANG (mpls_fec_nexthop_resolution, mpls_fec_nexthop_resolution_cmd, + "[no$no] mpls fec nexthop-resolution", + NO_STR + MPLS_STR + "MPLS FEC table\n" + "Authorise nexthop resolution over all labeled routes.\n") +{ + nb_cli_enqueue_change(vty, + "./frr-zebra:zebra/mpls/fec-nexthop-resolution", + NB_OP_MODIFY, no ? "false" : "true"); + + if (vty->node == CONFIG_NODE) + return nb_cli_apply_changes(vty, "/frr-vrf:lib/vrf[name='%s']", + VRF_DEFAULT_NAME); + + return nb_cli_apply_changes(vty, NULL); +} + +static void lib_vrf_mpls_fec_nexthop_resolution_cli_write( + struct vty *vty, const struct lyd_node *dnode, bool show_defaults) +{ + bool fec_nexthop_resolution = yang_dnode_get_bool(dnode, NULL); + + if (fec_nexthop_resolution || show_defaults) { + zebra_vrf_indent_cli_write(vty, dnode); + + vty_out(vty, "%smpls fec nexthop-resolution\n", + fec_nexthop_resolution ? 
"" : "no "); + } +} + DEFPY_YANG (vrf_netns, vrf_netns_cmd, "[no] netns ![NAME$netns_name]", @@ -2852,6 +2883,10 @@ const struct frr_yang_module_info frr_zebra_cli_info = { .cbs.cli_show = lib_vrf_zebra_netns_table_range_cli_write, }, { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/mpls/fec-nexthop-resolution", + .cbs.cli_show = lib_vrf_mpls_fec_nexthop_resolution_cli_write, + }, + { .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/l3vni-id", .cbs.cli_show = lib_vrf_zebra_l3vni_id_cli_write, }, @@ -2957,6 +2992,9 @@ void zebra_cli_init(void) install_element(VRF_NODE, &ip_nht_default_route_cmd); install_element(VRF_NODE, &ipv6_nht_default_route_cmd); + install_element(CONFIG_NODE, &mpls_fec_nexthop_resolution_cmd); + install_element(VRF_NODE, &mpls_fec_nexthop_resolution_cmd); + install_element(CONFIG_NODE, &vni_mapping_cmd); install_element(VRF_NODE, &vni_mapping_cmd); diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 1cee1ebb93..75147e7136 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -483,10 +483,8 @@ struct zebra_dplane_provider { int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p); _Atomic uint32_t dp_in_counter; - _Atomic uint32_t dp_in_queued; _Atomic uint32_t dp_in_max; _Atomic uint32_t dp_out_counter; - _Atomic uint32_t dp_out_queued; _Atomic uint32_t dp_out_max; _Atomic uint32_t dp_error_counter; @@ -4503,8 +4501,21 @@ dplane_nexthop_update_internal(struct nhg_hash_entry *nhe, enum dplane_op_e op) ctx = dplane_ctx_alloc(); ret = dplane_ctx_nexthop_init(ctx, op, nhe); - if (ret == AOK) + if (ret == AOK) { + if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL)) { + UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_QUEUED); + UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_REINSTALL); + SET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); + + dplane_ctx_free(&ctx); + atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, + 1, memory_order_relaxed); + + return ZEBRA_DPLANE_REQUEST_SUCCESS; + } + ret = dplane_update_enqueue(ctx); + } /* Update counter */ atomic_fetch_add_explicit(&zdplane_info.dg_nexthops_in, 1, @@ -6116,35 +6127,45 @@ int dplane_show_provs_helper(struct vty *vty, bool detailed) struct zebra_dplane_provider *prov; uint64_t in, in_q, in_max, out, out_q, out_max; - vty_out(vty, "Zebra dataplane providers:\n"); - DPLANE_LOCK(); prov = dplane_prov_list_first(&zdplane_info.dg_providers); + in = dplane_ctx_queue_count(&zdplane_info.dg_update_list); DPLANE_UNLOCK(); + vty_out(vty, "dataplane Incoming Queue from Zebra: %" PRIu64 "\n", in); + vty_out(vty, "Zebra dataplane providers:\n"); + /* Show counters, useful info from each registered provider */ while (prov) { + dplane_provider_lock(prov); + in_q = dplane_ctx_queue_count(&prov->dp_ctx_in_list); + out_q = dplane_ctx_queue_count(&prov->dp_ctx_out_list); + dplane_provider_unlock(prov); in = atomic_load_explicit(&prov->dp_in_counter, memory_order_relaxed); - in_q = atomic_load_explicit(&prov->dp_in_queued, - memory_order_relaxed); + in_max = atomic_load_explicit(&prov->dp_in_max, memory_order_relaxed); out = atomic_load_explicit(&prov->dp_out_counter, memory_order_relaxed); - out_q = atomic_load_explicit(&prov->dp_out_queued, - memory_order_relaxed); + out_max = atomic_load_explicit(&prov->dp_out_max, memory_order_relaxed); - vty_out(vty, "%s (%u): in: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64", out: %"PRIu64", q: %"PRIu64", q_max: %"PRIu64"\n", - prov->dp_name, prov->dp_id, in, in_q, in_max, - out, out_q, out_max); + vty_out(vty, + " %s (%u): in: %" PRIu64 ", q: %" PRIu64 + ", q_max: %" PRIu64 ", out: %" 
PRIu64 ", q: %" PRIu64 + ", q_max: %" PRIu64 "\n", + prov->dp_name, prov->dp_id, in, in_q, in_max, out, + out_q, out_max); prov = dplane_prov_list_next(&zdplane_info.dg_providers, prov); } + out = zebra_rib_dplane_results_count(); + vty_out(vty, "dataplane Outgoing Queue to Zebra: %" PRIu64 "\n", out); + return CMD_SUCCESS; } @@ -6286,10 +6307,6 @@ struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx( dplane_provider_lock(prov); ctx = dplane_ctx_list_pop(&(prov->dp_ctx_in_list)); - if (ctx) { - atomic_fetch_sub_explicit(&prov->dp_in_queued, 1, - memory_order_relaxed); - } dplane_provider_unlock(prov); @@ -6317,10 +6334,6 @@ int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov, break; } - if (ret > 0) - atomic_fetch_sub_explicit(&prov->dp_in_queued, ret, - memory_order_relaxed); - dplane_provider_unlock(prov); return ret; @@ -6345,10 +6358,7 @@ void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov, dplane_ctx_list_add_tail(&(prov->dp_ctx_out_list), ctx); /* Maintain out-queue counters */ - atomic_fetch_add_explicit(&(prov->dp_out_queued), 1, - memory_order_relaxed); - curr = atomic_load_explicit(&prov->dp_out_queued, - memory_order_relaxed); + curr = dplane_ctx_queue_count(&prov->dp_ctx_out_list); high = atomic_load_explicit(&prov->dp_out_max, memory_order_relaxed); if (curr > high) @@ -6370,9 +6380,6 @@ dplane_provider_dequeue_out_ctx(struct zebra_dplane_provider *prov) if (!ctx) return NULL; - atomic_fetch_sub_explicit(&(prov->dp_out_queued), 1, - memory_order_relaxed); - return ctx; } @@ -7318,10 +7325,10 @@ static void dplane_thread_loop(struct event *event) { struct dplane_ctx_list_head work_list; struct dplane_ctx_list_head error_list; - struct zebra_dplane_provider *prov; + struct zebra_dplane_provider *prov, *next_prov; struct zebra_dplane_ctx *ctx; int limit, counter, error_counter; - uint64_t curr, high; + uint64_t curr, out_curr, high; bool reschedule = false; /* Capture work limit per cycle */ @@ -7345,18 +7352,48 @@ static void dplane_thread_loop(struct event *event) /* Locate initial registered provider */ prov = dplane_prov_list_first(&zdplane_info.dg_providers); - /* Move new work from incoming list to temp list */ - for (counter = 0; counter < limit; counter++) { - ctx = dplane_ctx_list_pop(&zdplane_info.dg_update_list); - if (ctx) { - ctx->zd_provider = prov->dp_id; + curr = dplane_ctx_queue_count(&prov->dp_ctx_in_list); + out_curr = dplane_ctx_queue_count(&prov->dp_ctx_out_list); - dplane_ctx_list_add_tail(&work_list, ctx); - } else { - break; + if (curr >= (uint64_t)limit) { + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + zlog_debug("%s: Current first provider(%s) Input queue is %" PRIu64 + ", holding off work", + __func__, prov->dp_name, curr); + counter = 0; + } else if (out_curr >= (uint64_t)limit) { + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + zlog_debug("%s: Current first provider(%s) Output queue is %" PRIu64 + ", holding off work", + __func__, prov->dp_name, out_curr); + counter = 0; + } else { + int tlimit; + /* + * Let's limit the work to how much can be put on the + * in or out queue without going over + */ + tlimit = limit - MAX(curr, out_curr); + /* Move new work from incoming list to temp list */ + for (counter = 0; counter < tlimit; counter++) { + ctx = dplane_ctx_list_pop(&zdplane_info.dg_update_list); + if (ctx) { + ctx->zd_provider = prov->dp_id; + + dplane_ctx_list_add_tail(&work_list, ctx); + } else { + break; + } } } + /* + * If there is anything still on the two input queues, reschedule + */ + if
(dplane_ctx_queue_count(&prov->dp_ctx_in_list) > 0 || + dplane_ctx_queue_count(&zdplane_info.dg_update_list) > 0) + reschedule = true; + DPLANE_UNLOCK(); atomic_fetch_sub_explicit(&zdplane_info.dg_routes_queued, counter, @@ -7375,8 +7412,9 @@ static void dplane_thread_loop(struct event *event) * items. */ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) - zlog_debug("dplane enqueues %d new work to provider '%s'", - counter, dplane_provider_get_name(prov)); + zlog_debug("dplane enqueues %d new work to provider '%s' curr is %" PRIu64, + counter, dplane_provider_get_name(prov), + curr); /* Capture current provider id in each context; check for * error status. @@ -7409,10 +7447,7 @@ static void dplane_thread_loop(struct event *event) atomic_fetch_add_explicit(&prov->dp_in_counter, counter, memory_order_relaxed); - atomic_fetch_add_explicit(&prov->dp_in_queued, counter, - memory_order_relaxed); - curr = atomic_load_explicit(&prov->dp_in_queued, - memory_order_relaxed); + curr = dplane_ctx_queue_count(&prov->dp_ctx_in_list); high = atomic_load_explicit(&prov->dp_in_max, memory_order_relaxed); if (curr > high) @@ -7437,18 +7472,61 @@ static void dplane_thread_loop(struct event *event) if (!zdplane_info.dg_run) break; + /* Locate next provider */ + next_prov = dplane_prov_list_next(&zdplane_info.dg_providers, + prov); + if (next_prov) { + curr = dplane_ctx_queue_count( + &next_prov->dp_ctx_in_list); + out_curr = dplane_ctx_queue_count( + &next_prov->dp_ctx_out_list); + } else + out_curr = curr = 0; + /* Dequeue completed work from the provider */ dplane_provider_lock(prov); - while (counter < limit) { - ctx = dplane_provider_dequeue_out_ctx(prov); - if (ctx) { - dplane_ctx_list_add_tail(&work_list, ctx); - counter++; - } else - break; + if (curr >= (uint64_t)limit) { + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + zlog_debug("%s: Next Provider(%s) Input queue is %" PRIu64 + ", holding off work", + __func__, next_prov->dp_name, curr); + counter = 0; + } else if (out_curr >= (uint64_t)limit) { + if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) + zlog_debug("%s: Next Provider(%s) Output queue is %" PRIu64 + ", holding off work", + __func__, next_prov->dp_name, + out_curr); + counter = 0; + } else { + int tlimit; + + /* + * Let's limit the work to how much can be put on the + * in or out queue without going over + */ + tlimit = limit - MAX(curr, out_curr); + while (counter < tlimit) { + ctx = dplane_provider_dequeue_out_ctx(prov); + if (ctx) { + dplane_ctx_list_add_tail(&work_list, + ctx); + counter++; + } else + break; + } } + /* + * Let's check if there are still any items on the + * input or output queues of the current provider + * if so then we know we need to reschedule.
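+ * For example (illustrative numbers, not taken from this change): with + * limit = 200, curr = 50 and out_curr = 120, tlimit = 200 - MAX(50, 120) = 80, + * so at most 80 contexts are dequeued on this pass, and anything left on + * either queue afterwards triggers the reschedule.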
+ */ + if (dplane_ctx_queue_count(&prov->dp_ctx_in_list) > 0 || + dplane_ctx_queue_count(&prov->dp_ctx_out_list) > 0) + reschedule = true; + dplane_provider_unlock(prov); if (counter >= limit) @@ -7464,7 +7542,7 @@ static void dplane_thread_loop(struct event *event) } /* Locate next provider */ - prov = dplane_prov_list_next(&zdplane_info.dg_providers, prov); + prov = next_prov; } /* diff --git a/zebra/zebra_evpn.c b/zebra/zebra_evpn.c index ebb5a42298..a733b5917f 100644 --- a/zebra/zebra_evpn.c +++ b/zebra/zebra_evpn.c @@ -663,6 +663,7 @@ static int zebra_evpn_map_vlan_ns(struct ns *ns, vni_id = zebra_vxlan_if_access_vlan_vni_find(zif, br_if); if (vni_id) { found = 1; + route_unlock_node(rn); break; } } @@ -757,6 +758,7 @@ static int zebra_evpn_from_svi_ns(struct ns *ns, zebra_vxlan_if_access_vlan_vni_find(zif, br_if); if (vni_id) { found = 1; + route_unlock_node(rn); break; } } @@ -842,6 +844,7 @@ static int zvni_map_to_macvlan_ns(struct ns *ns, void *_in_param, void **_p_ifp) if (zif->link == in_param->svi_if) { *p_ifp = tmp_if; + route_unlock_node(rn); return NS_WALK_STOP; } } diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c index bfc060db61..0d53591336 100644 --- a/zebra/zebra_evpn_mac.c +++ b/zebra/zebra_evpn_mac.c @@ -47,9 +47,9 @@ uint32_t num_valid_macs(struct zebra_evpn *zevpn) for (i = 0; i < hash->size; i++) { for (hb = hash->index[i]; hb; hb = hb->next) { mac = (struct zebra_mac *)hb->data; - if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) - || CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL) - || !CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO)) + if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) || + CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL) || + !CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO)) num_macs++; } } @@ -103,7 +103,8 @@ static void zebra_evpn_mac_ifp_unlink(struct zebra_mac *zmac) if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) zlog_debug("VNI %d MAC %pEA unlinked from ifp %s (%u)", - zmac->zevpn->vni, &zmac->macaddr, ifp->name, ifp->ifindex); + zmac->zevpn->vni, &zmac->macaddr, ifp->name, + ifp->ifindex); zif = ifp->info; list_delete_node(zif->mac_list, &zmac->ifp_listnode); @@ -117,16 +118,17 @@ void zebra_evpn_mac_ifp_del(struct interface *ifp) struct listnode *node; struct zebra_mac *zmac; - if (zif->mac_list) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) - zlog_debug("MAC list deleted for ifp %s (%u)", - zif->ifp->name, zif->ifp->ifindex); + if (!zif->mac_list) + return; - for (ALL_LIST_ELEMENTS_RO(zif->mac_list, node, zmac)) { - zebra_evpn_mac_ifp_unlink(zmac); - } - list_delete(&zif->mac_list); - } + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) + zlog_debug("MAC list deleted for ifp %s (%u)", zif->ifp->name, + zif->ifp->ifindex); + + for (ALL_LIST_ELEMENTS_RO(zif->mac_list, node, zmac)) + zebra_evpn_mac_ifp_unlink(zmac); + + list_delete(&zif->mac_list); } /* Link local mac to destination access port. 
This is done only if the @@ -159,7 +161,8 @@ static void zebra_evpn_mac_ifp_link(struct zebra_mac *zmac, if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) zlog_debug("VNI %d MAC %pEA linked to ifp %s (%u)", - zmac->zevpn->vni, &zmac->macaddr, ifp->name, ifp->ifindex); + zmac->zevpn->vni, &zmac->macaddr, ifp->name, + ifp->ifindex); zmac->ifp = ifp; listnode_init(&zmac->ifp_listnode, zmac); @@ -201,7 +204,7 @@ int zebra_evpn_rem_mac_install(struct zebra_evpn *zevpn, struct zebra_mac *mac, return -1; sticky = !!CHECK_FLAG(mac->flags, - (ZEBRA_MAC_STICKY | ZEBRA_MAC_REMOTE_DEF_GW)); + (ZEBRA_MAC_STICKY | ZEBRA_MAC_REMOTE_DEF_GW)); /* If nexthop group for the FDB entry is inactive (not programmed in * the dataplane) the MAC entry cannot be installed @@ -245,14 +248,14 @@ int zebra_evpn_rem_mac_uninstall(struct zebra_evpn *zevpn, enum zebra_dplane_result res; /* If the MAC was not installed there is no need to uninstall it */ - if (!force && mac->es && !CHECK_FLAG(mac->es->flags, ZEBRA_EVPNES_NHG_ACTIVE)) + if (!force && mac->es && + !CHECK_FLAG(mac->es->flags, ZEBRA_EVPNES_NHG_ACTIVE)) return -1; if (!zevpn->vxlan_if) { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "VNI %u hash %p couldn't be uninstalled - no intf", - zevpn->vni, zevpn); + zlog_debug("VNI %u hash %p couldn't be uninstalled - no intf", + zevpn->vni, zevpn); return -1; } @@ -278,7 +281,8 @@ int zebra_evpn_rem_mac_uninstall(struct zebra_evpn *zevpn, ifp = zevpn->vxlan_if; vtep_ip = mac->fwd_info.r_vtep_ip; - res = dplane_rem_mac_del(ifp, br_ifp, vid, &mac->macaddr, vni->vni, vtep_ip); + res = dplane_rem_mac_del(ifp, br_ifp, vid, &mac->macaddr, vni->vni, + vtep_ip); if (res != ZEBRA_DPLANE_REQUEST_FAILURE) return 0; else @@ -297,9 +301,9 @@ void zebra_evpn_deref_ip2mac(struct zebra_evpn *zevpn, struct zebra_mac *mac) /* If all remote neighbors referencing a remote MAC go away, * we need to uninstall the MAC. */ - if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) - && remote_neigh_count(mac) == 0) { - zebra_evpn_rem_mac_uninstall(zevpn, mac, false /*force*/); + if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) && + remote_neigh_count(mac) == 0) { + zebra_evpn_rem_mac_uninstall(zevpn, mac, false); zebra_evpn_es_mac_deref_entry(mac); UNSET_FLAG(mac->flags, ZEBRA_MAC_REMOTE); } @@ -336,7 +340,8 @@ static void zebra_evpn_mac_get_access_info(struct zebra_mac *mac, *vid = mac->fwd_info.local.vid; zns = zebra_ns_lookup(mac->fwd_info.local.ns_id); - *p_ifp = if_lookup_by_index_per_ns(zns, mac->fwd_info.local.ifindex); + *p_ifp = if_lookup_by_index_per_ns(zns, + mac->fwd_info.local.ifindex); } } @@ -350,18 +355,26 @@ static char *zebra_evpn_zebra_mac_flag_dump(struct zebra_mac *mac, char *buf, } snprintfrr(buf, len, "%s%s%s%s%s%s%s%s%s%s%s%s", - CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL) ? "LOC " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) ? "REM " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO) ? "AUTO " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY) ? "STICKY " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_RMAC) ? "REM Router " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_DEF_GW) ? "Default GW " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW) ? "REM DEF GW " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE) ? "DUP " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_FPM_SENT) ? "FPM " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE) ? "PEER Active " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_PROXY) ? "PROXY " : "", - CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE) ? "LOC Inactive " : ""); + CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL) ? 
"LOC " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) ? "REM " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO) ? "AUTO " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY) ? "STICKY " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_RMAC) ? "REM Router " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_DEF_GW) ? "Default GW " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW) + ? "REM DEF GW " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE) ? "DUP " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_FPM_SENT) ? "FPM " : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE) + ? "PEER Active " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_PROXY) ? "PROXY " + : "", + CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE) + ? "LOC Inactive " + : ""); return buf; } @@ -391,11 +404,11 @@ static void zebra_evpn_dad_mac_auto_recovery_exp(struct event *t) if (IS_ZEBRA_DEBUG_VXLAN) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: duplicate addr mac %pEA flags %slearn count %u host count %u auto recovery expired", - __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - mac->dad_count, listcount(mac->neigh_list)); + zlog_debug("%s: duplicate addr mac %pEA flags %slearn count %u host count %u auto recovery expired", + __func__, &mac->macaddr, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + mac->dad_count, listcount(mac->neigh_list)); } /* Remove all IPs as duplicate associcated with this MAC */ @@ -404,7 +417,7 @@ static void zebra_evpn_dad_mac_auto_recovery_exp(struct event *t) if (CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_LOCAL)) ZEBRA_NEIGH_SET_INACTIVE(nbr); else if (CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_REMOTE)) - zebra_evpn_rem_neigh_install(zevpn, nbr, false /*was_static*/); + zebra_evpn_rem_neigh_install(zevpn, nbr, false); } UNSET_FLAG(nbr->flags, ZEBRA_NEIGH_DUPLICATE); @@ -423,11 +436,12 @@ static void zebra_evpn_dad_mac_auto_recovery_exp(struct event *t) if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { /* Inform to BGP */ if (zebra_evpn_mac_send_add_to_client(zevpn->vni, &mac->macaddr, - mac->flags, mac->loc_seq, mac->es)) + mac->flags, mac->loc_seq, + mac->es)) return; /* Process all neighbors associated with this MAC. 
*/ - zebra_evpn_process_neigh_on_local_mac_change(zevpn, mac, 0, 0 /*es_change*/); + zebra_evpn_process_neigh_on_local_mac_change(zevpn, mac, 0, 0); } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)) { zebra_evpn_process_neigh_on_remote_mac_add(zevpn, mac); @@ -445,7 +459,7 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, { struct zebra_neigh *nbr; struct listnode *node = NULL; - struct timeval elapsed = {0, 0}; + struct timeval elapsed = { 0, 0 }; bool reset_params = false; if (!(zebra_evpn_do_dup_addr_detect(zvrf) && do_dad)) @@ -459,11 +473,11 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, if (IS_ZEBRA_DEBUG_VXLAN) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: duplicate addr MAC %pEA flags %sskip update to client, learn count %u recover time %u", - __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - mac->dad_count, zvrf->dad_freeze_time); + zlog_debug("%s: duplicate addr MAC %pEA flags %sskip update to client, learn count %u recover time %u", + __func__, &mac->macaddr, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + mac->dad_count, zvrf->dad_freeze_time); } /* For duplicate MAC do not update * client but update neigh due to @@ -495,11 +509,11 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, if (IS_ZEBRA_DEBUG_VXLAN) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: duplicate addr MAC %pEA flags %sdetection time passed, reset learn count %u", - __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - mac->dad_count); + zlog_debug("%s: duplicate addr MAC %pEA flags %sdetection time passed, reset learn count %u", + __func__, &mac->macaddr, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + mac->dad_count); } mac->dad_count = 0; @@ -526,10 +540,11 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, if (mac->dad_count >= zvrf->dad_max_moves) { flog_warn(EC_ZEBRA_DUP_MAC_DETECTED, - "VNI %u: MAC %pEA detected as duplicate during %s VTEP %pI4", - mac->zevpn->vni, &mac->macaddr, - is_local ? "local update, last" : "remote update, from", - &vtep_ip); + "VNI %u: MAC %pEA detected as duplicate during %s VTEP %pI4", + mac->zevpn->vni, &mac->macaddr, + is_local ? 
"local update, last" + : "remote update, from", + &vtep_ip); SET_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE); @@ -540,7 +555,6 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, * associcated with this MAC */ for (ALL_LIST_ELEMENTS_RO(mac->neigh_list, node, nbr)) { - /* Ony Mark IPs which are Local */ if (!CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_LOCAL)) continue; @@ -561,16 +575,18 @@ static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf, if (IS_ZEBRA_DEBUG_VXLAN) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: duplicate addr MAC %pEA flags %sauto recovery time %u start", - __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - zvrf->dad_freeze_time); + zlog_debug("%s: duplicate addr MAC %pEA flags %sauto recovery time %u start", + __func__, &mac->macaddr, + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, + sizeof(mac_buf)), + zvrf->dad_freeze_time); } event_add_timer(zrouter.master, - zebra_evpn_dad_mac_auto_recovery_exp, mac, - zvrf->dad_freeze_time, &mac->dad_mac_auto_recovery_timer); + zebra_evpn_dad_mac_auto_recovery_exp, + mac, zvrf->dad_freeze_time, + &mac->dad_mac_auto_recovery_timer); } /* In case of local update, do not inform to client (BGPd), @@ -592,7 +608,7 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) char buf1[ETHER_ADDR_STRLEN]; char buf2[INET6_ADDRSTRLEN]; struct zebra_vrf *zvrf; - struct timeval detect_start_time = {0, 0}; + struct timeval detect_start_time = { 0, 0 }; char timebuf[MONOTIME_STRLEN]; char thread_buf[EVENT_TIMER_STRLEN]; time_t uptime; @@ -617,18 +633,22 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) zebra_evpn_mac_get_access_info(mac, &ifp, &vid); json_object_string_add(json_mac, "type", "local"); if (ifp) { - json_object_string_add(json_mac, "intf", ifp->name); - json_object_int_add(json_mac, "ifindex", ifp->ifindex); + json_object_string_add(json_mac, "intf", + ifp->name); + json_object_int_add(json_mac, "ifindex", + ifp->ifindex); } if (vid) json_object_int_add(json_mac, "vlan", vid); } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)) { json_object_string_add(json_mac, "type", "remote"); if (mac->es) - json_object_string_add(json_mac, "remoteEs", mac->es->esi_str); + json_object_string_add(json_mac, "remoteEs", + mac->es->esi_str); else - json_object_string_addf( - json_mac, "remoteVtep", "%pI4", &mac->fwd_info.r_vtep_ip); + json_object_string_addf(json_mac, "remoteVtep", + "%pI4", + &mac->fwd_info.r_vtep_ip); } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO)) json_object_string_add(json_mac, "type", "auto"); @@ -642,7 +662,8 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) json_object_boolean_true_add(json_mac, "defaultGateway"); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW)) - json_object_boolean_true_add(json_mac, "remoteGatewayMac"); + json_object_boolean_true_add(json_mac, + "remoteGatewayMac"); json_object_string_add(json_mac, "uptime", up_str); json_object_int_add(json_mac, "localSequence", mac->loc_seq); @@ -663,30 +684,42 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) if (CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE)) json_object_boolean_true_add(json_mac, "peerActive"); if (mac->hold_timer) - json_object_string_add(json_mac, "peerActiveHold", - event_timer_to_hhmmss(thread_buf, sizeof(thread_buf), mac->hold_timer)); + json_object_string_add( + json_mac, "peerActiveHold", + event_timer_to_hhmmss(thread_buf, + 
sizeof(thread_buf), + mac->hold_timer)); if (mac->es) - json_object_string_add(json_mac, "esi", mac->es->esi_str); + json_object_string_add(json_mac, "esi", + mac->es->esi_str); /* print all the associated neigh */ if (!listcount(mac->neigh_list)) json_object_string_add(json_mac, "neighbors", "none"); else { json_object *json_active_nbrs = json_object_new_array(); - json_object *json_inactive_nbrs = json_object_new_array(); + json_object *json_inactive_nbrs = + json_object_new_array(); json_object *json_nbrs = json_object_new_object(); for (ALL_LIST_ELEMENTS_RO(mac->neigh_list, node, n)) { if (IS_ZEBRA_NEIGH_ACTIVE(n)) - json_object_array_add(json_active_nbrs, - json_object_new_string(ipaddr2str(&n->ip, buf2, sizeof(buf2)))); + json_object_array_add( + json_active_nbrs, + json_object_new_string( + ipaddr2str(&n->ip, buf2, + sizeof(buf2)))); else json_object_array_add( json_inactive_nbrs, - json_object_new_string(ipaddr2str(&n->ip, buf2, sizeof(buf2)))); + json_object_new_string( + ipaddr2str(&n->ip, buf2, + sizeof(buf2)))); } - json_object_object_add(json_nbrs, "active", json_active_nbrs); - json_object_object_add(json_nbrs, "inactive", json_inactive_nbrs); + json_object_object_add(json_nbrs, "active", + json_active_nbrs); + json_object_object_add(json_nbrs, "inactive", + json_inactive_nbrs); json_object_object_add(json_mac, "neighbors", json_nbrs); } @@ -704,7 +737,8 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) vty_out(vty, " ESI: %s\n", mac->es->esi_str); if (ifp) - vty_out(vty, " Intf: %s(%u)", ifp->name, ifp->ifindex); + vty_out(vty, " Intf: %s(%u)", ifp->name, + ifp->ifindex); else vty_out(vty, " Intf: -"); vty_out(vty, " VLAN: %u", vid); @@ -712,7 +746,8 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) if (mac->es) vty_out(vty, " Remote ES: %s", mac->es->esi_str); else - vty_out(vty, " Remote VTEP: %pI4", &mac->fwd_info.r_vtep_ip); + vty_out(vty, " Remote VTEP: %pI4", + &mac->fwd_info.r_vtep_ip); } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO)) { vty_out(vty, " Auto Mac "); } @@ -739,18 +774,24 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) vty_out(vty, " peer-active"); if (mac->hold_timer) vty_out(vty, " (ht: %s)", - event_timer_to_hhmmss(thread_buf, sizeof(thread_buf), mac->hold_timer)); + event_timer_to_hhmmss(thread_buf, + sizeof(thread_buf), + mac->hold_timer)); vty_out(vty, "\n"); - vty_out(vty, " Local Seq: %u Remote Seq: %u\n", mac->loc_seq, mac->rem_seq); + vty_out(vty, " Local Seq: %u Remote Seq: %u\n", mac->loc_seq, + mac->rem_seq); vty_out(vty, " Uptime: %s\n", up_str); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) { vty_out(vty, " Duplicate, detected at %s", - time_to_string(mac->dad_dup_detect_time, timebuf)); + time_to_string(mac->dad_dup_detect_time, + timebuf)); } else if (mac->dad_count) { - monotime_since(&mac->detect_start_time, &detect_start_time); + monotime_since(&mac->detect_start_time, + &detect_start_time); if (detect_start_time.tv_sec <= zvrf->dad_time) { - time_to_string(mac->detect_start_time.tv_sec, timebuf); + time_to_string(mac->detect_start_time.tv_sec, + timebuf); vty_out(vty, " Duplicate detection started at %s, detection count %u\n", timebuf, mac->dad_count); @@ -765,7 +806,8 @@ void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json) for (ALL_LIST_ELEMENTS_RO(mac->neigh_list, node, n)) { vty_out(vty, " %s %s\n", ipaddr2str(&n->ip, buf2, sizeof(buf2)), - (IS_ZEBRA_NEIGH_ACTIVE(n) ? 
"Active" : "Inactive")); + (IS_ZEBRA_NEIGH_ACTIVE(n) ? "Active" + : "Inactive")); } } @@ -817,12 +859,14 @@ void zebra_evpn_print_mac_hash(struct hash_bucket *bucket, void *ctxt) zebra_evpn_mac_get_access_info(mac, &ifp, &vid); if (json_mac_hdr == NULL) { vty_out(vty, "%-17s %-6s %-5s %-30s", buf1, "local", - zebra_evpn_print_mac_flags(mac, flags_buf, sizeof(flags_buf)), + zebra_evpn_print_mac_flags(mac, flags_buf, + sizeof(flags_buf)), ifp ? ifp->name : "-"); } else { json_object_string_add(json_mac, "type", "local"); if (ifp) - json_object_string_add(json_mac, "intf", ifp->name); + json_object_string_add(json_mac, "intf", + ifp->name); } if (vid) { if (json_mac_hdr == NULL) @@ -831,35 +875,41 @@ void zebra_evpn_print_mac_hash(struct hash_bucket *bucket, void *ctxt) json_object_int_add(json_mac, "vlan", vid); } else /* No vid? fill out the space */ if (json_mac_hdr == NULL) - vty_out(vty, " %-5s", ""); + vty_out(vty, " %-5s", ""); if (json_mac_hdr == NULL) { vty_out(vty, " %u/%u", mac->loc_seq, mac->rem_seq); vty_out(vty, "\n"); } else { - json_object_int_add(json_mac, "localSequence", mac->loc_seq); - json_object_int_add(json_mac, "remoteSequence", mac->rem_seq); - json_object_int_add(json_mac, "detectionCount", mac->dad_count); + json_object_int_add(json_mac, "localSequence", + mac->loc_seq); + json_object_int_add(json_mac, "remoteSequence", + mac->rem_seq); + json_object_int_add(json_mac, "detectionCount", + mac->dad_count); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) - json_object_boolean_true_add(json_mac, "isDuplicate"); + json_object_boolean_true_add(json_mac, + "isDuplicate"); else - json_object_boolean_false_add(json_mac, "isDuplicate"); + json_object_boolean_false_add(json_mac, + "isDuplicate"); json_object_object_add(json_mac_hdr, buf1, json_mac); } wctx->count++; } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)) { - - if (CHECK_FLAG(wctx->flags, SHOW_REMOTE_MAC_FROM_VTEP) - && !IPV4_ADDR_SAME(&mac->fwd_info.r_vtep_ip, &wctx->r_vtep_ip)) + if (CHECK_FLAG(wctx->flags, SHOW_REMOTE_MAC_FROM_VTEP) && + !IPV4_ADDR_SAME(&mac->fwd_info.r_vtep_ip, &wctx->r_vtep_ip)) return; if (json_mac_hdr == NULL) { - if (CHECK_FLAG(wctx->flags, SHOW_REMOTE_MAC_FROM_VTEP) - && (wctx->count == 0)) { + if (CHECK_FLAG(wctx->flags, SHOW_REMOTE_MAC_FROM_VTEP) && + (wctx->count == 0)) { vty_out(vty, "\nVNI %u\n\n", wctx->zevpn->vni); vty_out(vty, "%-17s %-6s %-5s%-30s %-5s %s\n", - "MAC", "Type", "Flags", "Intf/Remote ES/VTEP", "VLAN", "Seq #'s"); + "MAC", "Type", "Flags", + "Intf/Remote ES/VTEP", "VLAN", + "Seq #'s"); } if (mac->es == NULL) inet_ntop(AF_INET, &mac->fwd_info.r_vtep_ip, @@ -867,24 +917,32 @@ void zebra_evpn_print_mac_hash(struct hash_bucket *bucket, void *ctxt) vty_out(vty, "%-17s %-6s %-5s %-30s %-5s %u/%u\n", buf1, "remote", - zebra_evpn_print_mac_flags(mac, flags_buf, sizeof(flags_buf)), - mac->es ? mac->es->esi_str : addr_buf, - "", mac->loc_seq, mac->rem_seq); + zebra_evpn_print_mac_flags(mac, flags_buf, + sizeof(flags_buf)), + mac->es ? 
mac->es->esi_str : addr_buf, "", + mac->loc_seq, mac->rem_seq); } else { json_object_string_add(json_mac, "type", "remote"); if (mac->es) - json_object_string_add(json_mac, "remoteEs", mac->es->esi_str); + json_object_string_add(json_mac, "remoteEs", + mac->es->esi_str); else - json_object_string_addf( - json_mac, "remoteVtep", "%pI4", &mac->fwd_info.r_vtep_ip); + json_object_string_addf(json_mac, "remoteVtep", + "%pI4", + &mac->fwd_info.r_vtep_ip); json_object_object_add(json_mac_hdr, buf1, json_mac); - json_object_int_add(json_mac, "localSequence", mac->loc_seq); - json_object_int_add(json_mac, "remoteSequence", mac->rem_seq); - json_object_int_add(json_mac, "detectionCount", mac->dad_count); + json_object_int_add(json_mac, "localSequence", + mac->loc_seq); + json_object_int_add(json_mac, "remoteSequence", + mac->rem_seq); + json_object_int_add(json_mac, "detectionCount", + mac->dad_count); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) - json_object_boolean_true_add(json_mac, "isDuplicate"); + json_object_boolean_true_add(json_mac, + "isDuplicate"); else - json_object_boolean_false_add(json_mac, "isDuplicate"); + json_object_boolean_false_add(json_mac, + "isDuplicate"); } wctx->count++; @@ -917,8 +975,7 @@ void zebra_evpn_print_mac_hash_detail(struct hash_bucket *bucket, void *ctxt) /* * Inform BGP about local MACIP. */ -int zebra_evpn_macip_send_msg_to_client(vni_t vni, - const struct ethaddr *macaddr, +int zebra_evpn_macip_send_msg_to_client(vni_t vni, const struct ethaddr *macaddr, const struct ipaddr *ip, uint8_t flags, uint32_t seq, int state, struct zebra_evpn_es *es, uint16_t cmd) @@ -966,13 +1023,12 @@ int zebra_evpn_macip_send_msg_to_client(vni_t vni, if (IS_ZEBRA_DEBUG_VXLAN) { char flag_buf[MACIP_BUF_SIZE]; - zlog_debug( - "Send MACIP %s f %s state %u MAC %pEA IP %pIA seq %u L2-VNI %u ESI %s to %s", - (cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del", - zclient_evpn_dump_macip_flags(flags, flag_buf, sizeof(flag_buf)), - state, macaddr, ip, seq, vni, - es ? es->esi_str : "-", - zebra_route_string(client->proto)); + zlog_debug("Send MACIP %s f %s state %u MAC %pEA IP %pIA seq %u L2-VNI %u ESI %s to %s", + (cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del", + zclient_evpn_dump_macip_flags(flags, flag_buf, + sizeof(flag_buf)), + state, macaddr, ip, seq, vni, es ? 
es->esi_str : "-", + zebra_route_string(client->proto)); } if (cmd == ZEBRA_MACIP_ADD) @@ -1005,7 +1061,8 @@ static bool mac_cmp(const void *p1, const void *p2) if (pmac1 == NULL || pmac2 == NULL) return false; - return (memcmp(pmac1->macaddr.octet, pmac2->macaddr.octet, ETH_ALEN) == 0); + return (memcmp(pmac1->macaddr.octet, pmac2->macaddr.octet, ETH_ALEN) == + 0); } /* @@ -1046,7 +1103,8 @@ struct zebra_mac *zebra_evpn_mac_add(struct zebra_evpn *zevpn, char mac_buf[MAC_BUF_SIZE]; zlog_debug("%s: MAC %pEA flags %s", __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } return mac; } @@ -1062,7 +1120,8 @@ int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac) char mac_buf[MAC_BUF_SIZE]; zlog_debug("%s: MAC %pEA flags %s", __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } /* force de-ref any ES entry linked to the MAC */ @@ -1087,10 +1146,10 @@ int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac) */ if (!list_isempty(mac->neigh_list)) { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "MAC %pEA (flags 0x%x vni %u) has non-empty neigh list " - "count %u, mark MAC as AUTO", &mac->macaddr, mac->flags, - zevpn->vni, listcount(mac->neigh_list)); + zlog_debug("MAC %pEA (flags 0x%x vni %u) has non-empty neigh list " + "count %u, mark MAC as AUTO", + &mac->macaddr, mac->flags, zevpn->vni, + listcount(mac->neigh_list)); SET_FLAG(mac->flags, ZEBRA_MAC_AUTO); return 0; @@ -1127,25 +1186,26 @@ struct zebra_mac *zebra_evpn_mac_add_auto(struct zebra_evpn *zevpn, static bool zebra_evpn_check_mac_del_from_db(struct mac_walk_ctx *wctx, struct zebra_mac *mac) { - if (CHECK_FLAG(wctx->flags, DEL_LOCAL_MAC) - && CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) + if (CHECK_FLAG(wctx->flags, DEL_LOCAL_MAC) && + CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) return true; - else if (CHECK_FLAG(wctx->flags, DEL_REMOTE_MAC) - && CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)) + else if (CHECK_FLAG(wctx->flags, DEL_REMOTE_MAC) && + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)) return true; - else if (CHECK_FLAG(wctx->flags, DEL_REMOTE_MAC_FROM_VTEP) - && CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) - && IPV4_ADDR_SAME(&mac->fwd_info.r_vtep_ip, &wctx->r_vtep_ip)) + else if (CHECK_FLAG(wctx->flags, DEL_REMOTE_MAC_FROM_VTEP) && + CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) && + IPV4_ADDR_SAME(&mac->fwd_info.r_vtep_ip, &wctx->r_vtep_ip)) return true; - else if (CHECK_FLAG(wctx->flags, DEL_LOCAL_MAC) - && CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO) - && !listcount(mac->neigh_list)) { + else if (CHECK_FLAG(wctx->flags, DEL_LOCAL_MAC) && + CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO) && + !listcount(mac->neigh_list)) { if (IS_ZEBRA_DEBUG_VXLAN) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: Del MAC %pEA flags %s", __func__, &mac->macaddr, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("%s: Del MAC %pEA flags %s", __func__, + &mac->macaddr, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } wctx->uninstall = 0; @@ -1163,23 +1223,25 @@ static void zebra_evpn_mac_del_hash_entry(struct hash_bucket *bucket, void *arg) struct mac_walk_ctx *wctx = arg; struct zebra_mac *mac = bucket->data; - if (zebra_evpn_check_mac_del_from_db(wctx, mac)) { - if (wctx->upd_client && CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { - zebra_evpn_mac_send_del_to_client(wctx->zevpn->vni, - 
&mac->macaddr, mac->flags, false); - } - if (wctx->uninstall) { - if (zebra_evpn_mac_is_static(mac)) - zebra_evpn_sync_mac_dp_install(mac, false /* set_inactive */, - true /* force_clear_static */, __func__); + if (!zebra_evpn_check_mac_del_from_db(wctx, mac)) + return; - if (mac->flags & ZEBRA_MAC_REMOTE) - zebra_evpn_rem_mac_uninstall(wctx->zevpn, mac, false /*force*/); - } + if (wctx->upd_client && CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { + zebra_evpn_mac_send_del_to_client(wctx->zevpn->vni, + &mac->macaddr, mac->flags, + false); + } + if (wctx->uninstall) { + if (zebra_evpn_mac_is_static(mac)) + zebra_evpn_sync_mac_dp_install(mac, false, true, + __func__); - zebra_evpn_mac_del(wctx->zevpn, mac); + if (mac->flags & ZEBRA_MAC_REMOTE) + zebra_evpn_rem_mac_uninstall(wctx->zevpn, mac, false); } + zebra_evpn_mac_del(wctx->zevpn, mac); + return; } @@ -1249,7 +1311,8 @@ int zebra_evpn_mac_send_add_to_client(vni_t vni, const struct ethaddr *macaddr, SET_FLAG(flags, ZEBRA_MACIP_TYPE_GW); return zebra_evpn_macip_send_msg_to_client(vni, macaddr, NULL, flags, - seq, ZEBRA_NEIGH_ACTIVE, es, ZEBRA_MACIP_ADD); + seq, ZEBRA_NEIGH_ACTIVE, es, + ZEBRA_MACIP_ADD); } /* @@ -1261,8 +1324,8 @@ int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr, int state = ZEBRA_NEIGH_ACTIVE; if (!force) { - if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL_INACTIVE) - && !CHECK_FLAG(flags, ZEBRA_MAC_ES_PEER_ACTIVE)) + if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL_INACTIVE) && + !CHECK_FLAG(flags, ZEBRA_MAC_ES_PEER_ACTIVE)) /* the host was not advertised - nothing to delete */ return 0; @@ -1275,8 +1338,8 @@ int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr, state = ZEBRA_NEIGH_INACTIVE; } - return zebra_evpn_macip_send_msg_to_client(vni, macaddr, NULL, - 0 /* flags */, 0 /* seq */, state, NULL, ZEBRA_MACIP_DEL); + return zebra_evpn_macip_send_msg_to_client(vni, macaddr, NULL, 0, 0, + state, NULL, ZEBRA_MACIP_DEL); } /* @@ -1308,12 +1371,11 @@ int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive, es_evi = zebra_evpn_es_evi_find(mac->es, mac->zevpn); if (!es_evi) { if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) - zlog_debug( - "%s: dp-install sync-mac vni %u mac %pEA es %s 0x%x %sskipped, no es-evi", - caller, zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - mac->flags, - set_inactive ? "inactive " : ""); + zlog_debug("%s: dp-install sync-mac vni %u mac %pEA es %s 0x%x %sskipped, no es-evi", + caller, zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + mac->flags, + set_inactive ? "inactive " : ""); return -1; } } @@ -1325,12 +1387,12 @@ int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive, if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: dp-install sync-mac vni %u mac %pEA es %s %s%sskipped, no access-port", - caller, zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - set_inactive ? "inactive " : ""); + zlog_debug("%s: dp-install sync-mac vni %u mac %pEA es %s %s%sskipped, no access-port", + caller, zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + set_inactive ? 
"inactive " : ""); } return -1; } @@ -1341,12 +1403,12 @@ int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive, if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "%s: dp-install sync-mac vni %u mac %pEA es %s %s%sskipped, no br", - caller, zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - set_inactive ? "inactive " : ""); + zlog_debug("%s: dp-install sync-mac vni %u mac %pEA es %s %s%sskipped, no br", + caller, zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + set_inactive ? "inactive " : ""); } return -1; } @@ -1365,21 +1427,21 @@ int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive, if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "dp-%s sync-nw-mac vni %u mac %pEA es %s %s%s", - set_static ? "install" : "uninstall", - zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - set_inactive ? "inactive " : ""); + zlog_debug("dp-%s sync-nw-mac vni %u mac %pEA es %s %s%s", + set_static ? "install" : "uninstall", + zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + set_inactive ? "inactive " : ""); } if (set_static) /* XXX - old_static needs to be computed more * accurately */ - zebra_evpn_rem_mac_install(zevpn, mac, true /* old_static */); + zebra_evpn_rem_mac_install(zevpn, mac, true); else - zebra_evpn_rem_mac_uninstall(zevpn, mac, false /* force */); + zebra_evpn_rem_mac_uninstall(zevpn, mac, false); return 0; } @@ -1390,13 +1452,14 @@ int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive, zlog_debug("dp-install sync-mac vni %u mac %pEA es %s %s%s%s", zevpn->vni, &mac->macaddr, mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), set_static ? "static " : "", set_inactive ? "inactive " : ""); } dplane_local_mac_add(ifp, br_ifp, vid, &mac->macaddr, sticky, - set_static, set_inactive); + set_static, set_inactive); return 0; } @@ -1406,11 +1469,11 @@ void zebra_evpn_mac_send_add_del_to_client(struct zebra_mac *mac, { if (new_bgp_ready) zebra_evpn_mac_send_add_to_client(mac->zevpn->vni, - &mac->macaddr, mac->flags, - mac->loc_seq, mac->es); + &mac->macaddr, mac->flags, + mac->loc_seq, mac->es); else if (old_bgp_ready) - zebra_evpn_mac_send_del_to_client(mac->zevpn->vni, - &mac->macaddr, mac->flags, true /* force */); + zebra_evpn_mac_send_del_to_client(mac->zevpn->vni, &mac->macaddr, + mac->flags, true); } /* MAC hold timer is used to age out peer-active flag. @@ -1443,23 +1506,23 @@ static void zebra_evpn_mac_hold_exp_cb(struct event *t) if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "sync-mac vni %u mac %pEA es %s %shold expired", - mac->zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("sync-mac vni %u mac %pEA es %s %shold expired", + mac->zevpn->vni, &mac->macaddr, + mac->es ? 
mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } /* re-program the local mac in the dataplane if the mac is no * longer static */ if (old_static != new_static) - zebra_evpn_sync_mac_dp_install(mac, false /* set_inactive */, - false /* force_clear_static */, __func__); + zebra_evpn_sync_mac_dp_install(mac, false, false, __func__); /* inform bgp if needed */ if (old_bgp_ready != new_bgp_ready) - zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, new_bgp_ready); + zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, + new_bgp_ready); } static inline void zebra_evpn_mac_start_hold_timer(struct zebra_mac *mac) @@ -1470,11 +1533,11 @@ static inline void zebra_evpn_mac_start_hold_timer(struct zebra_mac *mac) if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "sync-mac vni %u mac %pEA es %s %shold started", - mac->zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("sync-mac vni %u mac %pEA es %s %shold started", + mac->zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } event_add_timer(zrouter.master, zebra_evpn_mac_hold_exp_cb, mac, zmh_info->mac_hold_time, &mac->hold_timer); @@ -1488,11 +1551,11 @@ void zebra_evpn_mac_stop_hold_timer(struct zebra_mac *mac) if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "sync-mac vni %u mac %pEA es %s %shold stopped", - mac->zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("sync-mac vni %u mac %pEA es %s %shold stopped", + mac->zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } EVENT_OFF(mac->hold_timer); @@ -1506,11 +1569,11 @@ void zebra_evpn_sync_mac_del(struct zebra_mac *mac) if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "sync-mac del vni %u mac %pEA es %s seq %d f %s", - mac->zevpn->vni, &mac->macaddr, - mac->es ? mac->es->esi_str : "-", mac->loc_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("sync-mac del vni %u mac %pEA es %s seq %d f %s", + mac->zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", mac->loc_seq, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } old_static = zebra_evpn_mac_is_static(mac); @@ -1521,8 +1584,7 @@ void zebra_evpn_sync_mac_del(struct zebra_mac *mac) if (old_static != new_static) /* program the local mac in the kernel */ - zebra_evpn_sync_mac_dp_install(mac, false /* set_inactive */, - false /* force_clear_static */, __func__); + zebra_evpn_sync_mac_dp_install(mac, false, false, __func__); } static inline bool zebra_evpn_mac_is_bgp_seq_ok(struct zebra_evpn *zevpn, @@ -1543,44 +1605,41 @@ static inline bool zebra_evpn_mac_is_bgp_seq_ok(struct zebra_evpn *zevpn, n_type = "remote"; } - if (seq < tmp_seq) { - - if (is_local && !zebra_evpn_mac_is_ready_for_bgp(mac->flags)) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC || IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "%s-macip not ready vni %u %s-mac %pEA lower seq %u f 0x%x", - sync ? 
"sync" : "rem", zevpn->vni, - n_type, &mac->macaddr, tmp_seq, mac->flags); - return true; - } - - /* if the mac was never advertised to bgp we must accept - * whatever sequence number bgp sends - */ - if (!is_local && zebra_vxlan_get_accept_bgp_seq()) { - if (IS_ZEBRA_DEBUG_EVPN_MH_MAC || - IS_ZEBRA_DEBUG_VXLAN) { - zlog_debug( - "%s-macip accept vni %u %s-mac %pEA lower seq %u f %s", - (sync ? "sync" : "rem"), - zevpn->vni, n_type, &mac->macaddr, tmp_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); - } + if (seq >= tmp_seq) + return true; - return true; - } + if (is_local && !zebra_evpn_mac_is_ready_for_bgp(mac->flags)) { + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC || IS_ZEBRA_DEBUG_VXLAN) + zlog_debug("%s-macip not ready vni %u %s-mac %pEA lower seq %u f 0x%x", + sync ? "sync" : "rem", zevpn->vni, n_type, + &mac->macaddr, tmp_seq, mac->flags); + return true; + } + /* if the mac was never advertised to bgp we must accept + * whatever sequence number bgp sends + */ + if (!is_local && zebra_vxlan_get_accept_bgp_seq()) { if (IS_ZEBRA_DEBUG_EVPN_MH_MAC || IS_ZEBRA_DEBUG_VXLAN) { - zlog_debug( - "%s-macip ignore vni %u %s-mac %pEA as existing has higher seq %u f %s", - (sync ? "sync" : "rem"), zevpn->vni, n_type, &mac->macaddr, tmp_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("%s-macip accept vni %u %s-mac %pEA lower seq %u f %s", + (sync ? "sync" : "rem"), zevpn->vni, n_type, + &mac->macaddr, tmp_seq, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } - return false; + return true; } - return true; + if (IS_ZEBRA_DEBUG_EVPN_MH_MAC || IS_ZEBRA_DEBUG_VXLAN) { + zlog_debug("%s-macip ignore vni %u %s-mac %pEA as existing has higher seq %u f %s", + (sync ? "sync" : "rem"), zevpn->vni, n_type, + &mac->macaddr, tmp_seq, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); + } + + return false; } struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, @@ -1644,13 +1703,14 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, remote_gw = !!CHECK_FLAG(old_flags, ZEBRA_MAC_REMOTE_DEF_GW); if (sticky || remote_gw) { if (IS_ZEBRA_DEBUG_EVPN_MH_NEIGH) - zlog_debug( - "Ignore sync-macip vni %u mac %pEA%s%s%s%s", - zevpn->vni, macaddr, - ipa_len ? " IP " : "", - ipa_len ? ipaddr2str(ipaddr, ipbuf, sizeof(ipbuf)) : "", - sticky ? " sticky" : "", - remote_gw ? " remote_gw" : ""); + zlog_debug("Ignore sync-macip vni %u mac %pEA%s%s%s%s", + zevpn->vni, macaddr, + ipa_len ? " IP " : "", + ipa_len ? ipaddr2str(ipaddr, ipbuf, + sizeof(ipbuf)) + : "", + sticky ? " sticky" : "", + remote_gw ? " remote_gw" : ""); return NULL; } if (!zebra_evpn_mac_is_bgp_seq_ok(zevpn, mac, seq, true)) @@ -1664,7 +1724,9 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, SET_FLAG(new_flags, ZEBRA_MAC_LOCAL); /* retain old local activity flag */ if (CHECK_FLAG(old_flags, ZEBRA_MAC_LOCAL)) - SET_FLAG (new_flags, CHECK_FLAG(old_flags, ZEBRA_MAC_LOCAL_INACTIVE)); + SET_FLAG(new_flags, + CHECK_FLAG(old_flags, + ZEBRA_MAC_LOCAL_INACTIVE)); else SET_FLAG(new_flags, ZEBRA_MAC_LOCAL_INACTIVE); @@ -1672,7 +1734,9 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, /* if mac-ip route do NOT update the peer flags * i.e. 
retain only flags as is */ - SET_FLAG(new_flags, CHECK_FLAG(old_flags, ZEBRA_MAC_ALL_PEER_FLAGS)); + SET_FLAG(new_flags, + CHECK_FLAG(old_flags, + ZEBRA_MAC_ALL_PEER_FLAGS)); } else { /* if mac-only route update peer flags */ if (CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_PROXY_ADVERT)) { @@ -1682,8 +1746,10 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, * holdtimer on it. the peer-active flag is * cleared on holdtimer expiry. */ - if (CHECK_FLAG(old_flags, ZEBRA_MAC_ES_PEER_ACTIVE)) { - SET_FLAG(new_flags, ZEBRA_MAC_ES_PEER_ACTIVE); + if (CHECK_FLAG(old_flags, + ZEBRA_MAC_ES_PEER_ACTIVE)) { + SET_FLAG(new_flags, + ZEBRA_MAC_ES_PEER_ACTIVE); zebra_evpn_mac_start_hold_timer(mac); } } else { @@ -1703,11 +1769,13 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, struct zebra_mac omac; omac.flags = old_flags; - zlog_debug( - "sync-mac vni %u mac %pEA old_f %snew_f %s", - zevpn->vni, macaddr, - zebra_evpn_zebra_mac_flag_dump(&omac, omac_buf, sizeof(omac_buf)), - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("sync-mac vni %u mac %pEA old_f %snew_f %s", + zevpn->vni, macaddr, + zebra_evpn_zebra_mac_flag_dump(&omac, + omac_buf, + sizeof(omac_buf)), + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } /* update es */ @@ -1747,24 +1815,25 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, char mac_buf[MAC_BUF_SIZE]; zlog_debug("sync-mac %s vni %u mac %pEA es %s seq %d f %s%s%s", - created ? "created" : "updated", - zevpn->vni, macaddr, - mac->es ? mac->es->esi_str : "-", - mac->loc_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - inform_bgp ? "inform_bgp" : "", - inform_dataplane ? " inform_dp" : ""); + created ? "created" : "updated", zevpn->vni, macaddr, + mac->es ? mac->es->esi_str : "-", mac->loc_seq, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + inform_bgp ? "inform_bgp" : "", + inform_dataplane ? " inform_dp" : ""); } if (inform_bgp) - zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, new_bgp_ready); + zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, + new_bgp_ready); /* neighs using the mac may need to be re-sent to * bgp with updated info */ if (seq_change || es_change || !old_local) - zebra_evpn_process_neigh_on_local_mac_change( - zevpn, mac, seq_change, es_change); + zebra_evpn_process_neigh_on_local_mac_change(zevpn, mac, + seq_change, + es_change); if (inform_dataplane && !ipa_len) { /* program the local mac in the kernel. 
when the ES @@ -1772,8 +1841,8 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, * the activity as we are yet to establish activity * locally */ - zebra_evpn_sync_mac_dp_install(mac, mac_inactive /* set_inactive */, - false /* force_clear_static */, __func__); + zebra_evpn_sync_mac_dp_install(mac, mac_inactive, false, + __func__); } return mac; @@ -1783,7 +1852,8 @@ struct zebra_mac *zebra_evpn_proc_sync_mac_update(struct zebra_evpn *zevpn, * is detected */ static bool zebra_evpn_local_mac_update_fwd_info(struct zebra_mac *mac, - struct interface *ifp, vlanid_t vid) + struct interface *ifp, + vlanid_t vid) { struct zebra_if *zif = ifp->info; bool es_change; @@ -1825,8 +1895,8 @@ static void zebra_evpn_send_mac_hash_entry_to_client(struct hash_bucket *bucket, if (CHECK_FLAG(zmac->flags, ZEBRA_MAC_LOCAL)) zebra_evpn_mac_send_add_to_client(wctx->zevpn->vni, - &zmac->macaddr, zmac->flags, - zmac->loc_seq, zmac->es); + &zmac->macaddr, zmac->flags, + zmac->loc_seq, zmac->es); } /* Iterator to Notify Local MACs of a EVPN */ @@ -1840,7 +1910,8 @@ void zebra_evpn_send_mac_list_to_client(struct zebra_evpn *zevpn) memset(&wctx, 0, sizeof(wctx)); wctx.zevpn = zevpn; - hash_iterate(zevpn->mac_table, zebra_evpn_send_mac_hash_entry_to_client, &wctx); + hash_iterate(zevpn->mac_table, zebra_evpn_send_mac_hash_entry_to_client, + &wctx); } void zebra_evpn_rem_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac) @@ -1857,7 +1928,7 @@ void zebra_evpn_rem_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac) * go away, we need to uninstall the MAC. */ if (remote_neigh_count(mac) == 0) { - zebra_evpn_rem_mac_uninstall(zevpn, mac, false /*force*/); + zebra_evpn_rem_mac_uninstall(zevpn, mac, false); zebra_evpn_es_mac_deref_entry(mac); UNSET_FLAG(mac->flags, ZEBRA_MAC_REMOTE); } @@ -1917,12 +1988,11 @@ int zebra_evpn_mac_remote_macip_add(struct zebra_evpn *zevpn, mac = zebra_evpn_mac_lookup(zevpn, macaddr); /* Ignore if the mac is already present as a gateway mac */ - if (mac && CHECK_FLAG(mac->flags, ZEBRA_MAC_DEF_GW) - && CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_GW)) { + if (mac && CHECK_FLAG(mac->flags, ZEBRA_MAC_DEF_GW) && + CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_GW)) { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - "Ignore remote MACIP ADD VNI %u MAC %pEA as MAC is already configured as gateway MAC", - zevpn->vni, macaddr); + zlog_debug("Ignore remote MACIP ADD VNI %u MAC %pEA as MAC is already configured as gateway MAC", + zevpn->vni, macaddr); return -1; } @@ -1932,13 +2002,11 @@ int zebra_evpn_mac_remote_macip_add(struct zebra_evpn *zevpn, * If so, that needs to be updated first. Note that client could * install MAC and MACIP separately or just install the latter. */ - if (!mac - || !CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) - || sticky != !!CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY) - || remote_gw != !!CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW) - || !IPV4_ADDR_SAME(&mac->fwd_info.r_vtep_ip, &vtep_ip) - || memcmp(old_esi, esi, sizeof(esi_t)) - || seq != mac->rem_seq) + if (!mac || !CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) || + sticky != !!CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY) || + remote_gw != !!CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW) || + !IPV4_ADDR_SAME(&mac->fwd_info.r_vtep_ip, &vtep_ip) || + memcmp(old_esi, esi, sizeof(esi_t)) || seq != mac->rem_seq) update_mac = 1; if (update_mac) { @@ -1954,7 +2022,8 @@ int zebra_evpn_mac_remote_macip_add(struct zebra_evpn *zevpn, * the sequence number and ignore this update * if appropriate. 
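The sequence-number handling above (the early-return rewrite of zebra_evpn_mac_is_bgp_seq_ok() and the check referenced in this comment) boils down to a few acceptance rules. The standalone sketch below is not FRR code and its struct and field names are invented for illustration; it only restates, under those assumptions, the rules visible in the diff: an equal-or-higher sequence always wins, a lower one is still accepted when the local MAC was never advertised to BGP or when lower remote sequences are explicitly allowed, and anything else is ignored.

/* Illustrative sketch only -- not the FRR implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed, simplified inputs mirroring the checks in the diff. */
struct mac_seq_ctx {
	uint32_t existing_seq;	/* sequence currently held for the MAC  */
	uint32_t incoming_seq;	/* sequence carried by the update       */
	bool ready_for_bgp;	/* MAC already advertised to BGP?       */
	bool accept_lower_rem;	/* operator allows lower remote seq     */
	bool is_local;		/* update refers to a local (sync) MAC  */
};

static bool mac_seq_acceptable(const struct mac_seq_ctx *ctx)
{
	/* Equal or higher sequence numbers are always fine. */
	if (ctx->incoming_seq >= ctx->existing_seq)
		return true;

	/* Local MAC never advertised: accept whatever BGP sends. */
	if (ctx->is_local && !ctx->ready_for_bgp)
		return true;

	/* Remote MAC: accept a lower sequence only if configured to. */
	if (!ctx->is_local && ctx->accept_lower_rem)
		return true;

	/* Otherwise ignore the update; the existing entry is newer. */
	return false;
}

int main(void)
{
	struct mac_seq_ctx ctx = { .existing_seq = 7, .incoming_seq = 5,
				   .ready_for_bgp = true,
				   .accept_lower_rem = false,
				   .is_local = false };

	printf("accept update: %s\n", mac_seq_acceptable(&ctx) ? "yes" : "no");
	return 0;
}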
*/ - if (!zebra_evpn_mac_is_bgp_seq_ok(zevpn, mac, seq, false)) + if (!zebra_evpn_mac_is_bgp_seq_ok(zevpn, mac, seq, + false)) return -1; old_es_present = !!mac->es; @@ -1981,8 +2050,9 @@ int zebra_evpn_mac_remote_macip_add(struct zebra_evpn *zevpn, * MAC is already marked duplicate set dad, then * is_dup_detect will be set to not install the entry. */ - if ((!CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) && mac->dad_count) - || CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) + if ((!CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) && + mac->dad_count) || + CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE)) do_dad = true; /* Remove local MAC from BGP. */ @@ -1992,17 +2062,18 @@ int zebra_evpn_mac_remote_macip_add(struct zebra_evpn *zevpn, if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "sync-mac->remote vni %u mac %pEA es %s seq %d f %s", - zevpn->vni, macaddr, - mac->es ? mac->es->esi_str : "-", - mac->loc_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("sync-mac->remote vni %u mac %pEA es %s seq %d f %s", + zevpn->vni, macaddr, + mac->es ? mac->es->esi_str : "-", + mac->loc_seq, + zebra_evpn_zebra_mac_flag_dump( + mac, mac_buf, + sizeof(mac_buf))); } zebra_evpn_mac_clear_sync_info(mac); - zebra_evpn_mac_send_del_to_client(zevpn->vni, macaddr, mac->flags, - false /* force */); + zebra_evpn_mac_send_del_to_client(zevpn->vni, macaddr, + mac->flags, false); } /* Set "auto" and "remote" forwarding info. */ @@ -2021,8 +2092,10 @@ int zebra_evpn_mac_remote_macip_add(struct zebra_evpn *zevpn, else UNSET_FLAG(mac->flags, ZEBRA_MAC_REMOTE_DEF_GW); - zebra_evpn_dup_addr_detect_for_mac( - zvrf, mac, mac->fwd_info.r_vtep_ip, do_dad, &is_dup_detect, false); + zebra_evpn_dup_addr_detect_for_mac(zvrf, mac, + mac->fwd_info.r_vtep_ip, + do_dad, &is_dup_detect, + false); if (!is_dup_detect) { zebra_evpn_process_neigh_on_remote_mac_add(zevpn, mac); @@ -2049,7 +2122,7 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, bool inform_client = false; bool upd_neigh = false; bool is_dup_detect = false; - struct in_addr vtep_ip = {.s_addr = 0}; + struct in_addr vtep_ip = { .s_addr = 0 }; bool es_change = false; bool new_bgp_ready; /* assume inactive if not present or if not local */ @@ -2064,11 +2137,10 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, mac = zebra_evpn_mac_lookup(zevpn, macaddr); if (!mac) { if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) - zlog_debug( - "ADD %sMAC %pEA intf %s(%u) VID %u -> VNI %u%s", - sticky ? "sticky " : "", - macaddr, ifp->name, ifp->ifindex, vid, zevpn->vni, - local_inactive ? " local-inactive" : ""); + zlog_debug("ADD %sMAC %pEA intf %s(%u) VID %u -> VNI %u%s", + sticky ? "sticky " : "", macaddr, ifp->name, + ifp->ifindex, vid, zevpn->vni, + local_inactive ? " local-inactive" : ""); mac = zebra_evpn_mac_add(zevpn, macaddr); SET_FLAG(mac->flags, ZEBRA_MAC_LOCAL); @@ -2080,12 +2152,12 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "UPD %sMAC %pEA intf %s(%u) VID %u -> VNI %u %scurFlags %s", - sticky ? "sticky " : "", - macaddr, ifp->name, ifp->ifindex, vid, zevpn->vni, - local_inactive ? "local-inactive " : "", - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("UPD %sMAC %pEA intf %s(%u) VID %u -> VNI %u %scurFlags %s", + sticky ? "sticky " : "", macaddr, ifp->name, + ifp->ifindex, vid, zevpn->vni, + local_inactive ? 
"local-inactive " : "", + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } if (CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)) { @@ -2094,27 +2166,34 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, bool old_static; zebra_evpn_mac_get_access_info(mac, &old_ifp, &old_vid); - old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); - old_local_inactive = !!(mac->flags & ZEBRA_MAC_LOCAL_INACTIVE); + old_bgp_ready = + zebra_evpn_mac_is_ready_for_bgp(mac->flags); + old_local_inactive = !!(mac->flags & + ZEBRA_MAC_LOCAL_INACTIVE); old_static = zebra_evpn_mac_is_static(mac); if (CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY)) mac_sticky = true; - es_change = zebra_evpn_local_mac_update_fwd_info(mac, ifp, vid); + es_change = zebra_evpn_local_mac_update_fwd_info(mac, + ifp, + vid); /* * Update any changes and if changes are relevant to * BGP, note it. */ - if (mac_sticky == sticky && old_ifp == ifp && old_vid == vid - && old_local_inactive == local_inactive - && dp_static == old_static && !es_change) { + if (mac_sticky == sticky && old_ifp == ifp && + old_vid == vid && + old_local_inactive == local_inactive && + dp_static == old_static && !es_change) { if (IS_ZEBRA_DEBUG_VXLAN) - zlog_debug( - " Add/Update %sMAC %pEA intf %s(%u) VID %u -> VNI %u%s, " - "entry exists and has not changed ", - sticky ? "sticky " : "", - macaddr, ifp->name, ifp->ifindex, vid, zevpn->vni, - local_inactive ? " local_inactive" : ""); + zlog_debug(" Add/Update %sMAC %pEA intf %s(%u) VID %u -> VNI %u%s, " + "entry exists and has not changed ", + sticky ? "sticky " : "", + macaddr, ifp->name, + ifp->ifindex, vid, zevpn->vni, + local_inactive + ? " local_inactive" + : ""); return 0; } if (mac_sticky != sticky) { @@ -2139,9 +2218,11 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, /* force drop the peer/sync info as it is * simply no longer relevant */ - if (CHECK_FLAG(mac->flags, ZEBRA_MAC_ALL_PEER_FLAGS)) { + if (CHECK_FLAG(mac->flags, + ZEBRA_MAC_ALL_PEER_FLAGS)) { zebra_evpn_mac_clear_sync_info(mac); - new_static = zebra_evpn_mac_is_static(mac); + new_static = + zebra_evpn_mac_is_static(mac); /* if we clear peer-flags we * also need to notify the dataplane * to drop the static flag @@ -2150,8 +2231,8 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, inform_dataplane = true; } } - } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) - || CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO)) { + } else if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE) || + CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO)) { bool do_dad = false; /* @@ -2161,16 +2242,17 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, * operator error. 
*/ if (CHECK_FLAG(mac->flags, ZEBRA_MAC_STICKY)) { - flog_warn( - EC_ZEBRA_STICKY_MAC_ALREADY_LEARNT, - "MAC %pEA already learnt as remote sticky MAC behind VTEP %pI4 VNI %u", - macaddr, &mac->fwd_info.r_vtep_ip, zevpn->vni); + flog_warn(EC_ZEBRA_STICKY_MAC_ALREADY_LEARNT, + "MAC %pEA already learnt as remote sticky MAC behind VTEP %pI4 VNI %u", + macaddr, &mac->fwd_info.r_vtep_ip, + zevpn->vni); return 0; } /* If an actual move, compute MAC's seq number */ if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)) { - mac->loc_seq = MAX(mac->rem_seq + 1, mac->loc_seq); + mac->loc_seq = MAX(mac->rem_seq + 1, + mac->loc_seq); vtep_ip = mac->fwd_info.r_vtep_ip; /* Trigger DAD for remote MAC */ do_dad = true; @@ -2179,7 +2261,9 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, UNSET_FLAG(mac->flags, ZEBRA_MAC_REMOTE); UNSET_FLAG(mac->flags, ZEBRA_MAC_AUTO); SET_FLAG(mac->flags, ZEBRA_MAC_LOCAL); - es_change = zebra_evpn_local_mac_update_fwd_info(mac, ifp, vid); + es_change = zebra_evpn_local_mac_update_fwd_info(mac, + ifp, + vid); if (sticky) SET_FLAG(mac->flags, ZEBRA_MAC_STICKY); else @@ -2191,8 +2275,9 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, inform_client = true; upd_neigh = true; - zebra_evpn_dup_addr_detect_for_mac( - zvrf, mac, vtep_ip, do_dad, &is_dup_detect, true); + zebra_evpn_dup_addr_detect_for_mac(zvrf, mac, vtep_ip, + do_dad, + &is_dup_detect, true); if (is_dup_detect) { inform_client = false; upd_neigh = false; @@ -2218,17 +2303,17 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, /* if local-activity has changed we need update bgp * even if bgp already knows about the mac */ - if ((old_local_inactive != local_inactive) - || (new_bgp_ready != old_bgp_ready)) { + if ((old_local_inactive != local_inactive) || + (new_bgp_ready != old_bgp_ready)) { if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "local mac vni %u mac %pEA es %s seq %d f %s%s", - zevpn->vni, macaddr, - mac->es ? mac->es->esi_str : "", mac->loc_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf)), - local_inactive ? "local-inactive" : ""); + zlog_debug("local mac vni %u mac %pEA es %s seq %d f %s%s", + zevpn->vni, macaddr, + mac->es ? mac->es->esi_str : "", mac->loc_seq, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf)), + local_inactive ? "local-inactive" : ""); } if (!is_dup_detect) @@ -2242,16 +2327,17 @@ int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, /* Inform dataplane if required. */ if (inform_dataplane) - zebra_evpn_sync_mac_dp_install(mac, false /* set_inactive */, - false /* force_clear_static */, __func__); + zebra_evpn_sync_mac_dp_install(mac, false, false, __func__); /* Inform BGP if required. */ if (inform_client) - zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, new_bgp_ready); + zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, + new_bgp_ready); /* Process all neighbors associated with this MAC, if required. */ if (upd_neigh) - zebra_evpn_process_neigh_on_local_mac_change(zevpn, mac, 0, es_change); + zebra_evpn_process_neigh_on_local_mac_change(zevpn, mac, 0, + es_change); return 0; } @@ -2277,24 +2363,25 @@ int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac, if (IS_ZEBRA_DEBUG_EVPN_MH_MAC) { char mac_buf[MAC_BUF_SIZE]; - zlog_debug( - "re-add sync-mac vni %u mac %pEA es %s seq %d f %s", - zevpn->vni, &mac->macaddr, - mac->es ? 
mac->es->esi_str : "-", mac->loc_seq, - zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, sizeof(mac_buf))); + zlog_debug("re-add sync-mac vni %u mac %pEA es %s seq %d f %s", + zevpn->vni, &mac->macaddr, + mac->es ? mac->es->esi_str : "-", + mac->loc_seq, + zebra_evpn_zebra_mac_flag_dump(mac, mac_buf, + sizeof(mac_buf))); } /* inform-bgp about change in local-activity if any */ if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE)) { SET_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE); - new_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); - zebra_evpn_mac_send_add_del_to_client( - mac, old_bgp_ready, new_bgp_ready); + new_bgp_ready = + zebra_evpn_mac_is_ready_for_bgp(mac->flags); + zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, + new_bgp_ready); } /* re-install the inactive entry in the kernel */ - zebra_evpn_sync_mac_dp_install(mac, true /* set_inactive */, - false /* force_clear_static */, __func__); + zebra_evpn_sync_mac_dp_install(mac, true, false, __func__); return 0; } @@ -2307,7 +2394,7 @@ int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac, /* Remove MAC from BGP. */ zebra_evpn_mac_send_del_to_client(zevpn->vni, &mac->macaddr, mac->flags, - clear_static /* force */); + clear_static); zebra_evpn_es_mac_deref_entry(mac); @@ -2329,8 +2416,7 @@ int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac, return 0; } -void zebra_evpn_mac_gw_macip_add(struct interface *ifp, - struct zebra_evpn *zevpn, +void zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn, const struct ipaddr *ip, struct zebra_mac **macp, const struct ethaddr *macaddr, @@ -2376,15 +2462,17 @@ void zebra_evpn_mac_svi_del(struct interface *ifp, struct zebra_evpn *zevpn) memcpy(&macaddr.octet, ifp->hw_addr, ETH_ALEN); mac = zebra_evpn_mac_lookup(zevpn, &macaddr); - if (mac && CHECK_FLAG(mac->flags, ZEBRA_MAC_SVI)) { - if (IS_ZEBRA_DEBUG_EVPN_MH_ES) - zlog_debug("SVI %s mac free", ifp->name); - - old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); - UNSET_FLAG(mac->flags, ZEBRA_MAC_SVI); - zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, false); - zebra_evpn_deref_ip2mac(mac->zevpn, mac); - } + + if (!mac || CHECK_FLAG(mac->flags, ZEBRA_MAC_SVI)) + return; + + if (IS_ZEBRA_DEBUG_EVPN_MH_ES) + zlog_debug("SVI %s mac free", ifp->name); + + old_bgp_ready = zebra_evpn_mac_is_ready_for_bgp(mac->flags); + UNSET_FLAG(mac->flags, ZEBRA_MAC_SVI); + zebra_evpn_mac_send_add_del_to_client(mac, old_bgp_ready, false); + zebra_evpn_deref_ip2mac(mac->zevpn, mac); } void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn) @@ -2395,8 +2483,8 @@ void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn) bool old_bgp_ready; bool new_bgp_ready; - if (!zebra_evpn_mh_do_adv_svi_mac() - || !zebra_evpn_send_to_client_ok(zevpn)) + if (!zebra_evpn_mh_do_adv_svi_mac() || + !zebra_evpn_send_to_client_ok(zevpn)) return; memcpy(&macaddr.octet, ifp->hw_addr, ETH_ALEN); @@ -2410,8 +2498,9 @@ void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn) if (IS_ZEBRA_DEBUG_EVPN_MH_ES) zlog_debug("SVI %s mac add", zif->ifp->name); - old_bgp_ready = - (mac && zebra_evpn_mac_is_ready_for_bgp(mac->flags)) ? true : false; + old_bgp_ready = (mac && zebra_evpn_mac_is_ready_for_bgp(mac->flags)) + ? 
true + : false; zebra_evpn_mac_gw_macip_add(ifp, zevpn, NULL, &mac, &macaddr, 0, false); diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c index d1c9cd54af..9549af5f14 100644 --- a/zebra/zebra_mpls.c +++ b/zebra/zebra_mpls.c @@ -37,6 +37,7 @@ DEFINE_MTYPE_STATIC(ZEBRA, LSP, "MPLS LSP object"); DEFINE_MTYPE_STATIC(ZEBRA, FEC, "MPLS FEC object"); DEFINE_MTYPE_STATIC(ZEBRA, NHLFE, "MPLS nexthop object"); +DEFINE_MTYPE_STATIC(ZEBRA, NH_LABEL, "Nexthop label"); bool mpls_enabled; bool mpls_pw_reach_strict; /* Strict reachability checking */ @@ -50,7 +51,7 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, struct route_node *rn, struct route_entry *re); static int lsp_uninstall(struct zebra_vrf *zvrf, mpls_label_t label); static int fec_change_update_lsp(struct zebra_vrf *zvrf, struct zebra_fec *fec, - mpls_label_t old_label); + mpls_label_t old_label, bool uninstall); static int fec_send(struct zebra_fec *fec, struct zserv *client); static void fec_update_clients(struct zebra_fec *fec); static void fec_print(struct zebra_fec *fec, struct vty *vty); @@ -161,12 +162,14 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, enum lsp_types_t lsp_type; char buf[BUFSIZ]; int added, changed; + bool zvrf_nexthop_resolution; /* Lookup table. */ lsp_table = zvrf->lsp_table; if (!lsp_table) return -1; + zvrf_nexthop_resolution = zvrf->zebra_mpls_fec_nexthop_resolution; lsp_type = lsp_type_from_re_type(re->type); added = changed = 0; @@ -180,13 +183,20 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, * the label advertised by the recursive nexthop (plus we don't have the * logic yet to push multiple labels). */ - for (nexthop = re->nhe->nhg.nexthop; - nexthop; nexthop = nexthop->next) { - /* Skip inactive and recursive entries. 
*/ - if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) + nexthop = re->nhe->nhg.nexthop; + while (nexthop) { + if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) { + nexthop = + nexthop_next_resolution(nexthop, + zvrf_nexthop_resolution); continue; - if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) + } + if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)) { + nexthop = + nexthop_next_resolution(nexthop, + zvrf_nexthop_resolution); continue; + } nhlfe = nhlfe_find(&lsp->nhlfe_list, lsp_type, nexthop->type, &nexthop->gate, @@ -194,9 +204,13 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, if (nhlfe) { /* Clear deleted flag (in case it was set) */ UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED); - if (nexthop_labels_match(nhlfe->nexthop, nexthop)) + if (nexthop_labels_match(nhlfe->nexthop, nexthop)) { /* No change */ + nexthop = + nexthop_next_resolution(nexthop, + zvrf_nexthop_resolution); continue; + } if (IS_ZEBRA_DEBUG_MPLS) { @@ -221,11 +235,18 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, return -1; if (IS_ZEBRA_DEBUG_MPLS) { + char label_str[MPLS_LABEL_STRLEN]; + nhlfe2str(nhlfe, buf, BUFSIZ); - zlog_debug( - "Add LSP in-label %u type %d nexthop %s out-label %u", - lsp->ile.in_label, lsp_type, buf, - nexthop->nh_label->label[0]); + zlog_debug("Add LSP in-label %u type %d nexthop %s out-label %s", + lsp->ile.in_label, lsp_type, buf, + mpls_label2str(nexthop->nh_label + ->num_labels, + nexthop->nh_label->label, + label_str, + sizeof(label_str), + nexthop->nh_label_type, + 0)); } lsp->addr_family = NHLFE_FAMILY(nhlfe); @@ -234,6 +255,8 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, SET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED); added++; } + nexthop = nexthop_next_resolution(nexthop, + zvrf_nexthop_resolution); } /* Queue LSP for processing if necessary. If no NHLFE got added (special @@ -245,6 +268,8 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label, return -1; } else { lsp_check_free(lsp_table, &lsp); + /* failed to install a new LSP */ + return -1; } return 0; @@ -353,7 +378,7 @@ static void fec_evaluate(struct zebra_vrf *zvrf) fec_update_clients(fec); /* Update label forwarding entries appropriately */ - fec_change_update_lsp(zvrf, fec, old_label); + fec_change_update_lsp(zvrf, fec, old_label, false); } } } @@ -384,7 +409,7 @@ static uint32_t fec_derive_label_from_index(struct zebra_vrf *zvrf, * entries, as appropriate. */ static int fec_change_update_lsp(struct zebra_vrf *zvrf, struct zebra_fec *fec, - mpls_label_t old_label) + mpls_label_t old_label, bool uninstall) { struct route_table *table; struct route_node *rn; @@ -416,11 +441,17 @@ static int fec_change_update_lsp(struct zebra_vrf *zvrf, struct zebra_fec *fec, break; } - if (!re || !zebra_rib_labeled_unicast(re)) + if (!re || !zebra_rib_labeled_unicast(re)) { + if (uninstall) + lsp_uninstall(zvrf, fec->label); return 0; + } - if (lsp_install(zvrf, fec->label, rn, re)) + if (lsp_install(zvrf, fec->label, rn, re)) { + if (uninstall) + lsp_uninstall(zvrf, fec->label); return -1; + } return 0; } @@ -448,6 +479,30 @@ static int fec_send(struct zebra_fec *fec, struct zserv *client) } /* + * Upon reconfiguring nexthop-resolution updates, update the + * lsp entries accordingly. 
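The rewritten lsp_install() loop above steps through nexthops with nexthop_next_resolution() so that, when fec nexthop-resolution is enabled on the VRF, the walk can descend into the nexthops a route was recursively resolved through. The sketch below is not that library helper and deliberately skips the walk back to a parent's siblings; the struct and field names are assumptions used only to show the basic idea.

/* Illustrative sketch only, not FRR's nexthop_next_resolution(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct nh {
	struct nh *next;	/* sibling in the nexthop group           */
	struct nh *resolved;	/* chain the nexthop was resolved through */
	const char *name;
};

static struct nh *nh_next(const struct nh *cur, bool follow_resolution)
{
	/* Descend into the resolution chain when asked to. */
	if (follow_resolution && cur->resolved)
		return cur->resolved;

	/* Otherwise continue with the next nexthop in the group. */
	return cur->next;
}

int main(void)
{
	struct nh via = { .name = "via 192.0.2.1" };
	struct nh top = { .resolved = &via, .name = "recursive" };

	/* Visits "recursive" first, then the nexthop it resolved to. */
	for (struct nh *nh = &top; nh; nh = nh_next(nh, true))
		printf("visit %s\n", nh->name);
	return 0;
}

With resolution disabled, the same iterator only walks the sibling list, which matches the older behaviour of skipping recursive entries entirely.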
+ */ +void zebra_mpls_fec_nexthop_resolution_update(struct zebra_vrf *zvrf) +{ + int af; + struct route_node *rn; + struct zebra_fec *fec; + + for (af = AFI_IP; af < AFI_MAX; af++) { + if (zvrf->fec_table[af] == NULL) + continue; + for (rn = route_top(zvrf->fec_table[af]); rn; + rn = route_next(rn)) { + if (!rn->info) + continue; + fec = rn->info; + fec_change_update_lsp(zvrf, fec, MPLS_INVALID_LABEL, + true); + } + } +} + +/* * Update all registered clients about this FEC. Caller should've updated * FEC and ensure no duplicate updates. */ @@ -1398,7 +1453,31 @@ static int nhlfe_del(struct zebra_nhlfe *nhlfe) static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe, struct mpls_label_stack *nh_label) { - nhlfe->nexthop->nh_label->label[0] = nh_label->label[0]; + struct mpls_label_stack *nh_label_tmp; + int i; + + /* Enforce limit on label stack size */ + if (nh_label->num_labels > MPLS_MAX_LABELS) + nh_label->num_labels = MPLS_MAX_LABELS; + + /* Resize the array to accommodate the new label stack */ + if (nh_label->num_labels > nhlfe->nexthop->nh_label->num_labels) { + nh_label_tmp = XREALLOC(MTYPE_NH_LABEL, nhlfe->nexthop->nh_label, + sizeof(struct mpls_label_stack) + + nh_label->num_labels * + sizeof(mpls_label_t)); + if (nh_label_tmp) { + nhlfe->nexthop->nh_label = nh_label_tmp; + nhlfe->nexthop->nh_label->num_labels = + nh_label->num_labels; + } else + nh_label->num_labels = + nhlfe->nexthop->nh_label->num_labels; + } + + /* Copy the label stack into the array */ + for (i = 0; i < nh_label->num_labels; i++) + nhlfe->nexthop->nh_label->label[i] = nh_label->label[i]; } static int mpls_lsp_uninstall_all(struct hash *lsp_table, struct zebra_lsp *lsp, @@ -2117,7 +2196,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx) /* * Install dynamic LSP entry. */ -int zebra_mpls_lsp_install(struct zebra_vrf *zvrf, struct route_node *rn, +void zebra_mpls_lsp_install(struct zebra_vrf *zvrf, struct route_node *rn, struct route_entry *re) { struct route_table *table; @@ -2125,23 +2204,20 @@ int zebra_mpls_lsp_install(struct zebra_vrf *zvrf, struct route_node *rn, table = zvrf->fec_table[family2afi(PREFIX_FAMILY(&rn->p))]; if (!table) - return -1; + return; /* See if there is a configured label binding for this FEC. */ fec = fec_find(table, &rn->p); if (!fec || fec->label == MPLS_INVALID_LABEL) - return 0; + return; /* We cannot install a label forwarding entry if local label is the * implicit-null label. 
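nhlfe_out_label_update() above now copies a whole label stack instead of only label[0], clamping the depth and reallocating the stored stack when the incoming one is deeper. The standalone sketch below illustrates that pattern with plain realloc() in place of FRR's typed allocators; MAX_DEPTH and the struct layout are assumptions, and it returns early on allocation failure rather than reusing the old depth as the hunk does.

/* Illustrative sketch only -- not zebra's nhlfe_out_label_update(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_DEPTH 16

struct label_stack {
	uint8_t num_labels;
	uint32_t label[];	/* flexible array, num_labels entries */
};

static struct label_stack *stack_update(struct label_stack *old,
					const uint32_t *labels, uint8_t n)
{
	if (n > MAX_DEPTH)
		n = MAX_DEPTH;

	/* Grow the allocation only when the new stack is deeper. */
	if (!old || n > old->num_labels) {
		struct label_stack *tmp =
			realloc(old, sizeof(*old) + n * sizeof(uint32_t));
		if (!tmp)
			return old;	/* keep the previous stack on failure */
		old = tmp;
	}

	/* Copy every label, not just the top of the stack. */
	old->num_labels = n;
	memcpy(old->label, labels, n * sizeof(uint32_t));
	return old;
}

int main(void)
{
	uint32_t labels[] = { 16001, 16002 };
	struct label_stack *st = stack_update(NULL, labels, 2);

	if (st)
		printf("depth %u, top %u\n", (unsigned)st->num_labels,
		       (unsigned)st->label[0]);
	free(st);
	return 0;
}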
*/ if (fec->label == MPLS_LABEL_IMPLICIT_NULL) - return 0; - - if (lsp_install(zvrf, fec->label, rn, re)) - return -1; + return; - return 0; + lsp_install(zvrf, fec->label, rn, re); } /* @@ -2345,7 +2421,7 @@ int zebra_mpls_fec_register(struct zebra_vrf *zvrf, struct prefix *p, } if (new_client || label_change) - return fec_change_update_lsp(zvrf, fec, old_label); + return fec_change_update_lsp(zvrf, fec, old_label, false); return 0; } @@ -2386,7 +2462,7 @@ int zebra_mpls_fec_unregister(struct zebra_vrf *zvrf, struct prefix *p, list_isempty(fec->client_list)) { mpls_label_t old_label = fec->label; fec->label = MPLS_INVALID_LABEL; /* reset */ - fec_change_update_lsp(zvrf, fec, old_label); + fec_change_update_lsp(zvrf, fec, old_label, false); fec_del(fec); } @@ -2556,7 +2632,7 @@ int zebra_mpls_static_fec_add(struct zebra_vrf *zvrf, struct prefix *p, fec_update_clients(fec); /* Update label forwarding entries appropriately */ - ret = fec_change_update_lsp(zvrf, fec, old_label); + ret = fec_change_update_lsp(zvrf, fec, old_label, false); } return ret; @@ -2609,7 +2685,7 @@ int zebra_mpls_static_fec_del(struct zebra_vrf *zvrf, struct prefix *p) fec_update_clients(fec); /* Update label forwarding entries appropriately */ - return fec_change_update_lsp(zvrf, fec, old_label); + return fec_change_update_lsp(zvrf, fec, old_label, false); } /* @@ -3794,7 +3870,7 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf, if (tt->nrows > 1) { char *table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } ttable_del(tt); } diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h index dd6f960146..27f5bdbc46 100644 --- a/zebra/zebra_mpls.h +++ b/zebra/zebra_mpls.h @@ -146,7 +146,7 @@ int zebra_mpls_write_label_block_config(struct vty *vty, struct zebra_vrf *vrf); /* * Install dynamic LSP entry. */ -int zebra_mpls_lsp_install(struct zebra_vrf *zvrf, struct route_node *rn, +void zebra_mpls_lsp_install(struct zebra_vrf *zvrf, struct route_node *rn, struct route_entry *re); /* @@ -257,6 +257,12 @@ void zebra_mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf, const struct zapi_labels *zl); /* + * Upon reconfiguring nexthop-resolution updates, update the + * lsp entries accordingly. + */ +void zebra_mpls_fec_nexthop_resolution_update(struct zebra_vrf *zvrf); + +/* * Uninstall all NHLFEs bound to a single FEC. 
* * mpls_ftn_uninstall -> Called to enqueue into early label processing diff --git a/zebra/zebra_nb.c b/zebra/zebra_nb.c index eee9323082..0a7ed5db41 100644 --- a/zebra/zebra_nb.c +++ b/zebra/zebra_nb.c @@ -884,6 +884,13 @@ const struct frr_yang_module_info frr_zebra_info = { } }, { + .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/mpls/fec-nexthop-resolution", + .cbs = { + .modify = lib_vrf_zebra_mpls_fec_nexthop_resolution_modify, + .destroy = lib_vrf_zebra_mpls_fec_nexthop_resolution_destroy, + } + }, + { .xpath = "/frr-vrf:lib/vrf/frr-zebra:zebra/ribs/rib", .cbs = { .get_next = lib_vrf_zebra_ribs_rib_get_next, diff --git a/zebra/zebra_nb.h b/zebra/zebra_nb.h index b40ed68229..785291bc68 100644 --- a/zebra/zebra_nb.h +++ b/zebra/zebra_nb.h @@ -309,6 +309,10 @@ int lib_vrf_zebra_netns_table_range_create(struct nb_cb_create_args *args); int lib_vrf_zebra_netns_table_range_destroy(struct nb_cb_destroy_args *args); int lib_vrf_zebra_netns_table_range_start_modify(struct nb_cb_modify_args *args); int lib_vrf_zebra_netns_table_range_end_modify(struct nb_cb_modify_args *args); +int lib_vrf_zebra_mpls_fec_nexthop_resolution_modify( + struct nb_cb_modify_args *args); +int lib_vrf_zebra_mpls_fec_nexthop_resolution_destroy( + struct nb_cb_destroy_args *args); const void *lib_vrf_zebra_ribs_rib_get_next(struct nb_cb_get_next_args *args); int lib_vrf_zebra_ribs_rib_get_keys(struct nb_cb_get_keys_args *args); const void * diff --git a/zebra/zebra_nb_config.c b/zebra/zebra_nb_config.c index ae6232a1bb..09c0091ec6 100644 --- a/zebra/zebra_nb_config.c +++ b/zebra/zebra_nb_config.c @@ -3781,6 +3781,59 @@ int lib_vrf_zebra_netns_table_range_end_modify(struct nb_cb_modify_args *args) } /* + * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/mpls/fec-nexthop-resolution + */ +int lib_vrf_zebra_mpls_fec_nexthop_resolution_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct zebra_vrf *zvrf; + bool fec_nexthop_resolution; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + vrf = nb_running_get_entry(args->dnode, NULL, true); + zvrf = vrf->info; + + fec_nexthop_resolution = yang_dnode_get_bool(args->dnode, NULL); + + if (zvrf->zebra_mpls_fec_nexthop_resolution == fec_nexthop_resolution) + return NB_OK; + + zvrf->zebra_mpls_fec_nexthop_resolution = fec_nexthop_resolution; + + zebra_mpls_fec_nexthop_resolution_update(zvrf); + + return NB_OK; +} + +int lib_vrf_zebra_mpls_fec_nexthop_resolution_destroy( + struct nb_cb_destroy_args *args) +{ + struct vrf *vrf; + struct zebra_vrf *zvrf; + bool fec_nexthop_resolution; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + vrf = nb_running_get_entry(args->dnode, NULL, true); + zvrf = vrf->info; + + fec_nexthop_resolution = DFLT_ZEBRA_IP_NHT_RESOLVE_VIA_DEFAULT; + + if (zvrf->zebra_mpls_fec_nexthop_resolution == fec_nexthop_resolution) + return NB_OK; + + zvrf->zebra_mpls_fec_nexthop_resolution = fec_nexthop_resolution; + + zebra_mpls_fec_nexthop_resolution_update(zvrf); + + return NB_OK; +} + +/* * XPath: /frr-vrf:lib/vrf/frr-zebra:zebra/l3vni-id */ int lib_vrf_zebra_l3vni_id_modify(struct nb_cb_modify_args *args) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 27b7f6340b..4ee9dc5fcf 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -356,18 +356,23 @@ void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi, */ if (nh && (nh->next == NULL)) { switch (nh->type) { - case NEXTHOP_TYPE_IFINDEX: - case NEXTHOP_TYPE_BLACKHOLE: /* * This switch case handles setting the afi different - * for ipv4/v6 routes. 
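The new fec-nexthop-resolution northbound callbacks above both follow an apply-only-on-change pattern: read the value, return if it matches the stored setting, otherwise store it and resync the MPLS FEC state. A minimal standalone version of that pattern, with invented vrf_cfg and resync_cb names standing in for the zebra VRF structure and zebra_mpls_fec_nexthop_resolution_update(), might look like this:

/* Illustrative sketch only, outside FRR's northbound framework. */
#include <stdbool.h>
#include <stdio.h>

struct vrf_cfg {
	bool fec_nexthop_resolution;
	void (*resync_cb)(struct vrf_cfg *);	/* e.g. re-walk the FEC tables */
};

static void apply_fec_nexthop_resolution(struct vrf_cfg *cfg, bool enable)
{
	/* Nothing to do when the configured value does not change. */
	if (cfg->fec_nexthop_resolution == enable)
		return;

	cfg->fec_nexthop_resolution = enable;

	/* Side effect: existing LSP/FEC state must be re-evaluated. */
	if (cfg->resync_cb)
		cfg->resync_cb(cfg);
}

static void resync(struct vrf_cfg *cfg)
{
	printf("resync FECs (resolution now %s)\n",
	       cfg->fec_nexthop_resolution ? "on" : "off");
}

int main(void)
{
	struct vrf_cfg cfg = { .resync_cb = resync };

	apply_fec_nexthop_resolution(&cfg, true);	/* triggers one resync */
	apply_fec_nexthop_resolution(&cfg, true);	/* no-op               */
	return 0;
}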
Ifindex/blackhole nexthop + * for ipv4/v6 routes. Ifindex nexthop * objects cannot be ambiguous, they must be Address - * Family specific. If we get here, we will either use - * the AF of the route, or the one we got passed from - * here from the kernel. + * Family specific as that the kernel relies on these + * for some reason. blackholes can be v6 because the + * v4 kernel infrastructure allows the usage of v6 + * blackholes in this case. if we get here, we will + * either use the AF of the route, or the one we got + * passed from here from the kernel. */ + case NEXTHOP_TYPE_IFINDEX: nhe->afi = afi; break; + case NEXTHOP_TYPE_BLACKHOLE: + nhe->afi = AFI_IP6; + break; case NEXTHOP_TYPE_IPV4_IFINDEX: case NEXTHOP_TYPE_IPV4: nhe->afi = AFI_IP; @@ -414,6 +419,14 @@ struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig, if (orig->backup_info) nhe->backup_info = nhg_backup_copy(orig->backup_info); + /* + * This is a special case, Zebra needs to track + * whether or not this flag was set on a initial + * unresolved NHG + */ + if (CHECK_FLAG(orig->flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL)) + SET_FLAG(nhe->flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL); + return nhe; } @@ -1154,7 +1167,8 @@ static void zebra_nhg_handle_install(struct nhg_hash_entry *nhe, bool install) "%s nh id %u (flags 0x%x) associated dependent NHG %pNG install", __func__, nhe->id, nhe->flags, rb_node_dep->nhe); - zebra_nhg_install_kernel(rb_node_dep->nhe); + zebra_nhg_install_kernel(rb_node_dep->nhe, + ZEBRA_ROUTE_MAX); } } } @@ -1173,7 +1187,7 @@ static void zebra_nhg_handle_kernel_state_change(struct nhg_hash_entry *nhe, (is_delete ? "deleted" : "updated"), nhe); UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); - zebra_nhg_install_kernel(nhe); + zebra_nhg_install_kernel(nhe, ZEBRA_ROUTE_MAX); } else zebra_nhg_handle_uninstall(nhe); } @@ -3144,7 +3158,7 @@ uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, return zebra_nhg_nhe2grp_internal(grp, 0, nhe, nhe, max_num); } -void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe) +void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe, uint8_t type) { struct nhg_connected *rb_node_dep = NULL; @@ -3157,9 +3171,16 @@ void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe) nhe); } + if ((type != ZEBRA_ROUTE_CONNECT && type != ZEBRA_ROUTE_LOCAL && + type != ZEBRA_ROUTE_KERNEL) && + CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL)) { + UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL); + UNSET_FLAG(nhe->flags, NEXTHOP_GROUP_INSTALLED); + } + /* Make sure all depends are installed/queued */ frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { - zebra_nhg_install_kernel(rb_node_dep->nhe); + zebra_nhg_install_kernel(rb_node_dep->nhe, type); } if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID) && @@ -3183,9 +3204,6 @@ void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe) nhe); break; case ZEBRA_DPLANE_REQUEST_SUCCESS: - flog_err(EC_ZEBRA_DP_INVALID_RC, - "DPlane returned an invalid result code for attempt of installation of %pNG into the kernel", - nhe); break; } } @@ -3511,7 +3529,7 @@ struct nhg_hash_entry *zebra_nhg_proto_add(uint32_t id, int type, zebra_nhg_set_valid_if_active(new); - zebra_nhg_install_kernel(new); + zebra_nhg_install_kernel(new, ZEBRA_ROUTE_MAX); if (old) { /* @@ -3747,7 +3765,8 @@ void zebra_interface_nhg_reinstall(struct interface *ifp) "%s install nhe %pNG nh type %u flags 0x%x", __func__, rb_node_dep->nhe, nh->type, rb_node_dep->nhe->flags); - zebra_nhg_install_kernel(rb_node_dep->nhe); 
+ zebra_nhg_install_kernel(rb_node_dep->nhe, + ZEBRA_ROUTE_MAX); /* Don't need to modify dependents if installed */ if (CHECK_FLAG(rb_node_dep->nhe->flags, diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index 3bb697aa75..712c1057a1 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -152,6 +152,25 @@ struct nhg_hash_entry { * when installation is successful. */ #define NEXTHOP_GROUP_REINSTALL (1 << 8) + +/* + * Connected routes and kernel routes received + * from the kernel or created by Zebra do no + * need to be installed. For connected, this + * is because the routes are in the local table + * but not imported and we create an amalgram + * route for it. For kernel routes if the route + * is an pre-nhg route, there is no nexthop associated + * with it and we should not create it until it + * is used by something else. + * The reason for this is because is that this just + * fills up the DPlane's nexthop slots when there + * are a bunch of interfaces or pre-existing routes + * As such let's not initially install it ( but + * pretend it was successful ) and if another route + * chooses this NHG then we can install it then. + */ +#define NEXTHOP_GROUP_INITIAL_DELAY_INSTALL (1 << 9) }; /* Upper 4 bits of the NHG are reserved for indicating the NHG type */ @@ -364,7 +383,7 @@ extern uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, int size); /* Dataplane install/uninstall */ -extern void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe); +extern void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe, uint8_t type); extern void zebra_nhg_uninstall_kernel(struct nhg_hash_entry *nhe); extern void zebra_interface_nhg_reinstall(struct interface *ifp); diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index b4baee148a..402a3104b9 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -316,12 +316,18 @@ static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea, } #define rnode_debug(node, vrf_id, msg, ...) \ - zlog_debug("%s: (%u:%pZNt):%pZN: " msg, __func__, vrf_id, node, node, \ - ##__VA_ARGS__) + do { \ + struct vrf *vrf = vrf_lookup_by_id(vrf_id); \ + zlog_debug("%s: (%s:%pZNt):%pZN: " msg, __func__, \ + VRF_LOGNAME(vrf), node, node, ##__VA_ARGS__); \ + } while (0) #define rnode_info(node, vrf_id, msg, ...) \ - zlog_info("%s: (%u:%pZNt):%pZN: " msg, __func__, vrf_id, node, node, \ - ##__VA_ARGS__) + do { \ + struct vrf *vrf = vrf_lookup_by_id(vrf_id); \ + zlog_info("%s: (%s:%pZNt):%pZN: " msg, __func__, \ + VRF_LOGNAME(vrf), node, node, ##__VA_ARGS__); \ + } while (0) static char *_dump_re_status(const struct route_entry *re, char *buf, size_t len) @@ -651,8 +657,10 @@ struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p, vrf_id_t vrf_id) int zebra_rib_labeled_unicast(struct route_entry *re) { struct nexthop *nexthop = NULL; + struct zebra_vrf *zvrf = vrf_info_lookup(re->vrf_id); - if (re->type != ZEBRA_ROUTE_BGP) + if ((re->type != ZEBRA_ROUTE_BGP) && + !zvrf->zebra_mpls_fec_nexthop_resolution) return 0; for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) @@ -688,7 +696,7 @@ void rib_install_kernel(struct route_node *rn, struct route_entry *re, /* * Install the resolved nexthop object first. 
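The NEXTHOP_GROUP_INITIAL_DELAY_INSTALL comment above describes deferring dataplane installation of nexthop groups that only back connected, local, or kernel routes until some other route type actually uses them. The sketch below is a simplified model of that description, not the zebra_nhg code; the flag names, enum, and nhg struct are assumptions.

/* Illustrative sketch only -- not zebra_nhg_install_kernel(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum route_type { RT_CONNECT, RT_LOCAL, RT_KERNEL, RT_STATIC, RT_BGP };

#define NHG_INSTALLED		(1u << 0)
#define NHG_DELAY_INSTALL	(1u << 1)

struct nhg {
	uint32_t flags;
};

static bool route_type_delays_install(enum route_type t)
{
	return t == RT_CONNECT || t == RT_LOCAL || t == RT_KERNEL;
}

static void nhg_install(struct nhg *nhg, enum route_type requester)
{
	/* A "real" user of the NHG cancels the deferred install. */
	if (!route_type_delays_install(requester) &&
	    (nhg->flags & NHG_DELAY_INSTALL)) {
		nhg->flags &= ~NHG_DELAY_INSTALL;
		nhg->flags &= ~NHG_INSTALLED;
	}

	if (nhg->flags & NHG_DELAY_INSTALL) {
		/* Pretend success without occupying a dataplane slot. */
		printf("install deferred\n");
		return;
	}

	if (!(nhg->flags & NHG_INSTALLED)) {
		nhg->flags |= NHG_INSTALLED;
		printf("installed in dataplane\n");
	}
}

int main(void)
{
	struct nhg nhg = { .flags = NHG_DELAY_INSTALL };

	nhg_install(&nhg, RT_CONNECT);	/* stays deferred       */
	nhg_install(&nhg, RT_BGP);	/* now really installed */
	return 0;
}

Deferring in this way keeps pre-existing kernel routes and connected prefixes from consuming dataplane nexthop slots that nothing else references yet, as the header comment explains.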
*/ - zebra_nhg_install_kernel(re->nhe); + zebra_nhg_install_kernel(re->nhe, re->type); /* * If this is a replace to a new RE let the originator of the RE @@ -4111,7 +4119,7 @@ void rib_delnode(struct route_node *rn, struct route_entry *re) * Helper that debugs a single nexthop within a route-entry */ static void _route_entry_dump_nh(const struct route_entry *re, - const char *straddr, + const char *straddr, const struct vrf *re_vrf, const struct nexthop *nexthop) { char nhname[PREFIX_STRLEN]; @@ -4166,35 +4174,32 @@ static void _route_entry_dump_nh(const struct route_entry *re, if (nexthop->weight) snprintf(wgt_str, sizeof(wgt_str), "wgt %d,", nexthop->weight); - zlog_debug("%s: %s %s[%u] %svrf %s(%u) %s%s with flags %s%s%s%s%s%s%s%s%s", - straddr, (nexthop->rparent ? " NH" : "NH"), nhname, - nexthop->ifindex, label_str, vrf ? vrf->name : "Unknown", - nexthop->vrf_id, + zlog_debug("%s(%s): %s %s[%u] %svrf %s(%u) %s%s with flags %s%s%s%s%s%s%s%s%s", + straddr, VRF_LOGNAME(re_vrf), + (nexthop->rparent ? " NH" : "NH"), nhname, nexthop->ifindex, + label_str, vrf ? vrf->name : "Unknown", nexthop->vrf_id, wgt_str, backup_str, - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) - ? "ACTIVE " - : ""), - (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) - ? "FIB " - : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) ? "ACTIVE " + : ""), + (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED) ? "FIB " : ""), (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE) - ? "RECURSIVE " - : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK) - ? "ONLINK " - : ""), + ? "RECURSIVE " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK) ? "ONLINK " + : ""), (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE) - ? "DUPLICATE " - : ""), + ? "DUPLICATE " + : ""), (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RNH_FILTERED) - ? "FILTERED " : ""), + ? "FILTERED " + : ""), (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP) - ? "BACKUP " : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_SRTE) - ? "SRTE " : ""), - (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN) - ? "EVPN " : "")); - + ? "BACKUP " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_SRTE) ? "SRTE " + : ""), + (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_EVPN) ? "EVPN " + : "")); } /* This function dumps the contents of a given RE entry into @@ -4223,32 +4228,33 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, is_srcdst ? 
prefix2str(src_pp, srcaddr, sizeof(srcaddr)) : "", VRF_LOGNAME(vrf), re->vrf_id); - zlog_debug("%s: uptime == %lu, type == %u, instance == %d, table == %d", - straddr, (unsigned long)re->uptime, re->type, re->instance, - re->table); - zlog_debug( - "%s: metric == %u, mtu == %u, distance == %u, flags == %sstatus == %s", - straddr, re->metric, re->mtu, re->distance, - zclient_dump_route_flags(re->flags, flags_buf, - sizeof(flags_buf)), - _dump_re_status(re, status_buf, sizeof(status_buf))); - zlog_debug("%s: tag == %u, nexthop_num == %u, nexthop_active_num == %u", - straddr, re->tag, nexthop_group_nexthop_num(&(re->nhe->nhg)), + zlog_debug("%s(%s): uptime == %lu, type == %u, instance == %d, table == %d", + straddr, VRF_LOGNAME(vrf), (unsigned long)re->uptime, + re->type, re->instance, re->table); + zlog_debug("%s(%s): metric == %u, mtu == %u, distance == %u, flags == %sstatus == %s", + straddr, VRF_LOGNAME(vrf), re->metric, re->mtu, re->distance, + zclient_dump_route_flags(re->flags, flags_buf, + sizeof(flags_buf)), + _dump_re_status(re, status_buf, sizeof(status_buf))); + zlog_debug("%s(%s): tag == %u, nexthop_num == %u, nexthop_active_num == %u", + straddr, VRF_LOGNAME(vrf), re->tag, + nexthop_group_nexthop_num(&(re->nhe->nhg)), nexthop_group_active_nexthop_num(&(re->nhe->nhg))); /* Dump nexthops */ for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) - _route_entry_dump_nh(re, straddr, nexthop); + _route_entry_dump_nh(re, straddr, vrf, nexthop); if (zebra_nhg_get_backup_nhg(re->nhe)) { - zlog_debug("%s: backup nexthops:", straddr); + zlog_debug("%s(%s): backup nexthops:", straddr, + VRF_LOGNAME(vrf)); nhg = zebra_nhg_get_backup_nhg(re->nhe); for (ALL_NEXTHOPS_PTR(nhg, nexthop)) - _route_entry_dump_nh(re, straddr, nexthop); + _route_entry_dump_nh(re, straddr, vrf, nexthop); } - zlog_debug("%s: dump complete", straddr); + zlog_debug("%s(%s): dump complete", straddr, VRF_LOGNAME(vrf)); } static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data) @@ -4269,11 +4275,14 @@ static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data) listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data); mq->size++; - if (IS_ZEBRA_DEBUG_RIB_DETAILED) - zlog_debug("Route %pFX(%u) (%s) queued for processing into sub-queue %s", - &ere->p, ere->re->vrf_id, + if (IS_ZEBRA_DEBUG_RIB_DETAILED) { + struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id); + + zlog_debug("Route %pFX(%s) (%s) queued for processing into sub-queue %s", + &ere->p, VRF_LOGNAME(vrf), ere->deletion ? "delete" : "add", subqueue2str(META_QUEUE_EARLY_ROUTE)); + } return 0; } @@ -4384,9 +4393,14 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, * Use a temporary nhe to convey info to the common/main api. */ zebra_nhe_init(&nhe, afi, (ng ? 
ng->nexthop : NULL)); - if (ng) + if (ng) { nhe.nhg.nexthop = ng->nexthop; - else if (re->nhe_id > 0) + + if (re->type == ZEBRA_ROUTE_CONNECT || + re->type == ZEBRA_ROUTE_LOCAL || + re->type == ZEBRA_ROUTE_KERNEL) + SET_FLAG(nhe.flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL); + } else if (re->nhe_id > 0) nhe.id = re->nhe_id; n = zebra_nhe_copy(&nhe, 0); @@ -4413,7 +4427,8 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p, return -1; } - if (ifp->ifindex == ng->nexthop->ifindex) + if (ng && ng->nexthop && + ifp->ifindex == ng->nexthop->ifindex) re->type = ZEBRA_ROUTE_CONNECT; } } @@ -5102,6 +5117,17 @@ static int rib_dplane_results(struct dplane_ctx_list_head *ctxlist) return 0; } +uint32_t zebra_rib_dplane_results_count(void) +{ + uint32_t count; + + frr_with_mutex (&dplane_mutex) { + count = dplane_ctx_queue_count(&rib_dplane_q); + } + + return count; +} + /* * Ensure there are no empty slots in the route_info array. * Every route type in zebra should be present there. diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c index 303a81bb3e..89317be74d 100644 --- a/zebra/zebra_rnh.c +++ b/zebra/zebra_rnh.c @@ -1344,13 +1344,17 @@ static void print_rnh(struct route_node *rn, struct vty *vty, json_object *json) } if (rnh->state) { - if (json) + if (json) { json_object_string_add( json_nht, "resolvedProtocol", zebra_route_string(rnh->state->type)); - else - vty_out(vty, " resolved via %s\n", - zebra_route_string(rnh->state->type)); + json_object_string_addf(json_nht, "prefix", "%pFX", + &rnh->resolved_route); + } else { + vty_out(vty, " resolved via %s, prefix %pFX\n", + zebra_route_string(rnh->state->type), + &rnh->resolved_route); + } for (nexthop = rnh->state->nhe->nhg.nexthop; nexthop; nexthop = nexthop->next) { diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h index 5cbfab1ddc..f97138c811 100644 --- a/zebra/zebra_vrf.h +++ b/zebra/zebra_vrf.h @@ -173,6 +173,7 @@ struct zebra_vrf { bool zebra_rnh_ip_default_route; bool zebra_rnh_ipv6_default_route; + bool zebra_mpls_fec_nexthop_resolution; }; #define PROTO_RM_NAME(zvrf, afi, rtype) zvrf->proto_rm[afi][rtype].name #define NHT_RM_NAME(zvrf, afi, rtype) zvrf->nht_rm[afi][rtype].name diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c index 3b786e3257..501e9d5268 100644 --- a/zebra/zebra_vty.c +++ b/zebra/zebra_vty.c @@ -1195,6 +1195,7 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe, json_object_string_add(json, "uptime", up_str); json_object_string_add(json, "vrf", vrf_id_to_name(nhe->vrf_id)); + json_object_string_add(json, "afi", afi2str(nhe->afi)); } else { vty_out(vty, "ID: %u (%s)\n", nhe->id, @@ -1208,7 +1209,8 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe, vty_out(vty, "\n"); vty_out(vty, " Uptime: %s\n", up_str); - vty_out(vty, " VRF: %s\n", vrf_id_to_name(nhe->vrf_id)); + vty_out(vty, " VRF: %s(%s)\n", vrf_id_to_name(nhe->vrf_id), + afi2str(nhe->afi)); } if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_VALID)) { @@ -1228,6 +1230,13 @@ static void show_nexthop_group_out(struct vty *vty, struct nhg_hash_entry *nhe, else vty_out(vty, ", Installed"); } + if (CHECK_FLAG(nhe->flags, NEXTHOP_GROUP_INITIAL_DELAY_INSTALL)) { + if (json) + json_object_boolean_true_add(json, + "initialDelay"); + else + vty_out(vty, ", Initial Delay"); + } if (!json) vty_out(vty, "\n"); } @@ -3908,7 +3917,7 @@ DEFUN (show_zebra, out = ttable_dump(table, "\n"); vty_out(vty, "%s\n", out); - XFREE(MTYPE_TMP, out); + XFREE(MTYPE_TMP_TTABLE, out); ttable_del(table); vty_out(vty, diff --git 
a/zebra/zserv.c b/zebra/zserv.c index a731f7f278..07e3996643 100644 --- a/zebra/zserv.c +++ b/zebra/zserv.c @@ -161,9 +161,11 @@ void zserv_log_message(const char *errmsg, struct stream *msg, if (errmsg) zlog_debug("%s", errmsg); if (hdr) { + struct vrf *vrf = vrf_lookup_by_id(hdr->vrf_id); + zlog_debug(" Length: %d", hdr->length); zlog_debug("Command: %s", zserv_command_string(hdr->command)); - zlog_debug(" VRF: %u", hdr->vrf_id); + zlog_debug(" VRF: %s(%u)", VRF_LOGNAME(vrf), hdr->vrf_id); } stream_hexdump(msg); } @@ -425,11 +427,13 @@ static void zserv_read(struct event *thread) } /* Debug packet information. */ - if (IS_ZEBRA_DEBUG_PACKET) - zlog_debug("zebra message[%s:%u:%u] comes from socket [%d]", + if (IS_ZEBRA_DEBUG_PACKET) { + struct vrf *vrf = vrf_lookup_by_id(hdr.vrf_id); + + zlog_debug("zebra message[%s:%s:%u] comes from socket [%d]", zserv_command_string(hdr.command), - hdr.vrf_id, hdr.length, - sock); + VRF_LOGNAME(vrf), hdr.length, sock); + } stream_set_getp(client->ibuf_work, 0); struct stream *msg = stream_dup(client->ibuf_work); |
