diff options
140 files changed, 5735 insertions, 638 deletions
diff --git a/bfdd/bfd.c b/bfdd/bfd.c index a1fb67d357..4367f253e1 100644 --- a/bfdd/bfd.c +++ b/bfdd/bfd.c @@ -348,7 +348,7 @@ int bfd_session_enable(struct bfd_session *bs) /* Sanity check: don't leak open sockets. */ if (bs->sock != -1) { if (bglobal.debug_peer_event) - zlog_debug("session-enable: previous socket open"); + zlog_debug("%s: previous socket open", __func__); close(bs->sock); bs->sock = -1; @@ -952,7 +952,7 @@ int ptm_bfd_sess_del(struct bfd_peer_cfg *bpc) } if (bglobal.debug_peer_event) - zlog_debug("session-delete: %s", bs_to_string(bs)); + zlog_debug("%s: %s", __func__, bs_to_string(bs)); control_notify_config(BCM_NOTIFY_CONFIG_DELETE, bs); diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c index 98411a8732..382f78a0f6 100644 --- a/bfdd/bfd_packet.c +++ b/bfdd/bfd_packet.c @@ -578,8 +578,8 @@ ssize_t bfd_recv_ipv4(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval)); if (ttlval > 255) { if (bglobal.debug_network) - zlog_debug("ipv4-recv: invalid TTL: %u", - ttlval); + zlog_debug("%s: invalid TTL: %u", + __func__, ttlval); return -1; } *ttl = ttlval; @@ -686,8 +686,8 @@ ssize_t bfd_recv_ipv6(int sd, uint8_t *msgbuf, size_t msgbuflen, uint8_t *ttl, memcpy(&ttlval, CMSG_DATA(cm), sizeof(ttlval)); if (ttlval > 255) { if (bglobal.debug_network) - zlog_debug("ipv6-recv: invalid TTL: %u", - ttlval); + zlog_debug("%s: invalid TTL: %u", + __func__, ttlval); return -1; } @@ -1127,13 +1127,13 @@ int bp_udp_send_fp(int sd, uint8_t *data, size_t datalen, if (wlen <= 0) { if (bglobal.debug_network) - zlog_debug("udp-send: loopback failure: (%d) %s", errno, - strerror(errno)); + zlog_debug("%s: loopback failure: (%d) %s", __func__, + errno, strerror(errno)); return -1; } else if (wlen < (ssize_t)datalen) { if (bglobal.debug_network) - zlog_debug("udp-send: partial send: %zd expected %zu", - wlen, datalen); + zlog_debug("%s: partial send: %zd expected %zu", + __func__, wlen, datalen); return -1; } @@ -1194,13 
+1194,13 @@ int bp_udp_send(int sd, uint8_t ttl, uint8_t *data, size_t datalen, wlen = sendmsg(sd, &msg, 0); if (wlen <= 0) { if (bglobal.debug_network) - zlog_debug("udp-send: loopback failure: (%d) %s", errno, - strerror(errno)); + zlog_debug("%s: loopback failure: (%d) %s", __func__, + errno, strerror(errno)); return -1; } else if (wlen < (ssize_t)datalen) { if (bglobal.debug_network) - zlog_debug("udp-send: partial send: %zd expected %zu", - wlen, datalen); + zlog_debug("%s: partial send: %zd expected %zu", + __func__, wlen, datalen); return -1; } @@ -1221,7 +1221,7 @@ int bp_set_ttl(int sd, uint8_t value) int ttl = value; if (setsockopt(sd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) == -1) { - zlog_warn("set-ttl: setsockopt(IP_TTL, %d): %s", value, + zlog_warn("%s: setsockopt(IP_TTL, %d): %s", __func__, value, strerror(errno)); return -1; } @@ -1234,7 +1234,7 @@ int bp_set_tos(int sd, uint8_t value) int tos = value; if (setsockopt(sd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) == -1) { - zlog_warn("set-tos: setsockopt(IP_TOS, %d): %s", value, + zlog_warn("%s: setsockopt(IP_TOS, %d): %s", __func__, value, strerror(errno)); return -1; } @@ -1247,8 +1247,8 @@ static bool bp_set_reuse_addr(int sd) int one = 1; if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) { - zlog_warn("set-reuse-addr: setsockopt(SO_REUSEADDR, %d): %s", - one, strerror(errno)); + zlog_warn("%s: setsockopt(SO_REUSEADDR, %d): %s", __func__, one, + strerror(errno)); return false; } return true; @@ -1259,8 +1259,8 @@ static bool bp_set_reuse_port(int sd) int one = 1; if (setsockopt(sd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) == -1) { - zlog_warn("set-reuse-port: setsockopt(SO_REUSEPORT, %d): %s", - one, strerror(errno)); + zlog_warn("%s: setsockopt(SO_REUSEPORT, %d): %s", __func__, one, + strerror(errno)); return false; } return true; diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h index 5007fafc29..4963ea64d0 100644 --- a/bgpd/bgp_attr.h +++ b/bgpd/bgp_attr.h @@ -348,6 +348,7 @@ 
struct attr { #define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5) #define BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED (1 << 6) #define BATTR_RMAP_LINK_BW_SET (1 << 7) +#define BATTR_RMAP_L3VPN_ACCEPT_GRE (1 << 8) /* Router Reflector related structure. */ struct cluster_list { diff --git a/bgpd/bgp_btoa.c b/bgpd/bgp_btoa.c index 13c42d95f4..aa14d99f18 100644 --- a/bgpd/bgp_btoa.c +++ b/bgpd/bgp_btoa.c @@ -77,7 +77,7 @@ static void attr_parse(struct stream *s, uint16_t len) lim = s->getp + len; - printf("attr_parse s->getp %zd, len %d, lim %d\n", s->getp, len, lim); + printf("%s s->getp %zd, len %d, lim %d\n", __func__, s->getp, len, lim); while (s->getp < lim) { flag = stream_getc(s); diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c index 9f64341640..11e84f00b4 100644 --- a/bgpd/bgp_dump.c +++ b/bgpd/bgp_dump.c @@ -122,7 +122,7 @@ static FILE *bgp_dump_open_file(struct bgp_dump *bgp_dump) ret = strftime(realpath, MAXPATHLEN, bgp_dump->filename, &tm); if (ret == 0) { - flog_warn(EC_BGP_DUMP, "bgp_dump_open_file: strftime error"); + flog_warn(EC_BGP_DUMP, "%s: strftime error", __func__); return NULL; } @@ -134,7 +134,7 @@ static FILE *bgp_dump_open_file(struct bgp_dump *bgp_dump) bgp_dump->fp = fopen(realpath, "w"); if (bgp_dump->fp == NULL) { - flog_warn(EC_BGP_DUMP, "bgp_dump_open_file: %s: %s", realpath, + flog_warn(EC_BGP_DUMP, "%s: %s: %s", __func__, realpath, strerror(errno)); umask(oldumask); return NULL; diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c index 4120524e63..3f627521e7 100644 --- a/bgpd/bgp_ecommunity.c +++ b/bgpd/bgp_ecommunity.c @@ -1188,6 +1188,23 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter) return str_buf; } +bool ecommunity_include(struct ecommunity *e1, struct ecommunity *e2) +{ + uint32_t i, j; + + if (!e1 || !e2) + return false; + for (i = 0; i < e1->size; ++i) { + for (j = 0; j < e2->size; ++j) { + if (!memcmp(e1->val + (i * e1->unit_size), + e2->val + (j * e2->unit_size), + e1->unit_size)) + return true; + 
} + } + return false; +} + bool ecommunity_match(const struct ecommunity *ecom1, const struct ecommunity *ecom2) { diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h index f22855c329..84e310c3f9 100644 --- a/bgpd/bgp_ecommunity.h +++ b/bgpd/bgp_ecommunity.h @@ -257,6 +257,7 @@ extern struct ecommunity *ecommunity_str2com_ipv6(const char *str, int type, int keyword_included); extern char *ecommunity_ecom2str(struct ecommunity *, int, int); extern void ecommunity_strfree(char **s); +extern bool ecommunity_include(struct ecommunity *e1, struct ecommunity *e2); extern bool ecommunity_match(const struct ecommunity *, const struct ecommunity *); extern char *ecommunity_str(struct ecommunity *); diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c index ce05005eab..58f5e9a226 100644 --- a/bgpd/bgp_evpn.c +++ b/bgpd/bgp_evpn.c @@ -191,6 +191,11 @@ static void vrf_import_rt_free(struct vrf_irt_node *irt) XFREE(MTYPE_BGP_EVPN_VRF_IMPORT_RT, irt); } +static void hash_vrf_import_rt_free(struct vrf_irt_node *irt) +{ + XFREE(MTYPE_BGP_EVPN_VRF_IMPORT_RT, irt); +} + /* * Function to lookup Import RT node - used to map a RT to set of * VNIs importing routes with that RT. @@ -281,6 +286,11 @@ static void import_rt_free(struct bgp *bgp, struct irt_node *irt) XFREE(MTYPE_BGP_EVPN_IMPORT_RT, irt); } +static void hash_import_rt_free(struct irt_node *irt) +{ + XFREE(MTYPE_BGP_EVPN_IMPORT_RT, irt); +} + /* * Function to lookup Import RT node - used to map a RT to set of * VNIs importing routes with that RT. 
@@ -3621,8 +3631,10 @@ static int update_advertise_vni_routes(struct bgp *bgp, struct bgpevpn *vpn) pi->type == ZEBRA_ROUTE_BGP && pi->sub_type == BGP_ROUTE_STATIC) break; - if (!pi) /* unexpected */ + if (!pi) { + bgp_dest_unlock_node(dest); return 0; + } attr = pi->attr; global_dest = bgp_global_evpn_node_get(bgp->rib[afi][safi], @@ -5355,6 +5367,11 @@ void bgp_evpn_free(struct bgp *bgp, struct bgpevpn *vpn) XFREE(MTYPE_BGP_EVPN, vpn); } +static void hash_evpn_free(struct bgpevpn *vpn) +{ + XFREE(MTYPE_BGP_EVPN, vpn); +} + /* * Import evpn route from global table to VNI/VRF/ESI. */ @@ -5963,12 +5980,16 @@ void bgp_evpn_cleanup(struct bgp *bgp) (void (*)(struct hash_bucket *, void *))free_vni_entry, bgp); + hash_clean(bgp->import_rt_hash, (void (*)(void *))hash_import_rt_free); hash_free(bgp->import_rt_hash); bgp->import_rt_hash = NULL; + hash_clean(bgp->vrf_import_rt_hash, + (void (*)(void *))hash_vrf_import_rt_free); hash_free(bgp->vrf_import_rt_hash); bgp->vrf_import_rt_hash = NULL; + hash_clean(bgp->vni_svi_hash, (void (*)(void *))hash_evpn_free); hash_free(bgp->vni_svi_hash); bgp->vni_svi_hash = NULL; hash_free(bgp->vnihash); diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c index 95a0c31b55..de63618580 100644 --- a/bgpd/bgp_evpn_mh.c +++ b/bgpd/bgp_evpn_mh.c @@ -4548,6 +4548,11 @@ static void bgp_evpn_nh_del(struct bgp_evpn_nh *n) XFREE(MTYPE_BGP_EVPN_NH, tmp_n); } +static void hash_evpn_nh_free(struct bgp_evpn_nh *ben) +{ + XFREE(MTYPE_BGP_EVPN_NH, ben); +} + static unsigned int bgp_evpn_nh_hash_keymake(const void *p) { const struct bgp_evpn_nh *n = p; @@ -4612,6 +4617,7 @@ void bgp_evpn_nh_finish(struct bgp *bgp_vrf) bgp_vrf->evpn_nh_table, (void (*)(struct hash_bucket *, void *))bgp_evpn_nh_flush_cb, NULL); + hash_clean(bgp_vrf->evpn_nh_table, (void (*)(void *))hash_evpn_nh_free); hash_free(bgp_vrf->evpn_nh_table); bgp_vrf->evpn_nh_table = NULL; } diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c index 4277162339..f6b87dccdb 100644 --- 
a/bgpd/bgp_evpn_vty.c +++ b/bgpd/bgp_evpn_vty.c @@ -1012,13 +1012,12 @@ static void show_l3vni_entry(struct vty *vty, struct bgp *bgp, /* If there are multiple export RTs we break here and show only * one */ - if (!json) + if (!json) { + vty_out(vty, "%-37s", vrf_id_to_name(bgp->vrf_id)); break; + } } - if (!json) - vty_out(vty, "%-37s", vrf_id_to_name(bgp->vrf_id)); - if (json) { char vni_str[VNI_STR_LEN]; @@ -1151,13 +1150,13 @@ static void show_vni_entry(struct hash_bucket *bucket, void *args[]) /* If there are multiple export RTs we break here and show only * one */ - if (!json) + if (!json) { + vty_out(vty, "%-37s", + vrf_id_to_name(vpn->tenant_vrf_id)); break; + } } - if (!json) - vty_out(vty, "%-37s", vrf_id_to_name(vpn->tenant_vrf_id)); - if (json) { char vni_str[VNI_STR_LEN]; diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 7b96555913..a8accc25f5 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -953,7 +953,7 @@ void bgp_start_routeadv(struct bgp *bgp) struct listnode *node, *nnode; struct peer *peer; - zlog_info("bgp_start_routeadv(), update hold status %d", + zlog_info("%s, update hold status %d", __func__, bgp->main_peers_update_hold); if (bgp->main_peers_update_hold) @@ -1687,9 +1687,8 @@ static void bgp_connect_check(struct thread *thread) static int bgp_connect_success(struct peer *peer) { if (peer->fd < 0) { - flog_err(EC_BGP_CONNECT, - "bgp_connect_success peer's fd is negative value %d", - peer->fd); + flog_err(EC_BGP_CONNECT, "%s peer's fd is negative value %d", + __func__, peer->fd); bgp_stop(peer); return -1; } @@ -1910,7 +1909,7 @@ int bgp_start(struct peer *peer) peer->host, peer->fd); if (peer->fd < 0) { flog_err(EC_BGP_FSM, - "bgp_start peer's fd is negative value %d", + "%s peer's fd is negative value %d", __func__, peer->fd); return -1; } diff --git a/bgpd/bgp_keepalives.c b/bgpd/bgp_keepalives.c index 158f163358..604d6c9509 100644 --- a/bgpd/bgp_keepalives.c +++ b/bgpd/bgp_keepalives.c @@ -175,6 +175,15 @@ void 
*bgp_keepalives_start(void *arg) struct timeval next_update = {0, 0}; struct timespec next_update_ts = {0, 0}; + /* + * The RCU mechanism for each pthread is initialized in a "locked" + * state. That's ok for pthreads using the frr_pthread, + * thread_fetch event loop, because that event loop unlocks regularly. + * For foreign pthreads, the lock needs to be unlocked so that the + * background rcu pthread can run. + */ + rcu_read_unlock(); + peerhash_mtx = XCALLOC(MTYPE_TMP, sizeof(pthread_mutex_t)); peerhash_cond = XCALLOC(MTYPE_TMP, sizeof(pthread_cond_t)); diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index 1297eb440e..90ae580bab 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -511,6 +511,8 @@ int main(int argc, char **argv) ", bgp@%s:%d", address, bm->port); } + bgp_if_init(); + frr_config_fork(); /* must be called after fork() */ bgp_gr_apply_running_config(); diff --git a/bgpd/bgp_memory.c b/bgpd/bgp_memory.c index b9f1ba3971..850657d35e 100644 --- a/bgpd/bgp_memory.c +++ b/bgpd/bgp_memory.c @@ -131,14 +131,6 @@ DEFINE_MTYPE(BGPD, BGP_EVPN_ES_EVI, "BGP EVPN ES-per-EVI Information"); DEFINE_MTYPE(BGPD, BGP_EVPN_ES_VRF, "BGP EVPN ES-per-VRF Information"); DEFINE_MTYPE(BGPD, BGP_EVPN_IMPORT_RT, "BGP EVPN Import RT"); DEFINE_MTYPE(BGPD, BGP_EVPN_VRF_IMPORT_RT, "BGP EVPN VRF Import RT"); -DEFINE_MTYPE(BGPD, BGP_EVPN_MACIP, "BGP EVPN MAC IP"); - -DEFINE_MTYPE(BGPD, BGP_FLOWSPEC, "BGP flowspec"); -DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_RULE, "BGP flowspec rule"); -DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_RULE_STR, "BGP flowspec rule str"); -DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_COMPILED, "BGP flowspec compiled"); -DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_NAME, "BGP flowspec name"); -DEFINE_MTYPE(BGPD, BGP_FLOWSPEC_INDEX, "BGP flowspec index"); DEFINE_MTYPE(BGPD, BGP_SRV6_L3VPN, "BGP prefix-sid srv6 l3vpn servcie"); DEFINE_MTYPE(BGPD, BGP_SRV6_VPN, "BGP prefix-sid srv6 vpn service"); diff --git a/bgpd/bgp_memory.h b/bgpd/bgp_memory.h index d4d7b0cf88..510cfa21c9 100644 --- 
a/bgpd/bgp_memory.h +++ b/bgpd/bgp_memory.h @@ -128,14 +128,6 @@ DECLARE_MTYPE(BGP_EVPN_ES_EVI_VTEP); DECLARE_MTYPE(BGP_EVPN); DECLARE_MTYPE(BGP_EVPN_IMPORT_RT); DECLARE_MTYPE(BGP_EVPN_VRF_IMPORT_RT); -DECLARE_MTYPE(BGP_EVPN_MACIP); - -DECLARE_MTYPE(BGP_FLOWSPEC); -DECLARE_MTYPE(BGP_FLOWSPEC_RULE); -DECLARE_MTYPE(BGP_FLOWSPEC_RULE_STR); -DECLARE_MTYPE(BGP_FLOWSPEC_COMPILED); -DECLARE_MTYPE(BGP_FLOWSPEC_NAME); -DECLARE_MTYPE(BGP_FLOWSPEC_INDEX); DECLARE_MTYPE(BGP_SRV6_L3VPN); DECLARE_MTYPE(BGP_SRV6_VPN); diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index e99c2ba661..5a039b25bc 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -662,25 +662,6 @@ void transpose_sid(struct in6_addr *sid, uint32_t label, uint8_t offset, } } -static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2) -{ - uint32_t i, j; - - if (!e1 || !e2) - return false; - for (i = 0; i < e1->size; ++i) { - for (j = 0; j < e2->size; ++j) { - if (!memcmp(e1->val + (i * e1->unit_size), - e2->val + (j * e2->unit_size), - e1->unit_size)) { - - return true; - } - } - } - return false; -} - static bool labels_same(struct bgp_path_info *bpi, mpls_label_t *label, uint32_t n) { @@ -997,6 +978,11 @@ leak_update(struct bgp *to_bgp, struct bgp_dest *bn, new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_IMPORTED, 0, to_bgp->peer_self, new_attr, bn); + if (source_bpi->peer) { + extra = bgp_path_info_extra_get(new); + extra->peer_orig = peer_lock(source_bpi->peer); + } + if (nexthop_self_flag) bgp_path_info_set_flag(bn, new, BGP_PATH_ANNC_NH_SELF); @@ -1502,7 +1488,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */ } /* Check for intersection of route targets */ - if (!ecom_intersect( + if (!ecommunity_include( to_bgp->vpn_policy[afi].rtlist[BGP_VPN_POLICY_DIR_FROMVPN], bgp_attr_get_ecommunity(path_vpn->attr))) { if (debug) @@ -1751,9 +1737,10 @@ void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp, /* from */ } /* Check for intersection of route targets */ - if 
(!ecom_intersect(bgp->vpn_policy[afi] - .rtlist[BGP_VPN_POLICY_DIR_FROMVPN], - bgp_attr_get_ecommunity(path_vpn->attr))) { + if (!ecommunity_include( + bgp->vpn_policy[afi] + .rtlist[BGP_VPN_POLICY_DIR_FROMVPN], + bgp_attr_get_ecommunity(path_vpn->attr))) { continue; } @@ -2932,7 +2919,7 @@ vrf_id_t get_first_vrf_for_redirect_with_rt(struct ecommunity *eckey) if (ec && eckey->unit_size != ec->unit_size) continue; - if (ecom_intersect(ec, eckey)) + if (ecommunity_include(ec, eckey)) return bgp->vrf_id; } return VRF_UNKNOWN; diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index 61f1b295ca..b38e5b7a9a 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -61,22 +61,88 @@ static int bgp_isvalid_nexthop(struct bgp_nexthop_cache *bnc) && bnc->nexthop_num > 0)); } -static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc) +static int bgp_isvalid_nexthop_for_ebgp(struct bgp_nexthop_cache *bnc, + struct bgp_path_info *path) +{ + struct interface *ifp = NULL; + struct nexthop *nexthop; + struct bgp_interface *iifp; + struct peer *peer; + + if (!path->extra || !path->extra->peer_orig) + return false; + + peer = path->extra->peer_orig; + + /* only connected ebgp peers are valid */ + if (peer->sort != BGP_PEER_EBGP || peer->ttl != BGP_DEFAULT_TTL || + CHECK_FLAG(peer->flags, PEER_FLAG_DISABLE_CONNECTED_CHECK) || + CHECK_FLAG(peer->bgp->flags, BGP_FLAG_DISABLE_NH_CONNECTED_CHK)) + return false; + + for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) { + if (nexthop->type == NEXTHOP_TYPE_IFINDEX || + nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX || + nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) { + ifp = if_lookup_by_index( + bnc->ifindex ? 
bnc->ifindex : nexthop->ifindex, + bnc->bgp->vrf_id); + } + if (!ifp) + continue; + iifp = ifp->info; + if (CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING)) + return true; + } + return false; +} + +static int bgp_isvalid_nexthop_for_mplsovergre(struct bgp_nexthop_cache *bnc, + struct bgp_path_info *path) +{ + struct interface *ifp = NULL; + struct nexthop *nexthop; + + for (nexthop = bnc->nexthop; nexthop; nexthop = nexthop->next) { + if (nexthop->type != NEXTHOP_TYPE_BLACKHOLE) { + ifp = if_lookup_by_index( + bnc->ifindex ? bnc->ifindex : nexthop->ifindex, + bnc->bgp->vrf_id); + if (ifp && (ifp->ll_type == ZEBRA_LLT_IPGRE || + ifp->ll_type == ZEBRA_LLT_IP6GRE)) + break; + } + } + if (!ifp) + return false; + + if (CHECK_FLAG(path->attr->rmap_change_flags, + BATTR_RMAP_L3VPN_ACCEPT_GRE)) + return true; + + return false; +} + +static int bgp_isvalid_nexthop_for_mpls(struct bgp_nexthop_cache *bnc, + struct bgp_path_info *path) { /* - * In the case of MPLS-VPN, the label is learned from LDP or other + * - In the case of MPLS-VPN, the label is learned from LDP or other * protocols, and nexthop tracking is enabled for the label. * The value is recorded as BGP_NEXTHOP_LABELED_VALID. - * In the case of SRv6-VPN, we need to track the reachability to the + * - In the case of SRv6-VPN, we need to track the reachability to the * SID (in other words, IPv6 address). As in MPLS, we need to record * the value as BGP_NEXTHOP_SID_VALID. However, this function is * currently not implemented, and this function assumes that all * Transit routes for SRv6-VPN are valid. 
+ * - Otherwise check for mpls-gre acceptance */ - return (bgp_zebra_num_connects() == 0 - || (bnc && bnc->nexthop_num > 0 - && (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID) - || bnc->bgp->srv6_enabled))); + return (bgp_zebra_num_connects() == 0 || + (bnc && (bnc->nexthop_num > 0 && + (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID) || + bnc->bgp->srv6_enabled || + bgp_isvalid_nexthop_for_ebgp(bnc, path) || + bgp_isvalid_nexthop_for_mplsovergre(bnc, path))))); } static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc) @@ -359,11 +425,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, */ if (bgp_route->inst_type == BGP_INSTANCE_TYPE_VIEW) return 1; - else if (safi == SAFI_UNICAST && pi - && pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra - && pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop) { - return bgp_isvalid_labeled_nexthop(bnc); - } else + else if (safi == SAFI_UNICAST && pi && + pi->sub_type == BGP_ROUTE_IMPORTED && pi->extra && + pi->extra->num_labels && !bnc->is_evpn_gwip_nexthop) + return bgp_isvalid_nexthop_for_mpls(bnc, pi); + else return (bgp_isvalid_nexthop(bnc)); } @@ -1063,7 +1129,8 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc) && (path->attr->evpn_overlay.type != OVERLAY_INDEX_GATEWAY_IP)) { bnc_is_valid_nexthop = - bgp_isvalid_labeled_nexthop(bnc) ? true : false; + bgp_isvalid_nexthop_for_mpls(bnc, path) ? 
true + : false; } else { if (bgp_update_martian_nexthop( bnc->bgp, afi, safi, path->type, diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 989b361597..9eb7407af6 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -255,6 +255,9 @@ void bgp_path_info_extra_free(struct bgp_path_info_extra **extra) if (e->bgp_orig) bgp_unlock(e->bgp_orig); + if (e->peer_orig) + peer_unlock(e->peer_orig); + if (e->aggr_suppressors) list_delete(&e->aggr_suppressors); @@ -1773,10 +1776,20 @@ static void bgp_peer_remove_private_as(struct bgp *bgp, afi_t afi, safi_t safi, static void bgp_peer_as_override(struct bgp *bgp, afi_t afi, safi_t safi, struct peer *peer, struct attr *attr) { + struct aspath *aspath; + if (peer->sort == BGP_PEER_EBGP && - peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE)) - attr->aspath = aspath_replace_specific_asn(attr->aspath, - peer->as, bgp->as); + peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE)) { + if (attr->aspath->refcnt) + aspath = aspath_dup(attr->aspath); + else + aspath = attr->aspath; + + attr->aspath = aspath_intern( + aspath_replace_specific_asn(aspath, peer->as, bgp->as)); + + aspath_free(aspath); + } } void bgp_attr_add_llgr_community(struct attr *attr) @@ -2025,8 +2038,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, && (IPV4_ADDR_SAME(&onlypeer->remote_id, &piattr->originator_id))) { if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug( - "%s [Update:SEND] %pFX originator-id is same as remote router-id", - onlypeer->host, p); + "%pBP [Update:SEND] %pFX originator-id is same as remote router-id", + onlypeer, p); return false; } @@ -2041,8 +2054,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug( - "%s [Update:SEND] %pFX is filtered via ORF", - peer->host, p); + "%pBP [Update:SEND] %pFX is filtered via ORF", + peer, p); return false; } } @@ -2050,8 +2063,8 @@ bool 
subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, /* Output filter check. */ if (bgp_output_filter(peer, p, piattr, afi, safi) == FILTER_DENY) { if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) - zlog_debug("%s [Update:SEND] %pFX is filtered", - peer->host, p); + zlog_debug("%pBP [Update:SEND] %pFX is filtered", peer, + p); return false; } @@ -2060,8 +2073,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, && aspath_loop_check(piattr->aspath, onlypeer->as)) { if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug( - "%s [Update:SEND] suppress announcement to peer AS %u that is part of AS path.", - onlypeer->host, onlypeer->as); + "%pBP [Update:SEND] suppress announcement to peer AS %u that is part of AS path.", + onlypeer, onlypeer->as); return false; } @@ -2070,8 +2083,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, if (aspath_loop_check(piattr->aspath, bgp->confed_id)) { if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug( - "%s [Update:SEND] suppress announcement to peer AS %u is AS path.", - peer->host, bgp->confed_id); + "%pBP [Update:SEND] suppress announcement to peer AS %u is AS path.", + peer, bgp->confed_id); return false; } } @@ -2278,9 +2291,8 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, if (ret == RMAP_DENYMATCH) { if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) zlog_debug( - "%s [Update:SEND] %pFX is filtered by route-map '%s'", - peer->host, p, - ROUTE_MAP_OUT_NAME(filter)); + "%pBP [Update:SEND] %pFX is filtered by route-map '%s'", + peer, p, ROUTE_MAP_OUT_NAME(filter)); bgp_attr_flush(rmap_path.attr); return false; } @@ -2317,6 +2329,29 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, if (aspath_check_as_sets(attr->aspath)) return false; + /* If neighbor sso is configured, then check if the route has + * SoO extended community and validate against the 
configured + * one. If they match, do not announce, to prevent routing + * loops. + */ + if ((attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES)) && + peer->soo[afi][safi]) { + struct ecommunity *ecomm_soo = peer->soo[afi][safi]; + struct ecommunity *ecomm = bgp_attr_get_ecommunity(attr); + + if ((ecommunity_lookup(ecomm, ECOMMUNITY_ENCODE_AS, + ECOMMUNITY_SITE_ORIGIN) || + ecommunity_lookup(ecomm, ECOMMUNITY_ENCODE_AS4, + ECOMMUNITY_SITE_ORIGIN)) && + ecommunity_include(ecomm, ecomm_soo)) { + if (bgp_debug_update(NULL, p, subgrp->update_group, 0)) + zlog_debug( + "%pBP [Update:SEND] %pFX is filtered by SoO extcommunity '%s'", + peer, p, ecommunity_str(ecomm_soo)); + return false; + } + } + /* Codification of AS 0 Processing */ if (aspath_check_as_zero(attr->aspath)) return false; @@ -4058,6 +4093,30 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id, return -1; } + /* If neighbor soo is configured, tag all incoming routes with + * this SoO tag and then filter out advertisements in + * subgroup_announce_check() if it matches the configured SoO + * on the other peer. + */ + if (peer->soo[afi][safi]) { + struct ecommunity *old_ecomm = + bgp_attr_get_ecommunity(&new_attr); + struct ecommunity *ecomm_soo = peer->soo[afi][safi]; + struct ecommunity *new_ecomm; + + if (old_ecomm) { + new_ecomm = ecommunity_merge(ecommunity_dup(old_ecomm), + ecomm_soo); + + if (!old_ecomm->refcnt) + ecommunity_free(&old_ecomm); + } else { + new_ecomm = ecommunity_dup(ecomm_soo); + } + + bgp_attr_set_ecommunity(&new_attr, new_ecomm); + } + attr_new = bgp_attr_intern(&new_attr); /* If the update is implicit withdraw. */ @@ -6433,6 +6492,7 @@ static int bgp_static_set(struct vty *vty, const char *negate, /* Label index cannot be changed. 
*/ if (bgp_static->label_index != label_index) { vty_out(vty, "%% cannot change label-index\n"); + bgp_dest_unlock_node(dest); return CMD_WARNING_CONFIG_FAILED; } @@ -7217,6 +7277,7 @@ static void bgp_aggregate_install( aggregate, atomic_aggregate, p); if (!attr) { + bgp_dest_unlock_node(dest); bgp_aggregate_delete(bgp, p, afi, safi, aggregate); if (BGP_DEBUG(update_groups, UPDATE_GROUPS)) zlog_debug("%s: %pFX null attribute", __func__, @@ -7377,31 +7438,21 @@ void bgp_aggregate_toggle_suppressed(struct bgp_aggregate *aggregate, static void bgp_aggregate_med_update(struct bgp_aggregate *aggregate, struct bgp *bgp, const struct prefix *p, afi_t afi, safi_t safi, - struct bgp_path_info *pi, bool is_adding) + struct bgp_path_info *pi) { /* MED matching disabled. */ if (!aggregate->match_med) return; - /* Aggregation with different MED, nothing to do. */ - if (aggregate->med_mismatched) - return; - - /* - * Test the current entry: - * - * is_adding == true: if the new entry doesn't match then we must - * install all suppressed routes. - * - * is_adding == false: if the entry being removed was the last - * unmatching entry then we can suppress all routes. + /* Aggregation with different MED, recheck if we have got equal MEDs + * now. */ - if (!is_adding) { - if (bgp_aggregate_test_all_med(aggregate, bgp, p, afi, safi) - && aggregate->summary_only) - bgp_aggregate_toggle_suppressed(aggregate, bgp, p, afi, - safi, true); - } else + if (aggregate->med_mismatched && + bgp_aggregate_test_all_med(aggregate, bgp, p, afi, safi) && + aggregate->summary_only) + bgp_aggregate_toggle_suppressed(aggregate, bgp, p, afi, safi, + true); + else bgp_aggregate_med_match(aggregate, bgp, pi); /* No mismatches, just quit. 
*/ @@ -7767,7 +7818,7 @@ static void bgp_add_route_to_aggregate(struct bgp *bgp, */ if (aggregate->match_med) bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, - pinew, true); + pinew); if (aggregate->summary_only && AGGREGATE_MED_VALID(aggregate)) aggr_suppress_path(aggregate, pinew); @@ -7890,8 +7941,7 @@ static void bgp_remove_route_from_aggregate(struct bgp *bgp, afi_t afi, * "unsuppressing" twice. */ if (aggregate->match_med) - bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, pi, - true); + bgp_aggregate_med_update(aggregate, bgp, aggr_p, afi, safi, pi); if (aggregate->count > 0) aggregate->count--; @@ -8416,6 +8466,17 @@ void bgp_redistribute_add(struct bgp *bgp, struct prefix *p, switch (nhtype) { case NEXTHOP_TYPE_IFINDEX: + switch (p->family) { + case AF_INET: + attr.nexthop.s_addr = INADDR_ANY; + attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4; + break; + case AF_INET6: + memset(&attr.mp_nexthop_global, 0, + sizeof(attr.mp_nexthop_global)); + attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; + break; + } break; case NEXTHOP_TYPE_IPV4: case NEXTHOP_TYPE_IPV4_IFINDEX: @@ -11099,13 +11160,14 @@ static int bgp_show_table(struct vty *vty, struct bgp *bgp, safi_t safi, if (type == bgp_show_type_route_map) { struct route_map *rmap = output_arg; struct bgp_path_info path; - struct attr dummy_attr; + struct bgp_path_info_extra extra; + struct attr dummy_attr = {}; route_map_result_t ret; dummy_attr = *pi->attr; - path.peer = pi->peer; - path.attr = &dummy_attr; + prep_for_rmap_apply(&path, &extra, dest, pi, + pi->peer, &dummy_attr); ret = route_map_apply(rmap, dest_p, &path); bgp_attr_flush(&dummy_attr); diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index 3e46c7043e..ddef4ca1bb 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -236,6 +236,12 @@ struct bgp_path_info_extra { struct bgp *bgp_orig; /* + * Original bgp session to know if the session is a + * connected EBGP session or not + */ + struct peer *peer_orig; + + /* * Nexthop in context 
of original bgp instance. Needed * for label resolution of core mpls routes exported to a vrf. * Set nexthop_orig.family to 0 if not valid. diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 33f68c9e88..64c867f988 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -1953,6 +1953,57 @@ static const struct route_map_rule_cmd route_set_ip_nexthop_cmd = { route_set_ip_nexthop_free }; +/* `set l3vpn next-hop encapsulation l3vpn gre' */ + +/* Set nexthop to object */ +struct rmap_l3vpn_nexthop_encapsulation_set { + uint8_t protocol; +}; + +static enum route_map_cmd_result_t +route_set_l3vpn_nexthop_encapsulation(void *rule, const struct prefix *prefix, + void *object) +{ + struct rmap_l3vpn_nexthop_encapsulation_set *rins = rule; + struct bgp_path_info *path; + + path = object; + + if (rins->protocol != IPPROTO_GRE) + return RMAP_OKAY; + + SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_L3VPN_ACCEPT_GRE); + return RMAP_OKAY; +} + +/* Route map `l3vpn nexthop encapsulation' compile function. */ +static void *route_set_l3vpn_nexthop_encapsulation_compile(const char *arg) +{ + struct rmap_l3vpn_nexthop_encapsulation_set *rins; + + rins = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, + sizeof(struct rmap_l3vpn_nexthop_encapsulation_set)); + + /* XXX ALL GRE modes are accepted for now: gre or ip6gre */ + rins->protocol = IPPROTO_GRE; + + return rins; +} + +/* Free route map's compiled `ip nexthop' value. */ +static void route_set_l3vpn_nexthop_encapsulation_free(void *rule) +{ + XFREE(MTYPE_ROUTE_MAP_COMPILED, rule); +} + +/* Route map commands for l3vpn next-hop encapsulation set. */ +static const struct route_map_rule_cmd + route_set_l3vpn_nexthop_encapsulation_cmd = { + "l3vpn next-hop encapsulation", + route_set_l3vpn_nexthop_encapsulation, + route_set_l3vpn_nexthop_encapsulation_compile, + route_set_l3vpn_nexthop_encapsulation_free}; + /* `set local-preference LOCAL_PREF' */ /* Set local preference. 
*/ @@ -2219,6 +2270,8 @@ route_set_aspath_replace(void *rule, const struct prefix *dummy, void *object) aspath_new, replace_asn, own_asn); } + aspath_free(aspath_new); + return RMAP_OKAY; } @@ -4092,8 +4145,6 @@ static void bgp_route_map_process_update_cb(char *rmap_name) void bgp_route_map_update_timer(struct thread *thread) { - bm->t_rmap_update = NULL; - route_map_walk_update_list(bgp_route_map_process_update_cb); } @@ -5290,6 +5341,34 @@ DEFUN_YANG (no_set_distance, return nb_cli_apply_changes(vty, NULL); } +DEFPY_YANG(set_l3vpn_nexthop_encapsulation, set_l3vpn_nexthop_encapsulation_cmd, + "[no] set l3vpn next-hop encapsulation gre", + NO_STR SET_STR + "L3VPN operations\n" + "Next hop Information\n" + "Encapsulation options (for BGP only)\n" + "Accept L3VPN traffic over GRE encapsulation\n") +{ + const char *xpath = + "./set-action[action='frr-bgp-route-map:set-l3vpn-nexthop-encapsulation']"; + const char *xpath_value = + "./set-action[action='frr-bgp-route-map:set-l3vpn-nexthop-encapsulation']/rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation"; + enum nb_operation operation; + + if (no) + operation = NB_OP_DESTROY; + else + operation = NB_OP_CREATE; + + nb_cli_enqueue_change(vty, xpath, operation, NULL); + if (operation == NB_OP_DESTROY) + return nb_cli_apply_changes(vty, NULL); + + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, "gre"); + + return nb_cli_apply_changes(vty, NULL); +} + DEFUN_YANG (set_local_pref, set_local_pref_cmd, "set local-preference WORD", @@ -6835,6 +6914,7 @@ void bgp_route_map_init(void) route_map_install_set(&route_set_ecommunity_none_cmd); route_map_install_set(&route_set_tag_cmd); route_map_install_set(&route_set_label_index_cmd); + route_map_install_set(&route_set_l3vpn_nexthop_encapsulation_cmd); install_element(RMAP_NODE, &match_peer_cmd); install_element(RMAP_NODE, &match_peer_local_cmd); @@ -6937,6 +7017,7 @@ void bgp_route_map_init(void) install_element(RMAP_NODE, &no_set_ipx_vpn_nexthop_cmd); 
install_element(RMAP_NODE, &set_originator_id_cmd); install_element(RMAP_NODE, &no_set_originator_id_cmd); + install_element(RMAP_NODE, &set_l3vpn_nexthop_encapsulation_cmd); route_map_install_match(&route_match_ipv6_address_cmd); route_map_install_match(&route_match_ipv6_next_hop_cmd); diff --git a/bgpd/bgp_routemap_nb.c b/bgpd/bgp_routemap_nb.c index 585596e1aa..2117334f7f 100644 --- a/bgpd/bgp_routemap_nb.c +++ b/bgpd/bgp_routemap_nb.c @@ -407,6 +407,13 @@ const struct frr_yang_module_info frr_bgp_route_map_info = { } }, { + .xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation", + .cbs = { + .modify = lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify, + .destroy = lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy, + } + }, + { .xpath = NULL, }, } diff --git a/bgpd/bgp_routemap_nb.h b/bgpd/bgp_routemap_nb.h index a01adf7d5d..cd7a70dbcf 100644 --- a/bgpd/bgp_routemap_nb.h +++ b/bgpd/bgp_routemap_nb.h @@ -150,6 +150,10 @@ int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_modify( struct nb_cb_modify_args *args); int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy( struct nb_cb_destroy_args *args); +int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify( + struct nb_cb_modify_args *args); +int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy( + struct nb_cb_destroy_args *args); #ifdef __cplusplus } diff --git a/bgpd/bgp_routemap_nb_config.c b/bgpd/bgp_routemap_nb_config.c index b87877b1e0..585c2a3ff0 100644 --- a/bgpd/bgp_routemap_nb_config.c +++ b/bgpd/bgp_routemap_nb_config.c @@ -2922,3 +2922,56 @@ int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv6_destroy( return NB_OK; } + +/* + * XPath: + * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/l3vpn-nexthop-encapsulation + */ +int 
lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_modify( + struct nb_cb_modify_args *args) +{ + struct routemap_hook_context *rhc; + const char *type; + int rv; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + /* Add configuration. */ + rhc = nb_running_get_entry(args->dnode, NULL, true); + type = yang_dnode_get_string(args->dnode, NULL); + + /* Set destroy information. */ + rhc->rhc_shook = generic_set_delete; + rhc->rhc_rule = "l3vpn next-hop encapsulation"; + rhc->rhc_event = RMAP_EVENT_SET_DELETED; + + rv = generic_set_add(rhc->rhc_rmi, + "l3vpn next-hop encapsulation", type, + args->errmsg, args->errmsg_len); + if (rv != CMD_SUCCESS) { + rhc->rhc_shook = NULL; + return NB_ERR_INCONSISTENCY; + } + } + + return NB_OK; +} + +int lib_route_map_entry_set_action_rmap_set_action_l3vpn_nexthop_encapsulation_destroy( + struct nb_cb_destroy_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + return lib_route_map_entry_set_destroy(args); + } + + return NB_OK; +} diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c index de1c559641..b90c09c68b 100644 --- a/bgpd/bgp_rpki.c +++ b/bgpd/bgp_rpki.c @@ -428,14 +428,15 @@ static void bgpd_sync_callback(struct thread *thread) safi_t safi; for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) { - if (!bgp->rib[afi][safi]) + struct bgp_table *table = bgp->rib[afi][safi]; + + if (!table) continue; struct bgp_dest *match; struct bgp_dest *node; - match = bgp_table_subtree_lookup(bgp->rib[afi][safi], - prefix); + match = bgp_table_subtree_lookup(table, prefix); node = match; while (node) { @@ -445,6 +446,9 @@ static void bgpd_sync_callback(struct thread *thread) node = bgp_route_next_until(node, match); } + + if (match) + bgp_dest_unlock_node(match); } } diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h index d832383ab4..86cd4f3da1 100644 --- a/bgpd/bgp_table.h 
+++ b/bgpd/bgp_table.h @@ -256,28 +256,6 @@ static inline struct bgp_dest *bgp_node_match(const struct bgp_table *table, return bgp_dest_from_rnode(rn); } -/* - * bgp_node_match_ipv4 - */ -static inline struct bgp_dest * -bgp_node_match_ipv4(const struct bgp_table *table, struct in_addr *addr) -{ - struct route_node *rn = route_node_match_ipv4(table->route_table, addr); - - return bgp_dest_from_rnode(rn); -} - -/* - * bgp_node_match_ipv6 - */ -static inline struct bgp_dest * -bgp_node_match_ipv6(const struct bgp_table *table, struct in6_addr *addr) -{ - struct route_node *rn = route_node_match_ipv6(table->route_table, addr); - - return bgp_dest_from_rnode(rn); -} - static inline unsigned long bgp_table_count(const struct bgp_table *const table) { return route_table_count(table->route_table); @@ -292,59 +270,6 @@ static inline struct bgp_dest *bgp_table_get_next(const struct bgp_table *table, return bgp_dest_from_rnode(route_table_get_next(table->route_table, p)); } -/* - * bgp_table_iter_init - */ -static inline void bgp_table_iter_init(bgp_table_iter_t *iter, - struct bgp_table *table) -{ - bgp_table_lock(table); - iter->table = table; - route_table_iter_init(&iter->rt_iter, table->route_table); -} - -/* - * bgp_table_iter_next - */ -static inline struct bgp_dest *bgp_table_iter_next(bgp_table_iter_t *iter) -{ - return bgp_dest_from_rnode(route_table_iter_next(&iter->rt_iter)); -} - -/* - * bgp_table_iter_cleanup - */ -static inline void bgp_table_iter_cleanup(bgp_table_iter_t *iter) -{ - route_table_iter_cleanup(&iter->rt_iter); - bgp_table_unlock(iter->table); - iter->table = NULL; -} - -/* - * bgp_table_iter_pause - */ -static inline void bgp_table_iter_pause(bgp_table_iter_t *iter) -{ - route_table_iter_pause(&iter->rt_iter); -} - -/* - * bgp_table_iter_is_done - */ -static inline int bgp_table_iter_is_done(bgp_table_iter_t *iter) -{ - return route_table_iter_is_done(&iter->rt_iter); -} - -/* - * bgp_table_iter_started - */ -static inline int 
bgp_table_iter_started(bgp_table_iter_t *iter) -{ - return route_table_iter_started(&iter->rt_iter); -} - /* This would benefit from a real atomic operation... * until then. */ static inline uint64_t bgp_table_next_version(struct bgp_table *table) diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index 3a974910fa..0219535b0d 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -164,6 +164,12 @@ static void conf_copy(struct peer *dst, struct peer *src, afi_t afi, dst->change_local_as = src->change_local_as; dst->shared_network = src->shared_network; dst->local_role = src->local_role; + + if (src->soo[afi][safi]) { + ecommunity_free(&dst->soo[afi][safi]); + dst->soo[afi][safi] = ecommunity_dup(src->soo[afi][safi]); + } + memcpy(&(dst->nexthop), &(src->nexthop), sizeof(struct bgp_nexthop)); dst->group = src->group; @@ -248,6 +254,8 @@ static void conf_release(struct peer *src, afi_t afi, safi_t safi) XFREE(MTYPE_BGP_FILTER_NAME, srcfilter->advmap.cname); XFREE(MTYPE_BGP_PEER_HOST, src->host); + + ecommunity_free(&src->soo[afi][safi]); } static void peer2_updgrp_copy(struct update_group *updgrp, struct peer_af *paf) @@ -428,17 +436,24 @@ static unsigned int updgrp_hash_key_make(const void *p) */ key = jhash_1word(peer->local_role, key); + if (peer->soo[afi][safi]) { + char *soo_str = ecommunity_str(peer->soo[afi][safi]); + + key = jhash_1word(jhash(soo_str, strlen(soo_str), SEED1), key); + } + if (bgp_debug_neighbor_events(peer)) { zlog_debug( - "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %u", + "%pBP Update Group Hash: sort: %d UpdGrpFlags: %ju UpdGrpAFFlags: %ju", peer, peer->sort, - (intmax_t)(peer->flags & PEER_UPDGRP_FLAGS), - flags & PEER_UPDGRP_AF_FLAGS); + (intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS), + (intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS)); zlog_debug( "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u", peer, (uint32_t)peer->addpath_type[afi][safi], - 
peer->cap & PEER_UPDGRP_CAP_FLAGS, - peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS, + CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS), + CHECK_FLAG(peer->af_cap[afi][safi], + PEER_UPDGRP_AF_CAP_FLAGS), peer->v_routeadv, peer->change_local_as); zlog_debug( "%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s", @@ -472,14 +487,14 @@ static unsigned int updgrp_hash_key_make(const void *p) peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)); zlog_debug( - "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %u", + "%pBP Update Group Hash: Lonesoul: %d ORF prefix: %u ORF old: %u max prefix out: %ju", peer, !!CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL), CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_RCV), CHECK_FLAG(peer->af_cap[afi][safi], PEER_CAP_ORF_PREFIX_SM_OLD_RCV), - CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_MAX_PREFIX_OUT)); + (intmax_t)CHECK_FLAG(peer->af_flags[afi][safi], + PEER_FLAG_MAX_PREFIX_OUT)); zlog_debug("%pBP Update Group Hash key: %u", peer, key); } return key; diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index cfa6614566..d84d0919f7 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -53,6 +53,7 @@ #include "bgpd/bgp_debug.h" #include "bgpd/bgp_errors.h" #include "bgpd/bgp_fsm.h" +#include "bgpd/bgp_nht.h" #include "bgpd/bgp_nexthop.h" #include "bgpd/bgp_network.h" #include "bgpd/bgp_open.h" @@ -306,12 +307,16 @@ static int bgp_srv6_locator_unset(struct bgp *bgp) return -1; /* refresh chunks */ - for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) + for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) { listnode_delete(bgp->srv6_locator_chunks, chunk); + srv6_locator_chunk_free(chunk); + } /* refresh functions */ - for (ALL_LIST_ELEMENTS(bgp->srv6_functions, node, nnode, func)) + for (ALL_LIST_ELEMENTS(bgp->srv6_functions, node, nnode, func)) { listnode_delete(bgp->srv6_functions, func); + XFREE(MTYPE_BGP_SRV6_FUNCTION, 
func); + } /* refresh tovpn_sid */ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { @@ -334,6 +339,20 @@ static int bgp_srv6_locator_unset(struct bgp *bgp) /* update vpn bgp processes */ vpn_leak_postchange_all(); + /* refresh tovpn_sid_locator */ + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + if (bgp_vrf->inst_type != BGP_INSTANCE_TYPE_VRF) + continue; + + /* refresh vpnv4 tovpn_sid_locator */ + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + + /* refresh vpnv6 tovpn_sid_locator */ + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + } + /* clear locator name */ memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name)); @@ -8228,6 +8247,63 @@ ALIAS_HIDDEN( "Only give warning message when limit is exceeded\n" "Force checking all received routes not only accepted\n") +/* "neighbor soo" */ +DEFPY (neighbor_soo, + neighbor_soo_cmd, + "neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor soo ASN:NN_OR_IP-ADDRESS:NN$soo", + NEIGHBOR_STR + NEIGHBOR_ADDR_STR2 + "Set the Site-of-Origin (SoO) extended community\n" + "VPN extended community\n") +{ + struct peer *peer; + afi_t afi = bgp_node_afi(vty); + safi_t safi = bgp_node_safi(vty); + struct ecommunity *ecomm_soo; + + peer = peer_and_group_lookup_vty(vty, neighbor); + if (!peer) + return CMD_WARNING_CONFIG_FAILED; + + ecomm_soo = ecommunity_str2com(soo, ECOMMUNITY_SITE_ORIGIN, 0); + if (!ecomm_soo) { + vty_out(vty, "%% Malformed SoO extended community\n"); + return CMD_WARNING; + } + ecommunity_str(ecomm_soo); + + if (!ecommunity_match(peer->soo[afi][safi], ecomm_soo)) { + ecommunity_free(&peer->soo[afi][safi]); + peer->soo[afi][safi] = ecomm_soo; + peer_af_flag_unset(peer, afi, safi, PEER_FLAG_SOO); + } + + return bgp_vty_return(vty, + peer_af_flag_set(peer, afi, safi, PEER_FLAG_SOO)); +} + +DEFPY (no_neighbor_soo, + no_neighbor_soo_cmd, + "no neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor soo [ASN:NN_OR_IP-ADDRESS:NN$soo]", + NO_STR + NEIGHBOR_STR + 
NEIGHBOR_ADDR_STR2 + "Set the Site-of-Origin (SoO) extended community\n" + "VPN extended community\n") +{ + struct peer *peer; + afi_t afi = bgp_node_afi(vty); + safi_t safi = bgp_node_safi(vty); + + peer = peer_and_group_lookup_vty(vty, neighbor); + if (!peer) + return CMD_WARNING_CONFIG_FAILED; + + ecommunity_free(&peer->soo[afi][safi]); + + return bgp_vty_return( + vty, peer_af_flag_unset(peer, afi, safi, PEER_FLAG_SOO)); +} /* "neighbor allowas-in" */ DEFUN (neighbor_allowas_in, @@ -16432,7 +16508,7 @@ static bool peergroup_flag_check(struct peer *peer, uint64_t flag) } static bool peergroup_af_flag_check(struct peer *peer, afi_t afi, safi_t safi, - uint32_t flag) + uint64_t flag) { if (!peer_group_active(peer)) { if (CHECK_FLAG(peer->af_flags_invert[afi][safi], flag)) @@ -17219,6 +17295,15 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp, } } + /* soo */ + if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_SOO)) { + char *soo_str = ecommunity_ecom2str( + peer->soo[afi][safi], ECOMMUNITY_FORMAT_ROUTE_MAP, 0); + + vty_out(vty, " neighbor %s soo %s\n", addr, soo_str); + XFREE(MTYPE_ECOMMUNITY_STR, soo_str); + } + /* weight */ if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_WEIGHT)) vty_out(vty, " neighbor %s weight %lu\n", addr, @@ -18040,6 +18125,84 @@ static void bgp_config_end(void) bgp_post_config_delay, &t_bgp_cfg); } +static int config_write_interface_one(struct vty *vty, struct vrf *vrf) +{ + int write = 0; + struct interface *ifp; + struct bgp_interface *iifp; + + FOR_ALL_INTERFACES (vrf, ifp) { + iifp = ifp->info; + if (!iifp) + continue; + + if_vty_config_start(vty, ifp); + + if (CHECK_FLAG(iifp->flags, + BGP_INTERFACE_MPLS_BGP_FORWARDING)) { + vty_out(vty, " mpls bgp forwarding\n"); + write++; + } + + if_vty_config_end(vty); + } + + return write; +} + +/* Configuration write function for bgpd. 
*/ +static int config_write_interface(struct vty *vty) +{ + int write = 0; + struct vrf *vrf = NULL; + + /* Display all VRF aware OSPF interface configuration */ + RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { + write += config_write_interface_one(vty, vrf); + } + + return write; +} + +DEFPY(mpls_bgp_forwarding, mpls_bgp_forwarding_cmd, + "[no$no] mpls bgp forwarding", + NO_STR MPLS_STR BGP_STR + "Enable MPLS forwarding for eBGP directly connected peers\n") +{ + bool check; + struct bgp_interface *iifp; + + VTY_DECLVAR_CONTEXT(interface, ifp); + iifp = ifp->info; + if (!iifp) { + vty_out(vty, "Interface %s not available\n", ifp->name); + return CMD_WARNING_CONFIG_FAILED; + } + check = CHECK_FLAG(iifp->flags, BGP_INTERFACE_MPLS_BGP_FORWARDING); + if (check != !no) { + if (no) + UNSET_FLAG(iifp->flags, + BGP_INTERFACE_MPLS_BGP_FORWARDING); + else + SET_FLAG(iifp->flags, + BGP_INTERFACE_MPLS_BGP_FORWARDING); + /* trigger a nht update on eBGP sessions */ + if (if_is_operative(ifp)) + bgp_nht_ifp_up(ifp); + } + return CMD_SUCCESS; +} + +/* Initialization of BGP interface. */ +static void bgp_vty_if_init(void) +{ + /* Install interface node. */ + if_cmd_init(config_write_interface); + + /* "mpls bgp forwarding" commands. 
*/ + install_element(INTERFACE_NODE, &mpls_bgp_forwarding_cmd); +} + void bgp_vty_init(void) { cmd_variable_handler_register(bgp_var_neighbor); @@ -19303,6 +19466,26 @@ void bgp_vty_init(void) install_element(BGP_EVPN_NODE, &neighbor_allowas_in_cmd); install_element(BGP_EVPN_NODE, &no_neighbor_allowas_in_cmd); + /* "neighbor soo" */ + install_element(BGP_IPV4_NODE, &neighbor_soo_cmd); + install_element(BGP_IPV4_NODE, &no_neighbor_soo_cmd); + install_element(BGP_IPV4M_NODE, &neighbor_soo_cmd); + install_element(BGP_IPV4M_NODE, &no_neighbor_soo_cmd); + install_element(BGP_IPV4L_NODE, &neighbor_soo_cmd); + install_element(BGP_IPV4L_NODE, &no_neighbor_soo_cmd); + install_element(BGP_IPV6_NODE, &neighbor_soo_cmd); + install_element(BGP_IPV6_NODE, &no_neighbor_soo_cmd); + install_element(BGP_IPV6M_NODE, &neighbor_soo_cmd); + install_element(BGP_IPV6M_NODE, &no_neighbor_soo_cmd); + install_element(BGP_IPV6L_NODE, &neighbor_soo_cmd); + install_element(BGP_IPV6L_NODE, &no_neighbor_soo_cmd); + install_element(BGP_VPNV4_NODE, &neighbor_soo_cmd); + install_element(BGP_VPNV4_NODE, &no_neighbor_soo_cmd); + install_element(BGP_VPNV6_NODE, &neighbor_soo_cmd); + install_element(BGP_VPNV6_NODE, &no_neighbor_soo_cmd); + install_element(BGP_EVPN_NODE, &neighbor_soo_cmd); + install_element(BGP_EVPN_NODE, &no_neighbor_soo_cmd); + /* address-family commands. 
*/ install_element(BGP_NODE, &address_family_ipv4_safi_cmd); install_element(BGP_NODE, &address_family_ipv6_safi_cmd); @@ -19477,6 +19660,8 @@ void bgp_vty_init(void) install_element(BGP_SRV6_NODE, &no_bgp_srv6_locator_cmd); install_element(BGP_IPV4_NODE, &af_sid_vpn_export_cmd); install_element(BGP_IPV6_NODE, &af_sid_vpn_export_cmd); + + bgp_vty_if_init(); } #include "memory.h" diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 9c9b88e125..7dfb5046dd 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -75,6 +75,8 @@ struct zclient *zclient = NULL; DEFINE_HOOK(bgp_vrf_status_changed, (struct bgp *bgp, struct interface *ifp), (bgp, ifp)); +DEFINE_MTYPE_STATIC(BGPD, BGP_IF_INFO, "BGP interface context"); + /* Can we install into zebra? */ static inline bool bgp_install_info_to_zebra(struct bgp *bgp) { @@ -456,7 +458,7 @@ static int bgp_interface_vrf_update(ZAPI_CALLBACK_ARGS) if (!ifp) return 0; - if (BGP_DEBUG(zebra, ZEBRA) && ifp) + if (BGP_DEBUG(zebra, ZEBRA)) zlog_debug("Rx Intf VRF change VRF %u IF %s NewVRF %u", vrf_id, ifp->name, new_vrf_id); @@ -3209,7 +3211,7 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) struct srv6_locator_chunk *chunk; struct bgp_srv6_function *func; struct bgp *bgp_vrf; - struct in6_addr *tovpn_sid; + struct in6_addr *tovpn_sid, *tovpn_sid_locator; struct prefix_ipv6 tmp_prefi; if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) @@ -3218,8 +3220,10 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) // refresh chunks for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&chunk->prefix)) + (struct prefix *)&chunk->prefix)) { listnode_delete(bgp->srv6_locator_chunks, chunk); + srv6_locator_chunk_free(chunk); + } // refresh functions for (ALL_LIST_ELEMENTS(bgp->srv6_functions, node, nnode, func)) { @@ -3227,8 +3231,10 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) 
tmp_prefi.prefixlen = 128; tmp_prefi.prefix = func->sid; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) + (struct prefix *)&tmp_prefi)) { listnode_delete(bgp->srv6_functions, func); + XFREE(MTYPE_BGP_SRV6_FUNCTION, func); + } } // refresh tovpn_sid @@ -3262,6 +3268,37 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) } vpn_leak_postchange_all(); + + /* refresh tovpn_sid_locator */ + for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp_vrf)) { + if (bgp_vrf->inst_type != BGP_INSTANCE_TYPE_VRF) + continue; + + /* refresh vpnv4 tovpn_sid_locator */ + tovpn_sid_locator = + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator; + if (tovpn_sid_locator) { + tmp_prefi.family = AF_INET6; + tmp_prefi.prefixlen = IPV6_MAX_BITLEN; + tmp_prefi.prefix = *tovpn_sid_locator; + if (prefix_match((struct prefix *)&loc.prefix, + (struct prefix *)&tmp_prefi)) + XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid_locator); + } + + /* refresh vpnv6 tovpn_sid_locator */ + tovpn_sid_locator = + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator; + if (tovpn_sid_locator) { + tmp_prefi.family = AF_INET6; + tmp_prefi.prefixlen = IPV6_MAX_BITLEN; + tmp_prefi.prefix = *tovpn_sid_locator; + if (prefix_match((struct prefix *)&loc.prefix, + (struct prefix *)&tmp_prefi)) + XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid_locator); + } + } + return 0; } @@ -3300,6 +3337,31 @@ static zclient_handler *const bgp_handlers[] = { bgp_zebra_process_srv6_locator_chunk, }; +static int bgp_if_new_hook(struct interface *ifp) +{ + struct bgp_interface *iifp; + + if (ifp->info) + return 0; + iifp = XCALLOC(MTYPE_BGP_IF_INFO, sizeof(struct bgp_interface)); + ifp->info = iifp; + + return 0; +} + +static int bgp_if_delete_hook(struct interface *ifp) +{ + XFREE(MTYPE_BGP_IF_INFO, ifp->info); + return 0; +} + +void bgp_if_init(void) +{ + /* Initialize Zebra interface data structure. 
*/ + hook_register_prio(if_add, 0, bgp_if_new_hook); + hook_register_prio(if_del, 0, bgp_if_delete_hook); +} + void bgp_zebra_init(struct thread_master *master, unsigned short instance) { zclient_num_connects = 0; diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index 17f46e49cc..0a41069411 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -35,6 +35,7 @@ extern void bgp_zebra_init(struct thread_master *master, unsigned short instance); +extern void bgp_if_init(void); extern void bgp_zebra_init_tm_connect(struct bgp *bgp); extern uint32_t bgp_zebra_tm_get_id(void); extern bool bgp_zebra_tm_chunk_obtained(void); diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 036bbbd6b6..4c151b2d37 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -969,7 +969,7 @@ int peer_af_flag_check(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag) } void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi, - uint32_t flag) + uint64_t flag) { bool group_val; @@ -1366,6 +1366,7 @@ struct peer *peer_new(struct bgp *bgp) SET_FLAG(peer->af_flags_invert[afi][safi], PEER_FLAG_SEND_LARGE_COMMUNITY); peer->addpath_type[afi][safi] = BGP_ADDPATH_NONE; + peer->soo[afi][safi] = NULL; } /* set nexthop-unchanged for l2vpn evpn by default */ @@ -1996,8 +1997,8 @@ static void peer_group2peer_config_copy_af(struct peer_group *group, { int in = FILTER_IN; int out = FILTER_OUT; - uint32_t flags_tmp; - uint32_t pflags_ovrd; + uint64_t flags_tmp; + uint64_t pflags_ovrd; uint8_t *pfilter_ovrd; struct peer *conf; @@ -2031,6 +2032,10 @@ static void peer_group2peer_config_copy_af(struct peer_group *group, if (!CHECK_FLAG(pflags_ovrd, PEER_FLAG_ALLOWAS_IN)) PEER_ATTR_INHERIT(peer, group, allowas_in[afi][safi]); + /* soo */ + if (!CHECK_FLAG(pflags_ovrd, PEER_FLAG_SOO)) + PEER_ATTR_INHERIT(peer, group, soo[afi][safi]); + /* weight */ if (!CHECK_FLAG(pflags_ovrd, PEER_FLAG_WEIGHT)) PEER_ATTR_INHERIT(peer, group, weight[afi][safi]); @@ -2537,6 +2542,7 @@ int peer_delete(struct peer *peer) 
XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name); XFREE(MTYPE_ROUTE_MAP_NAME, peer->default_rmap[afi][safi].name); + ecommunity_free(&peer->soo[afi][safi]); } FOREACH_AFI_SAFI (afi, safi) @@ -4264,6 +4270,7 @@ static const struct peer_flag_action peer_af_flag_action_list[] = { {PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out}, {PEER_FLAG_WEIGHT, 0, peer_change_reset_in}, {PEER_FLAG_DISABLE_ADDPATH_RX, 0, peer_change_reset}, + {PEER_FLAG_SOO, 0, peer_change_reset}, {0, 0, 0}}; /* Proper action set. */ @@ -4567,7 +4574,7 @@ int peer_flag_unset(struct peer *peer, uint64_t flag) } static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi, - uint32_t flag, bool set) + uint64_t flag, bool set) { int found; int size; @@ -4750,12 +4757,12 @@ static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi, return 0; } -int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag) +int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, uint64_t flag) { return peer_af_flag_modify(peer, afi, safi, flag, 1); } -int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint32_t flag) +int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, uint64_t flag) { return peer_af_flag_modify(peer, afi, safi, flag, 0); } diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index dc7ad32a50..28883c9e7c 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -778,6 +778,11 @@ struct bgp { }; DECLARE_QOBJ_TYPE(bgp); +struct bgp_interface { +#define BGP_INTERFACE_MPLS_BGP_FORWARDING (1 << 0) + uint32_t flags; +}; + DECLARE_HOOK(bgp_inst_delete, (struct bgp *bgp), (bgp)); DECLARE_HOOK(bgp_inst_config_write, (struct bgp *bgp, struct vty *vty), @@ -1380,37 +1385,38 @@ struct peer { * specific attributes are being treated the exact same way as global * peer attributes. 
*/ - uint32_t af_flags_override[AFI_MAX][SAFI_MAX]; - uint32_t af_flags_invert[AFI_MAX][SAFI_MAX]; - uint32_t af_flags[AFI_MAX][SAFI_MAX]; -#define PEER_FLAG_SEND_COMMUNITY (1U << 0) /* send-community */ -#define PEER_FLAG_SEND_EXT_COMMUNITY (1U << 1) /* send-community ext. */ -#define PEER_FLAG_NEXTHOP_SELF (1U << 2) /* next-hop-self */ -#define PEER_FLAG_REFLECTOR_CLIENT (1U << 3) /* reflector-client */ -#define PEER_FLAG_RSERVER_CLIENT (1U << 4) /* route-server-client */ -#define PEER_FLAG_SOFT_RECONFIG (1U << 5) /* soft-reconfiguration */ -#define PEER_FLAG_AS_PATH_UNCHANGED (1U << 6) /* transparent-as */ -#define PEER_FLAG_NEXTHOP_UNCHANGED (1U << 7) /* transparent-next-hop */ -#define PEER_FLAG_MED_UNCHANGED (1U << 8) /* transparent-next-hop */ -#define PEER_FLAG_DEFAULT_ORIGINATE (1U << 9) /* default-originate */ -#define PEER_FLAG_REMOVE_PRIVATE_AS (1U << 10) /* remove-private-as */ -#define PEER_FLAG_ALLOWAS_IN (1U << 11) /* set allowas-in */ -#define PEER_FLAG_ORF_PREFIX_SM (1U << 12) /* orf capability send-mode */ -#define PEER_FLAG_ORF_PREFIX_RM (1U << 13) /* orf capability receive-mode */ -#define PEER_FLAG_MAX_PREFIX (1U << 14) /* maximum prefix */ -#define PEER_FLAG_MAX_PREFIX_WARNING (1U << 15) /* maximum prefix warning-only */ -#define PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED (1U << 16) /* leave link-local nexthop unchanged */ -#define PEER_FLAG_FORCE_NEXTHOP_SELF (1U << 17) /* next-hop-self force */ -#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL (1U << 18) /* remove-private-as all */ -#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1U << 19) /* remove-private-as replace-as */ -#define PEER_FLAG_AS_OVERRIDE (1U << 20) /* as-override */ -#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1U << 21) /* remove-private-as all replace-as */ -#define PEER_FLAG_WEIGHT (1U << 24) /* weight */ -#define PEER_FLAG_ALLOWAS_IN_ORIGIN (1U << 25) /* allowas-in origin */ -#define PEER_FLAG_SEND_LARGE_COMMUNITY (1U << 26) /* Send large Communities */ -#define PEER_FLAG_MAX_PREFIX_OUT 
(1U << 27) /* outgoing maximum prefix */ -#define PEER_FLAG_MAX_PREFIX_FORCE (1U << 28) /* maximum-prefix <num> force */ -#define PEER_FLAG_DISABLE_ADDPATH_RX (1U << 29) /* disable-addpath-rx */ + uint64_t af_flags_override[AFI_MAX][SAFI_MAX]; + uint64_t af_flags_invert[AFI_MAX][SAFI_MAX]; + uint64_t af_flags[AFI_MAX][SAFI_MAX]; +#define PEER_FLAG_SEND_COMMUNITY (1ULL << 0) +#define PEER_FLAG_SEND_EXT_COMMUNITY (1ULL << 1) +#define PEER_FLAG_NEXTHOP_SELF (1ULL << 2) +#define PEER_FLAG_REFLECTOR_CLIENT (1ULL << 3) +#define PEER_FLAG_RSERVER_CLIENT (1ULL << 4) +#define PEER_FLAG_SOFT_RECONFIG (1ULL << 5) +#define PEER_FLAG_AS_PATH_UNCHANGED (1ULL << 6) +#define PEER_FLAG_NEXTHOP_UNCHANGED (1ULL << 7) +#define PEER_FLAG_MED_UNCHANGED (1ULL << 8) +#define PEER_FLAG_DEFAULT_ORIGINATE (1ULL << 9) +#define PEER_FLAG_REMOVE_PRIVATE_AS (1ULL << 10) +#define PEER_FLAG_ALLOWAS_IN (1ULL << 11) +#define PEER_FLAG_ORF_PREFIX_SM (1ULL << 12) +#define PEER_FLAG_ORF_PREFIX_RM (1ULL << 13) +#define PEER_FLAG_MAX_PREFIX (1ULL << 14) +#define PEER_FLAG_MAX_PREFIX_WARNING (1ULL << 15) +#define PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED (1ULL << 16) +#define PEER_FLAG_FORCE_NEXTHOP_SELF (1ULL << 17) +#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL (1ULL << 18) +#define PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE (1ULL << 19) +#define PEER_FLAG_AS_OVERRIDE (1ULL << 20) +#define PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE (1ULL << 21) +#define PEER_FLAG_WEIGHT (1ULL << 24) +#define PEER_FLAG_ALLOWAS_IN_ORIGIN (1ULL << 25) +#define PEER_FLAG_SEND_LARGE_COMMUNITY (1ULL << 26) +#define PEER_FLAG_MAX_PREFIX_OUT (1ULL << 27) +#define PEER_FLAG_MAX_PREFIX_FORCE (1ULL << 28) +#define PEER_FLAG_DISABLE_ADDPATH_RX (1ULL << 29) +#define PEER_FLAG_SOO (1ULL << 30) enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX]; @@ -1620,6 +1626,9 @@ struct peer { /* allowas-in. 
*/ char allowas_in[AFI_MAX][SAFI_MAX]; + /* soo */ + struct ecommunity *soo[AFI_MAX][SAFI_MAX]; + /* weight */ unsigned long weight[AFI_MAX][SAFI_MAX]; @@ -2176,11 +2185,13 @@ extern int peer_flag_set(struct peer *peer, uint64_t flag); extern int peer_flag_unset(struct peer *peer, uint64_t flag); extern void peer_flag_inherit(struct peer *peer, uint64_t flag); -extern int peer_af_flag_set(struct peer *, afi_t, safi_t, uint32_t); -extern int peer_af_flag_unset(struct peer *, afi_t, safi_t, uint32_t); +extern int peer_af_flag_set(struct peer *peer, afi_t afi, safi_t safi, + uint64_t flag); +extern int peer_af_flag_unset(struct peer *peer, afi_t afi, safi_t safi, + uint64_t flag); extern int peer_af_flag_check(struct peer *, afi_t, safi_t, uint32_t); extern void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi, - uint32_t flag); + uint64_t flag); extern void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, enum peer_change_type type); diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c index 6fb509fd9c..be64153cef 100644 --- a/bgpd/rfapi/rfapi_import.c +++ b/bgpd/rfapi/rfapi_import.c @@ -3750,7 +3750,7 @@ void rfapiBgpInfoFilteredImportVPN( remote_peer_match = 1; } - if (!un_match & !remote_peer_match) + if (!un_match && !remote_peer_match) continue; vnc_zlog_debug_verbose( diff --git a/configure.ac b/configure.ac index b7e17d3565..8c1fab0eab 100644 --- a/configure.ac +++ b/configure.ac @@ -1372,7 +1372,7 @@ case "${enable_vtysh}" in AC_DEFINE([VTYSH], [1], [VTY shell]) prev_libs="$LIBS" - AC_CHECK_LIB([readline], [main], [ + AC_CHECK_LIB([readline], [readline], [ LIBREADLINE="-lreadline" ], [ dnl readline failed - it might be incorrectly linked and missing its diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index e6c4076300..bd5767beba 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -2720,6 +2720,25 @@ are reached using *core* MPLS labels which are distributed using LDP or BGP labeled unicast. 
*bgpd* also supports inter-VRF route leaking. +L3VPN over GRE interfaces +^^^^^^^^^^^^^^^^^^^^^^^^^ + +In MPLS-VPN or SRv6-VPN, an L3VPN next-hop entry requires that the path +chosen respectively contains a labelled path or a valid SID IPv6 address. +Otherwise the L3VPN entry will not be installed. It is possible to ignore +that check when the path chosen by the next-hop uses a GRE interface, and +there is a route-map configured at inbound side of ipv4-vpn or ipv6-vpn +address family with following syntax: + +.. clicmd:: set l3vpn next-hop encapsulation gre + +The incoming BGP L3VPN entry is accepted, provided that the next hop of the +L3VPN entry uses a path that takes the GRE tunnel as outgoing interface. The +remote endpoint should be configured just behind the GRE tunnel; remote +device configuration may vary depending whether it acts at edge endpoint or +not: in any case, the expectation is that incoming MPLS traffic received at +this endpoint should be considered as a valid path for L3VPN. + .. _bgp-vrf-route-leaking: VRF Route Leaking @@ -2835,6 +2854,26 @@ of the global VPNv4/VPNv6 family. This command defaults to on and is not displayed. The `no bgp retain route-target all` form of the command is displayed. +.. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> soo EXTCOMMUNITY + +Without this command, SoO extended community attribute is configured using +an inbound route map that sets the SoO value during the update process. +With the introduction of the new BGP per-neighbor Site-of-Origin (SoO) feature, +two new commands configured in sub-modes under router configuration mode +simplify the SoO value configuration. + +If we configure SoO per neighbor at PEs, the SoO community is automatically +added for all routes from the CPEs. Routes are validated and prevented from +being sent back to the same CPE (e.g.: multi-site). This is especially needed +when using ``as-override`` or ``allowas-in`` to prevent routing loops. + +.. 
clicmd:: mpls bgp forwarding + +It is possible to permit BGP install VPN prefixes without transport labels, +by issuing the following command under the interface configuration context. +This configuration will install VPN prefixes originated from an e-bgp session, +and with the next-hop directly connected. + .. _bgp-l3vpn-srv6: L3VPN SRv6 diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst index 74891c88b9..843734e217 100644 --- a/doc/user/pimv6.rst +++ b/doc/user/pimv6.rst @@ -195,7 +195,7 @@ is in a vrf, enter the interface command with the vrf keyword at the end. Set the MLD last member query count. The default value is 2. 'no' form of this command is used to configure back to the default value. -.. clicmd:: ipv6 MLD last-member-query-interval (1-65535) +.. clicmd:: ipv6 mld last-member-query-interval (1-65535) Set the MLD last member query interval in deciseconds. The default value is 10 deciseconds. 'no' form of this command is used to to configure back to the diff --git a/doc/user/routemap.rst b/doc/user/routemap.rst index 05c9eeb755..5e222576ca 100644 --- a/doc/user/routemap.rst +++ b/doc/user/routemap.rst @@ -339,6 +339,9 @@ Route Map Set Command Set the color of a SR-TE Policy to be applied to a learned route. The SR-TE Policy is uniquely determined by the color and the BGP nexthop. +.. clicmd:: set l3vpn next-hop encapsulation gre + + Accept L3VPN traffic over GRE encapsulation. .. _route-map-call-command: diff --git a/eigrpd/eigrp_network.c b/eigrpd/eigrp_network.c index 6b3a0afc12..13db38ce91 100644 --- a/eigrpd/eigrp_network.c +++ b/eigrpd/eigrp_network.c @@ -69,8 +69,8 @@ int eigrp_sock_init(struct vrf *vrf) AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP, vrf->vrf_id, vrf->vrf_id != VRF_DEFAULT ? 
vrf->name : NULL); if (eigrp_sock < 0) { - zlog_err("eigrp_read_sock_init: socket: %s", - safe_strerror(errno)); + zlog_err("%s: socket: %s", + __func__, safe_strerror(errno)); exit(1); } diff --git a/eigrpd/eigrp_packet.c b/eigrpd/eigrp_packet.c index dd5ba8a164..1ea6f9813b 100644 --- a/eigrpd/eigrp_packet.c +++ b/eigrpd/eigrp_packet.c @@ -727,8 +727,8 @@ static struct stream *eigrp_recv_packet(struct eigrp *eigrp, if ((unsigned int)ret < sizeof(*iph)) /* ret must be > 0 now */ { zlog_warn( - "eigrp_recv_packet: discarding runt packet of length %d (ip header size is %u)", - ret, (unsigned int)sizeof(*iph)); + "%s: discarding runt packet of length %d (ip header size is %u)", + __func__, ret, (unsigned int)sizeof(*iph)); return NULL; } @@ -772,8 +772,8 @@ static struct stream *eigrp_recv_packet(struct eigrp *eigrp, if (ret != ip_len) { zlog_warn( - "eigrp_recv_packet read length mismatch: ip_len is %d, but recvmsg returned %d", - ip_len, ret); + "%s read length mismatch: ip_len is %d, but recvmsg returned %d", + __func__, ip_len, ret); return NULL; } diff --git a/eigrpd/eigrpd.c b/eigrpd/eigrpd.c index 7bc7be9706..0ec9574813 100644 --- a/eigrpd/eigrpd.c +++ b/eigrpd/eigrpd.c @@ -163,7 +163,8 @@ static struct eigrp *eigrp_new(uint16_t as, vrf_id_t vrf_id) if (eigrp->fd < 0) { flog_err_sys( EC_LIB_SOCKET, - "eigrp_new: fatal error: eigrp_sock_init was unable to open a socket"); + "%s: fatal error: eigrp_sock_init was unable to open a socket", + __func__); exit(1); } diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c index 86cf10ae17..00763135e6 100644 --- a/isisd/isis_adjacency.c +++ b/isisd/isis_adjacency.c @@ -860,13 +860,13 @@ void isis_adj_build_neigh_list(struct list *adjdb, struct list *list) struct listnode *node; if (!list) { - zlog_warn("isis_adj_build_neigh_list(): NULL list"); + zlog_warn("%s: NULL list", __func__); return; } for (ALL_LIST_ELEMENTS_RO(adjdb, node, adj)) { if (!adj) { - zlog_warn("isis_adj_build_neigh_list(): NULL adj"); + 
zlog_warn("%s: NULL adj", __func__); return; } @@ -883,18 +883,18 @@ void isis_adj_build_up_list(struct list *adjdb, struct list *list) struct listnode *node; if (adjdb == NULL) { - zlog_warn("isis_adj_build_up_list(): adjacency DB is empty"); + zlog_warn("%s: adjacency DB is empty", __func__); return; } if (!list) { - zlog_warn("isis_adj_build_up_list(): NULL list"); + zlog_warn("%s: NULL list", __func__); return; } for (ALL_LIST_ELEMENTS_RO(adjdb, node, adj)) { if (!adj) { - zlog_warn("isis_adj_build_up_list(): NULL adj"); + zlog_warn("%s: NULL adj", __func__); return; } diff --git a/isisd/isis_bpf.c b/isisd/isis_bpf.c index 5f3d70ed3a..11f43279ec 100644 --- a/isisd/isis_bpf.c +++ b/isisd/isis_bpf.c @@ -165,7 +165,7 @@ static int open_bpf_dev(struct isis_circuit *circuit) bpf_prog.bf_len = 8; bpf_prog.bf_insns = &(llcfilter[0]); if (ioctl(fd, BIOCSETF, (caddr_t)&bpf_prog) < 0) { - zlog_warn("open_bpf_dev(): failed to install filter: %s", + zlog_warn("%s: failed to install filter: %s", __func__, safe_strerror(errno)); return ISIS_WARNING; } @@ -198,7 +198,7 @@ int isis_sock_init(struct isis_circuit *circuit) circuit->tx = isis_send_pdu_bcast; circuit->rx = isis_recv_pdu_bcast; } else { - zlog_warn("isis_sock_init(): unknown circuit type"); + zlog_warn("%s: unknown circuit type", __func__); retval = ISIS_WARNING; break; } @@ -223,8 +223,8 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa) bytesread = read(circuit->fd, readbuff, readblen); } if (bytesread < 0) { - zlog_warn("isis_recv_pdu_bcast(): read() failed: %s", - safe_strerror(errno)); + zlog_warn("%s: read() failed: %s", __func__, + safe_strerror(errno)); return ISIS_WARNING; } @@ -267,8 +267,9 @@ int isis_send_pdu_bcast(struct isis_circuit *circuit, int level) buflen = stream_get_endp(circuit->snd_stream) + LLC_LEN + ETHER_HDR_LEN; if (buflen > sizeof(sock_buff)) { zlog_warn( - "isis_send_pdu_bcast: sock_buff size %zu is less than output pdu size %zu on circuit %s", - sizeof(sock_buff), 
buflen, circuit->interface->name); + "%s: sock_buff size %zu is less than output pdu size %zu on circuit %s", + __func__, sizeof(sock_buff), buflen, + circuit->interface->name); return ISIS_WARNING; } diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index 9e97e48937..dcc4ed6e42 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -217,6 +217,11 @@ void isis_circuit_del(struct isis_circuit *circuit) list_delete(&circuit->ipv6_link); list_delete(&circuit->ipv6_non_link); + if (circuit->ext) { + isis_del_ext_subtlvs(circuit->ext); + circuit->ext = NULL; + } + XFREE(MTYPE_TMP, circuit->bfd_config.profile); XFREE(MTYPE_ISIS_CIRCUIT, circuit->tag); @@ -513,7 +518,7 @@ void isis_circuit_if_add(struct isis_circuit *circuit, struct interface *ifp) } else { /* It's normal in case of loopback etc. */ if (IS_DEBUG_EVENTS) - zlog_debug("isis_circuit_if_add: unsupported media"); + zlog_debug("%s: unsupported media", __func__); circuit->circ_type = CIRCUIT_T_UNKNOWN; } @@ -678,10 +683,10 @@ int isis_circuit_up(struct isis_circuit *circuit) } #ifdef EXTREME_DEGUG if (IS_DEBUG_EVENTS) - zlog_debug( - "isis_circuit_if_add: if_id %d, isomtu %d snpa %s", - circuit->interface->ifindex, ISO_MTU(circuit), - snpa_print(circuit->u.bc.snpa)); + zlog_debug("%s: if_id %d, isomtu %d snpa %s", __func__, + circuit->interface->ifindex, + ISO_MTU(circuit), + snpa_print(circuit->u.bc.snpa)); #endif /* EXTREME_DEBUG */ circuit->u.bc.adjdb[0] = list_new(); diff --git a/isisd/isis_dlpi.c b/isisd/isis_dlpi.c index bb8c542597..f6ea0aea60 100644 --- a/isisd/isis_dlpi.c +++ b/isisd/isis_dlpi.c @@ -336,9 +336,8 @@ static int open_dlpi_dev(struct isis_circuit *circuit) /* Double check the DLPI style */ if (dia->dl_provider_style != DL_STYLE2) { - zlog_warn( - "open_dlpi_dev(): interface %s: %s is not style 2", - circuit->interface->name, devpath); + zlog_warn("%s: interface %s: %s is not style 2", + __func__, circuit->interface->name, devpath); close(fd); return ISIS_WARNING; } @@ -355,9 
+354,8 @@ static int open_dlpi_dev(struct isis_circuit *circuit) } else { /* Double check the DLPI style */ if (dia->dl_provider_style != DL_STYLE1) { - zlog_warn( - "open_dlpi_dev(): interface %s: %s is not style 1", - circuit->interface->name, devpath); + zlog_warn("%s: interface %s: %s is not style 1", + __func__, circuit->interface->name, devpath); close(fd); return ISIS_WARNING; } @@ -404,9 +402,8 @@ static int open_dlpi_dev(struct isis_circuit *circuit) * so we need to be careful and use DL_PHYS_ADDR_REQ instead. */ if (dlpiaddr(fd, circuit->u.bc.snpa) == -1) { - zlog_warn( - "open_dlpi_dev(): interface %s: unable to get MAC address", - circuit->interface->name); + zlog_warn("%s: interface %s: unable to get MAC address", + __func__, circuit->interface->name); close(fd); return ISIS_WARNING; } @@ -479,7 +476,7 @@ int isis_sock_init(struct isis_circuit *circuit) circuit->tx = isis_send_pdu_bcast; circuit->rx = isis_recv_pdu_bcast; } else { - zlog_warn("isis_sock_init(): unknown circuit type"); + zlog_warn("%s: unknown circuit type", __func__); retval = ISIS_WARNING; break; } @@ -511,7 +508,7 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa) retv = getmsg(circuit->fd, &ctlbuf, &databuf, &flags); if (retv < 0) { - zlog_warn("isis_recv_pdu_bcast: getmsg failed: %s", + zlog_warn("%s: getmsg failed: %s", __func__, safe_strerror(errno)); return ISIS_WARNING; } @@ -561,8 +558,9 @@ int isis_send_pdu_bcast(struct isis_circuit *circuit, int level) buflen = stream_get_endp(circuit->snd_stream) + LLC_LEN; if ((size_t)buflen > sizeof(sock_buff)) { zlog_warn( - "isis_send_pdu_bcast: sock_buff size %zu is less than output pdu size %d on circuit %s", - sizeof(sock_buff), buflen, circuit->interface->name); + "%s: sock_buff size %zu is less than output pdu size %d on circuit %s", + __func__, sizeof(sock_buff), buflen, + circuit->interface->name); return ISIS_WARNING; } diff --git a/isisd/isis_dr.c b/isisd/isis_dr.c index b9bf49867d..400982f2fc 100644 --- 
a/isisd/isis_dr.c +++ b/isisd/isis_dr.c @@ -80,7 +80,8 @@ void isis_run_dr(struct thread *thread) } if (circuit->u.bc.run_dr_elect[level - 1]) - zlog_warn("isis_run_dr(): run_dr_elect already set for l%d", level); + zlog_warn("%s: run_dr_elect already set for l%d", __func__, + level); circuit->u.bc.t_run_dr[level - 1] = NULL; circuit->u.bc.run_dr_elect[level - 1] = 1; @@ -126,7 +127,7 @@ int isis_dr_elect(struct isis_circuit *circuit, int level) adjdb = circuit->u.bc.adjdb[level - 1]; if (!adjdb) { - zlog_warn("isis_dr_elect() adjdb == NULL"); + zlog_warn("%s adjdb == NULL", __func__); list_delete(&list); return ISIS_WARNING; } @@ -155,7 +156,8 @@ int isis_dr_elect(struct isis_circuit *circuit, int level) } if (cmp_res == 0) zlog_warn( - "isis_dr_elect(): multiple adjacencies with same SNPA"); + "%s: multiple adjacencies with same SNPA", + __func__); } else { adj_dr = adj; } @@ -218,7 +220,7 @@ int isis_dr_resign(struct isis_circuit *circuit, int level) uint8_t id[ISIS_SYS_ID_LEN + 2]; if (IS_DEBUG_EVENTS) - zlog_debug("isis_dr_resign l%d", level); + zlog_debug("%s l%d", __func__, level); circuit->u.bc.is_dr[level - 1] = 0; circuit->u.bc.run_dr_elect[level - 1] = 0; @@ -266,7 +268,7 @@ int isis_dr_commence(struct isis_circuit *circuit, int level) uint8_t old_dr[ISIS_SYS_ID_LEN + 2]; if (IS_DEBUG_EVENTS) - zlog_debug("isis_dr_commence l%d", level); + zlog_debug("%s l%d", __func__, level); /* Lets keep a pause in DR election */ circuit->u.bc.run_dr_elect[level - 1] = 0; diff --git a/isisd/isis_ldp_sync.c b/isisd/isis_ldp_sync.c index fb605eb07a..3568543b4a 100644 --- a/isisd/isis_ldp_sync.c +++ b/isisd/isis_ldp_sync.c @@ -82,7 +82,7 @@ int isis_ldp_sync_state_update(struct ldp_igp_sync_if_state state) return 0; /* received ldp-sync interface state from LDP */ - ils_debug("ldp_sync: rcvd %s from LDP if %s", + ils_debug("%s: rcvd %s from LDP if %s", __func__, state.sync_start ? 
"sync-start" : "sync-complete", ifp->name); if (state.sync_start) isis_ldp_sync_if_start(circuit, false); @@ -106,7 +106,7 @@ int isis_ldp_sync_announce_update(struct ldp_igp_sync_announce announce) if (announce.proto != ZEBRA_ROUTE_LDP) return 0; - ils_debug("ldp_sync: rcvd announce from LDP"); + ils_debug("%s: rcvd announce from LDP", __func__); /* LDP just started up: * set cost to LSInfinity @@ -128,8 +128,7 @@ void isis_ldp_sync_state_req_msg(struct isis_circuit *circuit) struct ldp_igp_sync_if_state_req request; struct interface *ifp = circuit->interface; - ils_debug("ldp_sync: send state request to LDP for %s", - ifp->name); + ils_debug("%s: send state request to LDP for %s", __func__, ifp->name); memset(&request, 0, sizeof(request)); strlcpy(request.name, ifp->name, sizeof(ifp->name)); @@ -159,7 +158,7 @@ void isis_ldp_sync_if_start(struct isis_circuit *circuit, if (ldp_sync_info && ldp_sync_info->enabled == LDP_IGP_SYNC_ENABLED && ldp_sync_info->state != LDP_IGP_SYNC_STATE_NOT_REQUIRED) { - ils_debug("ldp_sync: start on if %s state: %s", + ils_debug("%s: start on if %s state: %s", __func__, circuit->interface->name, "Holding down until Sync"); ldp_sync_info->state = LDP_IGP_SYNC_STATE_REQUIRED_NOT_UP; isis_ldp_sync_set_if_metric(circuit, true); @@ -246,7 +245,8 @@ static int isis_ldp_sync_adj_state_change(struct isis_adjacency *adj) else ldp_sync_info->state = LDP_IGP_SYNC_STATE_NOT_REQUIRED; - ils_debug("ldp_sync: down on if %s", circuit->interface->name); + ils_debug("%s: down on if %s", __func__, + circuit->interface->name); ldp_sync_if_down(circuit->ldp_sync_info); } @@ -362,8 +362,8 @@ static void isis_ldp_sync_holddown_timer(struct thread *thread) ldp_sync_info->state = LDP_IGP_SYNC_STATE_REQUIRED_UP; ldp_sync_info->t_holddown = NULL; - ils_debug("ldp_sync: holddown timer expired for %s state:sync achieved", - circuit->interface->name); + ils_debug("%s: holddown timer expired for %s state:sync achieved", + __func__, circuit->interface->name); 
isis_ldp_sync_set_if_metric(circuit, true); } @@ -383,7 +383,7 @@ void isis_ldp_sync_holddown_timer_add(struct isis_circuit *circuit) ldp_sync_info->holddown == LDP_IGP_SYNC_HOLDDOWN_DEFAULT) return; - ils_debug("ldp_sync: start holddown timer for %s time %d", + ils_debug("%s: start holddown timer for %s time %d", __func__, circuit->interface->name, ldp_sync_info->holddown); thread_add_timer(master, isis_ldp_sync_holddown_timer, @@ -413,7 +413,7 @@ void isis_ldp_sync_handle_client_close(struct zapi_client_close_info *info) * set cost to LSInfinity * send request to LDP for LDP-SYNC state for each interface */ - zlog_err("ldp_sync: LDP down"); + zlog_err("%s: LDP down", __func__); for (ALL_LIST_ELEMENTS_RO(isis->area_list, anode, area)) { if (!CHECK_FLAG(area->ldp_sync_cmd.flags, LDP_SYNC_FLAG_ENABLE)) @@ -488,7 +488,7 @@ void isis_if_ldp_sync_enable(struct isis_circuit *circuit) if (circuit->interface->vrf->vrf_id != VRF_DEFAULT) return; - ils_debug("ldp_sync: enable if %s", circuit->interface->name); + ils_debug("%s: enable if %s", __func__, circuit->interface->name); if (!CHECK_FLAG(area->ldp_sync_cmd.flags, LDP_SYNC_FLAG_ENABLE)) return; @@ -507,7 +507,7 @@ void isis_if_ldp_sync_enable(struct isis_circuit *circuit) isis_ldp_sync_state_req_msg(circuit); } else { ldp_sync_info->state = LDP_IGP_SYNC_STATE_NOT_REQUIRED; - ils_debug("ldp_sync: Sync only runs on P2P links %s", + ils_debug("%s: Sync only runs on P2P links %s", __func__, circuit->interface->name); } } @@ -525,7 +525,7 @@ void isis_if_ldp_sync_disable(struct isis_circuit *circuit) if (if_is_loopback(circuit->interface)) return; - ils_debug("ldp_sync: remove if %s", circuit->interface->name); + ils_debug("%s: remove if %s", __func__, circuit->interface->name); if (!CHECK_FLAG(area->ldp_sync_cmd.flags, LDP_SYNC_FLAG_ENABLE)) return; diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index 79b167718b..dbe4a017bc 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -54,7 +54,6 @@ 
#include "isisd/isis_dr.h" #include "isisd/isis_zebra.h" -DEFINE_MTYPE_STATIC(ISISD, ISIS_MPLS_TE, "ISIS MPLS_TE parameters"); DEFINE_MTYPE_STATIC(ISISD, ISIS_PLIST_NAME, "ISIS prefix-list name"); /* @@ -86,12 +85,17 @@ int isis_instance_create(struct nb_cb_create_args *args) int isis_instance_destroy(struct nb_cb_destroy_args *args) { struct isis_area *area; + struct isis *isis; if (args->event != NB_EV_APPLY) return NB_OK; area = nb_running_unset_entry(args->dnode); - + isis = area->isis; isis_area_destroy(area); + + if (listcount(isis->area_list) == 0) + isis_finish(isis); + return NB_OK; } @@ -1787,45 +1791,13 @@ int isis_instance_log_adjacency_changes_modify(struct nb_cb_modify_args *args) */ int isis_instance_mpls_te_create(struct nb_cb_create_args *args) { - struct listnode *node; struct isis_area *area; - struct isis_circuit *circuit; if (args->event != NB_EV_APPLY) return NB_OK; area = nb_running_get_entry(args->dnode, NULL, true); - if (area->mta == NULL) { - - struct mpls_te_area *new; - - zlog_debug("ISIS-TE(%s): Initialize MPLS Traffic Engineering", - area->area_tag); - - new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area)); - - /* Initialize MPLS_TE structure */ - new->status = enable; - new->level = 0; - new->inter_as = off; - new->interas_areaid.s_addr = 0; - new->router_id.s_addr = 0; - new->ted = ls_ted_new(1, "ISIS", 0); - if (!new->ted) - zlog_warn("Unable to create Link State Data Base"); - - area->mta = new; - } else { - area->mta->status = enable; - } - - /* Initialize Link State Database */ - if (area->mta->ted) - isis_te_init_ted(area); - - /* Update Extended TLVs according to Interface link parameters */ - for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) - isis_link_params_update(circuit, circuit->interface); + isis_mpls_te_create(area); /* Reoriginate STD_TE & GMPLS circuits */ lsp_regenerate_schedule(area, area->is_type, 0); @@ -1835,35 +1807,16 @@ int isis_instance_mpls_te_create(struct nb_cb_create_args *args) 
int isis_instance_mpls_te_destroy(struct nb_cb_destroy_args *args) { - struct listnode *node; struct isis_area *area; - struct isis_circuit *circuit; if (args->event != NB_EV_APPLY) return NB_OK; area = nb_running_get_entry(args->dnode, NULL, true); - if (IS_MPLS_TE(area->mta)) - area->mta->status = disable; - else - return NB_OK; - - /* Remove Link State Database */ - ls_ted_del_all(&area->mta->ted); - - /* Flush LSP if circuit engage */ - for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) { - if (!IS_EXT_TE(circuit->ext)) - continue; - - /* disable MPLS_TE Circuit keeping SR one's */ - if (IS_SUBTLV(circuit->ext, EXT_ADJ_SID)) - circuit->ext->status = EXT_ADJ_SID; - else if (IS_SUBTLV(circuit->ext, EXT_LAN_ADJ_SID)) - circuit->ext->status = EXT_LAN_ADJ_SID; - else - circuit->ext->status = 0; - } + if (!IS_MPLS_TE(area->mta)) + return NB_OK; + + isis_mpls_te_disable(area); /* Reoriginate STD_TE & GMPLS circuits */ lsp_regenerate_schedule(area, area->is_type, 0); diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c index a448e0043c..84278f3f71 100644 --- a/isisd/isis_pfpacket.c +++ b/isisd/isis_pfpacket.c @@ -103,15 +103,15 @@ static int isis_multicast_join(int fd, int registerto, int if_num) #ifdef EXTREME_DEBUG if (IS_DEBUG_EVENTS) zlog_debug( - "isis_multicast_join(): fd=%d, reg_to=%d, if_num=%d, address = %02x:%02x:%02x:%02x:%02x:%02x", - fd, registerto, if_num, mreq.mr_address[0], + "%s: fd=%d, reg_to=%d, if_num=%d, address = %02x:%02x:%02x:%02x:%02x:%02x", + __func__, fd, registerto, if_num, mreq.mr_address[0], mreq.mr_address[1], mreq.mr_address[2], mreq.mr_address[3], mreq.mr_address[4], mreq.mr_address[5]); #endif /* EXTREME_DEBUG */ if (setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(struct packet_mreq))) { - zlog_warn("isis_multicast_join(): setsockopt(): %s", + zlog_warn("%s: setsockopt(): %s", __func__, safe_strerror(errno)); return ISIS_WARNING; } @@ -131,13 +131,13 @@ static int open_packet_socket(struct isis_circuit 
*circuit) vrf->name); if (fd < 0) { - zlog_warn("open_packet_socket(): socket() failed %s", + zlog_warn("%s: socket() failed %s", __func__, safe_strerror(errno)); return ISIS_WARNING; } if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) { - zlog_warn("open_packet_socket(): SO_ATTACH_FILTER failed: %s", + zlog_warn("%s: SO_ATTACH_FILTER failed: %s", __func__, safe_strerror(errno)); } @@ -151,7 +151,7 @@ static int open_packet_socket(struct isis_circuit *circuit) if (bind(fd, (struct sockaddr *)(&s_addr), sizeof(struct sockaddr_ll)) < 0) { - zlog_warn("open_packet_socket(): bind() failed: %s", + zlog_warn("%s: bind() failed: %s", __func__, safe_strerror(errno)); close(fd); return ISIS_WARNING; @@ -208,7 +208,7 @@ int isis_sock_init(struct isis_circuit *circuit) circuit->tx = isis_send_pdu_p2p; circuit->rx = isis_recv_pdu_p2p; } else { - zlog_warn("isis_sock_init(): unknown circuit type"); + zlog_warn("%s: unknown circuit type", __func__); retval = ISIS_WARNING; break; } @@ -243,8 +243,8 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa) || (s_addr.sll_ifindex != (int)circuit->interface->ifindex)) { if (bytesread < 0) { zlog_warn( - "isis_recv_packet_bcast(): ifname %s, fd %d, bytesread %d, recvfrom(): %s", - circuit->interface->name, circuit->fd, + "%s: ifname %s, fd %d, bytesread %d, recvfrom(): %s", + __func__, circuit->interface->name, circuit->fd, bytesread, safe_strerror(errno)); } if (s_addr.sll_ifindex != (int)circuit->interface->ifindex) { @@ -261,7 +261,7 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa) (socklen_t *)&addr_len); if (bytesread < 0) - zlog_warn("isis_recv_pdu_bcast(): recvfrom() failed"); + zlog_warn("%s: recvfrom() failed", __func__); return ISIS_WARNING; } @@ -276,7 +276,7 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa) (struct sockaddr *)&s_addr, (socklen_t *)&addr_len); if (bytesread < 0) - zlog_warn("isis_recv_pdu_bcast(): recvfrom() failed"); + 
zlog_warn("%s: recvfrom() failed", __func__); return ISIS_WARNING; } @@ -321,7 +321,7 @@ int isis_recv_pdu_p2p(struct isis_circuit *circuit, uint8_t *ssnpa) (struct sockaddr *)&s_addr, (socklen_t *)&addr_len); if (bytesread < 0) - zlog_warn("isis_recv_pdu_p2p(): recvfrom() failed"); + zlog_warn("%s: recvfrom() failed", __func__); return ISIS_WARNING; } @@ -329,7 +329,7 @@ int isis_recv_pdu_p2p(struct isis_circuit *circuit, uint8_t *ssnpa) * ISO over GRE we exit with pain :) */ if (ntohs(s_addr.sll_protocol) != 0x00FE) { - zlog_warn("isis_recv_pdu_p2p(): protocol mismatch(): %X", + zlog_warn("%s: protocol mismatch(): %X", __func__, ntohs(s_addr.sll_protocol)); return ISIS_WARNING; } diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c index 6be7c2d608..bdd323e1a7 100644 --- a/isisd/isis_spf.c +++ b/isisd/isis_spf.c @@ -844,8 +844,7 @@ static int isis_spf_process_lsp(struct isis_spftree *spftree, lspfragloop: if (lsp->hdr.seqno == 0) { - zlog_warn( - "isis_spf_process_lsp(): lsp with 0 seq_num - ignore"); + zlog_warn("%s: lsp with 0 seq_num - ignore", __func__); return ISIS_WARNING; } @@ -1780,7 +1779,8 @@ void isis_run_spf(struct isis_spftree *spftree) break; case SPFTREE_COUNT: zlog_err( - "isis_run_spf should never be called with SPFTREE_COUNT as argument!"); + "%s should never be called with SPFTREE_COUNT as argument!", + __func__); exit(1); } diff --git a/isisd/isis_sr.c b/isisd/isis_sr.c index 259047ff66..f70840a637 100644 --- a/isisd/isis_sr.c +++ b/isisd/isis_sr.c @@ -1253,6 +1253,9 @@ void isis_sr_area_term(struct isis_area *area) if (area->srdb.enabled) isis_sr_stop(area); + /* Free Adjacency SID list */ + list_delete(&srdb->adj_sids); + /* Clear Prefix-SID configuration. 
*/ while (srdb_prefix_cfg_count(&srdb->config.prefix_sids) > 0) { struct sr_prefix_cfg *pcfg; diff --git a/isisd/isis_te.c b/isisd/isis_te.c index 3faff1cc4d..0093279cde 100644 --- a/isisd/isis_te.c +++ b/isisd/isis_te.c @@ -64,10 +64,115 @@ #include "isisd/isis_te.h" #include "isisd/isis_zebra.h" +DEFINE_MTYPE_STATIC(ISISD, ISIS_MPLS_TE, "ISIS MPLS_TE parameters"); + /*------------------------------------------------------------------------* * Following are control functions for MPLS-TE parameters management. *------------------------------------------------------------------------*/ +/** + * Create MPLS Traffic Engineering structure which belongs to given area. + * + * @param area IS-IS Area + */ +void isis_mpls_te_create(struct isis_area *area) +{ + struct listnode *node; + struct isis_circuit *circuit; + + if (!area) + return; + + if (area->mta == NULL) { + + struct mpls_te_area *new; + + zlog_debug("ISIS-TE(%s): Initialize MPLS Traffic Engineering", + area->area_tag); + + new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area)); + + /* Initialize MPLS_TE structure */ + new->status = enable; + new->level = 0; + new->inter_as = off; + new->interas_areaid.s_addr = 0; + new->router_id.s_addr = 0; + new->ted = ls_ted_new(1, "ISIS", 0); + if (!new->ted) + zlog_warn("Unable to create Link State Data Base"); + + area->mta = new; + } else { + area->mta->status = enable; + } + + /* Initialize Link State Database */ + if (area->mta->ted) + isis_te_init_ted(area); + + /* Update Extended TLVs according to Interface link parameters */ + for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) + isis_link_params_update(circuit, circuit->interface); +} + +/** + * Disable MPLS Traffic Engineering structure which belongs to given area. 
+ * + * @param area IS-IS Area + */ +void isis_mpls_te_disable(struct isis_area *area) +{ + struct listnode *node; + struct isis_circuit *circuit; + + if (!area->mta) + return; + + area->mta->status = disable; + + /* Remove Link State Database */ + ls_ted_del_all(&area->mta->ted); + + /* Disable Extended SubTLVs on all circuit */ + for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) { + if (!IS_EXT_TE(circuit->ext)) + continue; + + /* disable MPLS_TE Circuit keeping SR one's */ + if (IS_SUBTLV(circuit->ext, EXT_ADJ_SID)) + circuit->ext->status = EXT_ADJ_SID; + else if (IS_SUBTLV(circuit->ext, EXT_LAN_ADJ_SID)) + circuit->ext->status = EXT_LAN_ADJ_SID; + else + circuit->ext->status = 0; + } +} + +void isis_mpls_te_term(struct isis_area *area) +{ + struct listnode *node; + struct isis_circuit *circuit; + + if (!area->mta) + return; + + zlog_info("TE(%s): Terminate MPLS TE", __func__); + /* Remove Link State Database */ + ls_ted_del_all(&area->mta->ted); + + /* Remove Extended SubTLVs */ + zlog_info(" |- Remove Extended SubTLVS for all circuit"); + for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) { + zlog_info(" |- Call isis_del_ext_subtlvs()"); + isis_del_ext_subtlvs(circuit->ext); + circuit->ext = NULL; + } + + zlog_info(" |- Free MTA structure at %p", area->mta); + XFREE(MTYPE_ISIS_MPLS_TE, area->mta); +} + /* Main initialization / update function of the MPLS TE Circuit context */ /* Call when interface TE Link parameters are modified */ void isis_link_params_update(struct isis_circuit *circuit, diff --git a/isisd/isis_te.h b/isisd/isis_te.h index 56954073dd..03525962f5 100644 --- a/isisd/isis_te.h +++ b/isisd/isis_te.h @@ -123,6 +123,9 @@ enum lsp_event { LSP_UNKNOWN, LSP_ADD, LSP_UPD, LSP_DEL, LSP_INC, LSP_TICK }; /* Prototypes. 
*/ void isis_mpls_te_init(void); +void isis_mpls_te_create(struct isis_area *area); +void isis_mpls_te_disable(struct isis_area *area); +void isis_mpls_te_term(struct isis_area *area); void isis_link_params_update(struct isis_circuit *, struct interface *); int isis_mpls_te_update(struct interface *); void isis_te_lsp_event(struct isis_lsp *lsp, enum lsp_event event); diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c index b3c3fd4b0b..8907fa256b 100644 --- a/isisd/isis_tlvs.c +++ b/isisd/isis_tlvs.c @@ -139,6 +139,25 @@ struct isis_ext_subtlvs *isis_alloc_ext_subtlvs(void) return ext; } +void isis_del_ext_subtlvs(struct isis_ext_subtlvs *ext) +{ + struct isis_item *item, *next_item; + + if (!ext) + return; + + /* First, free Adj SID and LAN Adj SID list if needed */ + for (item = ext->adj_sid.head; item; item = next_item) { + next_item = item->next; + XFREE(MTYPE_ISIS_SUBTLV, item); + } + for (item = ext->lan_sid.head; item; item = next_item) { + next_item = item->next; + XFREE(MTYPE_ISIS_SUBTLV, item); + } + XFREE(MTYPE_ISIS_SUBTLV, ext); +} + /* * mtid parameter is used to determine if Adjacency is related to IPv4 or IPv6 * Multi-Topology. Special 4096 value i.e. 
first R flag set is used to indicate @@ -648,18 +667,7 @@ static void format_item_ext_subtlvs(struct isis_ext_subtlvs *exts, static void free_item_ext_subtlvs(struct isis_ext_subtlvs *exts) { - struct isis_item *item, *next_item; - - /* First, free Adj SID and LAN Adj SID list if needed */ - for (item = exts->adj_sid.head; item; item = next_item) { - next_item = item->next; - XFREE(MTYPE_ISIS_SUBTLV, item); - } - for (item = exts->lan_sid.head; item; item = next_item) { - next_item = item->next; - XFREE(MTYPE_ISIS_SUBTLV, item); - } - XFREE(MTYPE_ISIS_SUBTLV, exts); + isis_del_ext_subtlvs(exts); } static int pack_item_ext_subtlvs(struct isis_ext_subtlvs *exts, @@ -1059,6 +1067,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s, log, indent, "TLV size does not match expected size for Adjacency SID!\n"); stream_forward_getp(s, subtlv_len - 2); + XFREE(MTYPE_ISIS_SUBTLV, adj); break; } @@ -1070,6 +1079,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s, log, indent, "TLV size does not match expected size for Adjacency SID!\n"); stream_forward_getp(s, subtlv_len - 2); + XFREE(MTYPE_ISIS_SUBTLV, adj); break; } @@ -1114,6 +1124,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s, stream_forward_getp( s, subtlv_len - 2 - ISIS_SYS_ID_LEN); + XFREE(MTYPE_ISIS_SUBTLV, lan); break; } @@ -1127,6 +1138,7 @@ static int unpack_item_ext_subtlvs(uint16_t mtid, uint8_t len, struct stream *s, stream_forward_getp( s, subtlv_len - 2 - ISIS_SYS_ID_LEN); + XFREE(MTYPE_ISIS_SUBTLV, lan); break; } @@ -1892,6 +1904,7 @@ static void format_item_extended_reach(uint16_t mtid, struct isis_item *i, static void free_item_extended_reach(struct isis_item *i) { struct isis_extended_reach *item = (struct isis_extended_reach *)i; + if (item->subtlvs != NULL) free_item_ext_subtlvs(item->subtlvs); XFREE(MTYPE_ISIS_TLV, item); diff --git a/isisd/isis_tlvs.h b/isisd/isis_tlvs.h index 157450dc6c..905032bda1 
100644 --- a/isisd/isis_tlvs.h +++ b/isisd/isis_tlvs.h @@ -597,6 +597,7 @@ void isis_tlvs_add_ipv6_dstsrc_reach(struct isis_tlvs *tlvs, uint16_t mtid, struct prefix_ipv6 *src, uint32_t metric); struct isis_ext_subtlvs *isis_alloc_ext_subtlvs(void); +void isis_del_ext_subtlvs(struct isis_ext_subtlvs *ext); void isis_tlvs_add_adj_sid(struct isis_ext_subtlvs *exts, struct isis_adj_sid *adj); void isis_tlvs_del_adj_sid(struct isis_ext_subtlvs *exts, diff --git a/isisd/isisd.c b/isisd/isisd.c index 3fd2476ad1..bce3dbb77b 100644 --- a/isisd/isisd.c +++ b/isisd/isisd.c @@ -224,6 +224,12 @@ struct isis *isis_new(const char *vrf_name) void isis_finish(struct isis *isis) { + struct isis_area *area; + struct listnode *node, *nnode; + + for (ALL_LIST_ELEMENTS(isis->area_list, node, nnode, area)) + isis_area_destroy(area); + struct vrf *vrf = NULL; listnode_delete(im->isis, isis); @@ -273,6 +279,13 @@ void isis_area_del_circuit(struct isis_area *area, struct isis_circuit *circuit) isis_csm_state_change(ISIS_DISABLE, circuit, area); } +static void delete_area_addr(void *arg) +{ + struct area_addr *addr = (struct area_addr *)arg; + + XFREE(MTYPE_ISIS_AREA_ADDR, addr); +} + struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name) { struct isis_area *area; @@ -318,6 +331,8 @@ struct isis_area *isis_area_create(const char *area_tag, const char *vrf_name) area->circuit_list = list_new(); area->adjacency_list = list_new(); area->area_addrs = list_new(); + area->area_addrs->del = delete_area_addr; + if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST)) thread_add_timer(master, lsp_tick, area, 1, &area->t_tick); flags_initialize(&area->flags); @@ -481,17 +496,12 @@ void isis_area_destroy(struct isis_area *area) { struct listnode *node, *nnode; struct isis_circuit *circuit; - struct area_addr *addr; QOBJ_UNREG(area); if (fabricd) fabricd_finish(area->fabricd); - /* Disable MPLS if necessary before flooding LSP */ - if (IS_MPLS_TE(area->mta)) - area->mta->status = disable; 
- if (area->circuit_list) { for (ALL_LIST_ELEMENTS(area->circuit_list, node, nnode, circuit)) @@ -499,6 +509,9 @@ void isis_area_destroy(struct isis_area *area) list_delete(&area->circuit_list); } + if (area->flags.free_idcs) + list_delete(&area->flags.free_idcs); + list_delete(&area->adjacency_list); lsp_db_fini(&area->lspdb[0]); @@ -510,6 +523,8 @@ void isis_area_destroy(struct isis_area *area) isis_sr_area_term(area); + isis_mpls_te_term(area); + spftree_area_del(area); if (area->spf_timer[0]) @@ -525,11 +540,7 @@ void isis_area_destroy(struct isis_area *area) if (!CHECK_FLAG(im->options, F_ISIS_UNIT_TEST)) isis_redist_area_finish(area); - for (ALL_LIST_ELEMENTS(area->area_addrs, node, nnode, addr)) { - list_delete_node(area->area_addrs, node); - XFREE(MTYPE_ISIS_AREA_ADDR, addr); - } - area->area_addrs = NULL; + list_delete(&area->area_addrs); for (int i = SPF_PREFIX_PRIO_CRITICAL; i <= SPF_PREFIX_PRIO_MEDIUM; i++) { @@ -554,10 +565,6 @@ void isis_area_destroy(struct isis_area *area) area_mt_finish(area); - if (listcount(area->isis->area_list) == 0) { - isis_finish(area->isis); - } - XFREE(MTYPE_ISIS_AREA, area); } diff --git a/ldpd/ldpe.c b/ldpd/ldpe.c index 792dcb2f2a..1b5c0e90fb 100644 --- a/ldpd/ldpe.c +++ b/ldpd/ldpe.c @@ -616,8 +616,8 @@ static void ldpe_dispatch_main(struct thread *thread) } break; default: - log_debug("ldpe_dispatch_main: error handling imsg %d", - imsg.hdr.type); + log_debug("%s: error handling imsg %d", + __func__, imsg.hdr.type); break; } imsg_free(&imsg); @@ -724,7 +724,7 @@ static void ldpe_dispatch_lde(struct thread *thread) nbr = nbr_find_peerid(imsg.hdr.peerid); if (nbr == NULL) { - log_debug("ldpe_dispatch_lde: cannot find neighbor"); + log_debug("%s: cannot find neighbor", __func__); break; } if (nbr->state != NBR_STA_OPER) @@ -744,7 +744,7 @@ static void ldpe_dispatch_lde(struct thread *thread) case IMSG_NBR_SHUTDOWN: nbr = nbr_find_peerid(imsg.hdr.peerid); if (nbr == NULL) { - log_debug("ldpe_dispatch_lde: cannot find 
neighbor"); + log_debug("%s: cannot find neighbor", __func__); break; } if (nbr->state != NBR_STA_OPER) @@ -752,8 +752,8 @@ static void ldpe_dispatch_lde(struct thread *thread) session_shutdown(nbr,S_SHUTDOWN,0,0); break; default: - log_debug("ldpe_dispatch_lde: error handling imsg %d", - imsg.hdr.type); + log_debug("%s: error handling imsg %d", + __func__, imsg.hdr.type); break; } imsg_free(&imsg); diff --git a/lib/routemap.c b/lib/routemap.c index 9529b79419..e6310465e3 100644 --- a/lib/routemap.c +++ b/lib/routemap.c @@ -812,17 +812,13 @@ int route_map_mark_updated(const char *name) return (ret); } -static int route_map_clear_updated(struct route_map *map) +static void route_map_clear_updated(struct route_map *map) { - int ret = -1; - if (map) { map->to_be_processed = false; if (map->deleted) route_map_free_map(map); } - - return (ret); } /* Lookup route map. If there isn't route map create one and return diff --git a/lib/routemap.h b/lib/routemap.h index ad391981e0..0152e820d0 100644 --- a/lib/routemap.h +++ b/lib/routemap.h @@ -387,6 +387,8 @@ DECLARE_QOBJ_TYPE(route_map); (strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv4")) #define IS_SET_BGP_EVPN_GATEWAY_IP_IPV6(A) \ (strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv6")) +#define IS_SET_BGP_L3VPN_NEXTHOP_ENCAPSULATION(A) \ + (strmatch(A, "frr-bgp-route-map:set-l3vpn-nexthop-encapsulation")) enum ecommunity_lb_type { EXPLICIT_BANDWIDTH, diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c index ff98a14c41..42c7a05d18 100644 --- a/lib/routemap_cli.c +++ b/lib/routemap_cli.c @@ -1258,6 +1258,11 @@ void route_map_action_show(struct vty *vty, const struct lyd_node *dnode, yang_dnode_get_string( dnode, "./rmap-set-action/frr-bgp-route-map:evpn-gateway-ip-ipv6")); + } else if (IS_SET_BGP_L3VPN_NEXTHOP_ENCAPSULATION(action)) { + vty_out(vty, " set l3vpn next-hop encapsulation %s\n", + yang_dnode_get_string( + dnode, + "./rmap-set-action/frr-bgp-route-map:l3vpn-nexthop-encapsulation")); } } diff --git 
a/lib/thread.c b/lib/thread.c index c3613b5b0e..9eac9b410a 100644 --- a/lib/thread.c +++ b/lib/thread.c @@ -1354,9 +1354,9 @@ static void do_thread_cancel(struct thread_master *master) struct thread_list_head *list = NULL; struct thread **thread_array = NULL; struct thread *thread; - struct cancel_req *cr; struct listnode *ln; + for (ALL_LIST_ELEMENTS_RO(master->cancel_req, ln, cr)) { /* * If this is an event object cancellation, search @@ -1379,6 +1379,9 @@ static void do_thread_cancel(struct thread_master *master) if (!thread) continue; + list = NULL; + thread_array = NULL; + /* Determine the appropriate queue to cancel the thread from */ switch (thread->type) { case THREAD_READ: diff --git a/lib/zclient.c b/lib/zclient.c index e556b768ac..8ec82ab7bb 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -447,7 +447,7 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient, { struct prefix_ipv6 p = {}; struct zapi_route api = {}; - struct nexthop nh = {}; + struct zapi_nexthop *znh; p.family = AF_INET6; p.prefixlen = IPV6_MAX_BITLEN; @@ -465,12 +465,16 @@ enum zclient_send_status zclient_send_localsid(struct zclient *zclient, SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION); SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP); - nh.type = NEXTHOP_TYPE_IFINDEX; - nh.ifindex = oif; - SET_FLAG(nh.flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL); - nexthop_add_srv6_seg6local(&nh, action, context); + znh = &api.nexthops[0]; + + memset(znh, 0, sizeof(*znh)); + + znh->type = NEXTHOP_TYPE_IFINDEX; + znh->ifindex = oif; + SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL); + znh->seg6local_action = action; + memcpy(&znh->seg6local_ctx, context, sizeof(struct seg6local_context)); - zapi_nexthop_from_nexthop(&api.nexthops[0], &nh); api.nexthop_num = 1; return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); diff --git a/lib/zlog.c b/lib/zlog.c index e0bb34a258..6a36a0b123 100644 --- a/lib/zlog.c +++ b/lib/zlog.c @@ -991,16 +991,14 @@ void zlog_init(const char *progname, const char 
*protoname, zlog_instance = instance; if (instance) { - snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), - "/var/tmp/frr/%s-%d.%ld", - progname, instance, (long)getpid()); + snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), "%s/%s-%d.%ld", + TMPBASEDIR, progname, instance, (long)getpid()); zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix), "%s[%d]: ", protoname, instance); } else { - snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), - "/var/tmp/frr/%s.%ld", - progname, (long)getpid()); + snprintfrr(zlog_tmpdir, sizeof(zlog_tmpdir), "%s/%s.%ld", + TMPBASEDIR, progname, (long)getpid()); zlog_prefixsz = snprintfrr(zlog_prefix, sizeof(zlog_prefix), "%s: ", protoname); diff --git a/ospfd/ospf_ase.c b/ospfd/ospf_ase.c index 23c7a1e706..706a22e9bf 100644 --- a/ospfd/ospf_ase.c +++ b/ospfd/ospf_ase.c @@ -212,12 +212,12 @@ int ospf_ase_calculate_route(struct ospf *ospf, struct ospf_lsa *lsa) if (lsa->data->type == OSPF_AS_NSSA_LSA) if (IS_DEBUG_OSPF_NSSA) - zlog_debug("ospf_ase_calc(): Processing Type-7"); + zlog_debug("%s: Processing Type-7", __func__); /* Stay away from any Local Translated Type-7 LSAs */ if (CHECK_FLAG(lsa->flags, OSPF_LSA_LOCAL_XLT)) { if (IS_DEBUG_OSPF_NSSA) - zlog_debug("ospf_ase_calc(): Rejecting Local Xlt'd"); + zlog_debug("%s: Rejecting Local Xlt'd", __func__); return 0; } @@ -589,9 +589,8 @@ static void ospf_ase_calculate_timer(struct thread *t) if (ospf->anyNSSA) for (ALL_LIST_ELEMENTS_RO(ospf->areas, node, area)) { if (IS_DEBUG_OSPF_NSSA) - zlog_debug( - "ospf_ase_calculate_timer(): looking at area %pI4", - &area->area_id); + zlog_debug("%s: looking at area %pI4", + __func__, &area->area_id); if (area->external_routing == OSPF_AREA_NSSA) LSDB_LOOP (NSSA_LSDB(area), rn, lsa) diff --git a/ospfd/ospf_flood.c b/ospfd/ospf_flood.c index fb3fb21e08..e686a93ba9 100644 --- a/ospfd/ospf_flood.c +++ b/ospfd/ospf_flood.c @@ -804,8 +804,7 @@ int ospf_flood_through(struct ospf *ospf, struct ospf_neighbor *inbr, /* Any P-bit was installed with the Type-7. 
*/ if (IS_DEBUG_OSPF_NSSA) - zlog_debug( - "ospf_flood_through: LOCAL NSSA FLOOD of Type-7."); + zlog_debug("%s: LOCAL NSSA FLOOD of Type-7.", __func__); /* Fallthrough */ default: lsa_ack_flag = ospf_flood_through_area(lsa->area, inbr, lsa); diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index 278f263da3..0df0072f6d 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -2865,10 +2865,11 @@ struct ospf_lsa *ospf_lsa_install(struct ospf *ospf, struct ospf_interface *oi, * So, router should be aborted from HELPER role * if it is detected as TOPO change. */ - if (ospf->active_restarter_cnt - && CHECK_LSA_TYPE_1_TO_5_OR_7(lsa->data->type) - && ospf_lsa_different(old, lsa, true)) - ospf_helper_handle_topo_chg(ospf, lsa); + if (ospf->active_restarter_cnt && + CHECK_LSA_TYPE_1_TO_5_OR_7(lsa->data->type)) { + if (old == NULL || ospf_lsa_different(old, lsa, true)) + ospf_helper_handle_topo_chg(ospf, lsa); + } rt_recalc = 1; } @@ -3020,7 +3021,7 @@ int ospf_check_nbr_status(struct ospf *ospf) } -static void ospf_maxage_lsa_remover(struct thread *thread) +void ospf_maxage_lsa_remover(struct thread *thread) { struct ospf *ospf = THREAD_ARG(thread); struct ospf_lsa *lsa, *old; @@ -3898,8 +3899,9 @@ void ospf_refresher_register_lsa(struct ospf *ospf, struct ospf_lsa *lsa) if (lsa->refresh_list < 0) { int delay; int min_delay = - OSPF_LS_REFRESH_TIME - (2 * OSPF_LS_REFRESH_JITTER); - int max_delay = OSPF_LS_REFRESH_TIME - OSPF_LS_REFRESH_JITTER; + ospf->lsa_refresh_timer - (2 * OSPF_LS_REFRESH_JITTER); + int max_delay = + ospf->lsa_refresh_timer - OSPF_LS_REFRESH_JITTER; /* We want to refresh the LSA within OSPF_LS_REFRESH_TIME which * is diff --git a/ospfd/ospf_lsa.h b/ospfd/ospf_lsa.h index 4b3be15382..97c15d1e3c 100644 --- a/ospfd/ospf_lsa.h +++ b/ospfd/ospf_lsa.h @@ -354,4 +354,5 @@ extern void ospf_check_and_gen_init_seq_lsa(struct ospf_interface *oi, struct ospf_lsa *lsa); extern void ospf_flush_lsa_from_area(struct ospf *ospf, struct in_addr area_id, int type); +extern 
void ospf_maxage_lsa_remover(struct thread *thread); #endif /* _ZEBRA_OSPF_LSA_H */ diff --git a/ospfd/ospf_spf.c b/ospfd/ospf_spf.c index 74a5674273..4edc1de811 100644 --- a/ospfd/ospf_spf.c +++ b/ospfd/ospf_spf.c @@ -48,7 +48,10 @@ #include "ospfd/ospf_sr.h" #include "ospfd/ospf_ti_lfa.h" #include "ospfd/ospf_errors.h" + +#ifdef SUPPORT_OSPF_API #include "ospfd/ospf_apiserver.h" +#endif /* Variables to ensure a SPF scheduled log message is printed only once */ @@ -799,7 +802,7 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, unsigned int added = 0; if (IS_DEBUG_OSPF_EVENT) { - zlog_debug("ospf_nexthop_calculation(): Start"); + zlog_debug("%s: Start", __func__); ospf_vertex_dump("V (parent):", v, 1, 1); ospf_vertex_dump("W (dest) :", w, 1, 1); zlog_debug("V->W distance: %d", distance); @@ -1021,7 +1024,8 @@ static unsigned int ospf_nexthop_calculation(struct ospf_area *area, return 1; } else zlog_info( - "ospf_nexthop_calculation(): vl_data for VL link not found"); + "%s: vl_data for VL link not found", + __func__); } /* end virtual-link from V to W */ return 0; } /* end W is a Router vertex */ @@ -1569,7 +1573,7 @@ static void ospf_spf_process_stubs(struct ospf_area *area, struct vertex *v, struct vertex *child; if (IS_DEBUG_OSPF_EVENT) - zlog_debug("ospf_process_stub():processing stubs for area %pI4", + zlog_debug("%s: processing stubs for area %pI4", __func__, &area->area_id); if (v->type == OSPF_VERTEX_ROUTER) { @@ -1580,16 +1584,14 @@ static void ospf_spf_process_stubs(struct ospf_area *area, struct vertex *v, int lsa_pos = 0; if (IS_DEBUG_OSPF_EVENT) - zlog_debug( - "ospf_process_stubs():processing router LSA, id: %pI4", - &v->lsa->id); + zlog_debug("%s: processing router LSA, id: %pI4", + __func__, &v->lsa->id); router_lsa = (struct router_lsa *)v->lsa; if (IS_DEBUG_OSPF_EVENT) - zlog_debug( - "ospf_process_stubs(): we have %d links to process", - ntohs(router_lsa->links)); + zlog_debug("%s: we have %d links to process", __func__, + 
ntohs(router_lsa->links)); p = ((uint8_t *)v->lsa) + OSPF_LSA_HEADER_SIZE + 4; lim = ((uint8_t *)v->lsa) + ntohs(v->lsa->length); @@ -1683,9 +1685,8 @@ void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa, struct vertex *v; if (IS_DEBUG_OSPF_EVENT) { - zlog_debug("ospf_spf_calculate: Start"); - zlog_debug("ospf_spf_calculate: running Dijkstra for area %pI4", - &area->area_id); + zlog_debug("%s: Start: running Dijkstra for area %pI4", + __func__, &area->area_id); } /* @@ -1696,8 +1697,8 @@ void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa, if (!root_lsa) { if (IS_DEBUG_OSPF_EVENT) zlog_debug( - "ospf_spf_calculate: Skip area %pI4's calculation due to empty root LSA", - &area->area_id); + "%s: Skip area %pI4's calculation due to empty root LSA", + __func__, &area->area_id); return; } @@ -1777,7 +1778,7 @@ void ospf_spf_calculate(struct ospf_area *area, struct ospf_lsa *root_lsa, area->ts_spf = area->ospf->ts_spf; if (IS_DEBUG_OSPF_EVENT) - zlog_debug("ospf_spf_calculate: Stop. %zd vertices", + zlog_debug("%s: Stop. 
%zd vertices", __func__, mtype_stats_alloc(MTYPE_OSPF_VERTEX)); } @@ -1897,7 +1898,9 @@ static void ospf_spf_calculate_schedule_worker(struct thread *thread) /* Update all routers routing table */ ospf->oall_rtrs = ospf->all_rtrs; ospf->all_rtrs = all_rtrs; +#ifdef SUPPORT_OSPF_API ospf_apiserver_notify_reachable(ospf->oall_rtrs, ospf->all_rtrs); +#endif /* Free old ABR/ASBR routing table */ if (ospf->old_rtrs) diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 7d72487686..2a0016ea19 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -12952,6 +12952,42 @@ DEFUN (clear_ip_ospf_interface, return CMD_SUCCESS; } +DEFPY_HIDDEN(ospf_lsa_refresh_timer, ospf_lsa_refresh_timer_cmd, + "[no$no] ospf lsa-refresh [(120-1800)]$value", + NO_STR OSPF_STR + "OSPF lsa refresh timer\n" + "timer value in seconds\n") +{ + VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf) + + if (no) + ospf->lsa_refresh_timer = OSPF_LS_REFRESH_TIME; + else + ospf->lsa_refresh_timer = value; + + return CMD_SUCCESS; +} + +DEFPY_HIDDEN(ospf_maxage_delay_timer, ospf_maxage_delay_timer_cmd, + "[no$no] ospf maxage-delay [(0-60)]$value", + NO_STR OSPF_STR + "OSPF lsa maxage delay timer\n" + "timer value in seconds\n") +{ + VTY_DECLVAR_INSTANCE_CONTEXT(ospf, ospf) + + if (no) + ospf->maxage_delay = OSPF_LSA_MAXAGE_REMOVE_DELAY_DEFAULT; + else + ospf->maxage_delay = value; + + THREAD_OFF(ospf->t_maxage); + OSPF_TIMER_ON(ospf->t_maxage, ospf_maxage_lsa_remover, + ospf->maxage_delay); + + return CMD_SUCCESS; +} + void ospf_vty_clear_init(void) { install_element(ENABLE_NODE, &clear_ip_ospf_interface_cmd); @@ -13109,6 +13145,9 @@ void ospf_vty_init(void) vrf_cmd_init(NULL); + install_element(OSPF_NODE, &ospf_lsa_refresh_timer_cmd); + install_element(OSPF_NODE, &ospf_maxage_delay_timer_cmd); + /* Init interface related vty commands. 
*/ ospf_vty_if_init(); diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c index 8512b6a339..e0c36d86fe 100644 --- a/ospfd/ospfd.c +++ b/ospfd/ospfd.c @@ -392,6 +392,7 @@ struct ospf *ospf_new_alloc(unsigned short instance, const char *name) new->lsa_refresh_queue.index = 0; new->lsa_refresh_interval = OSPF_LSA_REFRESH_INTERVAL_DEFAULT; + new->lsa_refresh_timer = OSPF_LS_REFRESH_TIME; new->t_lsa_refresher = NULL; thread_add_timer(master, ospf_lsa_refresh_walker, new, new->lsa_refresh_interval, &new->t_lsa_refresher); diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h index 8478c96ddc..3a43010f85 100644 --- a/ospfd/ospfd.h +++ b/ospfd/ospfd.h @@ -313,6 +313,7 @@ struct ospf { time_t lsa_refresher_started; #define OSPF_LSA_REFRESH_INTERVAL_DEFAULT 10 uint16_t lsa_refresh_interval; + uint16_t lsa_refresh_timer; /* Distance parameter. */ uint8_t distance_all; diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c index badc25b473..38fa1966cd 100644 --- a/pimd/pim6_mld.c +++ b/pimd/pim6_mld.c @@ -401,7 +401,7 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired) desired = GM_SG_NOINFO; if (desired != sg->state && !gm_ifp->stopping) { - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state], gm_states[desired]); @@ -418,7 +418,7 @@ static void gm_sg_update(struct gm_sg *sg, bool has_expired) gm_sg_timer_start(gm_ifp, sg, timers.expire_wait); THREAD_OFF(sg->t_sg_query); - sg->n_query = gm_ifp->cur_qrv; + sg->n_query = gm_ifp->cur_lmqc; sg->query_sbit = false; gm_trigger_specific(sg); } @@ -817,7 +817,7 @@ static void gm_handle_v2_report(struct gm_if *gm_ifp, struct gm_packet_state *pkt; if (len < sizeof(*hdr)) { - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug(log_pkt_src( "malformed MLDv2 report (truncated header)")); gm_ifp->stats.rx_drop_malformed++; @@ -923,7 +923,7 @@ static void gm_handle_v1_report(struct gm_if *gm_ifp, size_t max_entries; if (len < sizeof(*hdr)) { - if (PIM_DEBUG_IGMP_PACKETS) + if 
(PIM_DEBUG_GM_PACKETS) zlog_debug(log_pkt_src( "malformed MLDv1 report (truncated)")); gm_ifp->stats.rx_drop_malformed++; @@ -989,7 +989,7 @@ static void gm_handle_v1_leave(struct gm_if *gm_ifp, struct gm_packet_sg *old_grp; if (len < sizeof(*hdr)) { - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug(log_pkt_src( "malformed MLDv1 leave (truncated)")); gm_ifp->stats.rx_drop_malformed++; @@ -1049,7 +1049,7 @@ static void gm_t_expire(struct thread *t) remain_ms = monotime_until(&pend->expiry, &remain); if (remain_ms > 0) { - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug( log_ifp("next general expiry in %" PRId64 "ms"), remain_ms / 1000); @@ -1063,7 +1063,7 @@ static void gm_t_expire(struct thread *t) if (timercmp(&pkt->received, &pend->query, >=)) break; - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug(log_ifp("expire packet %p"), pkt); gm_packet_drop(pkt, true); } @@ -1073,7 +1073,7 @@ static void gm_t_expire(struct thread *t) gm_ifp->n_pending * sizeof(gm_ifp->pending[0])); } - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug(log_ifp("next general expiry waiting for query")); } @@ -1250,7 +1250,7 @@ static void gm_t_grp_expire(struct thread *t) struct gm_if *gm_ifp = pend->iface; struct gm_sg *sg, *sg_start, sg_ref = {}; - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp); /* gteq lookup - try to find *,G or S,G (S,G is > *,G) @@ -1442,7 +1442,7 @@ static void gm_handle_query(struct gm_if *gm_ifp, } if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) { - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug( log_pkt_src("replacing elected querier %pPA"), &gm_ifp->querier); @@ -1802,7 +1802,7 @@ static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp, query.hdr.icmp6_cksum = in_cksumv(iov, iov_len); - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug( log_ifp("MLD query %pPA -> %pI6 
(grp=%pPA, %zu srcs)"), &pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs); @@ -2091,11 +2091,12 @@ static void gm_start(struct interface *ifp) else gm_ifp->cur_version = GM_MLDV2; - /* hardcoded for dev without CLI */ - gm_ifp->cur_qrv = 2; + gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable; gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000; - gm_ifp->cur_query_intv_trig = gm_ifp->cur_query_intv; - gm_ifp->cur_max_resp = 250; + gm_ifp->cur_query_intv_trig = + pim_ifp->gm_specific_query_max_response_time_dsec * 100; + gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100; + gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count; gm_ifp->cfg_timing_fuzz.tv_sec = 0; gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000; @@ -2136,7 +2137,7 @@ void gm_ifp_teardown(struct interface *ifp) gm_ifp = pim_ifp->mld; gm_ifp->stopping = true; - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug(log_ifp("MLD stop")); THREAD_OFF(gm_ifp->t_query); @@ -2268,10 +2269,26 @@ void gm_ifp_update(struct interface *ifp) if (gm_ifp->cur_query_intv != cfg_query_intv) { gm_ifp->cur_query_intv = cfg_query_intv; - gm_ifp->cur_query_intv_trig = cfg_query_intv; changed = true; } + unsigned int cfg_query_intv_trig = + pim_ifp->gm_specific_query_max_response_time_dsec * 100; + + if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) { + gm_ifp->cur_query_intv_trig = cfg_query_intv_trig; + changed = true; + } + + unsigned int cfg_max_response = + pim_ifp->gm_query_max_response_time_dsec * 100; + + if (gm_ifp->cur_max_resp != cfg_max_response) + gm_ifp->cur_max_resp = cfg_max_response; + + if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count) + gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count; + enum gm_version cfg_version; if (pim_ifp->mld_version == 1) @@ -2406,6 +2423,7 @@ static void gm_show_if_one(struct vty *vty, struct interface *ifp, querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest); if (js_if) { + 
json_object_string_add(js_if, "name", ifp->name); json_object_string_add(js_if, "state", "up"); json_object_string_addf(js_if, "version", "%d", gm_ifp->cur_version); @@ -2421,6 +2439,14 @@ static void gm_show_if_one(struct vty *vty, struct interface *ifp, json_object_string_addf(js_if, "otherQuerierTimer", "%pTH", gm_ifp->t_other_querier); + json_object_int_add(js_if, "timerRobustnessValue", + gm_ifp->cur_qrv); + json_object_int_add(js_if, "timerQueryIntervalMsec", + gm_ifp->cur_query_intv); + json_object_int_add(js_if, "timerQueryResponseTimerMsec", + gm_ifp->cur_max_resp); + json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec", + gm_ifp->cur_query_intv_trig); } else { vty_out(vty, "%-16s %-5s %d %-25pPA %-5s %11pTH %pTVMs\n", ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier, diff --git a/pimd/pim6_mld.h b/pimd/pim6_mld.h index 95523c2922..540d2e1899 100644 --- a/pimd/pim6_mld.h +++ b/pimd/pim6_mld.h @@ -324,6 +324,7 @@ struct gm_if { unsigned int cur_query_intv_trig; /* ms */ unsigned int cur_max_resp; /* ms */ enum gm_version cur_version; + int cur_lmqc; /* last member query count in ds */ /* this value (positive, default 10ms) defines our "timing tolerance": * - added to deadlines for expiring joins diff --git a/pimd/pim_addr.h b/pimd/pim_addr.h index defe4070cf..7852d1788a 100644 --- a/pimd/pim_addr.h +++ b/pimd/pim_addr.h @@ -31,6 +31,7 @@ typedef struct in_addr pim_addr; #define PIM_ADDRSTRLEN INET_ADDRSTRLEN #define PIM_AF AF_INET #define PIM_AFI AFI_IP +#define PIM_PROTO_REG IPPROTO_RAW #define PIM_IPADDR IPADDR_V4 #define ipaddr_pim ipaddr_v4 #define PIM_MAX_BITLEN IPV4_MAX_BITLEN @@ -58,6 +59,7 @@ typedef struct in6_addr pim_addr; #define PIM_ADDRSTRLEN INET6_ADDRSTRLEN #define PIM_AF AF_INET6 #define PIM_AFI AFI_IP6 +#define PIM_PROTO_REG IPPROTO_PIM #define PIM_IPADDR IPADDR_V6 #define ipaddr_pim ipaddr_v6 #define PIM_MAX_BITLEN IPV6_MAX_BITLEN diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index f0b6037db9..c2f7396c18 100644 --- 
a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -4358,9 +4358,9 @@ DEFUN (debug_igmp, DEBUG_STR DEBUG_IGMP_STR) { - PIM_DO_DEBUG_IGMP_EVENTS; - PIM_DO_DEBUG_IGMP_PACKETS; - PIM_DO_DEBUG_IGMP_TRACE; + PIM_DO_DEBUG_GM_EVENTS; + PIM_DO_DEBUG_GM_PACKETS; + PIM_DO_DEBUG_GM_TRACE; return CMD_SUCCESS; } @@ -4371,9 +4371,9 @@ DEFUN (no_debug_igmp, DEBUG_STR DEBUG_IGMP_STR) { - PIM_DONT_DEBUG_IGMP_EVENTS; - PIM_DONT_DEBUG_IGMP_PACKETS; - PIM_DONT_DEBUG_IGMP_TRACE; + PIM_DONT_DEBUG_GM_EVENTS; + PIM_DONT_DEBUG_GM_PACKETS; + PIM_DONT_DEBUG_GM_TRACE; return CMD_SUCCESS; } @@ -4385,7 +4385,7 @@ DEFUN (debug_igmp_events, DEBUG_IGMP_STR DEBUG_IGMP_EVENTS_STR) { - PIM_DO_DEBUG_IGMP_EVENTS; + PIM_DO_DEBUG_GM_EVENTS; return CMD_SUCCESS; } @@ -4397,7 +4397,7 @@ DEFUN (no_debug_igmp_events, DEBUG_IGMP_STR DEBUG_IGMP_EVENTS_STR) { - PIM_DONT_DEBUG_IGMP_EVENTS; + PIM_DONT_DEBUG_GM_EVENTS; return CMD_SUCCESS; } @@ -4409,7 +4409,7 @@ DEFUN (debug_igmp_packets, DEBUG_IGMP_STR DEBUG_IGMP_PACKETS_STR) { - PIM_DO_DEBUG_IGMP_PACKETS; + PIM_DO_DEBUG_GM_PACKETS; return CMD_SUCCESS; } @@ -4421,7 +4421,7 @@ DEFUN (no_debug_igmp_packets, DEBUG_IGMP_STR DEBUG_IGMP_PACKETS_STR) { - PIM_DONT_DEBUG_IGMP_PACKETS; + PIM_DONT_DEBUG_GM_PACKETS; return CMD_SUCCESS; } @@ -4433,7 +4433,7 @@ DEFUN (debug_igmp_trace, DEBUG_IGMP_STR DEBUG_IGMP_TRACE_STR) { - PIM_DO_DEBUG_IGMP_TRACE; + PIM_DO_DEBUG_GM_TRACE; return CMD_SUCCESS; } @@ -4445,7 +4445,7 @@ DEFUN (no_debug_igmp_trace, DEBUG_IGMP_STR DEBUG_IGMP_TRACE_STR) { - PIM_DONT_DEBUG_IGMP_TRACE; + PIM_DONT_DEBUG_GM_TRACE; return CMD_SUCCESS; } @@ -4458,7 +4458,7 @@ DEFUN (debug_igmp_trace_detail, DEBUG_IGMP_TRACE_STR "detailed\n") { - PIM_DO_DEBUG_IGMP_TRACE_DETAIL; + PIM_DO_DEBUG_GM_TRACE_DETAIL; return CMD_SUCCESS; } @@ -4471,7 +4471,7 @@ DEFUN (no_debug_igmp_trace_detail, DEBUG_IGMP_TRACE_STR "detailed\n") { - PIM_DONT_DEBUG_IGMP_TRACE_DETAIL; + PIM_DONT_DEBUG_GM_TRACE_DETAIL; return CMD_SUCCESS; } diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index 
ff77b856fb..70c1544717 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -1038,7 +1038,7 @@ void pim_show_state(struct pim_instance *pim, struct vty *vty, if (!json) { vty_out(vty, - "Codes: J -> Pim Join, I -> IGMP Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted"); + "Codes: J -> Pim Join, I -> " GM " Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN, M -> Muted"); vty_out(vty, "\nActive Source Group RPT IIF OIL\n"); } @@ -2316,6 +2316,7 @@ void pim_show_interfaces(struct pim_instance *pim, struct vty *vty, bool mlag, address, neighbors, pimdr, firsthpr, pimifchnl); } + json_object_free(json); /* Dump the generated table. */ table = ttable_dump(tt, "\n"); diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index e03e5a2630..40c4c2306d 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -1345,7 +1345,7 @@ ferr_r pim_if_igmp_join_add(struct interface *ifp, struct in_addr group_addr, (void)igmp_join_new(ifp, group_addr, source_addr); - if (PIM_DEBUG_IGMP_EVENTS) { + if (PIM_DEBUG_GM_EVENTS) { char group_str[INET_ADDRSTRLEN]; char source_str[INET_ADDRSTRLEN]; pim_inet4_dump("<grp?>", group_addr, group_str, diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c index fdc56fd3f3..08ac0cb5c2 100644 --- a/pimd/pim_igmp.c +++ b/pimd/pim_igmp.c @@ -501,14 +501,14 @@ static int igmp_recv_query(struct gm_sock *igmp, int query_version, } if (!pim_if_connected_to_source(ifp, from)) { - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug("Recv IGMP query on interface: %s from a non-connected source: %s", ifp->name, from_str); return 0; } if (if_address_is_local(&from, AF_INET, ifp->vrf->vrf_id)) { - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug("Recv IGMP query on interface: %s from ourself %s", ifp->name, from_str); return 0; @@ -554,7 +554,7 @@ static int igmp_recv_query(struct gm_sock *igmp, int query_version, return 0; } - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { char 
group_str[INET_ADDRSTRLEN]; pim_inet4_dump("<group?>", group_addr, group_str, sizeof(group_str)); @@ -750,7 +750,7 @@ int pim_igmp_packet(struct gm_sock *igmp, char *buf, size_t len) pim_inet4_dump("<src?>", ip_hdr->ip_src, from_str, sizeof(from_str)); pim_inet4_dump("<dst?>", ip_hdr->ip_dst, to_str, sizeof(to_str)); - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Recv IGMP packet from %s to %s on %s: size=%zu ttl=%d msg_type=%d msg_size=%d", from_str, to_str, igmp->interface->name, len, ip_hdr->ip_ttl, @@ -1362,7 +1362,7 @@ void igmp_group_timer_on(struct gm_group *group, long interval_msec, { group_timer_off(group); - if (PIM_DEBUG_IGMP_EVENTS) { + if (PIM_DEBUG_GM_EVENTS) { char group_str[INET_ADDRSTRLEN]; pim_inet4_dump("<group?>", group->group_addr, group_str, sizeof(group_str)); diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c index 8d0925cb56..6e569e2803 100644 --- a/pimd/pim_igmpv2.c +++ b/pimd/pim_igmpv2.c @@ -68,7 +68,7 @@ void igmp_v2_send_query(struct gm_group *group, int fd, const char *ifname, checksum = in_cksum(query_buf, msg_size); *(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum; - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { char dst_str[INET_ADDRSTRLEN]; char group_str[INET_ADDRSTRLEN]; pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str)); @@ -121,7 +121,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from, return 0; if (igmp_msg_len != IGMP_V12_MSG_SIZE) { - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug( "Recv IGMPv2 REPORT from %s on %s: size=%d other than correct=%d", from_str, ifp->name, igmp_msg_len, @@ -140,7 +140,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from, memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr)); - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { pim_inet4_dump("<dst?>", group_addr, group_str, sizeof(group_str)); zlog_debug("Recv IGMPv2 REPORT from %s on %s for %s", from_str, @@ 
-155,7 +155,7 @@ int igmp_v2_recv_report(struct gm_sock *igmp, struct in_addr from, * the SSM range. */ if (pim_is_grp_ssm(pim_ifp->pim, group_addr)) { - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Ignoring IGMPv2 group record %pI4 from %s on %s exclude mode in SSM range", &group_addr.s_addr, from_str, ifp->name); @@ -196,7 +196,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr, return 0; if (igmp_msg_len != IGMP_V12_MSG_SIZE) { - if (PIM_DEBUG_IGMP_PACKETS) + if (PIM_DEBUG_GM_PACKETS) zlog_debug( "Recv IGMPv2 LEAVE from %s on %s: size=%d other than correct=%d", from_str, ifp->name, igmp_msg_len, @@ -213,7 +213,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr, memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr)); - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { pim_inet4_dump("<dst?>", group_addr, group_str, sizeof(group_str)); zlog_debug("Recv IGMPv2 LEAVE from %s on %s for %s", from_str, @@ -237,7 +237,7 @@ int igmp_v2_recv_leave(struct gm_sock *igmp, struct ip *ip_hdr, */ if ((ntohl(ip_hdr->ip_dst.s_addr) != INADDR_ALLRTRS_GROUP) && (ip_hdr->ip_dst.s_addr != group_addr.s_addr)) { - if (PIM_DEBUG_IGMP_EVENTS) + if (PIM_DEBUG_GM_EVENTS) zlog_debug( "IGMPv2 Leave message is ignored since received on address other than ALL-ROUTERS or Group-address"); return -1; diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c index 6ed5c501b2..c6d1239fba 100644 --- a/pimd/pim_igmpv3.c +++ b/pimd/pim_igmpv3.c @@ -209,7 +209,7 @@ static void igmp_source_timer_on(struct gm_group *group, source_timer_off(group, source); struct pim_interface *pim_ifp = group->interface->info; - if (PIM_DEBUG_IGMP_EVENTS) { + if (PIM_DEBUG_GM_EVENTS) { char group_str[INET_ADDRSTRLEN]; char source_str[INET_ADDRSTRLEN]; pim_inet4_dump("<group?>", group->group_addr, group_str, @@ -1622,7 +1622,7 @@ void igmp_v3_send_query(struct gm_group *group, int fd, const char *ifname, checksum = in_cksum(query_buf, msg_size); 
*(uint16_t *)(query_buf + IGMP_CHECKSUM_OFFSET) = checksum; - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { char dst_str[INET_ADDRSTRLEN]; char group_str[INET_ADDRSTRLEN]; pim_inet4_dump("<dst?>", dst_addr, dst_str, sizeof(dst_str)); @@ -1835,7 +1835,7 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str, /* determine filtering status for group */ if (pim_is_group_filtered(pim_ifp, &grp)) { - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Filtering IGMPv3 group record %pI4 from %s on %s per prefix-list %s", &grp.s_addr, from_str, ifp->name, @@ -1852,7 +1852,7 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str, grp_addr.s_addr = ntohl(grp.s_addr); if (pim_is_group_224_0_0_0_24(grp_addr)) { - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Ignoring IGMPv3 group record %pI4 from %s on %s group range falls in 224.0.0.0/24", &grp.s_addr, from_str, ifp->name); @@ -1871,7 +1871,7 @@ static bool igmp_pkt_grp_addr_ok(struct interface *ifp, const char *from_str, switch (rec_type) { case IGMP_GRP_REC_TYPE_MODE_IS_EXCLUDE: case IGMP_GRP_REC_TYPE_CHANGE_TO_EXCLUDE_MODE: - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Ignoring IGMPv3 group record %pI4 from %s on %s exclude mode in SSM range", &grp.s_addr, from_str, ifp->name); @@ -1930,7 +1930,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from, return -1; } - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Recv IGMP report v3 from %s on %s: size=%d groups=%d", from_str, ifp->name, igmp_msg_len, num_groups); @@ -1967,7 +1967,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from, group_record + IGMP_V3_GROUP_RECORD_GROUP_OFFSET, sizeof(struct in_addr)); - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( " Recv IGMP report v3 from %s on %s: record=%d type=%d auxdatalen=%d sources=%d group=%pI4", 
from_str, ifp->name, i, rec_type, @@ -1988,7 +1988,7 @@ int igmp_v3_recv_report(struct gm_sock *igmp, struct in_addr from, return -1; } - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { char src_str[200]; if (!inet_ntop(AF_INET, src, src_str, diff --git a/pimd/pim_main.c b/pimd/pim_main.c index 92c34f51a1..c168c0b098 100644 --- a/pimd/pim_main.c +++ b/pimd/pim_main.c @@ -153,9 +153,9 @@ int main(int argc, char **argv, char **envp) PIM_DO_DEBUG_PIM_EVENTS; PIM_DO_DEBUG_PIM_PACKETS; PIM_DO_DEBUG_PIM_TRACE; - PIM_DO_DEBUG_IGMP_EVENTS; - PIM_DO_DEBUG_IGMP_PACKETS; - PIM_DO_DEBUG_IGMP_TRACE; + PIM_DO_DEBUG_GM_EVENTS; + PIM_DO_DEBUG_GM_PACKETS; + PIM_DO_DEBUG_GM_TRACE; PIM_DO_DEBUG_ZEBRA; #endif diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c index 8f1e6184d0..220706945d 100644 --- a/pimd/pim_mroute.c +++ b/pimd/pim_mroute.c @@ -635,7 +635,7 @@ static int process_igmp_packet(struct pim_instance *pim, const char *buf, connected_src = pim_if_connected_to_source(ifp, ip_hdr->ip_src); if (!connected_src) { - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "Recv IGMP packet on interface: %s from a non-connected source: %pI4", ifp->name, &ip_hdr->ip_src); @@ -647,7 +647,7 @@ static int process_igmp_packet(struct pim_instance *pim, const char *buf, ifaddr = connected_src->u.prefix4; igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr); - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "%s(%s): igmp kernel upcall on %s(%p) for %pI4 -> %pI4", __func__, pim->vrf->name, ifp->name, igmp, @@ -655,7 +655,7 @@ static int process_igmp_packet(struct pim_instance *pim, const char *buf, } if (igmp) pim_igmp_packet(igmp, (char *)buf, buf_size); - else if (PIM_DEBUG_IGMP_PACKETS) { + else if (PIM_DEBUG_GM_PACKETS) { zlog_debug( "No IGMP socket on interface: %s with connected source: %pFX", ifp->name, connected_src); diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 72b16a5f49..aaad56e543 100644 --- 
a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -454,14 +454,17 @@ static void change_query_interval(struct pim_interface *pim_ifp, } #endif -#if PIM_IPV == 4 -static void change_query_max_response_time(struct pim_interface *pim_ifp, - int query_max_response_time_dsec) +static void change_query_max_response_time(struct interface *ifp, + int query_max_response_time_dsec) { +#if PIM_IPV == 4 struct listnode *sock_node; struct gm_sock *igmp; struct listnode *grp_node; struct gm_group *grp; +#endif + + struct pim_interface *pim_ifp = ifp->info; if (pim_ifp->gm_query_max_response_time_dsec == query_max_response_time_dsec) @@ -469,6 +472,9 @@ static void change_query_max_response_time(struct pim_interface *pim_ifp, pim_ifp->gm_query_max_response_time_dsec = query_max_response_time_dsec; +#if PIM_IPV == 6 + gm_ifp_update(ifp); +#else /* * Below we modify socket/group/source timers in order to quickly * reflect the change. Otherwise, those timers would args->eventually @@ -501,8 +507,8 @@ static void change_query_max_response_time(struct pim_interface *pim_ifp, igmp_source_reset_gmi(grp, src); } } +#endif /* PIM_IPV == 4 */ } -#endif int routing_control_plane_protocols_name_validate( struct nb_cb_create_args *args) @@ -2797,7 +2803,6 @@ int lib_interface_gmp_address_family_query_interval_modify( int lib_interface_gmp_address_family_query_max_response_time_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct interface *ifp; int query_max_response_time_dsec; @@ -2810,13 +2815,9 @@ int lib_interface_gmp_address_family_query_max_response_time_modify( ifp = nb_running_get_entry(args->dnode, NULL, true); query_max_response_time_dsec = yang_dnode_get_uint16(args->dnode, NULL); - change_query_max_response_time(ifp->info, - query_max_response_time_dsec); + change_query_max_response_time(ifp, + query_max_response_time_dsec); } -#else - /* TBD Depends on MLD data structure changes */ -#endif - return NB_OK; } @@ -2827,7 +2828,6 @@ int 
lib_interface_gmp_address_family_query_max_response_time_modify( int lib_interface_gmp_address_family_last_member_query_interval_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct interface *ifp; struct pim_interface *pim_ifp; int last_member_query_interval; @@ -2847,9 +2847,6 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify( break; } -#else - /* TBD Depends on MLD data structure changes */ -#endif return NB_OK; } @@ -2860,7 +2857,6 @@ int lib_interface_gmp_address_family_last_member_query_interval_modify( int lib_interface_gmp_address_family_robustness_variable_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct interface *ifp; struct pim_interface *pim_ifp; int last_member_query_count; @@ -2879,9 +2875,6 @@ int lib_interface_gmp_address_family_robustness_variable_modify( break; } -#else - /* TBD Depends on MLD data structure changes */ -#endif return NB_OK; } diff --git a/pimd/pim_neighbor.c b/pimd/pim_neighbor.c index 6d6dbb6465..7726ac00b0 100644 --- a/pimd/pim_neighbor.c +++ b/pimd/pim_neighbor.c @@ -441,15 +441,6 @@ struct pim_neighbor *pim_neighbor_find(struct interface *ifp, return NULL; } -struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp, - const struct prefix *src_prefix) -{ - pim_addr addr; - - addr = pim_addr_from_prefix(src_prefix); - return pim_neighbor_find(ifp, addr); -} - /* * Find the *one* interface out * this interface. 
If more than diff --git a/pimd/pim_neighbor.h b/pimd/pim_neighbor.h index 2673d22480..a2a2df9e04 100644 --- a/pimd/pim_neighbor.h +++ b/pimd/pim_neighbor.h @@ -52,8 +52,6 @@ void pim_neighbor_timer_reset(struct pim_neighbor *neigh, uint16_t holdtime); void pim_neighbor_free(struct pim_neighbor *neigh); struct pim_neighbor *pim_neighbor_find(struct interface *ifp, pim_addr source_addr); -struct pim_neighbor *pim_neighbor_find_prefix(struct interface *ifp, - const struct prefix *src_prefix); struct pim_neighbor *pim_neighbor_find_by_secondary(struct interface *ifp, struct prefix *src); struct pim_neighbor *pim_neighbor_find_if(struct interface *ifp); diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c index b5a055c6aa..4b91bf07d9 100644 --- a/pimd/pim_sock.c +++ b/pimd/pim_sock.c @@ -185,7 +185,7 @@ int pim_reg_sock(void) long flags; frr_with_privs (&pimd_privs) { - fd = socket(PIM_AF, SOCK_RAW, IPPROTO_RAW); + fd = socket(PIM_AF, SOCK_RAW, PIM_PROTO_REG); } if (fd < 0) { diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c index cfbd436981..544f926625 100644 --- a/pimd/pim_vty.c +++ b/pimd/pim_vty.c @@ -58,16 +58,16 @@ int pim_debug_config_write(struct vty *vty) vty_out(vty, "debug msdp internal\n"); ++writes; } - if (PIM_DEBUG_IGMP_EVENTS) { + if (PIM_DEBUG_GM_EVENTS) { vty_out(vty, "debug igmp events\n"); ++writes; } - if (PIM_DEBUG_IGMP_PACKETS) { + if (PIM_DEBUG_GM_PACKETS) { vty_out(vty, "debug igmp packets\n"); ++writes; } /* PIM_DEBUG_IGMP_TRACE catches _DETAIL too */ - if (router->debugs & PIM_MASK_IGMP_TRACE) { + if (router->debugs & PIM_MASK_GM_TRACE) { vty_out(vty, "debug igmp trace\n"); ++writes; } @@ -378,9 +378,29 @@ static int gm_config_write(struct vty *vty, int writes, if (pim_ifp->mld_version != MLD_DEFAULT_VERSION) vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version); + + /* IF ipv6 mld query-max-response-time */ + if (pim_ifp->gm_query_max_response_time_dsec != + IGMP_QUERY_MAX_RESPONSE_TIME_DSEC) + vty_out(vty, " ipv6 mld query-max-response-time 
%d\n", + pim_ifp->gm_query_max_response_time_dsec); + if (pim_ifp->gm_default_query_interval != IGMP_GENERAL_QUERY_INTERVAL) vty_out(vty, " ipv6 mld query-interval %d\n", pim_ifp->gm_default_query_interval); + + /* IF ipv6 mld last-member_query-count */ + if (pim_ifp->gm_last_member_query_count != + IGMP_DEFAULT_ROBUSTNESS_VARIABLE) + vty_out(vty, " ipv6 mld last-member-query-count %d\n", + pim_ifp->gm_last_member_query_count); + + /* IF ipv6 mld last-member_query-interval */ + if (pim_ifp->gm_specific_query_max_response_time_dsec != + IGMP_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC) + vty_out(vty, " ipv6 mld last-member-query-interval %d\n", + pim_ifp->gm_specific_query_max_response_time_dsec); + return 0; } #endif diff --git a/pimd/pimd.h b/pimd/pimd.h index 9ffa075d2a..d71b1b697a 100644 --- a/pimd/pimd.h +++ b/pimd/pimd.h @@ -90,10 +90,10 @@ #define PIM_MASK_PIM_PACKETDUMP_RECV (1 << 4) #define PIM_MASK_PIM_TRACE (1 << 5) #define PIM_MASK_PIM_TRACE_DETAIL (1 << 6) -#define PIM_MASK_IGMP_EVENTS (1 << 7) -#define PIM_MASK_IGMP_PACKETS (1 << 8) -#define PIM_MASK_IGMP_TRACE (1 << 9) -#define PIM_MASK_IGMP_TRACE_DETAIL (1 << 10) +#define PIM_MASK_GM_EVENTS (1 << 7) +#define PIM_MASK_GM_PACKETS (1 << 8) +#define PIM_MASK_GM_TRACE (1 << 9) +#define PIM_MASK_GM_TRACE_DETAIL (1 << 10) #define PIM_MASK_ZEBRA (1 << 11) #define PIM_MASK_SSMPINGD (1 << 12) #define PIM_MASK_MROUTE (1 << 13) @@ -160,12 +160,11 @@ extern uint8_t qpim_ecmp_rebalance_enable; (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_PIM_TRACE_DETAIL)) #define PIM_DEBUG_PIM_TRACE_DETAIL \ (router->debugs & PIM_MASK_PIM_TRACE_DETAIL) -#define PIM_DEBUG_IGMP_EVENTS (router->debugs & PIM_MASK_IGMP_EVENTS) -#define PIM_DEBUG_IGMP_PACKETS (router->debugs & PIM_MASK_IGMP_PACKETS) +#define PIM_DEBUG_GM_EVENTS (router->debugs & PIM_MASK_GM_EVENTS) +#define PIM_DEBUG_GM_PACKETS (router->debugs & PIM_MASK_GM_PACKETS) #define PIM_DEBUG_IGMP_TRACE \ - (router->debugs & (PIM_MASK_IGMP_TRACE | PIM_MASK_IGMP_TRACE_DETAIL)) 
-#define PIM_DEBUG_IGMP_TRACE_DETAIL \ - (router->debugs & PIM_MASK_IGMP_TRACE_DETAIL) + (router->debugs & (PIM_MASK_GM_TRACE | PIM_MASK_GM_TRACE_DETAIL)) +#define PIM_DEBUG_IGMP_TRACE_DETAIL (router->debugs & PIM_MASK_GM_TRACE_DETAIL) #define PIM_DEBUG_ZEBRA (router->debugs & PIM_MASK_ZEBRA) #define PIM_DEBUG_MLAG (router->debugs & PIM_MASK_MLAG) #define PIM_DEBUG_SSMPINGD (router->debugs & PIM_MASK_SSMPINGD) @@ -187,15 +186,13 @@ extern uint8_t qpim_ecmp_rebalance_enable; #define PIM_DEBUG_BSM (router->debugs & PIM_MASK_BSM_PROC) #define PIM_DEBUG_EVENTS \ - (router->debugs \ - & (PIM_MASK_PIM_EVENTS | PIM_MASK_IGMP_EVENTS \ - | PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC)) + (router->debugs & (PIM_MASK_PIM_EVENTS | PIM_MASK_GM_EVENTS | \ + PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC)) #define PIM_DEBUG_PACKETS \ - (router->debugs \ - & (PIM_MASK_PIM_PACKETS | PIM_MASK_IGMP_PACKETS \ - | PIM_MASK_MSDP_PACKETS)) + (router->debugs & \ + (PIM_MASK_PIM_PACKETS | PIM_MASK_GM_PACKETS | PIM_MASK_MSDP_PACKETS)) #define PIM_DEBUG_TRACE \ - (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_IGMP_TRACE)) + (router->debugs & (PIM_MASK_PIM_TRACE | PIM_MASK_GM_TRACE)) #define PIM_DO_DEBUG_PIM_EVENTS (router->debugs |= PIM_MASK_PIM_EVENTS) #define PIM_DO_DEBUG_PIM_PACKETS (router->debugs |= PIM_MASK_PIM_PACKETS) @@ -206,11 +203,11 @@ extern uint8_t qpim_ecmp_rebalance_enable; #define PIM_DO_DEBUG_PIM_TRACE (router->debugs |= PIM_MASK_PIM_TRACE) #define PIM_DO_DEBUG_PIM_TRACE_DETAIL \ (router->debugs |= PIM_MASK_PIM_TRACE_DETAIL) -#define PIM_DO_DEBUG_IGMP_EVENTS (router->debugs |= PIM_MASK_IGMP_EVENTS) -#define PIM_DO_DEBUG_IGMP_PACKETS (router->debugs |= PIM_MASK_IGMP_PACKETS) -#define PIM_DO_DEBUG_IGMP_TRACE (router->debugs |= PIM_MASK_IGMP_TRACE) -#define PIM_DO_DEBUG_IGMP_TRACE_DETAIL \ - (router->debugs |= PIM_MASK_IGMP_TRACE_DETAIL) +#define PIM_DO_DEBUG_GM_EVENTS (router->debugs |= PIM_MASK_GM_EVENTS) +#define PIM_DO_DEBUG_GM_PACKETS (router->debugs |= PIM_MASK_GM_PACKETS) 
+#define PIM_DO_DEBUG_GM_TRACE (router->debugs |= PIM_MASK_GM_TRACE) +#define PIM_DO_DEBUG_GM_TRACE_DETAIL \ + (router->debugs |= PIM_MASK_GM_TRACE_DETAIL) #define PIM_DO_DEBUG_ZEBRA (router->debugs |= PIM_MASK_ZEBRA) #define PIM_DO_DEBUG_MLAG (router->debugs |= PIM_MASK_MLAG) #define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD) @@ -239,11 +236,11 @@ extern uint8_t qpim_ecmp_rebalance_enable; #define PIM_DONT_DEBUG_PIM_TRACE (router->debugs &= ~PIM_MASK_PIM_TRACE) #define PIM_DONT_DEBUG_PIM_TRACE_DETAIL \ (router->debugs &= ~PIM_MASK_PIM_TRACE_DETAIL) -#define PIM_DONT_DEBUG_IGMP_EVENTS (router->debugs &= ~PIM_MASK_IGMP_EVENTS) -#define PIM_DONT_DEBUG_IGMP_PACKETS (router->debugs &= ~PIM_MASK_IGMP_PACKETS) -#define PIM_DONT_DEBUG_IGMP_TRACE (router->debugs &= ~PIM_MASK_IGMP_TRACE) -#define PIM_DONT_DEBUG_IGMP_TRACE_DETAIL \ - (router->debugs &= ~PIM_MASK_IGMP_TRACE_DETAIL) +#define PIM_DONT_DEBUG_GM_EVENTS (router->debugs &= ~PIM_MASK_GM_EVENTS) +#define PIM_DONT_DEBUG_GM_PACKETS (router->debugs &= ~PIM_MASK_GM_PACKETS) +#define PIM_DONT_DEBUG_GM_TRACE (router->debugs &= ~PIM_MASK_GM_TRACE) +#define PIM_DONT_DEBUG_GM_TRACE_DETAIL \ + (router->debugs &= ~PIM_MASK_GM_TRACE_DETAIL) #define PIM_DONT_DEBUG_ZEBRA (router->debugs &= ~PIM_MASK_ZEBRA) #define PIM_DONT_DEBUG_MLAG (router->debugs &= ~PIM_MASK_MLAG) #define PIM_DONT_DEBUG_SSMPINGD (router->debugs &= ~PIM_MASK_SSMPINGD) diff --git a/ripd/ripd.c b/ripd/ripd.c index c3a9369a06..8a321d9a91 100644 --- a/ripd/ripd.c +++ b/ripd/ripd.c @@ -1082,10 +1082,9 @@ static void rip_auth_md5_set(struct stream *s, struct rip_interface *ri, /* Check packet length. 
*/ if (len < (RIP_HEADER_SIZE + RIP_RTE_SIZE)) { - flog_err( - EC_RIP_PACKET, - "rip_auth_md5_set(): packet length %ld is less than minimum length.", - len); + flog_err(EC_RIP_PACKET, + "%s: packet length %ld is less than minimum length.", + __func__, len); return; } @@ -1451,9 +1450,8 @@ static int rip_send_packet(uint8_t *buf, int size, struct sockaddr_in *to, inet_ntop(AF_INET, &sin.sin_addr, dst, sizeof(dst)); } #undef ADDRESS_SIZE - zlog_debug("rip_send_packet %pI4 > %s (%s)", - &ifc->address->u.prefix4, dst, - ifc->ifp->name); + zlog_debug("%s %pI4 > %s (%s)", __func__, + &ifc->address->u.prefix4, dst, ifc->ifp->name); } if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY)) { @@ -1772,8 +1770,8 @@ static void rip_read(struct thread *t) /* If this packet come from unknown interface, ignore it. */ if (ifp == NULL) { zlog_info( - "rip_read: cannot find interface for packet from %pI4 port %d (VRF %s)", - &from.sin_addr, ntohs(from.sin_port), + "%s: cannot find interface for packet from %pI4 port %d (VRF %s)", + __func__, &from.sin_addr, ntohs(from.sin_port), rip->vrf_name); return; } @@ -1786,8 +1784,8 @@ static void rip_read(struct thread *t) if (ifc == NULL) { zlog_info( - "rip_read: cannot find connected address for packet from %pI4 port %d on interface %s (VRF %s)", - &from.sin_addr, ntohs(from.sin_port), + "%s: cannot find connected address for packet from %pI4 port %d on interface %s (VRF %s)", + __func__, &from.sin_addr, ntohs(from.sin_port), ifp->name, rip->vrf_name); return; } diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c index 2281b3ce26..3853df7cb0 100644 --- a/sharpd/sharp_vty.c +++ b/sharpd/sharp_vty.c @@ -234,6 +234,8 @@ DEFPY (install_routes, memset(&prefix, 0, sizeof(prefix)); memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix)); + nexthop_del_srv6_seg6local(&sg.r.nhop); + nexthop_del_srv6_seg6(&sg.r.nhop); memset(&sg.r.nhop, 0, sizeof(sg.r.nhop)); memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group)); memset(&sg.r.backup_nhop, 0, 
sizeof(sg.r.nhop)); @@ -376,6 +378,8 @@ DEFPY (install_seg6_routes, memset(&prefix, 0, sizeof(prefix)); memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix)); + nexthop_del_srv6_seg6local(&sg.r.nhop); + nexthop_del_srv6_seg6(&sg.r.nhop); memset(&sg.r.nhop, 0, sizeof(sg.r.nhop)); memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group)); memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop)); @@ -467,6 +471,8 @@ DEFPY (install_seg6local_routes, sg.r.repeat = 0; memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix)); + nexthop_del_srv6_seg6local(&sg.r.nhop); + nexthop_del_srv6_seg6(&sg.r.nhop); memset(&sg.r.nhop, 0, sizeof(sg.r.nhop)); memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group)); memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop)); @@ -924,6 +930,11 @@ DEFPY (import_te, return CMD_SUCCESS; } +static void sharp_srv6_locator_chunk_free(struct prefix_ipv6 *chunk) +{ + prefix_ipv6_free((struct prefix_ipv6 **)&chunk); +} + DEFPY (sharp_srv6_manager_get_locator_chunk, sharp_srv6_manager_get_locator_chunk_cmd, "sharp srv6-manager get-locator-chunk NAME$locator_name", @@ -947,6 +958,8 @@ DEFPY (sharp_srv6_manager_get_locator_chunk, loc = XCALLOC(MTYPE_SRV6_LOCATOR, sizeof(struct sharp_srv6_locator)); loc->chunks = list_new(); + loc->chunks->del = + (void (*)(void *))sharp_srv6_locator_chunk_free; snprintf(loc->name, SRV6_LOCNAME_SIZE, "%s", locator_name); listnode_add(sg.srv6_locators, loc); } @@ -1096,6 +1109,7 @@ DEFPY (sharp_srv6_manager_release_locator_chunk, list_delete_all_node(loc->chunks); list_delete(&loc->chunks); listnode_delete(sg.srv6_locators, loc); + XFREE(MTYPE_SRV6_LOCATOR, loc); break; } } diff --git a/tests/topotests/bgp_aggregate_address_matching_med/__init__.py b/tests/topotests/bgp_aggregate_address_matching_med/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/__init__.py diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r1/bgpd.conf 
b/tests/topotests/bgp_aggregate_address_matching_med/r1/bgpd.conf new file mode 100644 index 0000000000..35597602f7 --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/r1/bgpd.conf @@ -0,0 +1,21 @@ +! +router bgp 65001 + no bgp ebgp-requires-policy + neighbor 192.168.1.2 remote-as external + neighbor 192.168.1.2 timers 3 10 + address-family ipv4 unicast + redistribute connected + neighbor 192.168.1.2 route-map r2 out + exit-address-family +! +ip prefix-list p1 seq 5 permit 172.16.255.1/32 +ip prefix-list p1 seq 10 permit 172.16.255.2/32 +ip prefix-list p2 seq 15 permit 172.16.255.3/32 +! +route-map r2 permit 10 + match ip address prefix-list p1 + set metric 300 +route-map r2 permit 20 + match ip address prefix-list p2 + set metric 400 +! diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r1/zebra.conf b/tests/topotests/bgp_aggregate_address_matching_med/r1/zebra.conf new file mode 100644 index 0000000000..685adb3080 --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/r1/zebra.conf @@ -0,0 +1,11 @@ +! +interface lo + ip address 172.16.255.1/32 + ip address 172.16.255.2/32 + ip address 172.16.255.3/32 +! +interface r1-eth0 + ip address 192.168.1.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r2/bgpd.conf b/tests/topotests/bgp_aggregate_address_matching_med/r2/bgpd.conf new file mode 100644 index 0000000000..9bc9a3132f --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/r2/bgpd.conf @@ -0,0 +1,11 @@ +! +router bgp 65002 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as external + neighbor 192.168.1.1 timers 3 10 + neighbor 192.168.2.1 remote-as external + neighbor 192.168.2.1 timers 3 10 + address-family ipv4 unicast + aggregate-address 172.16.255.0/24 summary-only matching-MED-only + exit-address-family +! 
diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r2/zebra.conf b/tests/topotests/bgp_aggregate_address_matching_med/r2/zebra.conf new file mode 100644 index 0000000000..f229954341 --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/r2/zebra.conf @@ -0,0 +1,9 @@ +! +interface r2-eth0 + ip address 192.168.1.2/24 +! +interface r2-eth1 + ip address 192.168.2.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r3/bgpd.conf b/tests/topotests/bgp_aggregate_address_matching_med/r3/bgpd.conf new file mode 100644 index 0000000000..dfb5ac7a3c --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/r3/bgpd.conf @@ -0,0 +1,6 @@ +! +router bgp 65003 + no bgp ebgp-requires-policy + neighbor 192.168.2.2 remote-as external + neighbor 192.168.2.2 timers 3 10 +! diff --git a/tests/topotests/bgp_aggregate_address_matching_med/r3/zebra.conf b/tests/topotests/bgp_aggregate_address_matching_med/r3/zebra.conf new file mode 100644 index 0000000000..11e06d47cc --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/r3/zebra.conf @@ -0,0 +1,6 @@ +! +interface r3-eth0 + ip address 192.168.2.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_aggregate_address_matching_med/test_bgp_aggregate_address_matching_med.py b/tests/topotests/bgp_aggregate_address_matching_med/test_bgp_aggregate_address_matching_med.py new file mode 100644 index 0000000000..edf50dc9e0 --- /dev/null +++ b/tests/topotests/bgp_aggregate_address_matching_med/test_bgp_aggregate_address_matching_med.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. 
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if aggregate-address command works fine when suppressing summary-only +and using matching-MED-only together. +""" + +import os +import sys +import json +import pytest +import functools +from lib.common_config import ( + step, +) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen + +pytestmark = [pytest.mark.bgpd] + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + + +def build_topo(tgen): + for routern in range(1, 5): + tgen.add_router("r{}".format(routern)) + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_aggregate_address_matching_med(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r3 = tgen.gears["r3"] + + def _bgp_converge(): 
+ output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json")) + expected = { + "routes": { + "172.16.255.0/24": None, + "172.16.255.1/32": [{"path": "65002 65001"}], + "172.16.255.2/32": [{"path": "65002 65001"}], + "172.16.255.3/32": [{"path": "65002 65001"}], + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_converge) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "Failed to see unsuppressed routes from R2" + + step("Change MED for 172.16.255.3/32 from 400 to 300") + r1.vtysh_cmd( + """ + configure terminal + route-map r2 permit 20 + set metric 300 + """ + ) + + step("Check if 172.16.255.0/24 aggregated route was created and others suppressed") + + def _bgp_aggregated_summary_only_med_match(): + output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json")) + expected = { + "routes": { + "172.16.255.0/24": [{"path": "65002"}], + "172.16.255.1/32": None, + "172.16.255.2/32": None, + "172.16.255.3/32": None, + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_aggregated_summary_only_med_match) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "Failed to see unsuppressed routes from R2" + + step("Change MED for 172.16.255.3/32 back to 400 from 300") + r1.vtysh_cmd( + """ + configure terminal + route-map r2 permit 20 + set metric 400 + """ + ) + test_func = functools.partial(_bgp_converge) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "Failed to see unsuppressed routes from R2" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_distance_change/bgp_admin_dist.json b/tests/topotests/bgp_distance_change/bgp_admin_dist.json new file mode 100755 index 0000000000..e6a20a6585 --- /dev/null +++ b/tests/topotests/bgp_distance_change/bgp_admin_dist.json @@ -0,0 
+1,402 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:DB8:F::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }, + "static_routes": [ + { + "network": "192.168.22.1/32", + "no_of_ip": 2, + "next_hop": "10.0.0.2" + }, + { + "network": "fc07:1::1/128", + "no_of_ip": 2, + "next_hop": "fd00::2" + }, + { + "network": "192.168.21.1/32", + "no_of_ip": 2, + "next_hop": "blackhole" + }, + { + "network": "fc07:150::1/128", + "no_of_ip": 2, + "next_hop": "blackhole" + } + ] + }, + "r2": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + 
"neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto" + }, + "r5": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + }, + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + }, + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + } + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + } + }, + "r5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto" + } + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + 
"redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }, + "static_routes": [ + { + "network": "192.168.20.1/32", + "no_of_ip": 2, + "next_hop": "blackhole" + }, + { + "network": "fc07:50::1/128", + "no_of_ip": 2, + "next_hop": "blackhole" + }, + { + "network": "192.168.21.1/32", + "no_of_ip": 2, + "next_hop": "blackhole" + }, + { + "network": "fc07:150::1/128", + "no_of_ip": 2, + "next_hop": "blackhole" + } + ] + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json b/tests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json new file mode 100755 index 0000000000..23afa2c911 --- /dev/null +++ b/tests/topotests/bgp_distance_change/bgp_admin_dist_vrf.json @@ -0,0 +1,429 @@ +{ + "address_types": [ + "ipv4", + "ipv6" + ], + "ipv4base": "10.0.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start": { + "ipv4": "10.0.0.0", + "v4mask": 30, + "ipv6": "fd00::", + "v6mask": 64 + }, + "lo_prefix": { + "ipv4": "1.0.", + "v4mask": 32, + "ipv6": "2001:DB8:F::", + "v6mask": 128 + }, + "routers": { + "r1": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "RED" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + } + }, + "vrfs": [ + { + "name": "RED", + "id": "1" + } + ], + "bgp": [{ + "local_as": "100", + "vrf": "RED", + + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": { + "dest_link": { + "r1": {} + } + }, + "r3": { + "dest_link": { + "r1": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }], + "static_routes": [ + { + "network": "192.168.22.1/32", + "no_of_ip": 2, + "next_hop": "10.0.0.2", + "vrf": "RED" + }, + { + "network": "fc07:1::1/128", + "no_of_ip": 2, + "next_hop": "fd00::2", + "vrf": "RED" + }, + { + "network": "192.168.21.1/32", + "no_of_ip": 2, + "next_hop": "blackhole", + "vrf": "RED" + }, + { + "network": "fc07:150::1/128", + "no_of_ip": 2, + "next_hop": "blackhole", + "vrf": "RED" + } + ] + }, + "r2": { + "links": { + "lo": { + "ipv4": 
"auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "RED" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + } + }, + "vrfs": [ + { + "name": "RED", + "id": "1" + } + ], + "bgp": [{ + "local_as": "100", + "vrf": "RED", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": {} + } + }, + "r3": { + "dest_link": { + "r2": {} + } + } + } + } + } + } + }] + }, + "r3": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "RED" + }, + "r1": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + }, + "r2": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + }, + "r4": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + }, + "r5": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + } + }, + "vrfs": [ + { + "name": "RED", + "id": "1" + } + ], + "bgp": [{ + "local_as": "100", + "vrf": "RED", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + }, + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": {} + } + }, + "r2": { + "dest_link": { + "r3": {} + } + }, + "r4": { + "dest_link": { + "r3": {} + } + }, + "r5": { + "dest_link": { + "r3": {} + } + } + } + } + } + } + }] + }, + "r4": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "RED" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + } + }, + "vrfs": [ + { + "name": "RED", + "id": "1" + } + ], + "bgp": [{ + "local_as": "200", + "vrf": "RED", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": 
{ + "dest_link": { + "r4": {} + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r4": {} + } + } + } + } + } + } + }] + }, + "r5": { + "links": { + "lo": { + "ipv4": "auto", + "ipv6": "auto", + "type": "loopback", + "vrf": "RED" + }, + "r3": { + "ipv4": "auto", + "ipv6": "auto", + "vrf": "RED" + } + }, + "vrfs": [ + { + "name": "RED", + "id": "1" + } + ], + "bgp": [{ + "local_as": "300", + "vrf": "RED", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r3": { + "dest_link": { + "r5": {} + } + } + }, + "redistribute": [ + { + "redist_type": "static" + }, + { + "redist_type": "connected" + } + ] + } + } + } + }], + "static_routes": [ + { + "network": "192.168.20.1/32", + "no_of_ip": 2, + "next_hop": "blackhole", + "vrf": "RED" + }, + { + "network": "fc07:50::1/128", + "no_of_ip": 2, + "next_hop": "blackhole", + "vrf": "RED" + }, + { + "network": "192.168.21.1/32", + "no_of_ip": 2, + "next_hop": "blackhole", + "vrf": "RED" + }, + { + "network": "fc07:150::1/128", + "no_of_ip": 2, + "next_hop": "blackhole", + "vrf": "RED" + } + ] + } + } +}
\ No newline at end of file diff --git a/tests/topotests/bgp_distance_change/test_bgp_admin_dist.py b/tests/topotests/bgp_distance_change/test_bgp_admin_dist.py new file mode 100755 index 0000000000..90c3d22240 --- /dev/null +++ b/tests/topotests/bgp_distance_change/test_bgp_admin_dist.py @@ -0,0 +1,1282 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import sys +import time +import pytest +import inspect +import os + + +"""Following tests are covered to test bgp admin distance functionality. +TC_1: + Verify bgp admin distance functionality when static route is + configured same as ebgp learnt route + +TC_2: + Verify ebgp admin distance functionality with ECMP. + +TC_3: + Verify ibgp admin distance functionality when static route is + configured same as bgp learnt route. +TC_4: + Verify ibgp admin distance functionality with ECMP. + +TC_7: Chaos - Verify bgp admin distance functionality with chaos. 
+""" + +################################# +# TOPOLOGY +################################# +""" + + +-------+ + +--------- | R2 | + | +-------+ + |iBGP | + +-------+ | + | R1 | |iBGP + +-------+ | + | | + | iBGP +-------+ eBGP +-------+ + +---------- | R3 |----------| R4 | + +-------+ +-------+ + | + |eBGP + | + +-------+ + | R5 | + +-------+ + + +""" + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen + +# Required to instantiate the topology builder class. +from lib.common_config import ( + start_topology, + write_test_header, + step, + write_test_footer, + create_static_routes, + verify_rib, + create_route_maps, + create_prefix_lists, + check_address_types, + reset_config_on_routers, + check_router_status, + stop_router, + kill_router_daemons, + start_router_daemons, + start_router, + get_frr_ipv6_linklocal, + verify_fib_routes, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + verify_best_path_as_per_admin_distance, + clear_bgp, +) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_config_from_json +from lib.topolog import logger + +# Global variables +topo = None +bgp_convergence = False +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + +NETWORK = { + "ipv4": [ + "192.168.20.1/32", + "192.168.20.2/32", + "192.168.21.1/32", + "192.168.21.2/32", + "192.168.22.1/32", + "192.168.22.2/32", + ], + "ipv6": [ + "fc07:50::1/128", + "fc07:50::2/128", + "fc07:150::1/128", + "fc07:150::2/128", + "fc07:1::1/128", + "fc07:1::2/128", + ], +} + +ADDR_TYPES = check_address_types() + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: 
module name + """ + + global topo + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + json_file = "{}/bgp_admin_dist.json".format(CWD) + tgen = Topogen(json_file, mod.__name__) + global topo + topo = tgen.json_topo + + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global bgp_convergence + global ADDR_TYPES + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """teardown_module. + + Teardown the pytest environment. + * `mod`: module name + """ + logger.info("Running teardown_module to delete topology") + tgen = get_topogen() + + # Stop topology and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# Tests starting +##################################################### +def test_bgp_admin_distance_ebgp_ecmp_p0(): + """ + TC: 2 + Verify ebgp admin distance functionality with ECMP. 
+ """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipping test case because of BGP Convergence failure at setup") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step("Configure static route in R4 and R5, redistribute in bgp") + + for addr_type in ADDR_TYPES: + + input_dict = { + "r4": { + "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + + input_dict = { + "r5": { + "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that route is learnt in DUT via ebgp") + + # Verifying RIB routes + protocol = "bgp" + input_dict = topo["routers"] + dut = "r3" + nhop = {"ipv4": [], "ipv6": []} + nhop["ipv4"].append(topo["routers"]["r4"]["links"]["r3"]["ipv4"].split("/")[0]) + nhop["ipv4"].append(topo["routers"]["r5"]["links"]["r3"]["ipv4"].split("/")[0]) + nhop["ipv6"].append(get_frr_ipv6_linklocal(tgen, "r4", "r3-r4-eth1")) + nhop["ipv6"].append(get_frr_ipv6_linklocal(tgen, "r5", "r1-r3-eth1")) + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure the static route in R3 (Dut).") + + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [ + 
{"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that static route is selected as best route in zebra.") + + # Verifying RIB routes + protocol = "static" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step(" Configure the admin distance of 254 to static route in R3.") + + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type][0], + "next_hop": "Null0", + "admin_distance": 254, + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that bgp routes are selected as best routes in zebra.") + protocol = "bgp" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 254, "ibgp": 254, "local": 254} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 254, "ibgp": 254, "local": 254} + } + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that bgp routes are selected as best routes in zebra.") + # 
Verifying RIB routes + protocol = "bgp" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure bgp admin distance 10 with CLI in dut.") + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": {"distance": {"ebgp": 10, "ibgp": 254, "local": 254}} + }, + "ipv6": { + "unicast": {"distance": {"ebgp": 10, "ibgp": 254, "local": 254}} + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify ebgp routes have admin distance of 10 in dut.") + + protocol = "bgp" + input_dict = topo["routers"] + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, admin_distance=10 + ) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step( + "Configure route map with weight as 200 and apply to one of the " + "neighbor (R4 neighbor)." 
+ ) + + # Create Prefix list + input_dict_2 = { + "r3": { + "prefix_lists": { + "ipv4": { + "pf_ls_1": [ + { + "seqid": 10, + "network": NETWORK["ipv4"][0], + "le": "32", + "action": "permit", + } + ] + }, + "ipv6": { + "pf_ls_1_ipv6": [ + { + "seqid": 100, + "network": NETWORK["ipv6"][0], + "le": "128", + "action": "permit", + } + ] + }, + } + } + } + result = create_prefix_lists(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Create route map + input_dict_3 = { + "r3": { + "route_maps": { + "RMAP_WEIGHT": [ + { + "action": "permit", + "match": {"ipv4": {"prefix_lists": "pf_ls_1"}}, + "set": {"weight": 200}, + }, + { + "action": "permit", + "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}}, + "set": {"weight": 200}, + }, + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + # Configure neighbor for route map + input_dict_4 = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "RMAP_WEIGHT", + "direction": "in", + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "RMAP_WEIGHT", + "direction": "in", + } + ] + } + } + } + } + } + }, + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that bgp route is selected as best on by zebra in r3.") + + protocol = "bgp" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, admin_distance=10 + ) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + 
tc_name, result4 + ) + + step("Static route should not be selected as best route.") + protocol = "static" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_fib_routes( + tgen, addr_type, dut, input_dict, protocol=protocol, expected=False + ) + assert ( + result4 is not True + ), "Testcase {} : Failed. Wrong route is selected as best route.\n Error: {}".format( + tc_name, result4 + ) + + step("Reconfigure the static route without admin distance") + + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type][0], + "next_hop": "Null0", + "admin_distance": 254, + "delete": True, + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that static route is installed as best route.") + protocol = "static" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib( + tgen, addr_type, dut, input_dict, protocol=protocol, fib=True + ) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Unconfigure the static route in R3.") + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type][0], + "next_hop": "Null0", + "delete": True, + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: 
{}".format( + tc_name, result + ) + + step("Verify that bgp route is selected as best on by zebra in r3.") + + protocol = "bgp" + dut = "r3" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Un configure the route map on R3.") + + # Configure neighbor for route map + input_dict_4 = { + "r3": { + "bgp": { + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "RMAP_WEIGHT", + "direction": "in", + "delete": True, + } + ] + } + } + } + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r4": { + "dest_link": { + "r3": { + "route_maps": [ + { + "name": "RMAP_WEIGHT", + "direction": "in", + "delete": True, + } + ] + } + } + } + } + } + }, + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict_4) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("verify bgp routes installed in zebra.") + + # Verifying RIB routes + protocol = "bgp" + input_dict = topo["routers"] + dut = "r3" + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type][0], "next_hop": "Null0"} + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +def test_bgp_admin_distance_ibgp_p0(): + """ + TC: 3 + Verify bgp admin distance functionality when static route is + configured same as ibgp learnt route + """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipping test case because of BGP Convergence failure at setup") + + # test case 
name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step("Configure bgp admin distance 200 with CLI in dut.") + + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have admin distance of 200 in dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": "192.168.22.1/32", + "admin_distance": 200, + }, + { + "network": "192.168.22.2/32", + "admin_distance": 200, + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": "fc07:1::1/128", + "admin_distance": 200, + }, + { + "network": "fc07:1::2/128", + "admin_distance": 200, + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Modify the admin distance value to 150.") + + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 150, "ibgp": 150, "local": 150} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 150, "ibgp": 150, "local": 150} + } + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have admin distance of 150 in 
dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": "192.168.22.1/32", + "admin_distance": 150, + }, + { + "network": "192.168.22.2/32", + "admin_distance": 150, + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": "fc07:1::1/128", + "admin_distance": 150, + }, + { + "network": "fc07:1::2/128", + "admin_distance": 150, + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Un configure the admin distance value on DUT") + + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": { + "ebgp": 150, + "ibgp": 150, + "local": 150, + "delete": True, + } + } + }, + "ipv6": { + "unicast": { + "distance": { + "ebgp": 150, + "ibgp": 150, + "local": 150, + "delete": True, + } + } + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have default admin distance in dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": "192.168.22.1/32", + "admin_distance": 20, + }, + { + "network": "192.168.22.2/32", + "admin_distance": 20, + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": "fc07:1::1/128", + "admin_distance": 20, + }, + { + "network": "fc07:1::2/128", + "admin_distance": 20, + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, 
result + ) + + step( + "Learn the same route via ebgp and ibgp peer. Configure admin " + "distance of 200 in DUT for both ebgp and ibgp peer. " + ) + + step("Verify that ebgp route is preferred over ibgp.") + + # Verifying RIB routes + protocol = "bgp" + input_dict = topo["routers"] + + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure static route Without any admin distance") + + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that zebra selects static route.") + protocol = "static" + + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [{"network": NETWORK[addr_type], "next_hop": "Null0"}] + } + } + + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure static route with admin distance of 253") + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that zebra selects bgp route.") + protocol = "bgp" + + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure admin distance of 254 in bgp for route.") + + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 
100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 254, "ibgp": 254, "local": 254} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 254, "ibgp": 254, "local": 254} + } + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that zebra selects static route.") + protocol = "static" + + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Delete the static route.") + for addr_type in ADDR_TYPES: + + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + "delete": True, + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that zebra selects bgp route.") + protocol = "bgp" + + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +def test_bgp_admin_distance_chaos_p2(): + """ + TC: 7 + Chaos - Verify bgp admin distance functionality with chaos. 
+ """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipping test case because of BGP Convergence failure at setup") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step("Configure bgp admin distance 200 with CLI in dut.") + + input_dict_1 = { + "r3": { + "bgp": { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + }, + } + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have admin distance of 200 in dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv4"][0], + "admin_distance": 200, + }, + { + "network": NETWORK["ipv4"][1], + "admin_distance": 200, + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "admin_distance": 200, + }, + { + "network": NETWORK["ipv6"][1], + "admin_distance": 200, + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Restart frr on R3") + stop_router(tgen, "r3") + start_router(tgen, "r3") + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Verify ebgp and ibgp routes have admin distance of 200 in dut.") + for addr_type 
in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Restart bgpd process on R3") + kill_router_daemons(tgen, "r3", ["bgpd"]) + start_router_daemons(tgen, "r3", ["bgpd"]) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Verify ebgp and ibgp routes have admin distance of 200 in dut.") + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Clear BGP") + for rtr in topo["routers"]: + clear_bgp(tgen, "ipv4", rtr) + clear_bgp(tgen, "ipv6", rtr) + + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, bgp_convergence + ) + + step("Verify that zebra selects bgp route.") + protocol = "bgp" + + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py b/tests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py new file mode 100755 index 0000000000..559dc93aa0 --- /dev/null +++ b/tests/topotests/bgp_distance_change/test_bgp_admin_dist_vrf.py @@ -0,0 +1,900 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by VMware, Inc. ("VMware") +# Used Copyright (c) 2018 by Network Device Education Foundation, +# Inc. ("NetDEF") in this file. 
+# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +import sys +import time +import pytest +import inspect +import os + +"""Following tests are covered to test bgp admin distance functionality. +TC_5: + Verify bgp admin distance functionality when static route is configured + same as bgp learnt route in user vrf. + +TC_6: Verify bgp admin distance functionality with ECMP in user vrf. + +TC_7: + Verify bgp admin distance functionality when routes are + imported between VRFs. +""" + +################################# +# TOPOLOGY +################################# +""" + + +-------+ + +--------- | R2 | + | +-------+ + |iBGP | + +-------+ | + | R1 | |iBGP + +-------+ | + | | + | iBGP +-------+ eBGP +-------+ + +---------- | R3 |----------| R4 | + +-------+ +-------+ + | + |eBGP + | + +-------+ + | R5 | + +-------+ + + +""" + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen + +# Required to instantiate the topology builder class. 
+from lib.common_config import ( + start_topology, + write_test_header, + step, + write_test_footer, + create_static_routes, + verify_rib, + check_address_types, + reset_config_on_routers, + check_router_status, +) +from lib.topolog import logger +from lib.bgp import ( + verify_bgp_convergence, + create_router_bgp, + verify_best_path_as_per_admin_distance, +) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_config_from_json +from lib.topolog import logger + +# Global variables +topo = None +bgp_convergence = False +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + +NETWORK = { + "ipv4": [ + "192.168.20.1/32", + "192.168.20.2/32", + "192.168.21.1/32", + "192.168.21.2/32", + "192.168.22.1/32", + "192.168.22.2/32", + ], + "ipv6": [ + "fc07:50::1/128", + "fc07:50::2/128", + "fc07:150::1/128", + "fc07:150::2/128", + "fc07:1::1/128", + "fc07:1::2/128", + ], +} +ADDR_TYPES = check_address_types() + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + global topo + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + json_file = "{}/bgp_admin_dist_vrf.json".format(CWD) + tgen = Topogen(json_file, mod.__name__) + global topo + topo = tgen.json_topo + + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + # Checking BGP convergence + global bgp_convergence + global ADDR_TYPES + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + # Api call verify whether BGP is converged + bgp_convergence = verify_bgp_convergence(tgen, topo) + assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format( + bgp_convergence + ) + logger.info("Running setup_module() done") + + +def teardown_module(mod): + """teardown_module. + + Teardown the pytest environment. + * `mod`: module name + """ + logger.info("Running teardown_module to delete topology") + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +##################################################### +# Tests starting +##################################################### + + +def test_bgp_admin_distance_ebgp_vrf_p0(): + """ + TC: 5 + Verify bgp admin distance functionality when static route is + configured same as ebgp learnt route + """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipping test case because of BGP Convergence failure at setup") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + + step("Configure bgp admin distance 200 with CLI in dut.") + + input_dict_1 = { + "r3": { + "bgp": [ + { + "vrf": "RED", + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + }, + } + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have admin distance of 200 in dut.") + # Verifying best path + 
dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv4"][0], + "admin_distance": 200, + "vrf": "RED", + }, + { + "network": NETWORK["ipv4"][1], + "admin_distance": 200, + "vrf": "RED", + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "admin_distance": 200, + "vrf": "RED", + }, + { + "network": NETWORK["ipv6"][1], + "admin_distance": 200, + "vrf": "RED", + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED" + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Modify the admin distance value to 150.") + + input_dict_1 = { + "r3": { + "bgp": [ + { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 150, "ibgp": 150, "local": 150} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 150, "ibgp": 150, "local": 150} + } + }, + }, + } + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have admin distance of 150 in dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv4"][0], + "admin_distance": 150, + "vrf": "RED", + }, + { + "network": NETWORK["ipv4"][1], + "admin_distance": 150, + "vrf": "RED", + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "admin_distance": 150, + "vrf": "RED", + }, + { + "network": NETWORK["ipv6"][1], + "admin_distance": 150, + "vrf": "RED", + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED" + ) + 
assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Un configure the admin distance value on DUT") + + input_dict_1 = { + "r3": { + "bgp": [ + { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": { + "ebgp": 150, + "ibgp": 150, + "local": 150, + "delete": True, + } + } + }, + "ipv6": { + "unicast": { + "distance": { + "ebgp": 150, + "ibgp": 150, + "local": 150, + "delete": True, + } + } + }, + }, + } + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have default admin distance in dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + {"network": NETWORK["ipv4"][0], "admin_distance": 20, "vrf": "RED"}, + {"network": NETWORK["ipv4"][1], "admin_distance": 20, "vrf": "RED"}, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + {"network": NETWORK["ipv6"][0], "admin_distance": 20, "vrf": "RED"}, + {"network": NETWORK["ipv6"][1], "admin_distance": 20, "vrf": "RED"}, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], attribute, vrf="RED" + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure static route Without any admin distance") + + for addr_type in ADDR_TYPES: + # Create Static routes + input_dict = { + "r3": { + "static_routes": [ + {"network": NETWORK[addr_type], "next_hop": "Null0", "vrf": "RED"} + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that zebra selects static route.") + protocol = "static" + # dual stack changes + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + 
"static_routes": [ + {"network": NETWORK[addr_type], "next_hop": "Null0", "vrf": "RED"} + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure static route with admin distance of 253") + for addr_type in ADDR_TYPES: + # Create Static routes + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + "vrf": "RED", + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that zebra selects bgp route.") + protocol = "bgp" + + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + "vrf": "RED", + } + ] + } + } + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure admin distance of 254 in bgp for route .") + + input_dict_1 = { + "r3": { + "bgp": [ + { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 254, "ibgp": 254, "local": 254} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 254, "ibgp": 254, "local": 254} + } + }, + }, + } + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that zebra selects static route.") + protocol = "static" + # dual stack changes + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + "vrf": "RED", + } + ] + } + } + + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + 
assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Configure admin distance of 255 in bgp for route in vrf red") + + input_dict_1 = { + "r3": { + "bgp": [ + { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 255, "ibgp": 255, "local": 255} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 255, "ibgp": 255, "local": 255} + } + }, + }, + } + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify that zebra selects static route.") + protocol = "static" + # dual stack changes + for addr_type in ADDR_TYPES: + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + "vrf": "RED", + } + ] + } + } + + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step("Delete the static route.") + for addr_type in ADDR_TYPES: + # Create Static routes + input_dict = { + "r3": { + "static_routes": [ + { + "network": NETWORK[addr_type], + "next_hop": "Null0", + "admin_distance": 253, + "delete": True, + "vrf": "RED", + } + ] + } + } + + result = create_static_routes(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that zebra selects bgp route.") + protocol = "bgp" + # dual stack changes + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +def test_bgp_admin_distance_ebgp_with_imported_rtes_vrf_p0(): + """ + TC: 5 + Verify bgp admin distance functionality when static route is configured + same as bgp learnt route in user vrf. 
+ """ + tgen = get_topogen() + global bgp_convergence + + if bgp_convergence is not True: + pytest.skip("skipping test case because of BGP Convergence failure at setup") + + # test case name + tc_name = inspect.stack()[0][3] + write_test_header(tc_name) + if tgen.routers_have_failure(): + check_router_status(tgen) + + step("Configure base config as per the topology") + reset_config_on_routers(tgen) + step("Configure bgp admin distance 200 with CLI in dut.") + step(" Import route from vrf to default vrf") + input_dict_1 = { + "r3": { + "bgp": [ + { + "vrf": "RED", + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + }, + }, + { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200}, + "import": {"vrf": "RED"}, + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200}, + "import": { + "vrf": "RED", + }, + } + }, + }, + }, + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify bgp routes have admin distance of 200 in dut.") + # Verifying best path + dut = "r3" + attribute = "admin_distance" + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv4"][0], + "admin_distance": 200, + "vrf": "RED", + }, + { + "network": NETWORK["ipv4"][1], + "admin_distance": 200, + "vrf": "RED", + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "admin_distance": 200, + "vrf": "RED", + }, + { + "network": NETWORK["ipv6"][1], + "admin_distance": 200, + "vrf": "RED", + }, + ] + } + }, + } + + for addr_type in ADDR_TYPES: + result = verify_best_path_as_per_admin_distance( + tgen, addr_type, dut, input_dict[addr_type], 
attribute, vrf="RED" + ) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step( + "Verify that routes are getting imported without any issues and " + "routes are calculated and installed in rib." + ) + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv4"][0], + "admin_distance": 200, + }, + { + "network": NETWORK["ipv4"][1], + "admin_distance": 200, + }, + ] + } + }, + "ipv6": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "admin_distance": 200, + }, + { + "network": NETWORK["ipv6"][1], + "admin_distance": 200, + }, + ] + } + }, + } + + step("Verify that zebra selects bgp route.") + protocol = "bgp" + # dual stack changes + for addr_type in ADDR_TYPES: + result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol) + assert result4 is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result4 + ) + + step(" Un configure import route vrf red inside default vrf.") + input_dict_1 = { + "r3": { + "bgp": [ + { + "vrf": "RED", + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + }, + }, + { + "local_as": 100, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200}, + "import": {"vrf": "RED", "delete": True}, + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200}, + "import": {"vrf": "RED", "delete": True}, + } + }, + }, + }, + ] + } + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + input_dict = { + "ipv4": { + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv4"][0], + "admin_distance": 200, + }, + { + "network": NETWORK["ipv4"][1], + "admin_distance": 200, + }, + ] + } + }, + "ipv6": 
{ + "r3": { + "static_routes": [ + { + "network": NETWORK["ipv6"][0], + "admin_distance": 200, + }, + { + "network": NETWORK["ipv6"][1], + "admin_distance": 200, + }, + ] + } + }, + } + + step("Verify that route withdrawal happens properly.") + protocol = "bgp" + # dual stack changes + for addr_type in ADDR_TYPES: + result4 = verify_rib( + tgen, + addr_type, + dut, + input_dict[addr_type], + protocol=protocol, + expected=False, + ) + assert ( + result4 is not True + ), "Testcase {} : Failed \n Route is not withdrawn. Error: {}".format( + tc_name, result4 + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_remove_private_as/test_bgp_remove_private_as.py b/tests/topotests/bgp_remove_private_as/test_bgp_remove_private_as.py index 7d52048ebe..1108919e13 100644 --- a/tests/topotests/bgp_remove_private_as/test_bgp_remove_private_as.py +++ b/tests/topotests/bgp_remove_private_as/test_bgp_remove_private_as.py @@ -361,13 +361,6 @@ def test_bgp_remove_private_as(): return True return False - def _get_pfx_path_from_nh(router, prefix, nh): - """Return as-path for a specific route + path.""" - output = json.loads(tgen.gears[router].vtysh_cmd(f"show ip bgp {prefix} json")) - for path in output[prefix]: - if path["nexthops"]["ip"] == nh: - return path["aspath"]["string"] - def _routers_up(tx_rtrs, rx_rtrs): """Ensure all BGP sessions are up and all routes are installed.""" # all sessions go through tx_routers, so ensure all their peers are up @@ -408,11 +401,7 @@ def test_bgp_remove_private_as(): for pfx in prefixes: good_path = expected_paths[rtr][remove_type][peer][pfx] real_path = adj_rib_in["receivedRoutes"][pfx]["path"] - msg = ( - f"{rtr} received incorrect AS-Path from {peer} " - f'({p_ip}) for {pfx}. 
remove_type: "{remove_type}"' - ) - assert real_path == good_path, msg + return real_path == good_path ####################### # Begin Test @@ -424,7 +413,11 @@ def test_bgp_remove_private_as(): # test each variation of remove-private-AS for rmv_type in remove_types: _change_remove_type(rmv_type, "add") - _validate_paths(rmv_type) + + test_func = partial(_validate_paths, rmv_type) + _, result = topotest.run_and_expect(test_func, True, count=60, wait=0.5) + assert result == True, "Not all routes have correct AS-Path values!" + # each variation sets a separate peer flag in bgpd. we need to clear # the old flag after each iteration so we only test the flags we expect. _change_remove_type(rmv_type, "del") diff --git a/tests/topotests/bgp_soo/__init__.py b/tests/topotests/bgp_soo/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_soo/__init__.py diff --git a/tests/topotests/bgp_soo/cpe1/bgpd.conf b/tests/topotests/bgp_soo/cpe1/bgpd.conf new file mode 100644 index 0000000000..a8984d4e8b --- /dev/null +++ b/tests/topotests/bgp_soo/cpe1/bgpd.conf @@ -0,0 +1,10 @@ +router bgp 65000 + no bgp ebgp-requires-policy + neighbor 192.168.1.2 remote-as external + neighbor 192.168.1.2 timers 1 3 + neighbor 192.168.1.2 timers connect 1 + neighbor 10.0.0.2 remote-as internal + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp_soo/cpe1/zebra.conf b/tests/topotests/bgp_soo/cpe1/zebra.conf new file mode 100644 index 0000000000..669cb91295 --- /dev/null +++ b/tests/topotests/bgp_soo/cpe1/zebra.conf @@ -0,0 +1,12 @@ +! +interface lo + ip address 172.16.255.1/32 +! +interface cpe1-eth0 + ip address 192.168.1.1/24 +! +interface cpe1-eth1 + ip address 10.0.0.1/24 +! +ip forwarding +! 
diff --git a/tests/topotests/bgp_soo/cpe2/bgpd.conf b/tests/topotests/bgp_soo/cpe2/bgpd.conf new file mode 100644 index 0000000000..19f7a24e2b --- /dev/null +++ b/tests/topotests/bgp_soo/cpe2/bgpd.conf @@ -0,0 +1,10 @@ +router bgp 65000 + no bgp ebgp-requires-policy + neighbor 192.168.2.2 remote-as external + neighbor 192.168.2.2 timers 1 3 + neighbor 192.168.2.2 timers connect 1 + neighbor 10.0.0.1 remote-as internal + address-family ipv4 unicast + redistribute connected + exit-address-family +! diff --git a/tests/topotests/bgp_soo/cpe2/zebra.conf b/tests/topotests/bgp_soo/cpe2/zebra.conf new file mode 100644 index 0000000000..52f36c06e8 --- /dev/null +++ b/tests/topotests/bgp_soo/cpe2/zebra.conf @@ -0,0 +1,9 @@ +! +interface cpe2-eth0 + ip address 192.168.2.1/24 +! +interface cpe2-eth1 + ip address 10.0.0.2/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_soo/pe1/bgpd.conf b/tests/topotests/bgp_soo/pe1/bgpd.conf new file mode 100644 index 0000000000..04a6857c7c --- /dev/null +++ b/tests/topotests/bgp_soo/pe1/bgpd.conf @@ -0,0 +1,27 @@ +router bgp 65001 + bgp router-id 10.10.10.10 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + neighbor 10.10.10.20 remote-as internal + neighbor 10.10.10.20 update-source 10.10.10.10 + address-family ipv4 vpn + neighbor 10.10.10.20 activate + exit-address-family +! +router bgp 65001 vrf RED + bgp router-id 192.168.1.2 + no bgp ebgp-requires-policy + neighbor 192.168.1.1 remote-as external + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 + address-family ipv4 unicast + neighbor 192.168.1.1 as-override + neighbor 192.168.1.1 soo 65000:1 + label vpn export 1111 + rd vpn export 192.168.1.2:2 + rt vpn import 192.168.2.2:2 192.168.1.2:2 + rt vpn export 192.168.1.2:2 + export vpn + import vpn + exit-address-family +! 
diff --git a/tests/topotests/bgp_soo/pe1/ldpd.conf b/tests/topotests/bgp_soo/pe1/ldpd.conf new file mode 100644 index 0000000000..fb40f06fa7 --- /dev/null +++ b/tests/topotests/bgp_soo/pe1/ldpd.conf @@ -0,0 +1,10 @@ +mpls ldp + router-id 10.10.10.10 + ! + address-family ipv4 + discovery transport-address 10.10.10.10 + ! + interface pe1-eth1 + ! + ! +! diff --git a/tests/topotests/bgp_soo/pe1/ospfd.conf b/tests/topotests/bgp_soo/pe1/ospfd.conf new file mode 100644 index 0000000000..34f0899c95 --- /dev/null +++ b/tests/topotests/bgp_soo/pe1/ospfd.conf @@ -0,0 +1,7 @@ +interface pe1-eth1 + ip ospf dead-interval 4 + ip ospf hello-interval 1 +! +router ospf + router-id 10.10.10.10 + network 0.0.0.0/0 area 0 diff --git a/tests/topotests/bgp_soo/pe1/zebra.conf b/tests/topotests/bgp_soo/pe1/zebra.conf new file mode 100644 index 0000000000..cc8ff1983a --- /dev/null +++ b/tests/topotests/bgp_soo/pe1/zebra.conf @@ -0,0 +1,12 @@ +! +interface lo + ip address 10.10.10.10/32 +! +interface pe1-eth0 vrf RED + ip address 192.168.1.2/24 +! +interface pe1-eth1 + ip address 10.0.1.1/24 +! +ip forwarding +! diff --git a/tests/topotests/bgp_soo/pe2/bgpd.conf b/tests/topotests/bgp_soo/pe2/bgpd.conf new file mode 100644 index 0000000000..efebc02f27 --- /dev/null +++ b/tests/topotests/bgp_soo/pe2/bgpd.conf @@ -0,0 +1,31 @@ +router bgp 65001 + bgp router-id 10.10.10.20 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + neighbor 10.10.10.10 remote-as internal + neighbor 10.10.10.10 update-source 10.10.10.20 + address-family ipv4 vpn + neighbor 10.10.10.10 activate + exit-address-family +! 
+router bgp 65001 vrf RED + bgp router-id 192.168.2.2 + no bgp ebgp-requires-policy + neighbor 192.168.2.1 remote-as external + neighbor 192.168.2.1 timers 1 3 + neighbor 192.168.2.1 timers connect 1 + address-family ipv4 unicast + neighbor 192.168.2.1 as-override + neighbor 192.168.2.1 route-map cpe2-in in + label vpn export 2222 + rd vpn export 192.168.2.2:2 + rt vpn import 192.168.2.2:2 192.168.1.2:2 + rt vpn export 192.168.2.2:2 + export vpn + import vpn + exit-address-family +! +! To prefer internal MPLS route over eBGP +route-map cpe2-in permit 10 + set local-preference 50 +exit diff --git a/tests/topotests/bgp_soo/pe2/ldpd.conf b/tests/topotests/bgp_soo/pe2/ldpd.conf new file mode 100644 index 0000000000..e2b5359993 --- /dev/null +++ b/tests/topotests/bgp_soo/pe2/ldpd.conf @@ -0,0 +1,10 @@ +mpls ldp + router-id 10.10.10.20 + ! + address-family ipv4 + discovery transport-address 10.10.10.20 + ! + interface pe2-eth0 + ! + ! +! diff --git a/tests/topotests/bgp_soo/pe2/ospfd.conf b/tests/topotests/bgp_soo/pe2/ospfd.conf new file mode 100644 index 0000000000..4c4b1374d1 --- /dev/null +++ b/tests/topotests/bgp_soo/pe2/ospfd.conf @@ -0,0 +1,7 @@ +interface pe2-eth0 + ip ospf dead-interval 4 + ip ospf hello-interval 1 +! +router ospf + router-id 10.10.10.20 + network 0.0.0.0/0 area 0 diff --git a/tests/topotests/bgp_soo/pe2/zebra.conf b/tests/topotests/bgp_soo/pe2/zebra.conf new file mode 100644 index 0000000000..8049a74601 --- /dev/null +++ b/tests/topotests/bgp_soo/pe2/zebra.conf @@ -0,0 +1,12 @@ +! +interface lo + ip address 10.10.10.20/32 +! +interface pe2-eth1 vrf RED + ip address 192.168.2.2/24 +! +interface pe2-eth0 + ip address 10.0.1.2/24 +! +ip forwarding +! 
diff --git a/tests/topotests/bgp_soo/test_bgp_soo.py b/tests/topotests/bgp_soo/test_bgp_soo.py new file mode 100644 index 0000000000..e3a7334c60 --- /dev/null +++ b/tests/topotests/bgp_soo/test_bgp_soo.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python + +# +# Copyright (c) 2022 by +# Donatas Abraitis <donatas@opensourcerouting.org> +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" +Test if BGP SoO per neighbor works correctly. Routes having SoO +extended community MUST be rejected if the neighbor is configured +with soo (neighbor soo). 
+""" + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.common_config import step + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + tgen.add_router("cpe1") + tgen.add_router("cpe2") + tgen.add_router("pe1") + tgen.add_router("pe2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["cpe1"]) + switch.add_link(tgen.gears["pe1"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["pe1"]) + switch.add_link(tgen.gears["pe2"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["pe2"]) + switch.add_link(tgen.gears["cpe2"]) + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["cpe2"]) + switch.add_link(tgen.gears["cpe1"]) + + +def setup_module(mod): + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + pe1 = tgen.gears["pe1"] + pe2 = tgen.gears["pe2"] + + pe1.run("ip link add RED type vrf table 1001") + pe1.run("ip link set up dev RED") + pe2.run("ip link add RED type vrf table 1001") + pe2.run("ip link set up dev RED") + pe1.run("ip link set pe1-eth0 master RED") + pe2.run("ip link set pe2-eth1 master RED") + + pe1.run("sysctl -w net.ipv4.ip_forward=1") + pe2.run("sysctl -w net.ipv4.ip_forward=1") + pe1.run("sysctl -w net.mpls.conf.pe1-eth0.input=1") + pe2.run("sysctl -w net.mpls.conf.pe2-eth1.input=1") + + router_list = tgen.routers() + + for i, (rname, router) in enumerate(router_list.items(), 1): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_LDP, os.path.join(CWD, 
"{}/ldpd.conf".format(rname)) + ) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_soo(): + tgen = get_topogen() + + pe2 = tgen.gears["pe2"] + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + def _bgp_soo_unconfigured(): + output = json.loads( + pe2.vtysh_cmd( + "show bgp vrf RED ipv4 unicast neighbors 192.168.2.1 advertised-routes json" + ) + ) + expected = {"advertisedRoutes": {"172.16.255.1/32": {"path": "65001"}}} + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_soo_unconfigured) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "Failed to see BGP convergence in pe2" + + step("Configure SoO (65000:1) for PE2 -- CPE2 session") + pe2.vtysh_cmd( + """ + configure terminal + router bgp 65001 vrf RED + address-family ipv4 unicast + neighbor 192.168.2.1 soo 65000:1 + """ + ) + + def _bgp_soo_configured(): + output = json.loads( + pe2.vtysh_cmd( + "show bgp vrf RED ipv4 unicast neighbors 192.168.2.1 advertised-routes json" + ) + ) + expected = {"advertisedRoutes": {"172.16.255.1/32": None}} + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_bgp_soo_configured) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "SoO filtering does not work from pe2" + + step("Configure SoO (65000:2) for PE2 -- CPE2 session") + pe2.vtysh_cmd( + """ + configure terminal + router bgp 65001 vrf RED + address-family ipv4 unicast + neighbor 192.168.2.1 soo 65000:2 + """ + ) + + test_func = functools.partial(_bgp_soo_unconfigured) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "SoO filtering does not work from pe2" + + step("Unconfigure SoO for PE2 -- CPE2 session") + pe2.vtysh_cmd( + """ + configure terminal + router bgp 65001 vrf RED + address-family ipv4 unicast + no neighbor 192.168.2.1 soo + 
""" + ) + + test_func = functools.partial(_bgp_soo_unconfigured) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + assert result is None, "SoO filtering does not work from pe2" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vpnv4_ebgp/__init__.py b/tests/topotests/bgp_vpnv4_ebgp/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/__init__.py diff --git a/tests/topotests/bgp_vpnv4_ebgp/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_ebgp/r1/bgpd.conf new file mode 100644 index 0000000000..2eebe5e6dd --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/r1/bgpd.conf @@ -0,0 +1,25 @@ +router bgp 65500 + bgp router-id 1.1.1.1 + no bgp ebgp-requires-policy + neighbor 10.125.0.2 remote-as 65501 + address-family ipv4 unicast + no neighbor 10.125.0.2 activate + exit-address-family + address-family ipv4 vpn + neighbor 10.125.0.2 activate + exit-address-family +! +router bgp 65500 vrf vrf1 + bgp router-id 1.1.1.1 + address-family ipv4 unicast + redistribute connected + label vpn export 101 + rd vpn export 444:1 + rt vpn both 52:100 + export vpn + import vpn + exit-address-family +! +interface r1-eth0 + mpls bgp forwarding +!
\ No newline at end of file diff --git a/tests/topotests/bgp_vpnv4_ebgp/r1/ipv4_routes.json b/tests/topotests/bgp_vpnv4_ebgp/r1/ipv4_routes.json new file mode 100644 index 0000000000..da7d281833 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/r1/ipv4_routes.json @@ -0,0 +1,50 @@ +{ + "10.200.0.0/24": [ + { + "prefix": "10.200.0.0/24", + "prefixLen": 24, + "protocol": "bgp", + "vrfName": "vrf1", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "10.125.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "vrf": "default", + "active": true, + "labels":[ + 102 + ] + } + ] + } + ], + "10.201.0.0/24": [ + { + "prefix": "10.201.0.0/24", + "prefixLen": 24, + "protocol": "connected", + "vrfName": "vrf1", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "nexthops":[ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/bgp_vpnv4_ebgp/r1/zebra.conf b/tests/topotests/bgp_vpnv4_ebgp/r1/zebra.conf new file mode 100644 index 0000000000..e9ae4e9831 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/r1/zebra.conf @@ -0,0 +1,7 @@ +log stdout +interface r1-eth1 vrf vrf1 + ip address 10.201.0.1/24 +! +interface r1-eth0 + ip address 10.125.0.1/24 +! 
diff --git a/tests/topotests/bgp_vpnv4_ebgp/r2/bgp_ipv4_routes.json b/tests/topotests/bgp_vpnv4_ebgp/r2/bgp_ipv4_routes.json new file mode 100644 index 0000000000..19797dd561 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/r2/bgp_ipv4_routes.json @@ -0,0 +1,38 @@ +{ + "vrfName": "vrf1", + "localAS": 65501, + "routes": + { + "10.201.0.0/24": [ + { + "prefix": "10.201.0.0", + "prefixLen": 24, + "network": "10.201.0.0\/24", + "nhVrfName": "default", + "nexthops": [ + { + "ip": "10.125.0.1", + "afi": "ipv4", + "used": true + } + ] + } + ], + "10.200.0.0/24": [ + { + "valid": true, + "bestpath": true, + "prefix": "10.200.0.0", + "prefixLen": 24, + "network": "10.200.0.0\/24", + "nexthops": [ + { + "ip": "0.0.0.0", + "afi": "ipv4", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_vpnv4_ebgp/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_ebgp/r2/bgpd.conf new file mode 100644 index 0000000000..e38c99d69c --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/r2/bgpd.conf @@ -0,0 +1,25 @@ +router bgp 65501 + bgp router-id 2.2.2.2 + no bgp ebgp-requires-policy + neighbor 10.125.0.1 remote-as 65500 + address-family ipv4 unicast + no neighbor 10.125.0.1 activate + exit-address-family + address-family ipv4 vpn + neighbor 10.125.0.1 activate + exit-address-family +! +router bgp 65501 vrf vrf1 + bgp router-id 2.2.2.2 + address-family ipv4 unicast + redistribute connected + label vpn export 102 + rd vpn export 444:2 + rt vpn both 52:100 + export vpn + import vpn + exit-address-family +! +interface r2-eth0 + mpls bgp forwarding +! diff --git a/tests/topotests/bgp_vpnv4_ebgp/r2/zebra.conf b/tests/topotests/bgp_vpnv4_ebgp/r2/zebra.conf new file mode 100644 index 0000000000..6c433aef2b --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/r2/zebra.conf @@ -0,0 +1,7 @@ +log stdout +interface r2-eth1 vrf vrf1 + ip address 10.200.0.2/24 +! +interface r2-eth0 + ip address 10.125.0.2/24 +! 
diff --git a/tests/topotests/bgp_vpnv4_ebgp/test_bgp_vpnv4_ebgp.py b/tests/topotests/bgp_vpnv4_ebgp/test_bgp_vpnv4_ebgp.py new file mode 100644 index 0000000000..cd8a9b6d6f --- /dev/null +++ b/tests/topotests/bgp_vpnv4_ebgp/test_bgp_vpnv4_ebgp.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python + +# +# test_bgp_vpnv4_ebgp.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2022 by 6WIND +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" + test_bgp_vpnv4_ebgp.py: Test the FRR BGP daemon with EBGP direct connection +""" + +import os +import sys +import json +from functools import partial +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. + + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + "Build function" + + # Create 2 routers. 
+ tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + +def _populate_iface(): + tgen = get_topogen() + cmds_list = [ + 'ip link add vrf1 type vrf table 10', + 'echo 100000 > /proc/sys/net/mpls/platform_labels', + 'ip link set dev vrf1 up', + 'ip link set dev {0}-eth1 master vrf1', + 'echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input', + ] + + for cmd in cmds_list: + input = cmd.format('r1', '1', '2') + logger.info('input: ' + cmd) + output = tgen.net['r1'].cmd(cmd.format('r1', '1', '2')) + logger.info('output: ' + output) + + for cmd in cmds_list: + input = cmd.format('r2', '2', '1') + logger.info('input: ' + cmd) + output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1')) + logger.info('output: ' + output) + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + _populate_iface() + + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + # Initialize all routers. + tgen.start_router() + + +def teardown_module(_mod): + "Teardown the pytest environment" + tgen = get_topogen() + + tgen.stop_topology() + + +def test_protocols_convergence(): + """ + Assert that all protocols have converged + statuses as they depend on it. 
+ """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router = tgen.gears['r1'] + logger.info("Dump some context for r1") + router.vtysh_cmd("show bgp ipv4 vpn") + router.vtysh_cmd("show bgp summary") + router.vtysh_cmd("show bgp vrf vrf1 ipv4") + router.vtysh_cmd("show running-config") + router = tgen.gears['r2'] + logger.info("Dump some context for r2") + router.vtysh_cmd("show bgp ipv4 vpn") + router.vtysh_cmd("show bgp summary") + router.vtysh_cmd("show bgp vrf vrf1 ipv4") + router.vtysh_cmd("show running-config") + + # Check IPv4 routing tables on r1 + logger.info("Checking IPv4 routes for convergence on r1") + router = tgen.gears['r1'] + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) + if not os.path.isfile(json_file): + logger.info("skipping file {}".format(json_file)) + assert 0, 'ipv4_routes.json file not found' + return + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip route vrf vrf1 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + # Check BGP IPv4 routing tables on r2 not installed + logger.info("Checking BGP IPv4 routes for convergence on r2") + router = tgen.gears['r2'] + json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name) + if not os.path.isfile(json_file): + assert 0, 'bgp_ipv4_routes.json file not found' + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp vrf vrf1 ipv4 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_vpnv4_gre/__init__.py b/tests/topotests/bgp_vpnv4_gre/__init__.py new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/__init__.py diff --git a/tests/topotests/bgp_vpnv4_gre/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_gre/r1/bgpd.conf new file mode 100644 index 0000000000..0e2d3a8248 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/r1/bgpd.conf @@ -0,0 +1,26 @@ +router bgp 65500 + bgp router-id 192.0.2.1 + neighbor 192.0.2.2 remote-as 65500 + neighbor 192.0.2.2 update-source 192.0.2.1 + address-family ipv4 unicast + no neighbor 192.0.2.2 activate + exit-address-family + address-family ipv4 vpn + neighbor 192.0.2.2 activate + neighbor 192.0.2.2 route-map rmap in + exit-address-family +! +router bgp 65500 vrf vrf1 + bgp router-id 192.0.2.1 + address-family ipv4 unicast + redistribute connected + label vpn export 101 + rd vpn export 444:1 + rt vpn both 52:100 + export vpn + import vpn + exit-address-family +! +route-map rmap permit 1 + set l3vpn next-hop encapsulation gre +! 
diff --git a/tests/topotests/bgp_vpnv4_gre/r1/ipv4_routes.json b/tests/topotests/bgp_vpnv4_gre/r1/ipv4_routes.json new file mode 100644 index 0000000000..5f2732aab0 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/r1/ipv4_routes.json @@ -0,0 +1,50 @@ +{ + "10.200.0.0/24": [ + { + "prefix": "10.200.0.0/24", + "prefixLen": 24, + "protocol": "bgp", + "vrfName": "vrf1", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-gre0", + "vrf": "default", + "active": true, + "labels":[ + 102 + ] + } + ] + } + ], + "10.201.0.0/24": [ + { + "prefix": "10.201.0.0/24", + "prefixLen": 24, + "protocol": "connected", + "vrfName": "vrf1", + "selected": true, + "destSelected": true, + "distance": 0, + "metric": 0, + "installed": true, + "nexthops":[ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "interfaceName": "r1-eth1", + "active": true + } + ] + } + ] +} diff --git a/tests/topotests/bgp_vpnv4_gre/r1/zebra.conf b/tests/topotests/bgp_vpnv4_gre/r1/zebra.conf new file mode 100644 index 0000000000..11780a874c --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/r1/zebra.conf @@ -0,0 +1,14 @@ +log stdout +ip route 192.0.2.2/32 192.168.0.2 +interface lo + ip address 192.0.2.1/32 +! +interface r1-gre0 + ip address 192.168.0.1/24 +! +interface r1-eth1 vrf vrf1 + ip address 10.201.0.1/24 +! +interface r1-eth0 + ip address 10.125.0.1/24 +! 
diff --git a/tests/topotests/bgp_vpnv4_gre/r2/bgp_ipv4_routes.json b/tests/topotests/bgp_vpnv4_gre/r2/bgp_ipv4_routes.json new file mode 100644 index 0000000000..e50d5dd084 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/r2/bgp_ipv4_routes.json @@ -0,0 +1,38 @@ +{ + "vrfName": "vrf1", + "localAS": 65500, + "routes": + { + "10.201.0.0/24": [ + { + "prefix": "10.201.0.0", + "prefixLen": 24, + "network": "10.201.0.0\/24", + "nhVrfName": "default", + "nexthops": [ + { + "ip": "192.0.2.1", + "afi": "ipv4", + "used": true + } + ] + } + ], + "10.200.0.0/24": [ + { + "valid": true, + "bestpath": true, + "prefix": "10.200.0.0", + "prefixLen": 24, + "network": "10.200.0.0\/24", + "nexthops": [ + { + "ip": "0.0.0.0", + "afi": "ipv4", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_vpnv4_gre/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_gre/r2/bgpd.conf new file mode 100644 index 0000000000..bf05866da4 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/r2/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65500 + bgp router-id 192.0.2.2 + neighbor 192.0.2.1 remote-as 65500 + neighbor 192.0.2.1 update-source 192.0.2.2 + address-family ipv4 unicast + no neighbor 192.0.2.1 activate + exit-address-family + address-family ipv4 vpn + neighbor 192.0.2.1 activate + exit-address-family +! +router bgp 65500 vrf vrf1 + bgp router-id 192.0.2.2 + address-family ipv4 unicast + redistribute connected + label vpn export 102 + rd vpn export 444:2 + rt vpn both 52:100 + export vpn + import vpn + exit-address-family +! diff --git a/tests/topotests/bgp_vpnv4_gre/r2/zebra.conf b/tests/topotests/bgp_vpnv4_gre/r2/zebra.conf new file mode 100644 index 0000000000..de88a4b64e --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/r2/zebra.conf @@ -0,0 +1,14 @@ +log stdout +ip route 192.0.2.1/32 192.168.0.1 +interface lo + ip address 192.0.2.2/32 +! +interface r2-gre0 + ip address 192.168.0.2/24 +! +interface r2-eth1 vrf vrf1 + ip address 10.200.0.2/24 +! 
+interface r2-eth0 + ip address 10.125.0.2/24 +! diff --git a/tests/topotests/bgp_vpnv4_gre/test_bgp_vpnv4_gre.py b/tests/topotests/bgp_vpnv4_gre/test_bgp_vpnv4_gre.py new file mode 100644 index 0000000000..f562f44179 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_gre/test_bgp_vpnv4_gre.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python + +# +# test_bgp_vpnv4_gre.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2021 by 6WIND +# +# Permission to use, copy, modify, and/or distribute this software +# for any purpose with or without fee is hereby granted, provided +# that the above copyright notice and this permission notice appear +# in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# + +""" + test_bgp_vpnv4_gre.py: Test the FRR BGP daemon with BGP IPv6 interface + with route advertisements on a separate netns. +""" + +import os +import sys +import json +from functools import partial +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. + + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + "Build function" + + # Create 2 routers. 
+ tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + +def _populate_iface(): + tgen = get_topogen() + cmds_list = [ + 'ip link add vrf1 type vrf table 10', + 'echo 10 > /proc/sys/net/mpls/platform_labels', + 'ip link set dev vrf1 up', + 'ip link set dev {0}-eth1 master vrf1', + 'echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input', + 'ip tunnel add {0}-gre0 mode gre ttl 64 dev {0}-eth0 local 10.125.0.{1} remote 10.125.0.{2}', + 'ip link set dev {0}-gre0 up', + 'echo 1 > /proc/sys/net/mpls/conf/{0}-gre0/input', + ] + + for cmd in cmds_list: + input = cmd.format('r1', '1', '2') + logger.info('input: ' + cmd) + output = tgen.net['r1'].cmd(cmd.format('r1', '1', '2')) + logger.info('output: ' + output) + + for cmd in cmds_list: + input = cmd.format('r2', '2', '1') + logger.info('input: ' + cmd) + output = tgen.net['r2'].cmd(cmd.format('r2', '2', '1')) + logger.info('output: ' + output) + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + _populate_iface() + + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + # Initialize all routers. + tgen.start_router() + + +def teardown_module(_mod): + "Teardown the pytest environment" + tgen = get_topogen() + + tgen.stop_topology() + + +def test_protocols_convergence(): + """ + Assert that all protocols have converged + statuses as they depend on it. 
+ """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router = tgen.gears['r1'] + logger.info("Dump some context for r1") + router.vtysh_cmd("show bgp ipv4 vpn") + router.vtysh_cmd("show bgp summary") + router.vtysh_cmd("show bgp vrf vrf1 ipv4") + router.vtysh_cmd("show running-config") + router = tgen.gears['r2'] + logger.info("Dump some context for r2") + router.vtysh_cmd("show bgp ipv4 vpn") + router.vtysh_cmd("show bgp summary") + router.vtysh_cmd("show bgp vrf vrf1 ipv4") + router.vtysh_cmd("show running-config") + + # Check IPv4 routing tables on r1 + logger.info("Checking IPv4 routes for convergence on r1") + router = tgen.gears['r1'] + json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name) + if not os.path.isfile(json_file): + logger.info("skipping file {}".format(json_file)) + assert 0, 'ipv4_routes.json file not found' + return + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show ip route vrf vrf1 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + # Check BGP IPv4 routing tables on r2 not installed + logger.info("Checking BGP IPv4 routes for convergence on r2") + router = tgen.gears['r2'] + json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name) + if not os.path.isfile(json_file): + assert 0, 'bgp_ipv4_routes.json file not found' + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp vrf vrf1 ipv4 json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=40, wait=2) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index f79ca71a64..2a57f6c26e 100755 --- a/tests/topotests/conftest.py +++ b/tests/topotests/conftest.py @@ -232,6 +232,9 @@ def pytest_configure(config): Assert that the environment is correctly configured, and get extra config. """ + if config.getoption("--collect-only"): + return + if "PYTEST_XDIST_WORKER" not in os.environ: os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no") os.environ["PYTEST_TOPOTEST_WORKER"] = "" diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index 341ec25a19..7ab36c4fcd 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -491,6 +491,25 @@ def __create_bgp_unicast_neighbor( cmd = "no {}".format(cmd) config_data.append(cmd) + admin_dist_data = addr_data.setdefault("distance", {}) + if admin_dist_data: + if len(admin_dist_data) < 2: + logger.debug( + "Router %s: pass the admin distance values for " + "ebgp, ibgp and local routes", + router, + ) + cmd = "distance bgp {} {} {}".format( + admin_dist_data["ebgp"], + admin_dist_data["ibgp"], + admin_dist_data["local"], + ) + + del_action = admin_dist_data.setdefault("delete", False) + if del_action: + cmd = "no distance bgp" + config_data.append(cmd) + import_vrf_data = addr_data.setdefault("import", {}) if import_vrf_data: cmd = "import vrf {}".format(import_vrf_data["vrf"]) @@ -2662,7 +2681,7 @@ def verify_best_path_as_per_bgp_attribute( @retry(retry_timeout=10) def verify_best_path_as_per_admin_distance( - tgen, addr_type, router, input_dict, attribute, expected=True + tgen, addr_type, router, input_dict, attribute, expected=True, vrf=None ): """ API is to verify best path according to admin distance for given @@ -2678,6 +2697,7 @@ def 
verify_best_path_as_per_admin_distance( * `input_dict`: defines different routes with different admin distance to calculate for which route best path is selected * `expected` : expected results from API, by-default True + * `vrf`: Pass vrf name check for perticular vrf. Usage ----- @@ -2710,9 +2730,14 @@ def verify_best_path_as_per_admin_distance( # Show ip route cmd if addr_type == "ipv4": - command = "show ip route json" + command = "show ip route" + else: + command = "show ipv6 route" + + if vrf: + command = "{} vrf {} json".format(command, vrf) else: - command = "show ipv6 route json" + command = "{} json".format(command) for routes_from_router in input_dict.keys(): sh_ip_route_json = router_list[routes_from_router].vtysh_cmd( diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index fa33b02ed1..5f4c280715 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -3339,6 +3339,7 @@ def verify_rib( metric=None, fib=None, count_only=False, + admin_distance=None, ): """ Data will be read from input_dict or input JSON file, API will generate @@ -3611,6 +3612,30 @@ def verify_rib( ) return errormsg + if admin_distance is not None: + if "distance" not in rib_routes_json[st_rt][0]: + errormsg = ( + "[DUT: {}]: admin distance is" + " not present for" + " route {} in RIB \n".format(dut, st_rt) + ) + return errormsg + + if ( + admin_distance + != rib_routes_json[st_rt][0]["distance"] + ): + errormsg = ( + "[DUT: {}]: admin distance value " + "{} is not matched for " + "route {} in RIB \n".format( + dut, + admin_distance, + st_rt, + ) + ) + return errormsg + if metric is not None: if "metric" not in rib_routes_json[st_rt][0]: errormsg = ( @@ -3764,7 +3789,7 @@ def verify_rib( @retry(retry_timeout=12) -def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None): +def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None): """ Data will be read from input_dict or 
input JSON file, API will generate same prefixes, which were redistributed by either create_static_routes() or @@ -3822,6 +3847,9 @@ def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None): found_routes = [] missing_routes = [] + if protocol: + command = "{} {}".format(command, protocol) + if "static_routes" in input_dict[routerInput]: static_routes = input_dict[routerInput]["static_routes"] @@ -5039,7 +5067,7 @@ def verify_ip_nht(tgen, input_dict): for nh in nh_list: if nh in show_ip_nht: - nht = run_frr_cmd(rnode, f"show ip nht {nh}") + nht = run_frr_cmd(rnode, "show ip nht {}".format(nh)) if "unresolved" in nht: errormsg = "Nexthop {} became unresolved on {}".format(nh, router) return errormsg diff --git a/vtysh/vtysh.h b/vtysh/vtysh.h index 1f02efee20..68841e2d38 100644 --- a/vtysh/vtysh.h +++ b/vtysh/vtysh.h @@ -60,8 +60,13 @@ extern struct thread_master *master; #define VTYSH_ALL VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD|VTYSH_PATHD #define VTYSH_ACL VTYSH_BFDD|VTYSH_BABELD|VTYSH_BGPD|VTYSH_EIGRPD|VTYSH_ISISD|VTYSH_FABRICD|VTYSH_LDPD|VTYSH_NHRPD|VTYSH_OSPF6D|VTYSH_OSPFD|VTYSH_PBRD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_VRRPD|VTYSH_ZEBRA #define VTYSH_RMAP VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_EIGRPD|VTYSH_FABRICD -#define VTYSH_INTERFACE VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_PBRD|VTYSH_FABRICD|VTYSH_VRRPD -#define VTYSH_VRF VTYSH_INTERFACE|VTYSH_STATICD +#define VTYSH_INTERFACE_SUBSET \ + VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_RIPNGD | VTYSH_OSPFD | VTYSH_OSPF6D | \ + VTYSH_ISISD | VTYSH_PIMD | VTYSH_PIM6D | VTYSH_NHRPD | \ + VTYSH_EIGRPD | VTYSH_BABELD | VTYSH_PBRD | 
VTYSH_FABRICD | \ + VTYSH_VRRPD +#define VTYSH_INTERFACE VTYSH_INTERFACE_SUBSET | VTYSH_BGPD +#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD #define VTYSH_KEYS VTYSH_RIPD | VTYSH_EIGRPD | VTYSH_OSPF6D /* Daemons who can process nexthop-group configs */ #define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD diff --git a/yang/frr-bgp-route-map.yang b/yang/frr-bgp-route-map.yang index eaa7891f0c..fcfd14e4fe 100644 --- a/yang/frr-bgp-route-map.yang +++ b/yang/frr-bgp-route-map.yang @@ -330,6 +330,12 @@ module frr-bgp-route-map { "Set EVPN gateway IP overlay index IPv6"; } + identity set-l3vpn-nexthop-encapsulation { + base frr-route-map:rmap-set-type; + description + "Accept L3VPN traffic over other than LSP encapsulation"; + } + grouping extcommunity-non-transitive-types { leaf two-octet-as-specific { type boolean; @@ -902,5 +908,21 @@ module frr-bgp-route-map { type inet:ipv6-address; } } + case l3vpn-nexthop-encapsulation { + when + "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, + 'frr-bgp-route-map:set-l3vpn-nexthop-encapsulation')"; + description + "Accept L3VPN traffic over other than LSP encapsulation"; + leaf l3vpn-nexthop-encapsulation { + type enumeration { + enum "gre" { + value 0; + description + "GRE protocol"; + } + } + } + } } } diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c index 45a372f88c..a8b56bb8f2 100644 --- a/zebra/kernel_netlink.c +++ b/zebra/kernel_netlink.c @@ -1033,12 +1033,18 @@ static int netlink_parse_error(const struct nlsock *nl, struct nlmsghdr *h, return 1; } - /* Deal with errors that occur because of races in link handling. */ - if (is_cmd - && ((msg_type == RTM_DELROUTE - && (-errnum == ENODEV || -errnum == ESRCH)) - || (msg_type == RTM_NEWROUTE - && (-errnum == ENETDOWN || -errnum == EEXIST)))) { + /* + * Deal with errors that occur because of races in link handling + * or types are not supported in kernel. 
+ */ + if (is_cmd && + ((msg_type == RTM_DELROUTE && + (-errnum == ENODEV || -errnum == ESRCH)) || + (msg_type == RTM_NEWROUTE && + (-errnum == ENETDOWN || -errnum == EEXIST)) || + ((msg_type == RTM_NEWTUNNEL || msg_type == RTM_DELTUNNEL || + msg_type == RTM_GETTUNNEL) && + (-errnum == EOPNOTSUPP)))) { if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug("%s: error: %s type=%s(%u), seq=%u, pid=%u", nl->name, safe_strerror(-errnum), diff --git a/zebra/zebra_srv6.c b/zebra/zebra_srv6.c index 219d047694..36506cacc7 100644 --- a/zebra/zebra_srv6.c +++ b/zebra/zebra_srv6.c @@ -162,6 +162,7 @@ void zebra_srv6_locator_delete(struct srv6_locator *locator) } listnode_delete(srv6->locators, locator); + srv6_locator_free(locator); } struct srv6_locator *zebra_srv6_locator_lookup(const char *name) diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c index 6624f0beb9..a2844ca956 100644 --- a/zebra/zebra_vrf.c +++ b/zebra/zebra_vrf.c @@ -595,7 +595,7 @@ int zebra_vrf_netns_handler_create(struct vty *vty, struct vrf *vrf, zlog_info( "VRF %u already configured with NETNS %s", vrf->vrf_id, ns->name); - return CMD_WARNING_CONFIG_FAILED; + return CMD_WARNING; } } ns = ns_lookup_name(pathname); diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 5a6321ae7e..34cce71cd7 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -2090,6 +2090,7 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni, int add) { struct zebra_evpn *zevpn = NULL; + struct zebra_l3vni *zl3vni = NULL; /* There is a possibility that VNI notification was already received * from kernel and we programmed it as L2-VNI @@ -2117,6 +2118,10 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni, /* Free up all remote VTEPs, if any. */ zebra_evpn_vtep_del_all(zevpn, 1); + zl3vni = zl3vni_from_vrf(zevpn->vrf_id); + if (zl3vni) + listnode_delete(zl3vni->l2vnis, zevpn); + /* Delete the hash entry. 
*/ if (zebra_evpn_vxlan_del(zevpn)) { flog_err(EC_ZEBRA_VNI_DEL_FAILED, @@ -2172,8 +2177,12 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni, /* Find bridge interface for the VNI */ vlan_if = zvni_map_to_svi(vxl->access_vlan, zif->brslave_info.br_if); - if (vlan_if) + if (vlan_if) { zevpn->vrf_id = vlan_if->vrf->vrf_id; + zl3vni = zl3vni_from_vrf(vlan_if->vrf->vrf_id); + if (zl3vni) + listnode_add_sort_nodup(zl3vni->l2vnis, zevpn); + } zevpn->vxlan_if = ifp; zevpn->local_vtep_ip = vxl->vtep_ip; @@ -5139,10 +5148,9 @@ int zebra_vxlan_if_update(struct interface *ifp, uint16_t chgflags) return 0; /* Inform BGP, if there is a change of interest. */ - if (chgflags - & (ZEBRA_VXLIF_MASTER_CHANGE | - ZEBRA_VXLIF_LOCAL_IP_CHANGE | - ZEBRA_VXLIF_MCAST_GRP_CHANGE)) + if (chgflags & + (ZEBRA_VXLIF_MASTER_CHANGE | ZEBRA_VXLIF_LOCAL_IP_CHANGE | + ZEBRA_VXLIF_MCAST_GRP_CHANGE | ZEBRA_VXLIF_VLAN_CHANGE)) zebra_evpn_send_add_to_client(zevpn); /* If there is a valid new master or a VLAN mapping change, |
