71 files changed, 2633 insertions, 1954 deletions
diff --git a/bfdd/bfdd_nb.c b/bfdd/bfdd_nb.c index 7135c50763..114fbc2bdd 100644 --- a/bfdd/bfdd_nb.c +++ b/bfdd/bfdd_nb.c @@ -74,7 +74,6 @@ const struct frr_yang_module_info frr_bfdd_info = { .xpath = "/frr-bfdd:bfdd/bfd/profile/minimum-ttl", .cbs = { .modify = bfdd_bfd_profile_minimum_ttl_modify, - .destroy = bfdd_bfd_profile_minimum_ttl_destroy, .cli_show = bfd_cli_show_minimum_ttl, } }, @@ -361,7 +360,6 @@ const struct frr_yang_module_info frr_bfdd_info = { .xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/minimum-ttl", .cbs = { .modify = bfdd_bfd_sessions_multi_hop_minimum_ttl_modify, - .destroy = bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy, .cli_show = bfd_cli_show_minimum_ttl, } }, diff --git a/bfdd/bfdd_nb.h b/bfdd/bfdd_nb.h index 7a0e724d28..b5b00b57e4 100644 --- a/bfdd/bfdd_nb.h +++ b/bfdd/bfdd_nb.h @@ -25,7 +25,6 @@ int bfdd_bfd_profile_required_receive_interval_modify( int bfdd_bfd_profile_administrative_down_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_passive_mode_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args); -int bfdd_bfd_profile_minimum_ttl_destroy(struct nb_cb_destroy_args *args); int bfdd_bfd_profile_echo_mode_modify(struct nb_cb_modify_args *args); int bfdd_bfd_profile_desired_echo_transmission_interval_modify( struct nb_cb_modify_args *args); @@ -128,8 +127,6 @@ int bfdd_bfd_sessions_multi_hop_administrative_down_modify( struct nb_cb_modify_args *args); int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify( struct nb_cb_modify_args *args); -int bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy( - struct nb_cb_destroy_args *args); struct yang_data * bfdd_bfd_sessions_multi_hop_stats_local_discriminator_get_elem( struct nb_cb_get_elem_args *args); diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c index e4e97404d8..8cf2f0a6f1 100644 --- a/bfdd/bfdd_nb_config.c +++ b/bfdd/bfdd_nb_config.c @@ -423,20 +423,6 @@ int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args) return NB_OK; } -int bfdd_bfd_profile_minimum_ttl_destroy(struct nb_cb_destroy_args *args) -{ - struct bfd_profile *bp; - - if (args->event != NB_EV_APPLY) - return NB_OK; - - bp = nb_running_get_entry(args->dnode, NULL, true); - bp->minimum_ttl = BFD_DEF_MHOP_TTL; - bfd_profile_update(bp); - - return NB_OK; -} - /* * XPath: /frr-bfdd:bfdd/bfd/profile/echo-mode */ @@ -859,27 +845,3 @@ int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify( return NB_OK; } - -int bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy( - struct nb_cb_destroy_args *args) -{ - struct bfd_session *bs; - - switch (args->event) { - case NB_EV_VALIDATE: - case NB_EV_PREPARE: - return NB_OK; - - case NB_EV_APPLY: - break; - - case NB_EV_ABORT: - return NB_OK; - } - - bs = nb_running_get_entry(args->dnode, NULL, true); - bs->peer_profile.minimum_ttl = BFD_DEF_MHOP_TTL; - bfd_session_apply(bs); - - return NB_OK; -} diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c index 1d2ba3bf58..f3c308afb9 100644 --- a/bgpd/bgp_clist.c +++ b/bgpd/bgp_clist.c @@ -659,9 +659,6 @@ bool community_list_match(struct community *com, struct community_list *list) struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { - if (entry->any) - return entry->direct == COMMUNITY_PERMIT; - if (entry->style == COMMUNITY_LIST_STANDARD) { if (community_include(entry->u.com, COMMUNITY_INTERNET)) return entry->direct == COMMUNITY_PERMIT; @@ -681,9 +678,6 @@ bool lcommunity_list_match(struct lcommunity *lcom, struct community_list *list) struct 
community_entry *entry; for (entry = list->head; entry; entry = entry->next) { - if (entry->any) - return entry->direct == COMMUNITY_PERMIT; - if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) { if (lcommunity_match(lcom, entry->u.lcom)) return entry->direct == COMMUNITY_PERMIT; @@ -705,9 +699,6 @@ bool lcommunity_list_exact_match(struct lcommunity *lcom, struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { - if (entry->any) - return entry->direct == COMMUNITY_PERMIT; - if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) { if (lcommunity_cmp(lcom, entry->u.lcom)) return entry->direct == COMMUNITY_PERMIT; @@ -724,9 +715,6 @@ bool ecommunity_list_match(struct ecommunity *ecom, struct community_list *list) struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { - if (entry->any) - return entry->direct == COMMUNITY_PERMIT; - if (entry->style == EXTCOMMUNITY_LIST_STANDARD) { if (ecommunity_match(ecom, entry->u.ecom)) return entry->direct == COMMUNITY_PERMIT; @@ -746,9 +734,6 @@ bool community_list_exact_match(struct community *com, struct community_entry *entry; for (entry = list->head; entry; entry = entry->next) { - if (entry->any) - return entry->direct == COMMUNITY_PERMIT; - if (entry->style == COMMUNITY_LIST_STANDARD) { if (community_include(entry->u.com, COMMUNITY_INTERNET)) return entry->direct == COMMUNITY_PERMIT; @@ -781,28 +766,18 @@ struct community *community_list_match_delete(struct community *com, val = community_val_get(com, i); for (entry = list->head; entry; entry = entry->next) { - if (entry->any) { + if ((entry->style == COMMUNITY_LIST_STANDARD) && + (community_include(entry->u.com, + COMMUNITY_INTERNET) || + community_include(entry->u.com, val))) { if (entry->direct == COMMUNITY_PERMIT) { com_index_to_delete[delete_index] = i; delete_index++; } break; - } - - else if ((entry->style == COMMUNITY_LIST_STANDARD) - && (community_include(entry->u.com, - COMMUNITY_INTERNET) - || community_include(entry->u.com, val))) { - if (entry->direct == COMMUNITY_PERMIT) { - com_index_to_delete[delete_index] = i; - delete_index++; - } - break; - } - - else if ((entry->style == COMMUNITY_LIST_EXPANDED) - && community_regexp_include(entry->reg, com, - i)) { + } else if ((entry->style == COMMUNITY_LIST_EXPANDED) && + community_regexp_include(entry->reg, com, + i)) { if (entry->direct == COMMUNITY_PERMIT) { com_index_to_delete[delete_index] = i; delete_index++; @@ -836,12 +811,6 @@ static bool community_list_dup_check(struct community_list *list, if (entry->direct != new->direct) continue; - if (entry->any != new->any) - continue; - - if (entry->any) - return true; - switch (entry->style) { case COMMUNITY_LIST_STANDARD: if (community_cmp(entry->u.com, new->u.com)) @@ -899,20 +868,17 @@ int community_list_set(struct community_list_handler *ch, const char *name, } } - if (str) { - if (style == COMMUNITY_LIST_STANDARD) - com = community_str2com(str); - else - regex = bgp_regcomp(str); + if (style == COMMUNITY_LIST_STANDARD) + com = community_str2com(str); + else + regex = bgp_regcomp(str); - if (!com && !regex) - return COMMUNITY_LIST_ERR_MALFORMED_VAL; - } + if (!com && !regex) + return COMMUNITY_LIST_ERR_MALFORMED_VAL; entry = community_entry_new(); entry->direct = direct; entry->style = style; - entry->any = (str ? 
false : true); entry->u.com = com; entry->reg = regex; entry->seq = seqnum; @@ -989,16 +955,8 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom, for (i = 0; i < lcom->size; i++) { ptr = lcom->val + (i * LCOMMUNITY_SIZE); for (entry = list->head; entry; entry = entry->next) { - if (entry->any) { - if (entry->direct == COMMUNITY_PERMIT) { - com_index_to_delete[delete_index] = i; - delete_index++; - } - break; - } - - else if ((entry->style == LARGE_COMMUNITY_LIST_STANDARD) - && lcommunity_include(entry->u.lcom, ptr)) { + if ((entry->style == LARGE_COMMUNITY_LIST_STANDARD) && + lcommunity_include(entry->u.lcom, ptr)) { if (entry->direct == COMMUNITY_PERMIT) { com_index_to_delete[delete_index] = i; delete_index++; @@ -1006,9 +964,10 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom, break; } - else if ((entry->style == LARGE_COMMUNITY_LIST_EXPANDED) - && lcommunity_regexp_include(entry->reg, lcom, - i)) { + else if ((entry->style == + LARGE_COMMUNITY_LIST_EXPANDED) && + lcommunity_regexp_include(entry->reg, lcom, + i)) { if (entry->direct == COMMUNITY_PERMIT) { com_index_to_delete[delete_index] = i; delete_index++; @@ -1127,7 +1086,6 @@ int lcommunity_list_set(struct community_list_handler *ch, const char *name, entry = community_entry_new(); entry->direct = direct; entry->style = style; - entry->any = (str ? false : true); entry->u.lcom = lcom; entry->reg = regex; entry->seq = seqnum; @@ -1248,7 +1206,6 @@ int extcommunity_list_set(struct community_list_handler *ch, const char *name, entry = community_entry_new(); entry->direct = direct; entry->style = style; - entry->any = false; if (ecom) entry->config = ecommunity_ecom2str( ecom, ECOMMUNITY_FORMAT_COMMUNITY_LIST, 0); diff --git a/bgpd/bgp_clist.h b/bgpd/bgp_clist.h index 7a9b28038c..8e5d637bab 100644 --- a/bgpd/bgp_clist.h +++ b/bgpd/bgp_clist.h @@ -65,9 +65,6 @@ struct community_entry { /* Standard or expanded. */ uint8_t style; - /* Any match. */ - bool any; - /* Sequence number. */ int64_t seq; diff --git a/bgpd/bgp_flowspec.c b/bgpd/bgp_flowspec.c index 70bdbaf035..6165bf892e 100644 --- a/bgpd/bgp_flowspec.c +++ b/bgpd/bgp_flowspec.c @@ -189,13 +189,16 @@ int bgp_nlri_parse_flowspec(struct peer *peer, struct attr *attr, zlog_info("%s", local_string); } /* Process the route. 
*/ - if (!withdraw) + if (!withdraw) { bgp_update(peer, &p, 0, attr, afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL, NULL, 0, 0, NULL); - else + } else { bgp_withdraw(peer, &p, 0, afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL, NULL, 0, NULL); + } + + XFREE(MTYPE_TMP, temp); } return BGP_NLRI_PARSE_OK; } diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index a289d3d67a..ad6906d092 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -2656,12 +2656,12 @@ int bgp_event_update(struct peer *peer, enum bgp_fsm_events event) ret != BGP_FSM_FAILURE_AND_DELETE) { flog_err( EC_BGP_FSM, - "%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d", + "%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d, last reset: %s", peer->host, bgp_event_str[peer->cur_event], lookup_msg(bgp_status_msg, peer->status, NULL), bgp_event_str[peer->last_event], - bgp_event_str[peer->last_major_event], - peer->fd); + bgp_event_str[peer->last_major_event], peer->fd, + peer_down_str[peer->last_reset]); bgp_stop(peer); bgp_fsm_change_status(peer, Idle); bgp_timer_set(peer); diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index ecc84533b0..dc9bd3cff5 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -1467,13 +1467,12 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label( /* Unlink from any existing nexthop cache. Free the entry if unused. */ bgp_mplsvpn_path_nh_label_unlink(pi); - if (blnc) { - /* updates NHT pi list reference */ - LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread); - pi->label_nexthop_cache = blnc; - pi->label_nexthop_cache->path_count++; - blnc->last_update = monotime(NULL); - } + + /* updates NHT pi list reference */ + LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread); + pi->label_nexthop_cache = blnc; + pi->label_nexthop_cache->path_count++; + blnc->last_update = monotime(NULL); /* then add or update the selected nexthop */ if (!blnc->nh) diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index bda163d7a5..d7b1429881 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -547,7 +547,7 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc, char bnc_buf[BNC_FLAG_DUMP_SIZE]; zlog_debug( - "%s(%u): Rcvd NH update %pFX(%u)%u) - metric %d/%d #nhops %d/%d flags %s", + "%s(%u): Rcvd NH update %pFX(%u)(%u) - metric %d/%d #nhops %d/%d flags %s", bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix, bnc->ifindex, bnc->srte_color, nhr->metric, bnc->metric, nhr->nexthop_num, bnc->nexthop_num, @@ -849,7 +849,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id) if (!bnc_nhc) { if (BGP_DEBUG(nht, NHT)) zlog_debug( - "parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache", + "parse nexthop update %pFX(%u)(%s): bnc info not found for nexthop cache", &nhr.prefix, nhr.srte_color, bgp->name_pretty); } else bgp_process_nexthop_update(bnc_nhc, &nhr, false); @@ -860,7 +860,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id) if (!bnc_import) { if (BGP_DEBUG(nht, NHT)) zlog_debug( - "parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check", + "parse nexthop update %pFX(%u)(%s): bnc info not found for import check", &nhr.prefix, nhr.srte_color, bgp->name_pretty); } else bgp_process_nexthop_update(bnc_import, &nhr, true); diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index e9d5c5d3a8..6d7b745713 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -9219,6 +9219,8 @@ DEFPY(af_label_vpn_export_allocation_mode, bool old_per_nexthop, new_per_nexthop; afi = vpn_policy_getafi(vty, bgp, false); + 
if (afi == AFI_MAX) + return CMD_WARNING_CONFIG_FAILED; old_per_nexthop = !!CHECK_FLAG(bgp->vpn_policy[afi].flags, BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP); @@ -20692,6 +20694,7 @@ DEFUN (community_list_standard, argv_find(argv, argc, "AA:NN", &idx); char *str = argv_concat(argv, argc, idx); + assert(str); int ret = community_list_set(bgp_clist, cl_name_or_number, str, seq, direct, style); @@ -20804,6 +20807,7 @@ DEFUN (community_list_expanded_all, argv_find(argv, argc, "AA:NN", &idx); char *str = argv_concat(argv, argc, idx); + assert(str); int ret = community_list_set(bgp_clist, cl_name_or_number, str, seq, direct, style); @@ -20888,16 +20892,13 @@ static const char *community_list_config_str(struct community_entry *entry) { const char *str; - if (entry->any) - str = ""; - else { - if (entry->style == COMMUNITY_LIST_STANDARD) - str = community_str(entry->u.com, false, false); - else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) - str = lcommunity_str(entry->u.lcom, false, false); - else - str = entry->config; - } + if (entry->style == COMMUNITY_LIST_STANDARD) + str = community_str(entry->u.com, false, false); + else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) + str = lcommunity_str(entry->u.lcom, false, false); + else + str = entry->config; + return str; } @@ -20920,13 +20921,8 @@ static void community_list_show(struct vty *vty, struct community_list *list) : "expanded", list->name); } - if (entry->any) - vty_out(vty, " %s\n", - community_direct_str(entry->direct)); - else - vty_out(vty, " %s %s\n", - community_direct_str(entry->direct), - community_list_config_str(entry)); + vty_out(vty, " %s %s\n", community_direct_str(entry->direct), + community_list_config_str(entry)); } } @@ -21285,13 +21281,8 @@ static void lcommunity_list_show(struct vty *vty, struct community_list *list) : "expanded", list->name); } - if (entry->any) - vty_out(vty, " %s\n", - community_direct_str(entry->direct)); - else - vty_out(vty, " %s %s\n", - community_direct_str(entry->direct), - community_list_config_str(entry)); + vty_out(vty, " %s %s\n", community_direct_str(entry->direct), + community_list_config_str(entry)); } } @@ -21587,13 +21578,8 @@ static void extcommunity_list_show(struct vty *vty, struct community_list *list) : "expanded", list->name); } - if (entry->any) - vty_out(vty, " %s\n", - community_direct_str(entry->direct)); - else - vty_out(vty, " %s %s\n", - community_direct_str(entry->direct), - community_list_config_str(entry)); + vty_out(vty, " %s %s\n", community_direct_str(entry->direct), + community_list_config_str(entry)); } } diff --git a/configure.ac b/configure.ac index 0120c517c6..47ee44a7df 100644 --- a/configure.ac +++ b/configure.ac @@ -7,7 +7,7 @@ ## AC_PREREQ([2.69]) -AC_INIT([frr], [9.0-dev], [https://github.com/frrouting/frr/issues]) +AC_INIT([frr], [9.1-dev], [https://github.com/frrouting/frr/issues]) PACKAGE_URL="https://frrouting.org/" AC_SUBST([PACKAGE_URL]) PACKAGE_FULLNAME="FRRouting" diff --git a/debian/changelog b/debian/changelog index 008a97c7d5..5c0429d69d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,14 +1,14 @@ -frr (9.0~dev-1) UNRELEASED; urgency=medium +frr (9.1~dev-1) UNRELEASED; urgency=medium - * FRR Dev 9.0 + * FRR Dev 9.1 - -- Donatas Abraitis <donatas@opensourcerouting.org> Tue, 07 Feb 2023 16:00:00 +0500 + -- Jafar Al-Gharaibeh <jafar@atcorp.com> Tue, 06 Jun 2023 12:00:00 -0600 -frr (8.5-1) UNRELEASED; urgency=medium +frr (8.5-0) unstable; urgency=medium * New upstream release FRR 8.5 - -- Donatas Abraitis 
<donatas@opensourcerouting.org> Tue, 07 Feb 2023 16:00:00 +0500 + -- Jafar Al-Gharaibeh <jafar@atcorp.com> Fri, 10 Mar 2023 02:00:00 -0600 frr (8.4.2-1) unstable; urgency=medium diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst index 28cf5d0ab1..65befaccba 100644 --- a/doc/developer/workflow.rst +++ b/doc/developer/workflow.rst @@ -166,15 +166,15 @@ as early as possible, i.e. the first 2-week window. For reference, the expected release schedule according to the above is: -+---------+------------+------------+------------+------------+------------+ -| Release | 2023-03-07 | 2023-07-04 | 2023-10-31 | 2024-02-27 | 2024-06-25 | -+---------+------------+------------+------------+------------+------------+ -| RC | 2023-02-21 | 2023-06-20 | 2023-10-17 | 2024-02-13 | 2024-06-11 | -+---------+------------+------------+------------+------------+------------+ -| dev/X.Y | 2023-02-07 | 2023-06-06 | 2023-10-03 | 2024-01-30 | 2024-05-28 | -+---------+------------+------------+------------+------------+------------+ -| freeze | 2023-01-24 | 2023-05-23 | 2023-09-19 | 2024-01-16 | 2024-05-14 | -+---------+------------+------------+------------+------------+------------+ ++---------+------------+------------+------------+ +| Release | 2023-07-04 | 2023-10-31 | 2024-02-27 | ++---------+------------+------------+------------+ +| RC | 2023-06-20 | 2023-10-17 | 2024-02-13 | ++---------+------------+------------+------------+ +| dev/X.Y | 2023-06-06 | 2023-10-03 | 2024-01-30 | ++---------+------------+------------+------------+ +| freeze | 2023-05-23 | 2023-09-19 | 2024-01-16 | ++---------+------------+------------+------------+ Here is the hint on how to get the dates easily: diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 2c1d455535..a2585f3a57 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -455,7 +455,7 @@ Administrative Distance Metrics .. _bgp-requires-policy: Require policy on EBGP -------------------------------- +---------------------- .. clicmd:: bgp ebgp-requires-policy @@ -3888,6 +3888,12 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`. Total number of neighbors 1 exit1# +If PfxRcd and/or PfxSnt is shown as ``(Policy)``, that means that the EBGP +default policy is turned on, but you don't have any filters applied for +incoming/outgoing directions. + +.. seealso:: :ref:`bgp-requires-policy` + .. clicmd:: show bgp [afi] [safi] [all] [wide|json] .. clicmd:: show bgp vrfs [<VRFNAME$vrf_name>] [json] diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c index 2237a611e8..a056267bf7 100644 --- a/eigrpd/eigrp_update.c +++ b/eigrpd/eigrp_update.c @@ -842,9 +842,6 @@ static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr) eigrp_fsm_event(&fsm_msg); } - /* NULL the pointer */ - dest_addr = NULL; - /* delete processed prefix from list */ listnode_delete(prefixes, pe); diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c index de467c8262..e114467e07 100644 --- a/isisd/isis_spf.c +++ b/isisd/isis_spf.c @@ -1843,6 +1843,9 @@ void isis_run_spf(struct isis_spftree *spftree) struct timeval time_end; struct isis_mt_router_info *mt_router_info; uint16_t mtid = 0; +#ifndef FABRICD + bool flex_algo_enabled; +#endif /* ifndef FABRICD */ /* Get time that can't roll backwards. */ monotime(&time_start); @@ -1885,16 +1888,27 @@ void isis_run_spf(struct isis_spftree *spftree) * not supported by the node, it MUST stop participating in such * Flexible-Algorithm. 
*/ - if (flex_algo_id_valid(spftree->algorithm) && - !flex_algo_get_state(spftree->area->flex_algos, - spftree->algorithm)) { - if (!CHECK_FLAG(spftree->flags, F_SPFTREE_DISABLED)) { - isis_spftree_clear(spftree); - SET_FLAG(spftree->flags, F_SPFTREE_DISABLED); + if (flex_algo_id_valid(spftree->algorithm)) { + flex_algo_enabled = isis_flex_algo_elected_supported( + spftree->algorithm, spftree->area); + if (flex_algo_enabled != + flex_algo_get_state(spftree->area->flex_algos, + spftree->algorithm)) { + /* actual state is inconsistent with local LSP */ lsp_regenerate_schedule(spftree->area, spftree->area->is_type, 0); + goto out; + } + if (!flex_algo_enabled) { + if (!CHECK_FLAG(spftree->flags, F_SPFTREE_DISABLED)) { + isis_spftree_clear(spftree); + SET_FLAG(spftree->flags, F_SPFTREE_DISABLED); + lsp_regenerate_schedule(spftree->area, + spftree->area->is_type, + 0); + } + goto out; } - goto out; } #endif /* ifndef FABRICD */ diff --git a/ldpd/init.c b/ldpd/init.c index 15d653b747..f0cb98e5c0 100644 --- a/ldpd/init.c +++ b/ldpd/init.c @@ -31,13 +31,13 @@ send_init(struct nbr *nbr) if ((buf = ibuf_open(size)) == NULL) fatal(__func__); - err |= gen_ldp_hdr(buf, size); + SET_FLAG(err, gen_ldp_hdr(buf, size)); size -= LDP_HDR_SIZE; - err |= gen_msg_hdr(buf, MSG_TYPE_INIT, size); - err |= gen_init_prms_tlv(buf, nbr); - err |= gen_cap_dynamic_tlv(buf); - err |= gen_cap_twcard_tlv(buf, 1); - err |= gen_cap_unotif_tlv(buf, 1); + SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_INIT, size)); + SET_FLAG(err, gen_init_prms_tlv(buf, nbr)); + SET_FLAG(err, gen_cap_dynamic_tlv(buf)); + SET_FLAG(err, gen_cap_twcard_tlv(buf, 1)); + SET_FLAG(err, gen_cap_unotif_tlv(buf, 1)); if (err) { ibuf_free(buf); return; @@ -121,62 +121,56 @@ recv_init(struct nbr *nbr, char *buf, uint16_t len) return (-1); case TLV_TYPE_DYNAMIC_CAP: if (tlv_len != CAP_TLV_DYNAMIC_LEN) { - session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, - msg.type); + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); return (-1); } - if (caps_rcvd & F_CAP_TLV_RCVD_DYNAMIC) { - session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, - msg.type); + if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_DYNAMIC)) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } - caps_rcvd |= F_CAP_TLV_RCVD_DYNAMIC; + SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_DYNAMIC); - nbr->flags |= F_NBR_CAP_DYNAMIC; + SET_FLAG(nbr->flags, F_NBR_CAP_DYNAMIC); log_debug("%s: lsr-id %pI4 announced the Dynamic Capability Announcement capability", __func__, &nbr->id); break; case TLV_TYPE_TWCARD_CAP: if (tlv_len != CAP_TLV_TWCARD_LEN) { - session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, - msg.type); + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); return (-1); } - if (caps_rcvd & F_CAP_TLV_RCVD_TWCARD) { - session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, - msg.type); + if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD)) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } - caps_rcvd |= F_CAP_TLV_RCVD_TWCARD; + SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD); - nbr->flags |= F_NBR_CAP_TWCARD; + SET_FLAG(nbr->flags, F_NBR_CAP_TWCARD); log_debug("%s: lsr-id %pI4 announced the Typed Wildcard FEC capability", __func__, &nbr->id); break; case TLV_TYPE_UNOTIF_CAP: if (tlv_len != CAP_TLV_UNOTIF_LEN) { - session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, - msg.type); + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); return (-1); } - if (caps_rcvd & F_CAP_TLV_RCVD_UNOTIF) { - session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, - msg.type); + if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF)) { + 
session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } - caps_rcvd |= F_CAP_TLV_RCVD_UNOTIF; + SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF); - nbr->flags |= F_NBR_CAP_UNOTIF; + SET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF); log_debug("%s: lsr-id %pI4 announced the Unrecognized Notification capability", __func__, &nbr->id); break; default: - if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) + if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG)) send_notification_rtlvs(nbr, S_UNSSUPORTDCAP, msg.id, msg.type, tlv_type, tlv_len, buf); /* ignore unknown tlv */ @@ -217,16 +211,16 @@ send_capability(struct nbr *nbr, uint16_t capability, int enable) if ((buf = ibuf_open(size)) == NULL) fatal(__func__); - err |= gen_ldp_hdr(buf, size); + SET_FLAG(err, gen_ldp_hdr(buf, size)); size -= LDP_HDR_SIZE; - err |= gen_msg_hdr(buf, MSG_TYPE_CAPABILITY, size); + SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_CAPABILITY, size)); switch (capability) { case TLV_TYPE_TWCARD_CAP: - err |= gen_cap_twcard_tlv(buf, enable); + SET_FLAG(err, gen_cap_twcard_tlv(buf, enable)); break; case TLV_TYPE_UNOTIF_CAP: - err |= gen_cap_unotif_tlv(buf, enable); + SET_FLAG(err, gen_cap_unotif_tlv(buf, enable)); break; case TLV_TYPE_DYNAMIC_CAP: /* @@ -288,52 +282,47 @@ recv_capability(struct nbr *nbr, char *buf, uint16_t len) switch (tlv_type) { case TLV_TYPE_TWCARD_CAP: if (tlv_len != CAP_TLV_TWCARD_LEN) { - session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, - msg.type); + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); return (-1); } - if (caps_rcvd & F_CAP_TLV_RCVD_TWCARD) { - session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, - msg.type); + if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD)) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } - caps_rcvd |= F_CAP_TLV_RCVD_TWCARD; + SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD); memcpy(&reserved, buf, sizeof(reserved)); enable = reserved & STATE_BIT; if (enable) - nbr->flags |= F_NBR_CAP_TWCARD; + SET_FLAG(nbr->flags, F_NBR_CAP_TWCARD); else - nbr->flags &= ~F_NBR_CAP_TWCARD; + UNSET_FLAG(nbr->flags, F_NBR_CAP_TWCARD); log_debug("%s: lsr-id %pI4 %s the Typed Wildcard FEC capability", __func__, &nbr->id, (enable) ? "announced" : "withdrew"); break; case TLV_TYPE_UNOTIF_CAP: if (tlv_len != CAP_TLV_UNOTIF_LEN) { - session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, - msg.type); + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); return (-1); } - if (caps_rcvd & F_CAP_TLV_RCVD_UNOTIF) { - session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, - msg.type); + if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF)) { + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } - caps_rcvd |= F_CAP_TLV_RCVD_UNOTIF; + SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF); memcpy(&reserved, buf, sizeof(reserved)); enable = reserved & STATE_BIT; if (enable) - nbr->flags |= F_NBR_CAP_UNOTIF; + SET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF); else - nbr->flags &= ~F_NBR_CAP_UNOTIF; + UNSET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF); log_debug("%s: lsr-id %pI4 %s the Unrecognized Notification capability", __func__, - &nbr->id, (enable) ? "announced" : - "withdrew"); + &nbr->id, (enable) ? 
"announced" : "withdrew"); break; case TLV_TYPE_DYNAMIC_CAP: /* @@ -346,7 +335,7 @@ recv_capability(struct nbr *nbr, char *buf, uint16_t len) */ /* FALLTHROUGH */ default: - if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) + if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG)) send_notification_rtlvs(nbr, S_UNSSUPORTDCAP, msg.id, msg.type, tlv_type, tlv_len, buf); /* ignore unknown tlv */ diff --git a/ldpd/l2vpn.c b/ldpd/l2vpn.c index 4664b1f894..ce038acdcb 100644 --- a/ldpd/l2vpn.c +++ b/ldpd/l2vpn.c @@ -161,7 +161,7 @@ l2vpn_if_update(struct l2vpn_if *lif) fec.type = MAP_TYPE_PWID; fec.fec.pwid.type = l2vpn->pw_type; fec.fec.pwid.group_id = 0; - fec.flags |= F_MAP_PW_ID; + SET_FLAG(fec.flags, F_MAP_PW_ID); fec.fec.pwid.pwid = pw->pwid; send_mac_withdrawal(nbr, &fec, lif->mac); @@ -274,17 +274,17 @@ l2vpn_pw_reset(struct l2vpn_pw *pw) pw->local_status = PW_FORWARDING; pw->remote_status = PW_NOT_FORWARDING; - if (pw->flags & F_PW_CWORD_CONF) - pw->flags |= F_PW_CWORD; + if (CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) + SET_FLAG(pw->flags, F_PW_CWORD); else - pw->flags &= ~F_PW_CWORD; + UNSET_FLAG(pw->flags, F_PW_CWORD); - if (pw->flags & F_PW_STATUSTLV_CONF) - pw->flags |= F_PW_STATUSTLV; + if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV_CONF)) + SET_FLAG(pw->flags, F_PW_STATUSTLV); else - pw->flags &= ~F_PW_STATUSTLV; + UNSET_FLAG(pw->flags, F_PW_STATUSTLV); - if (pw->flags & F_PW_STATUSTLV_CONF) { + if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV_CONF)) { struct fec_node *fn; struct fec fec; l2vpn_pw_fec(pw, &fec); @@ -300,8 +300,7 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh) { /* check for a remote label */ if (fnh->remote_label == NO_LABEL) { - log_warnx("%s: pseudowire %s: no remote label", __func__, - pw->ifname); + log_warnx("%s: pseudowire %s: no remote label", __func__, pw->ifname); pw->reason = F_PW_NO_REMOTE_LABEL; return (0); } @@ -315,10 +314,9 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh) } /* check pw status if applicable */ - if ((pw->flags & F_PW_STATUSTLV) && + if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV) && pw->remote_status != PW_FORWARDING) { - log_warnx("%s: pseudowire %s: remote end is down", __func__, - pw->ifname); + log_warnx("%s: pseudowire %s: remote end is down", __func__, pw->ifname); pw->reason = F_PW_REMOTE_NOT_FWD; return (0); } @@ -345,34 +343,34 @@ l2vpn_pw_negotiate(struct lde_nbr *ln, struct fec_node *fn, struct map *map) /* RFC4447 - Section 6.2: control word negotiation */ if (fec_find(&ln->sent_map, &fn->fec)) { - if ((map->flags & F_MAP_PW_CWORD) && - !(pw->flags & F_PW_CWORD_CONF)) { + if (CHECK_FLAG(map->flags, F_MAP_PW_CWORD) && + !CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) { /* ignore the received label mapping */ return (1); - } else if (!(map->flags & F_MAP_PW_CWORD) && - (pw->flags & F_PW_CWORD_CONF)) { + } else if (!CHECK_FLAG(map->flags, F_MAP_PW_CWORD) && + CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) { /* append a "Wrong C-bit" status code */ st.status_code = S_WRONG_CBIT; st.msg_id = map->msg_id; st.msg_type = htons(MSG_TYPE_LABELMAPPING); lde_send_labelwithdraw(ln, fn, NULL, &st); - pw->flags &= ~F_PW_CWORD; + UNSET_FLAG(pw->flags, F_PW_CWORD); lde_send_labelmapping(ln, fn, 1); } - } else if (map->flags & F_MAP_PW_CWORD) { - if (pw->flags & F_PW_CWORD_CONF) - pw->flags |= F_PW_CWORD; + } else if (CHECK_FLAG(map->flags, F_MAP_PW_CWORD)) { + if (CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) + SET_FLAG(pw->flags, F_PW_CWORD); else /* act as if no label mapping had been received */ return (1); } else - pw->flags &= ~F_PW_CWORD; + UNSET_FLAG(pw->flags, F_PW_CWORD); /* 
RFC4447 - Section 5.4.3: pseudowire status negotiation */ if (fec_find(&ln->recv_map, &fn->fec) == NULL && - !(map->flags & F_MAP_PW_STATUS)) - pw->flags &= ~F_PW_STATUSTLV; + !CHECK_FLAG(map->flags, F_MAP_PW_STATUS)) + UNSET_FLAG(pw->flags, F_PW_STATUSTLV); return (0); } @@ -385,12 +383,11 @@ l2vpn_send_pw_status(struct lde_nbr *ln, uint32_t status, struct fec *fec) memset(&nm, 0, sizeof(nm)); nm.status_code = S_PW_STATUS; nm.pw_status = status; - nm.flags |= F_NOTIF_PW_STATUS; + SET_FLAG(nm.flags, F_NOTIF_PW_STATUS); lde_fec2map(fec, &nm.fec); - nm.flags |= F_NOTIF_FEC; + SET_FLAG(nm.flags, F_NOTIF_FEC); - lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, - sizeof(nm)); + lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, sizeof(nm)); } void @@ -402,14 +399,13 @@ l2vpn_send_pw_status_wcard(struct lde_nbr *ln, uint32_t status, memset(&nm, 0, sizeof(nm)); nm.status_code = S_PW_STATUS; nm.pw_status = status; - nm.flags |= F_NOTIF_PW_STATUS; + SET_FLAG(nm.flags, F_NOTIF_PW_STATUS); nm.fec.type = MAP_TYPE_PWID; nm.fec.fec.pwid.type = pw_type; nm.fec.fec.pwid.group_id = group_id; - nm.flags |= F_NOTIF_FEC; + SET_FLAG(nm.flags, F_NOTIF_FEC); - lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, - sizeof(nm)); + lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, sizeof(nm)); } void @@ -421,7 +417,7 @@ l2vpn_recv_pw_status(struct lde_nbr *ln, struct notify_msg *nm) struct l2vpn_pw *pw; if (nm->fec.type == MAP_TYPE_TYPED_WCARD || - !(nm->fec.flags & F_MAP_PW_ID)) { + !CHECK_FLAG(nm->fec.flags, F_MAP_PW_ID)) { l2vpn_recv_pw_status_wcard(ln, nm); return; } @@ -540,7 +536,7 @@ l2vpn_pw_status_update(struct zapi_pw_status *zpw) if (ln == NULL) return (0); l2vpn_pw_fec(pw, &fec); - if (pw->flags & F_PW_STATUSTLV) + if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV)) l2vpn_send_pw_status(ln, local_status, &fec); else { struct fec_node *fn; @@ -611,8 +607,7 @@ l2vpn_binding_ctl(pid_t pid) pwctl.local_label = fn->local_label; pwctl.local_gid = 0; pwctl.local_ifmtu = pw->l2vpn->mtu; - pwctl.local_cword = (pw->flags & F_PW_CWORD_CONF) ? - 1 : 0; + pwctl.local_cword = CHECK_FLAG(pw->flags, F_PW_CWORD_CONF) ? 1 : 0; pwctl.reason = pw->reason; } else pwctl.local_label = NO_LABEL; @@ -624,11 +619,10 @@ l2vpn_binding_ctl(pid_t pid) if (me) { pwctl.remote_label = me->map.label; pwctl.remote_gid = me->map.fec.pwid.group_id; - if (me->map.flags & F_MAP_PW_IFMTU) + if (CHECK_FLAG(me->map.flags, F_MAP_PW_IFMTU)) pwctl.remote_ifmtu = me->map.fec.pwid.ifmtu; if (pw) - pwctl.remote_cword = (pw->flags & F_PW_CWORD) ? - 1 : 0; + pwctl.remote_cword = CHECK_FLAG(pw->flags, F_PW_CWORD) ? 
1 : 0; lde_imsg_compose_ldpe(IMSG_CTL_SHOW_L2VPN_BINDING, 0, pid, &pwctl, sizeof(pwctl)); diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c index e3ace30582..2010829035 100644 --- a/ldpd/ldp_zebra.c +++ b/ldpd/ldp_zebra.c @@ -22,8 +22,7 @@ #include "ldp_debug.h" static void ifp2kif(struct interface *, struct kif *); -static void ifc2kaddr(struct interface *, struct connected *, - struct kaddr *); +static void ifc2kaddr(struct interface *, struct connected *, struct kaddr *); static int ldp_zebra_send_mpls_labels(int, struct kroute *); static int ldp_router_id_update(ZAPI_CALLBACK_ARGS); static int ldp_interface_address_add(ZAPI_CALLBACK_ARGS); @@ -295,8 +294,7 @@ kmpw_add(struct zapi_pw *zpw) debug_zebra_out("pseudowire %s nexthop %s (add)", zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop)); - return zebra_send_pw(zclient, ZEBRA_PW_ADD, zpw) - == ZCLIENT_SEND_FAILURE; + return zebra_send_pw(zclient, ZEBRA_PW_ADD, zpw) == ZCLIENT_SEND_FAILURE; } int @@ -305,8 +303,7 @@ kmpw_del(struct zapi_pw *zpw) debug_zebra_out("pseudowire %s nexthop %s (del)", zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop)); - return zebra_send_pw(zclient, ZEBRA_PW_DELETE, zpw) - == ZCLIENT_SEND_FAILURE; + return zebra_send_pw(zclient, ZEBRA_PW_DELETE, zpw) == ZCLIENT_SEND_FAILURE; } int @@ -316,8 +313,7 @@ kmpw_set(struct zapi_pw *zpw) zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop), zpw->local_label, zpw->remote_label); - return zebra_send_pw(zclient, ZEBRA_PW_SET, zpw) - == ZCLIENT_SEND_FAILURE; + return zebra_send_pw(zclient, ZEBRA_PW_SET, zpw) == ZCLIENT_SEND_FAILURE; } int @@ -326,8 +322,7 @@ kmpw_unset(struct zapi_pw *zpw) debug_zebra_out("pseudowire %s nexthop %s (unset)", zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop)); - return zebra_send_pw(zclient, ZEBRA_PW_UNSET, zpw) - == ZCLIENT_SEND_FAILURE; + return zebra_send_pw(zclient, ZEBRA_PW_UNSET, zpw) == ZCLIENT_SEND_FAILURE; } void diff --git a/ldpd/notification.c b/ldpd/notification.c index af5bb267d7..1709098d09 100644 --- a/ldpd/notification.c +++ b/ldpd/notification.c @@ -25,28 +25,28 @@ send_notification_full(struct tcp_conn *tcp, struct notify_msg *nm) /* calculate size */ size = LDP_HDR_SIZE + LDP_MSG_SIZE + STATUS_SIZE; - if (nm->flags & F_NOTIF_PW_STATUS) + if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS)) size += PW_STATUS_TLV_SIZE; - if (nm->flags & F_NOTIF_FEC) + if (CHECK_FLAG(nm->flags, F_NOTIF_FEC)) size += len_fec_tlv(&nm->fec); - if (nm->flags & F_NOTIF_RETURNED_TLVS) + if (CHECK_FLAG(nm->flags, F_NOTIF_RETURNED_TLVS)) size += TLV_HDR_SIZE * 2 + nm->rtlvs.length; if ((buf = ibuf_open(size)) == NULL) fatal(__func__); - err |= gen_ldp_hdr(buf, size); + SET_FLAG(err, gen_ldp_hdr(buf, size)); size -= LDP_HDR_SIZE; - err |= gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size); - err |= gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type); + SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size)); + SET_FLAG(err, gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type)); /* optional tlvs */ - if (nm->flags & F_NOTIF_PW_STATUS) - err |= gen_pw_status_tlv(buf, nm->pw_status); - if (nm->flags & F_NOTIF_FEC) - err |= gen_fec_tlv(buf, &nm->fec); - if (nm->flags & F_NOTIF_RETURNED_TLVS) - err |= gen_returned_tlvs(buf, nm->rtlvs.type, nm->rtlvs.length, - nm->rtlvs.data); + if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS)) + SET_FLAG(err, gen_pw_status_tlv(buf, nm->pw_status)); + if (CHECK_FLAG(nm->flags, F_NOTIF_FEC)) + SET_FLAG(err, gen_fec_tlv(buf, &nm->fec)); + if 
(CHECK_FLAG(nm->flags, F_NOTIF_RETURNED_TLVS)) + SET_FLAG(err, gen_returned_tlvs(buf, nm->rtlvs.type, nm->rtlvs.length, + nm->rtlvs.data)); if (err) { ibuf_free(buf); return; @@ -121,7 +121,7 @@ send_notification_rtlvs(struct nbr *nbr, uint32_t status_code, uint32_t msg_id, nm.rtlvs.type = tlv_type; nm.rtlvs.length = tlv_len; nm.rtlvs.data = tlv_data; - nm.flags |= F_NOTIF_RETURNED_TLVS; + SET_FLAG(nm.flags, F_NOTIF_RETURNED_TLVS); } send_notification_full(nbr->tcp, &nm); @@ -189,13 +189,12 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) break; case TLV_TYPE_PW_STATUS: if (tlv_len != 4) { - session_shutdown(nbr, S_BAD_TLV_LEN, - msg.id, msg.type); + session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type); return (-1); } nm.pw_status = ntohl(*(uint32_t *)buf); - nm.flags |= F_NOTIF_PW_STATUS; + SET_FLAG(nm.flags, F_NOTIF_PW_STATUS); break; case TLV_TYPE_FEC: if ((tlen = tlv_decode_fec_elm(nbr, &msg, buf, @@ -203,12 +202,11 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) return (-1); /* allow only one fec element */ if (tlen != tlv_len) { - session_shutdown(nbr, S_BAD_TLV_VAL, - msg.id, msg.type); + session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type); leconf->stats.bad_tlv_len++; return (-1); } - nm.flags |= F_NOTIF_FEC; + SET_FLAG(nm.flags, F_NOTIF_FEC); break; default: if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) { @@ -226,9 +224,8 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) /* sanity checks */ switch (nm.status_code) { case S_PW_STATUS: - if (!(nm.flags & (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) { - send_notification(nbr->tcp, S_MISS_MSG, - msg.id, msg.type); + if (!CHECK_FLAG(nm.flags, (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) { + send_notification(nbr->tcp, S_MISS_MSG, msg.id, msg.type); return (-1); } @@ -236,20 +233,17 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) case MAP_TYPE_PWID: break; default: - send_notification(nbr->tcp, S_BAD_TLV_VAL, - msg.id, msg.type); + send_notification(nbr->tcp, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } break; case S_ENDOFLIB: - if (!(nm.flags & F_NOTIF_FEC)) { - send_notification(nbr->tcp, S_MISS_MSG, - msg.id, msg.type); + if (!CHECK_FLAG(nm.flags, F_NOTIF_FEC)) { + send_notification(nbr->tcp, S_MISS_MSG, msg.id, msg.type); return (-1); } if (nm.fec.type != MAP_TYPE_TYPED_WCARD) { - send_notification(nbr->tcp, S_BAD_TLV_VAL, - msg.id, msg.type); + send_notification(nbr->tcp, S_BAD_TLV_VAL, msg.id, msg.type); return (-1); } break; @@ -259,7 +253,7 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) log_msg_notification(0, nbr, &nm); - if (st.status_code & htonl(STATUS_FATAL)) { + if (CHECK_FLAG(st.status_code, htonl(STATUS_FATAL))) { if (nbr->state == NBR_STA_OPENSENT) nbr_start_idtimer(nbr); @@ -269,11 +263,9 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) * initialization, it SHOULD transmit a Shutdown message and * then close the transport connection". 
*/ - if (nbr->state != NBR_STA_OPER && - nm.status_code == S_SHUTDOWN) { + if (nbr->state != NBR_STA_OPER && nm.status_code == S_SHUTDOWN) { leconf->stats.session_attempts++; - send_notification(nbr->tcp, S_SHUTDOWN, - msg.id, msg.type); + send_notification(nbr->tcp, S_SHUTDOWN, msg.id, msg.type); } leconf->stats.shutdown_rcv_notify++; @@ -287,8 +279,7 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len) switch (nm.status_code) { case S_PW_STATUS: case S_ENDOFLIB: - ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0, - &nm, sizeof(nm)); + ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0, &nm, sizeof(nm)); break; case S_NO_HELLO: leconf->stats.session_rejects_hello++; @@ -361,8 +352,8 @@ gen_returned_tlvs(struct ibuf *buf, uint16_t type, uint16_t length, tlv.length = htons(length); err = ibuf_add(buf, &rtlvs, sizeof(rtlvs)); - err |= ibuf_add(buf, &tlv, sizeof(tlv)); - err |= ibuf_add(buf, tlv_data, length); + SET_FLAG(err, ibuf_add(buf, &tlv, sizeof(tlv))); + SET_FLAG(err, ibuf_add(buf, tlv_data, length)); return (err); } @@ -378,9 +369,9 @@ log_msg_notification(int out, struct nbr *nbr, struct notify_msg *nm) debug_msg(out, "notification: lsr-id %pI4, status %s", &nbr->id, status_code_name(nm->status_code)); - if (nm->flags & F_NOTIF_FEC) + if (CHECK_FLAG(nm->flags, F_NOTIF_FEC)) debug_msg(out, "notification: fec %s", log_map(&nm->fec)); - if (nm->flags & F_NOTIF_PW_STATUS) + if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS)) debug_msg(out, "notification: pw-status %s", (nm->pw_status == PW_FORWARDING) ? "forwarding" : "not forwarding"); } diff --git a/ldpd/socket.c b/ldpd/socket.c index ec6d8be3d5..6b7e475d7f 100644 --- a/ldpd/socket.c +++ b/ldpd/socket.c @@ -89,8 +89,7 @@ ldp_create_socket(int af, enum socket_type type) return (-1); } if (type == LDP_SOCKET_DISC) { - if (sock_set_ipv4_mcast_ttl(fd, - IP_DEFAULT_MULTICAST_TTL) == -1) { + if (sock_set_ipv4_mcast_ttl(fd, IP_DEFAULT_MULTICAST_TTL) == -1) { close(fd); return (-1); } @@ -141,7 +140,7 @@ ldp_create_socket(int af, enum socket_type type) close(fd); return (-1); } - if (!(ldpd_conf->ipv6.flags & F_LDPD_AF_NO_GTSM)) { + if (!CHECK_FLAG(ldpd_conf->ipv6.flags, F_LDPD_AF_NO_GTSM)) { /* ignore any possible error */ sock_set_ipv6_minhopcount(fd, 255); } @@ -171,8 +170,7 @@ ldp_create_socket(int af, enum socket_type type) #ifdef __OpenBSD__ opt = 1; - if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt, - sizeof(opt)) == -1) { + if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt, sizeof(opt)) == -1) { if (errno == ENOPROTOOPT) { /* system w/o md5sig */ log_warnx("md5sig not available, disabling"); sysdep.no_md5sig = 1; @@ -196,7 +194,7 @@ sock_set_nonblock(int fd) if ((flags = fcntl(fd, F_GETFL, 0)) == -1) fatal("fcntl F_GETFL"); - flags |= O_NONBLOCK; + SET_FLAG(flags, O_NONBLOCK); if (fcntl(fd, F_SETFL, flags) == -1) fatal("fcntl F_SETFL"); @@ -210,7 +208,7 @@ sock_set_cloexec(int fd) if ((flags = fcntl(fd, F_GETFD, 0)) == -1) fatal("fcntl F_GETFD"); - flags |= FD_CLOEXEC; + SET_FLAG(flags, FD_CLOEXEC); if (fcntl(fd, F_SETFD, flags) == -1) fatal("fcntl F_SETFD"); @@ -222,16 +220,14 @@ sock_set_recvbuf(int fd) int bsize; bsize = 65535; - while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize, - sizeof(bsize)) == -1) + while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize, sizeof(bsize)) == -1) bsize /= 2; } int sock_set_reuse(int fd, int enable) { - if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable, - sizeof(int)) < 0) { + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int)) < 0) { log_warn("%s: error setting 
SO_REUSEADDR", __func__); return (-1); } @@ -244,8 +240,7 @@ sock_set_bindany(int fd, int enable) { #ifdef HAVE_SO_BINDANY frr_with_privs(&ldpd_privs) { - if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable, - sizeof(int)) < 0) { + if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable, sizeof(int)) < 0) { log_warn("%s: error setting SO_BINDANY", __func__); return (-1); } @@ -259,8 +254,7 @@ sock_set_bindany(int fd, int enable) return (0); #elif defined(IP_BINDANY) frr_with_privs(&ldpd_privs) { - if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int)) - < 0) { + if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int)) < 0) { log_warn("%s: error setting IP_BINDANY", __func__); return (-1); } @@ -343,10 +337,8 @@ sock_set_ipv4_ucast_ttl(int fd, int ttl) int sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl) { - if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, - (char *)&ttl, sizeof(ttl)) < 0) { - log_warn("%s: error setting IP_MULTICAST_TTL to %d", - __func__, ttl); + if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, (char *)&ttl, sizeof(ttl)) < 0) { + log_warn("%s: error setting IP_MULTICAST_TTL to %d", __func__, ttl); return (-1); } @@ -358,8 +350,7 @@ sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl) int sock_set_ipv4_pktinfo(int fd, int enable) { - if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable, - sizeof(enable)) < 0) { + if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable, sizeof(enable)) < 0) { log_warn("%s: error setting IP_PKTINFO", __func__); return (-1); } @@ -370,8 +361,7 @@ sock_set_ipv4_pktinfo(int fd, int enable) int sock_set_ipv4_recvdstaddr(int fd, int enable) { - if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable, - sizeof(enable)) < 0) { + if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable, sizeof(enable)) < 0) { log_warn("%s: error setting IP_RECVDSTADDR", __func__); return (-1); } @@ -409,8 +399,7 @@ sock_set_ipv4_mcast_loop(int fd) int sock_set_ipv6_dscp(int fd, int dscp) { - if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp, - sizeof(dscp)) < 0) { + if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp, sizeof(dscp)) < 0) { log_warn("%s: error setting IPV6_TCLASS", __func__); return (-1); } @@ -421,8 +410,7 @@ sock_set_ipv6_dscp(int fd, int dscp) int sock_set_ipv6_pktinfo(int fd, int enable) { - if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable, - sizeof(enable)) < 0) { + if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable, sizeof(enable)) < 0) { log_warn("%s: error setting IPV6_RECVPKTINFO", __func__); return (-1); } diff --git a/lib/command.c b/lib/command.c index e92251160f..0995637219 100644 --- a/lib/command.c +++ b/lib/command.c @@ -735,9 +735,13 @@ char *cmd_variable_comp2str(vector comps, unsigned short cols) char *item = vector_slot(comps, j); itemlen = strlen(item); - if (cs + itemlen + AUTOCOMP_INDENT + 3 >= bsz) - buf = XREALLOC(MTYPE_TMP, buf, (bsz *= 2)); + size_t next_sz = cs + itemlen + AUTOCOMP_INDENT + 3; + if (next_sz > bsz) { + /* Make sure the buf size is large enough */ + bsz = next_sz; + buf = XREALLOC(MTYPE_TMP, buf, bsz); + } if (lc + itemlen + 1 >= cols) { cs += snprintf(&buf[cs], bsz - cs, "\n%*s", AUTOCOMP_INDENT, ""); diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c index 534dc43405..5c875204f7 100644 --- a/lib/mgmt_be_client.c +++ b/lib/mgmt_be_client.c @@ -28,6 +28,8 @@ #define MGMTD_DBG_BE_CLIENT_CHECK() \ DEBUG_MODE_CHECK(&mgmt_dbg_be_client, DEBUG_MODE_ALL) +DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT, "backend client"); +DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT_NAME, "backend client name"); 
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH, "backend transaction batch data"); DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_TXN, "backend transaction data"); @@ -70,8 +72,6 @@ struct mgmt_be_batch_ctx { #define MGMTD_BE_TXN_FLAGS_CFG_APPLIED (1U << 1) DECLARE_LIST(mgmt_be_batches, struct mgmt_be_batch_ctx, list_linkage); -struct mgmt_be_client_ctx; - PREDECL_LIST(mgmt_be_txns); struct mgmt_be_txn_ctx { /* Txn-Id as assigned by MGMTD */ @@ -79,7 +79,7 @@ struct mgmt_be_txn_ctx { uint32_t flags; struct mgmt_be_client_txn_ctx client_data; - struct mgmt_be_client_ctx *client_ctx; + struct mgmt_be_client *client; /* List of batches belonging to this transaction */ struct mgmt_be_batches_head cfg_batches; @@ -100,9 +100,11 @@ DECLARE_LIST(mgmt_be_txns, struct mgmt_be_txn_ctx, list_linkage); #define FOREACH_BE_APPLY_BATCH_IN_LIST(txn, batch) \ frr_each_safe (mgmt_be_batches, &(txn)->apply_cfgs, (batch)) -struct mgmt_be_client_ctx { +struct mgmt_be_client { struct msg_client client; + char *name; + struct nb_config *candidate_config; struct nb_config *running_config; @@ -114,7 +116,9 @@ struct mgmt_be_client_ctx { unsigned long avg_apply_nb_cfg_tm; struct mgmt_be_txns_head txn_head; - struct mgmt_be_client_params client_params; + + struct mgmt_be_client_cbs cbs; + uintptr_t user_data; }; #define FOREACH_BE_TXN_IN_LIST(client_ctx, txn) \ @@ -122,9 +126,6 @@ struct mgmt_be_client_ctx { struct debug mgmt_dbg_be_client = {0, "Management backend client operations"}; -static struct mgmt_be_client_ctx mgmt_be_client_ctx = { - .client = {.conn = {.fd = -1}}}; - const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = { #ifdef HAVE_STATICD [MGMTD_BE_CLIENT_ID_STATICD] = "staticd", @@ -132,7 +133,7 @@ const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = { [MGMTD_BE_CLIENT_ID_MAX] = "Unknown/Invalid", }; -static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx, +static int mgmt_be_client_send_msg(struct mgmt_be_client *client_ctx, Mgmtd__BeMessage *be_msg) { return msg_conn_send_msg( @@ -216,8 +217,7 @@ static void mgmt_be_cleanup_all_batches(struct mgmt_be_txn_ctx *txn) } static struct mgmt_be_txn_ctx * -mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id) +mgmt_be_find_txn_by_id(struct mgmt_be_client *client_ctx, uint64_t txn_id) { struct mgmt_be_txn_ctx *txn = NULL; @@ -230,8 +230,7 @@ mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx, } static struct mgmt_be_txn_ctx * -mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id) +mgmt_be_txn_create(struct mgmt_be_client *client_ctx, uint64_t txn_id) { struct mgmt_be_txn_ctx *txn = NULL; @@ -242,7 +241,7 @@ mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx, assert(txn); txn->txn_id = txn_id; - txn->client_ctx = client_ctx; + txn->client = client_ctx; mgmt_be_batches_init(&txn->cfg_batches); mgmt_be_batches_init(&txn->apply_cfgs); mgmt_be_txns_add_tail(&client_ctx->txn_head, txn); @@ -253,8 +252,8 @@ mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx, return txn; } -static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx, - struct mgmt_be_txn_ctx **txn) +static void mgmt_be_txn_delete(struct mgmt_be_client *client_ctx, + struct mgmt_be_txn_ctx **txn) { char err_msg[] = "MGMT Transaction Delete"; @@ -274,12 +273,10 @@ static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx, * CFGDATA_CREATE_REQs. But first notify the client * about the transaction delete. 
*/ - if (client_ctx->client_params.txn_notify) - (void)(*client_ctx->client_params - .txn_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - &(*txn)->client_data, true); + if (client_ctx->cbs.txn_notify) + (void)(*client_ctx->cbs.txn_notify)(client_ctx, + client_ctx->user_data, + &(*txn)->client_data, true); mgmt_be_cleanup_all_batches(*txn); if ((*txn)->nb_txn) @@ -290,8 +287,7 @@ static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx, *txn = NULL; } -static void -mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx) +static void mgmt_be_cleanup_all_txns(struct mgmt_be_client *client_ctx) { struct mgmt_be_txn_ctx *txn = NULL; @@ -300,9 +296,8 @@ mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx) } } -static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id, bool create, - bool success) +static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx, + uint64_t txn_id, bool create, bool success) { Mgmtd__BeMessage be_msg; Mgmtd__BeTxnReply txn_reply; @@ -321,8 +316,8 @@ static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx, return mgmt_be_client_send_msg(client_ctx, &be_msg); } -static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id, bool create) +static int mgmt_be_process_txn_req(struct mgmt_be_client *client_ctx, + uint64_t txn_id, bool create) { struct mgmt_be_txn_ctx *txn; @@ -342,11 +337,9 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx, MGMTD_BE_CLIENT_DBG("Created new txn-id %" PRIu64, txn_id); txn = mgmt_be_txn_create(client_ctx, txn_id); - if (client_ctx->client_params.txn_notify) - (void)(*client_ctx->client_params - .txn_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, + if (client_ctx->cbs.txn_notify) + (void)(*client_ctx->cbs.txn_notify)( + client_ctx, client_ctx->user_data, &txn->client_data, false); } else { if (!txn) { @@ -368,10 +361,10 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx, return 0; } -static int -mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id, uint64_t batch_id, - bool success, const char *error_if_any) +static int mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client *client_ctx, + uint64_t txn_id, uint64_t batch_id, + bool success, + const char *error_if_any) { Mgmtd__BeMessage be_msg; Mgmtd__BeCfgDataCreateReply cfgdata_reply; @@ -398,7 +391,7 @@ static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn) { char errmsg[BUFSIZ] = {0}; - assert(txn && txn->client_ctx); + assert(txn && txn->client); if (txn->nb_txn) { MGMTD_BE_CLIENT_ERR( "Aborting configs after prep for txn-id: %" PRIu64, @@ -416,13 +409,13 @@ static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn) MGMTD_BE_CLIENT_DBG( "Reset candidate configurations after abort of txn-id: %" PRIu64, txn->txn_id); - nb_config_replace(txn->client_ctx->candidate_config, - txn->client_ctx->running_config, true); + nb_config_replace(txn->client->candidate_config, + txn->client->running_config, true); } static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn) { - struct mgmt_be_client_ctx *client_ctx; + struct mgmt_be_client *client_ctx; struct mgmt_be_txn_req *txn_req = NULL; struct nb_context nb_ctx = {0}; struct timeval edit_nb_cfg_start; @@ -437,15 +430,15 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn) size_t num_processed; int err; - assert(txn && txn->client_ctx); - client_ctx = txn->client_ctx; 
+ assert(txn && txn->client); + client_ctx = txn->client; num_processed = 0; FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) { txn_req = &batch->txn_req; error = false; nb_ctx.client = NB_CLIENT_CLI; - nb_ctx.user = (void *)client_ctx->client_params.user_data; + nb_ctx.user = (void *)client_ctx->user_data; if (!txn->nb_txn) { /* @@ -492,7 +485,7 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn) * Now prepare all the batches we have applied in one go. */ nb_ctx.client = NB_CLIENT_CLI; - nb_ctx.user = (void *)client_ctx->client_params.user_data; + nb_ctx.user = (void *)client_ctx->user_data; gettimeofday(&prep_nb_cfg_start, NULL); err = nb_candidate_commit_prepare(nb_ctx, client_ctx->candidate_config, @@ -556,12 +549,11 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn) /* * Process all CFG_DATA_REQs received so far and prepare them all in one go. */ -static int -mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx, - struct mgmt_be_txn_ctx *txn, - uint64_t batch_id, - Mgmtd__YangCfgDataReq * cfg_req[], - int num_req) +static int mgmt_be_update_setcfg_in_batch(struct mgmt_be_client *client_ctx, + struct mgmt_be_txn_ctx *txn, + uint64_t batch_id, + Mgmtd__YangCfgDataReq *cfg_req[], + int num_req) { struct mgmt_be_batch_ctx *batch = NULL; struct mgmt_be_txn_req *txn_req = NULL; @@ -611,11 +603,10 @@ mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx, return 0; } -static int -mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id, uint64_t batch_id, - Mgmtd__YangCfgDataReq * cfg_req[], int num_req, - bool end_of_data) +static int mgmt_be_process_cfgdata_req(struct mgmt_be_client *client_ctx, + uint64_t txn_id, uint64_t batch_id, + Mgmtd__YangCfgDataReq *cfg_req[], + int num_req, bool end_of_data) { struct mgmt_be_txn_ctx *txn; @@ -640,10 +631,10 @@ mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx, return 0; } -static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id, uint64_t batch_ids[], - size_t num_batch_ids, bool success, - const char *error_if_any) +static int mgmt_be_send_apply_reply(struct mgmt_be_client *client_ctx, + uint64_t txn_id, uint64_t batch_ids[], + size_t num_batch_ids, bool success, + const char *error_if_any) { Mgmtd__BeMessage be_msg; Mgmtd__BeCfgDataApplyReply apply_reply; @@ -673,7 +664,7 @@ static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx, static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn) { - struct mgmt_be_client_ctx *client_ctx; + struct mgmt_be_client *client_ctx; struct timeval apply_nb_cfg_start; struct timeval apply_nb_cfg_end; unsigned long apply_nb_cfg_tm; @@ -682,8 +673,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn) size_t num_processed; static uint64_t batch_ids[MGMTD_BE_MAX_BATCH_IDS_IN_REQ]; - assert(txn && txn->client_ctx); - client_ctx = txn->client_ctx; + assert(txn && txn->client); + client_ctx = txn->client; assert(txn->nb_txn); num_processed = 0; @@ -735,9 +726,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn) return 0; } -static int -mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx, - uint64_t txn_id) +static int mgmt_be_process_cfg_apply(struct mgmt_be_client *client_ctx, + uint64_t txn_id) { struct mgmt_be_txn_ctx *txn; @@ -754,9 +744,8 @@ mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx, return 0; } -static int -mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx, - Mgmtd__BeMessage 
*be_msg) +static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx, + Mgmtd__BeMessage *be_msg) { /* * protobuf-c adds a max size enum with an internal, and changing by @@ -833,12 +822,12 @@ mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx, static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data, size_t len, struct msg_conn *conn) { - struct mgmt_be_client_ctx *client_ctx; + struct mgmt_be_client *client_ctx; struct msg_client *client; Mgmtd__BeMessage *be_msg; client = container_of(conn, struct msg_client, conn); - client_ctx = container_of(client, struct mgmt_be_client_ctx, client); + client_ctx = container_of(client, struct mgmt_be_client, client); be_msg = mgmtd__be_message__unpack(NULL, len, data); if (!be_msg) { @@ -853,17 +842,17 @@ static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data, mgmtd__be_message__free_unpacked(be_msg, NULL); } -static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx, - bool subscr_xpaths, uint16_t num_reg_xpaths, - char **reg_xpaths) +int mgmt_be_send_subscr_req(struct mgmt_be_client *client_ctx, + bool subscr_xpaths, int num_xpaths, + char **reg_xpaths) { Mgmtd__BeMessage be_msg; Mgmtd__BeSubscribeReq subscr_req; mgmtd__be_subscribe_req__init(&subscr_req); - subscr_req.client_name = client_ctx->client_params.name; - subscr_req.n_xpath_reg = num_reg_xpaths; - if (num_reg_xpaths) + subscr_req.client_name = client_ctx->name; + subscr_req.n_xpath_reg = num_xpaths; + if (num_xpaths) subscr_req.xpath_reg = reg_xpaths; else subscr_req.xpath_reg = NULL; @@ -881,24 +870,24 @@ static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx, return mgmt_be_client_send_msg(client_ctx, &be_msg); } -static int _notify_conenct_disconnect(struct msg_client *client, bool connected) +static int _notify_conenct_disconnect(struct msg_client *msg_client, + bool connected) { - struct mgmt_be_client_ctx *client_ctx = - container_of(client, struct mgmt_be_client_ctx, client); + struct mgmt_be_client *client = + container_of(msg_client, struct mgmt_be_client, client); int ret; if (connected) { - assert(client->conn.fd != -1); - ret = mgmt_be_send_subscr_req(client_ctx, false, 0, NULL); + assert(msg_client->conn.fd != -1); + ret = mgmt_be_send_subscr_req(client, false, 0, NULL); if (ret) return ret; } /* Notify BE client through registered callback (if any) */ - if (client_ctx->client_params.client_connect_notify) - (void)(*client_ctx->client_params.client_connect_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, connected); + if (client->cbs.client_connect_notify) + (void)(*client->cbs.client_connect_notify)( + client, client->user_data, connected); return 0; } @@ -914,6 +903,10 @@ static int mgmt_be_client_notify_disconenct(struct msg_conn *conn) return _notify_conenct_disconnect(client, false); } +/* + * Debug Flags + */ + DEFPY(debug_mgmt_client_be, debug_mgmt_client_be_cmd, "[no] debug mgmt client backend", NO_STR DEBUG_STR MGMTD_STR @@ -956,32 +949,33 @@ static struct cmd_node mgmt_dbg_node = { .config_write = mgmt_debug_be_client_config_write, }; -/* - * Initialize library and try connecting with MGMTD. 
- */ -uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params, - struct event_loop *master_thread) +struct mgmt_be_client *mgmt_be_client_create(const char *client_name, + struct mgmt_be_client_cbs *cbs, + uintptr_t user_data, + struct event_loop *event_loop) { - /* Don't call twice */ - assert(!mgmt_be_client_ctx.client.conn.loop); + struct mgmt_be_client *client = + XCALLOC(MTYPE_MGMTD_BE_CLIENT, sizeof(*client)); /* Only call after frr_init() */ assert(running_config); - mgmt_be_client_ctx.running_config = running_config; - mgmt_be_client_ctx.candidate_config = nb_config_new(NULL); - mgmt_be_client_ctx.client_params = *params; - mgmt_be_txns_init(&mgmt_be_client_ctx.txn_head); - msg_client_init(&mgmt_be_client_ctx.client, master_thread, - MGMTD_BE_SERVER_PATH, mgmt_be_client_notify_conenct, + client->name = XSTRDUP(MTYPE_MGMTD_BE_CLIENT_NAME, client_name); + client->running_config = running_config; + client->candidate_config = nb_config_new(NULL); + if (cbs) + client->cbs = *cbs; + mgmt_be_txns_init(&client->txn_head); + msg_client_init(&client->client, event_loop, MGMTD_BE_SERVER_PATH, + mgmt_be_client_notify_conenct, mgmt_be_client_notify_disconenct, mgmt_be_client_process_msg, MGMTD_BE_MAX_NUM_MSG_PROC, MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN, false, "BE-client", MGMTD_DBG_BE_CLIENT_CHECK()); - MGMTD_BE_CLIENT_DBG("Initialized client '%s'", params->name); + MGMTD_BE_CLIENT_DBG("Initialized client '%s'", client_name); - return (uintptr_t)&mgmt_be_client_ctx; + return client; } @@ -993,86 +987,16 @@ void mgmt_be_client_lib_vty_init(void) install_element(CONFIG_NODE, &debug_mgmt_client_be_cmd); } - -/* - * Subscribe with MGMTD for one or more YANG subtree(s). - */ -enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl, - char *reg_yang_xpaths[], - int num_reg_xpaths) +void mgmt_be_client_destroy(struct mgmt_be_client *client) { - struct mgmt_be_client_ctx *client_ctx; - - if (!num_reg_xpaths) - return MGMTD_SUCCESS; - - client_ctx = (struct mgmt_be_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_be_send_subscr_req(client_ctx, true, num_reg_xpaths, - reg_yang_xpaths) - != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Unsubscribe with MGMTD for one or more YANG subtree(s). - */ -enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl, - char *reg_yang_xpaths[], - int num_reg_xpaths) -{ - struct mgmt_be_client_ctx *client_ctx; - - if (!num_reg_xpaths) - return MGMTD_SUCCESS; - - client_ctx = (struct mgmt_be_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - - if (mgmt_be_send_subscr_req(client_ctx, false, num_reg_xpaths, - reg_yang_xpaths) - < 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Send one or more YANG notifications to MGMTD daemon. - */ -enum mgmt_result mgmt_be_send_yang_notify(uintptr_t lib_hndl, - Mgmtd__YangData * data_elems[], - int num_elems) -{ - struct mgmt_be_client_ctx *client_ctx; - - client_ctx = (struct mgmt_be_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - return MGMTD_SUCCESS; -} - -/* - * Destroy library and cleanup everything. 
- */ -void mgmt_be_client_lib_destroy(void) -{ - struct mgmt_be_client_ctx *client_ctx = &mgmt_be_client_ctx; - MGMTD_BE_CLIENT_DBG("Destroying MGMTD Backend Client '%s'", - client_ctx->client_params.name); + client->name); - msg_client_cleanup(&client_ctx->client); - mgmt_be_cleanup_all_txns(client_ctx); - mgmt_be_txns_fini(&client_ctx->txn_head); - nb_config_free(client_ctx->candidate_config); + msg_client_cleanup(&client->client); + mgmt_be_cleanup_all_txns(client); + mgmt_be_txns_fini(&client->txn_head); + nb_config_free(client->candidate_config); - memset(client_ctx, 0, sizeof(*client_ctx)); + XFREE(MTYPE_MGMTD_BE_CLIENT_NAME, client->name); + XFREE(MTYPE_MGMTD_BE_CLIENT, client); } diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h index bbe938b5b4..4d8a1f51a1 100644 --- a/lib/mgmt_be_client.h +++ b/lib/mgmt_be_client.h @@ -82,67 +82,26 @@ enum mgmt_be_client_id { #define MGMTD_BE_MAX_CLIENTS_PER_XPATH_REG 32 +struct mgmt_be_client; + struct mgmt_be_client_txn_ctx { uintptr_t *user_ctx; }; -/* - * All the client-specific information this library needs to - * initialize itself, setup connection with MGMTD BackEnd interface - * and carry on all required procedures appropriately. +/** + * Backend client callbacks. * - * BackEnd clients need to initialise a instance of this structure - * with appropriate data and pass it while calling the API - * to initialize the library (See mgmt_be_client_lib_init for - * more details). + * Callbacks: + * client_connect_notify: called when connection is made/lost to mgmtd. + * txn_notify: called when a txn has been created */ -struct mgmt_be_client_params { - char name[MGMTD_CLIENT_NAME_MAX_LEN]; - uintptr_t user_data; - unsigned long conn_retry_intvl_sec; - - void (*client_connect_notify)(uintptr_t lib_hndl, - uintptr_t usr_data, - bool connected); - - void (*client_subscribe_notify)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct nb_yang_xpath **xpath, - enum mgmt_result subscribe_result[], int num_paths); - - void (*txn_notify)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, bool destroyed); - - enum mgmt_result (*data_validate)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, - struct nb_yang_xpath *xpath, struct nb_yang_value *data, - bool delete, char *error_if_any); - - enum mgmt_result (*data_apply)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, - struct nb_yang_xpath *xpath, struct nb_yang_value *data, - bool delete); - - enum mgmt_result (*get_data_elem)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, - struct nb_yang_xpath *xpath, struct nb_yang_xpath_elem *elem); - - enum mgmt_result (*get_data)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, - struct nb_yang_xpath *xpath, bool keys_only, - struct nb_yang_xpath_elem **elems, int *num_elems, - int *next_key); - - enum mgmt_result (*get_next_data)( - uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, - struct nb_yang_xpath *xpath, bool keys_only, - struct nb_yang_xpath_elem **elems, int *num_elems); +struct mgmt_be_client_cbs { + void (*client_connect_notify)(struct mgmt_be_client *client, + uintptr_t usr_data, bool connected); + + void (*txn_notify)(struct mgmt_be_client *client, uintptr_t usr_data, + struct mgmt_be_client_txn_ctx *txn_ctx, + bool destroyed); }; /*************************************************************** @@ -176,20 +135,20 @@ 
mgmt_be_client_name2id(const char *name) * API prototypes ***************************************************************/ -/* - * Initialize library and try connecting with MGMTD. - * - * params - * Backend client parameters. +/** + * Create backend client and connect to MGMTD. * - * master_thread - * Thread master. + * Args: + * client_name: the name of the client + * cbs: callbacks for various events. + * event_loop: the main event loop. * * Returns: - * Backend client lib handler (nothing but address of mgmt_be_client_ctx) + * Backend client object. */ -extern uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params, - struct event_loop *master_thread); +extern struct mgmt_be_client * +mgmt_be_client_create(const char *name, struct mgmt_be_client_cbs *cbs, + uintptr_t user_data, struct event_loop *event_loop); /* * Initialize library vty (adds debug support). @@ -206,13 +165,13 @@ extern void mgmt_be_client_lib_vty_init(void); extern void mgmt_debug_be_client_show_debug(struct vty *vty); /* - * Subscribe with MGMTD for one or more YANG subtree(s). + * [Un]-subscribe with MGMTD for one or more YANG subtree(s). * - * lib_hndl - * Client library handler. + * client + * The client object. * * reg_yang_xpaths - * Yang xpath(s) that needs to be subscribed to. + * Yang xpath(s) that needs to be [un]-subscribed from/to * * num_xpaths * Number of xpaths @@ -220,52 +179,14 @@ extern void mgmt_debug_be_client_show_debug(struct vty *vty); * Returns: * MGMTD_SUCCESS on success, MGMTD_* otherwise. */ -extern enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl, - char **reg_yang_xpaths, - int num_xpaths); - -/* - * Send one or more YANG notifications to MGMTD daemon. - * - * lib_hndl - * Client library handler. - * - * data_elems - * Yang data elements from data tree. - * - * num_elems - * Number of data elements. - * - * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. - */ -extern enum mgmt_result -mgmt_be_send_yang_notify(uintptr_t lib_hndl, Mgmtd__YangData **data_elems, - int num_elems); - -/* - * Un-subscribe with MGMTD for one or more YANG subtree(s). - * - * lib_hndl - * Client library handler. - * - * reg_yang_xpaths - * Yang xpath(s) that needs to be un-subscribed from. - * - * num_reg_xpaths - * Number of subscribed xpaths - * - * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. - */ -enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl, - char **reg_yang_xpaths, - int num_reg_xpaths); +extern int mgmt_be_send_subscr_req(struct mgmt_be_client *client, + bool subscr_xpaths, int num_xpaths, + char **reg_xpaths); /* - * Destroy library and cleanup everything. + * Destroy backend client and cleanup everything. 
*/ -extern void mgmt_be_client_lib_destroy(void); +extern void mgmt_be_client_destroy(struct mgmt_be_client *client); #ifdef __cplusplus } diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c index 83f60ea58b..35a6d7d909 100644 --- a/lib/mgmt_fe_client.c +++ b/lib/mgmt_fe_client.c @@ -19,14 +19,12 @@ #include "lib/mgmt_fe_client_clippy.c" -struct mgmt_fe_client_ctx; - PREDECL_LIST(mgmt_sessions); struct mgmt_fe_client_session { uint64_t client_id; /* FE client identifies itself with this ID */ uint64_t session_id; /* FE adapter identified session with this ID */ - struct mgmt_fe_client_ctx *client_ctx; + struct mgmt_fe_client *client; uintptr_t user_ctx; struct mgmt_sessions_item list_linkage; @@ -34,29 +32,31 @@ struct mgmt_fe_client_session { DECLARE_LIST(mgmt_sessions, struct mgmt_fe_client_session, list_linkage); -DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "MGMTD Frontend session"); +DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_CLIENT, "frontend client"); +DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_CLIENT_NAME, "frontend client name"); +DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "frontend session"); -struct mgmt_fe_client_ctx { +struct mgmt_fe_client { struct msg_client client; - struct mgmt_fe_client_params client_params; - struct mgmt_sessions_head client_sessions; + char *name; + struct mgmt_fe_client_cbs cbs; + uintptr_t user_data; + struct mgmt_sessions_head sessions; }; -#define FOREACH_SESSION_IN_LIST(client_ctx, session) \ - frr_each_safe (mgmt_sessions, &(client_ctx)->client_sessions, (session)) +#define FOREACH_SESSION_IN_LIST(client, session) \ + frr_each_safe (mgmt_sessions, &(client)->sessions, (session)) struct debug mgmt_dbg_fe_client = {0, "Management frontend client operations"}; -static struct mgmt_fe_client_ctx mgmt_fe_client_ctx = { - .client = {.conn = {.fd = -1}}}; static struct mgmt_fe_client_session * -mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx, +mgmt_fe_find_session_by_client_id(struct mgmt_fe_client *client, uint64_t client_id) { struct mgmt_fe_client_session *session; - FOREACH_SESSION_IN_LIST (client_ctx, session) { + FOREACH_SESSION_IN_LIST (client, session) { if (session->client_id == client_id) { MGMTD_FE_CLIENT_DBG("Found session-id %" PRIu64 " using client-id %" PRIu64, @@ -70,12 +70,12 @@ mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx, } static struct mgmt_fe_client_session * -mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx, +mgmt_fe_find_session_by_session_id(struct mgmt_fe_client *client, uint64_t session_id) { struct mgmt_fe_client_session *session; - FOREACH_SESSION_IN_LIST (client_ctx, session) { + FOREACH_SESSION_IN_LIST (client, session) { if (session->session_id == session_id) { MGMTD_FE_CLIENT_DBG( "Found session of client-id %" PRIu64 @@ -89,24 +89,24 @@ mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx, return NULL; } -static int mgmt_fe_client_send_msg(struct mgmt_fe_client_ctx *client_ctx, +static int mgmt_fe_client_send_msg(struct mgmt_fe_client *client, Mgmtd__FeMessage *fe_msg, bool short_circuit_ok) { return msg_conn_send_msg( - &client_ctx->client.conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg, + &client->client.conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg, mgmtd__fe_message__get_packed_size(fe_msg), (size_t(*)(void *, void *))mgmtd__fe_message__pack, short_circuit_ok); } -static int mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx) +static int mgmt_fe_send_register_req(struct mgmt_fe_client *client) { Mgmtd__FeMessage fe_msg; Mgmtd__FeRegisterReq 
rgstr_req; mgmtd__fe_register_req__init(&rgstr_req); - rgstr_req.client_name = client_ctx->client_params.name; + rgstr_req.client_name = client->name; mgmtd__fe_message__init(&fe_msg); fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ; @@ -115,10 +115,10 @@ static int mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx) MGMTD_FE_CLIENT_DBG( "Sending REGISTER_REQ message to MGMTD Frontend server"); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, true); + return mgmt_fe_client_send_msg(client, &fe_msg, true); } -static int mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx, +static int mgmt_fe_send_session_req(struct mgmt_fe_client *client, struct mgmt_fe_client_session *session, bool create) { @@ -146,12 +146,12 @@ static int mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx, "Sending SESSION_REQ %s message for client-id %" PRIu64, create ? "create" : "destroy", session->client_id); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, scok); + return mgmt_fe_client_send_msg(client, &fe_msg, scok); } -static int mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx, - uint64_t session_id, bool lock, - uint64_t req_id, Mgmtd__DatastoreId ds_id) +int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client, uint64_t session_id, + uint64_t req_id, Mgmtd__DatastoreId ds_id, + bool lock) { (void)req_id; Mgmtd__FeMessage fe_msg; @@ -171,15 +171,13 @@ static int mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx, "Sending %sLOCK_REQ message for Ds:%d session-id %" PRIu64, lock ? "" : "UN", ds_id, session_id); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false); + return mgmt_fe_client_send_msg(client, &fe_msg, false); } -static int mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - Mgmtd__YangCfgDataReq **data_req, - int num_data_reqs, bool implicit_commit, - Mgmtd__DatastoreId dst_ds_id) +int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client, uint64_t session_id, + uint64_t req_id, Mgmtd__DatastoreId ds_id, + Mgmtd__YangCfgDataReq **data_req, int num_data_reqs, + bool implicit_commit, Mgmtd__DatastoreId dst_ds_id) { (void)req_id; Mgmtd__FeMessage fe_msg; @@ -203,14 +201,14 @@ static int mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx, " (#xpaths:%d)", ds_id, session_id, num_data_reqs); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false); + return mgmt_fe_client_send_msg(client, &fe_msg, false); } -static int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId src_ds_id, - Mgmtd__DatastoreId dest_ds_id, - bool validate_only, bool abort) +int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId src_ds_id, + Mgmtd__DatastoreId dest_ds_id, + bool validate_only, bool abort) { (void)req_id; Mgmtd__FeMessage fe_msg; @@ -232,14 +230,13 @@ static int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx, "Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session-id %" PRIu64, src_ds_id, dest_ds_id, session_id); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false); + return mgmt_fe_client_send_msg(client, &fe_msg, false); } -static int mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - Mgmtd__YangGetDataReq *data_req[], - int num_data_reqs) +int 
mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client, uint64_t session_id, + uint64_t req_id, Mgmtd__DatastoreId ds_id, + Mgmtd__YangGetDataReq *data_req[], + int num_data_reqs) { (void)req_id; Mgmtd__FeMessage fe_msg; @@ -261,14 +258,13 @@ static int mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx, " (#xpaths:%d)", ds_id, session_id, num_data_reqs); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false); + return mgmt_fe_client_send_msg(client, &fe_msg, false); } -static int mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - Mgmtd__YangGetDataReq *data_req[], - int num_data_reqs) +int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client, uint64_t session_id, + uint64_t req_id, Mgmtd__DatastoreId ds_id, + Mgmtd__YangGetDataReq *data_req[], + int num_data_reqs) { (void)req_id; Mgmtd__FeMessage fe_msg; @@ -290,15 +286,14 @@ static int mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx, " (#xpaths:%d)", ds_id, session_id, num_data_reqs); - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false); + return mgmt_fe_client_send_msg(client, &fe_msg, false); } -static int mgmt_fe_send_regnotify_req(struct mgmt_fe_client_ctx *client_ctx, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - bool register_req, - Mgmtd__YangDataXPath *data_req[], - int num_data_reqs) +int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId ds_id, bool register_req, + Mgmtd__YangDataXPath *data_req[], + int num_data_reqs) { (void)req_id; Mgmtd__FeMessage fe_msg; @@ -315,10 +310,10 @@ static int mgmt_fe_send_regnotify_req(struct mgmt_fe_client_ctx *client_ctx, fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ; fe_msg.regnotify_req = &regntfy_req; - return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false); + return mgmt_fe_client_send_msg(client, &fe_msg, false); } -static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, +static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client, Mgmtd__FeMessage *fe_msg) { struct mgmt_fe_client_session *session = NULL; @@ -338,8 +333,7 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, fe_msg->session_reply->session_id); session = mgmt_fe_find_session_by_client_id( - client_ctx, - fe_msg->session_reply->client_conn_id); + client, fe_msg->session_reply->client_conn_id); if (session && fe_msg->session_reply->success) { MGMTD_FE_CLIENT_DBG( @@ -358,17 +352,14 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, fe_msg->session_reply->session_id); session = mgmt_fe_find_session_by_session_id( - client_ctx, fe_msg->session_req->session_id); + client, fe_msg->session_req->session_id); } /* The session state may be deleted by the callback */ - if (session && session->client_ctx && - session->client_ctx->client_params.client_session_notify) - (*session->client_ctx->client_params - .client_session_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - session->client_id, + if (session && session->client && + session->client->cbs.client_session_notify) + (*session->client->cbs.client_session_notify)( + client, client->user_data, session->client_id, fe_msg->session_reply->create, fe_msg->session_reply->success, fe_msg->session_reply->session_id, @@ -378,14 +369,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, MGMTD_FE_CLIENT_DBG("Got LOCKDS_REPLY for 
session-id %" PRIu64, fe_msg->lockds_reply->session_id); session = mgmt_fe_find_session_by_session_id( - client_ctx, fe_msg->lockds_reply->session_id); - - if (session && session->client_ctx && - session->client_ctx->client_params.lock_ds_notify) - (*session->client_ctx->client_params.lock_ds_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - session->client_id, + client, fe_msg->lockds_reply->session_id); + + if (session && session->client && + session->client->cbs.lock_ds_notify) + (*session->client->cbs.lock_ds_notify)( + client, client->user_data, session->client_id, fe_msg->lockds_reply->session_id, session->user_ctx, fe_msg->lockds_reply->req_id, fe_msg->lockds_reply->lock, @@ -398,14 +387,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, fe_msg->setcfg_reply->session_id); session = mgmt_fe_find_session_by_session_id( - client_ctx, fe_msg->setcfg_reply->session_id); - - if (session && session->client_ctx && - session->client_ctx->client_params.set_config_notify) - (*session->client_ctx->client_params.set_config_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - session->client_id, + client, fe_msg->setcfg_reply->session_id); + + if (session && session->client && + session->client->cbs.set_config_notify) + (*session->client->cbs.set_config_notify)( + client, client->user_data, session->client_id, fe_msg->setcfg_reply->session_id, session->user_ctx, fe_msg->setcfg_reply->req_id, fe_msg->setcfg_reply->success, @@ -417,15 +404,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, fe_msg->commcfg_reply->session_id); session = mgmt_fe_find_session_by_session_id( - client_ctx, fe_msg->commcfg_reply->session_id); - - if (session && session->client_ctx && - session->client_ctx->client_params.commit_config_notify) - (*session->client_ctx->client_params - .commit_config_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - session->client_id, + client, fe_msg->commcfg_reply->session_id); + + if (session && session->client && + session->client->cbs.commit_config_notify) + (*session->client->cbs.commit_config_notify)( + client, client->user_data, session->client_id, fe_msg->commcfg_reply->session_id, session->user_ctx, fe_msg->commcfg_reply->req_id, @@ -440,14 +424,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, fe_msg->getcfg_reply->session_id); session = mgmt_fe_find_session_by_session_id( - client_ctx, fe_msg->getcfg_reply->session_id); - - if (session && session->client_ctx && - session->client_ctx->client_params.get_data_notify) - (*session->client_ctx->client_params.get_data_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - session->client_id, + client, fe_msg->getcfg_reply->session_id); + + if (session && session->client && + session->client->cbs.get_data_notify) + (*session->client->cbs.get_data_notify)( + client, client->user_data, session->client_id, fe_msg->getcfg_reply->session_id, session->user_ctx, fe_msg->getcfg_reply->req_id, fe_msg->getcfg_reply->success, @@ -468,14 +450,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, fe_msg->getdata_reply->session_id); session = mgmt_fe_find_session_by_session_id( - client_ctx, fe_msg->getdata_reply->session_id); - - if (session && session->client_ctx && - session->client_ctx->client_params.get_data_notify) - (*session->client_ctx->client_params.get_data_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, - 
session->client_id, + client, fe_msg->getdata_reply->session_id); + + if (session && session->client && + session->client->cbs.get_data_notify) + (*session->client->cbs.get_data_notify)( + client, client->user_data, session->client_id, fe_msg->getdata_reply->session_id, session->user_ctx, fe_msg->getdata_reply->req_id, @@ -526,12 +506,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx, static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data, size_t len, struct msg_conn *conn) { - struct mgmt_fe_client_ctx *client_ctx; - struct msg_client *client; + struct mgmt_fe_client *client; + struct msg_client *msg_client; Mgmtd__FeMessage *fe_msg; - client = container_of(conn, struct msg_client, conn); - client_ctx = container_of(client, struct mgmt_fe_client_ctx, client); + msg_client = container_of(conn, struct msg_client, conn); + client = container_of(msg_client, struct mgmt_fe_client, client); fe_msg = mgmtd__fe_message__unpack(NULL, len, data); if (!fe_msg) { @@ -542,41 +522,38 @@ static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data, MGMTD_FE_CLIENT_DBG( "Decoded %zu bytes of message(msg: %u/%u) from server", len, fe_msg->message_case, fe_msg->message_case); - (void)mgmt_fe_client_handle_msg(client_ctx, fe_msg); + (void)mgmt_fe_client_handle_msg(client, fe_msg); mgmtd__fe_message__free_unpacked(fe_msg, NULL); } -static int _notify_connect_disconnect(struct msg_client *client, bool connected) +static int _notify_connect_disconnect(struct msg_client *msg_client, + bool connected) { - struct mgmt_fe_client_ctx *client_ctx = - container_of(client, struct mgmt_fe_client_ctx, client); + struct mgmt_fe_client *client = + container_of(msg_client, struct mgmt_fe_client, client); struct mgmt_fe_client_session *session; int ret; /* Send REGISTER_REQ message */ if (connected) { - if ((ret = mgmt_fe_send_register_req(client_ctx)) != 0) + if ((ret = mgmt_fe_send_register_req(client)) != 0) return ret; } /* Walk list of sessions for this FE client deleting them */ - if (!connected && mgmt_sessions_count(&client_ctx->client_sessions)) { + if (!connected && mgmt_sessions_count(&client->sessions)) { MGMTD_FE_CLIENT_DBG("Cleaning up existing sessions"); - FOREACH_SESSION_IN_LIST (client_ctx, session) { - assert(session->client_ctx); + FOREACH_SESSION_IN_LIST (client, session) { + assert(session->client); /* unlink from list first this avoids double free */ - mgmt_sessions_del(&client_ctx->client_sessions, - session); + mgmt_sessions_del(&client->sessions, session); /* notify FE client the session is being deleted */ - if (session->client_ctx->client_params - .client_session_notify) { - (*session->client_ctx->client_params - .client_session_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, + if (session->client->cbs.client_session_notify) { + (*session->client->cbs.client_session_notify)( + client, client->user_data, session->client_id, false, true, session->session_id, session->user_ctx); } @@ -586,10 +563,9 @@ static int _notify_connect_disconnect(struct msg_client *client, bool connected) } /* Notify FE client through registered callback (if any). 
*/ - if (client_ctx->client_params.client_connect_notify) - (void)(*client_ctx->client_params.client_connect_notify)( - (uintptr_t)client_ctx, - client_ctx->client_params.user_data, connected); + if (client->cbs.client_connect_notify) + (void)(*client->cbs.client_connect_notify)( + client, client->user_data, connected); return 0; } @@ -651,26 +627,31 @@ static struct cmd_node mgmt_dbg_node = { /* * Initialize library and try connecting with MGMTD. */ -uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params, - struct event_loop *master_thread) +struct mgmt_fe_client *mgmt_fe_client_create(const char *client_name, + struct mgmt_fe_client_cbs *cbs, + uintptr_t user_data, + struct event_loop *event_loop) { - /* Don't call twice */ - assert(!mgmt_fe_client_ctx.client.conn.loop); + struct mgmt_fe_client *client = + XCALLOC(MTYPE_MGMTD_FE_CLIENT, sizeof(*client)); - mgmt_fe_client_ctx.client_params = *params; + client->name = XSTRDUP(MTYPE_MGMTD_FE_CLIENT_NAME, client_name); + client->user_data = user_data; + if (cbs) + client->cbs = *cbs; - mgmt_sessions_init(&mgmt_fe_client_ctx.client_sessions); + mgmt_sessions_init(&client->sessions); - msg_client_init(&mgmt_fe_client_ctx.client, master_thread, - MGMTD_FE_SERVER_PATH, mgmt_fe_client_notify_connect, + msg_client_init(&client->client, event_loop, MGMTD_FE_SERVER_PATH, + mgmt_fe_client_notify_connect, mgmt_fe_client_notify_disconnect, mgmt_fe_client_process_msg, MGMTD_FE_MAX_NUM_MSG_PROC, MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN, true, "FE-client", MGMTD_DBG_FE_CLIENT_CHECK()); - MGMTD_FE_CLIENT_DBG("Initialized client '%s'", params->name); + MGMTD_FE_CLIENT_DBG("Initialized client '%s'", client_name); - return (uintptr_t)&mgmt_fe_client_ctx; + return client; } void mgmt_fe_client_lib_vty_init(void) @@ -681,39 +662,31 @@ void mgmt_fe_client_lib_vty_init(void) install_element(CONFIG_NODE, &debug_mgmt_client_fe_cmd); } -uint mgmt_fe_client_session_count(uintptr_t lib_hndl) +uint mgmt_fe_client_session_count(struct mgmt_fe_client *client) { - struct mgmt_fe_client_ctx *client_ctx = - (struct mgmt_fe_client_ctx *)lib_hndl; - - return mgmt_sessions_count(&client_ctx->client_sessions); + return mgmt_sessions_count(&client->sessions); } /* * Create a new Session for a Frontend Client connection. */ -enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl, +enum mgmt_result mgmt_fe_create_client_session(struct mgmt_fe_client *client, uint64_t client_id, uintptr_t user_ctx) { - struct mgmt_fe_client_ctx *client_ctx; struct mgmt_fe_client_session *session; - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - session = XCALLOC(MTYPE_MGMTD_FE_SESSION, sizeof(struct mgmt_fe_client_session)); assert(session); session->user_ctx = user_ctx; session->client_id = client_id; - session->client_ctx = client_ctx; + session->client = client; session->session_id = 0; - mgmt_sessions_add_tail(&client_ctx->client_sessions, session); + mgmt_sessions_add_tail(&client->sessions, session); - if (mgmt_fe_send_session_req(client_ctx, session, true) != 0) { + if (mgmt_fe_send_session_req(client, session, true) != 0) { XFREE(MTYPE_MGMTD_FE_SESSION, session); return MGMTD_INTERNAL_ERROR; } @@ -724,189 +697,42 @@ enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl, /* * Delete an existing Session for a Frontend Client connection. 
*/ -enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl, +enum mgmt_result mgmt_fe_destroy_client_session(struct mgmt_fe_client *client, uint64_t client_id) { - struct mgmt_fe_client_ctx *client_ctx; struct mgmt_fe_client_session *session; - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - session = mgmt_fe_find_session_by_client_id(client_ctx, client_id); - if (!session || session->client_ctx != client_ctx) + session = mgmt_fe_find_session_by_client_id(client, client_id); + if (!session || session->client != client) return MGMTD_INVALID_PARAM; if (session->session_id && - mgmt_fe_send_session_req(client_ctx, session, false) != 0) + mgmt_fe_send_session_req(client, session, false) != 0) MGMTD_FE_CLIENT_ERR( "Failed to send session destroy request for the session-id %" PRIu64, session->session_id); - mgmt_sessions_del(&client_ctx->client_sessions, session); + mgmt_sessions_del(&client->sessions, session); XFREE(MTYPE_MGMTD_FE_SESSION, session); return MGMTD_SUCCESS; } -static void mgmt_fe_destroy_client_sessions(uintptr_t lib_hndl) -{ - struct mgmt_fe_client_ctx *client_ctx; - struct mgmt_fe_client_session *session; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return; - - FOREACH_SESSION_IN_LIST (client_ctx, session) - mgmt_fe_destroy_client_session(lib_hndl, session->client_id); -} - -/* - * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS. - */ -enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - bool lock_ds) -{ - struct mgmt_fe_client_ctx *client_ctx; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_fe_send_lockds_req(client_ctx, session_id, lock_ds, req_id, - ds_id) != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Send SET_CONFIG_REQ to MGMTD for one or more config data(s). - */ -enum mgmt_result mgmt_fe_set_config_data(uintptr_t lib_hndl, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - Mgmtd__YangCfgDataReq **config_req, - int num_reqs, bool implicit_commit, - Mgmtd__DatastoreId dst_ds_id) -{ - struct mgmt_fe_client_ctx *client_ctx; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_fe_send_setcfg_req(client_ctx, session_id, req_id, ds_id, - config_req, num_reqs, implicit_commit, - dst_ds_id) != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Send SET_CONFIG_REQ to MGMTD for one or more config data(s). - */ -enum mgmt_result mgmt_fe_commit_config_data(uintptr_t lib_hndl, - uint64_t session_id, - uint64_t req_id, - Mgmtd__DatastoreId src_ds_id, - Mgmtd__DatastoreId dst_ds_id, - bool validate_only, bool abort) -{ - struct mgmt_fe_client_ctx *client_ctx; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_fe_send_commitcfg_req(client_ctx, session_id, req_id, - src_ds_id, dst_ds_id, validate_only, - abort) != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s). 
- */ -enum mgmt_result mgmt_fe_get_config_data(uintptr_t lib_hndl, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - Mgmtd__YangGetDataReq *data_req[], - int num_reqs) -{ - struct mgmt_fe_client_ctx *client_ctx; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_fe_send_getcfg_req(client_ctx, session_id, req_id, ds_id, - data_req, num_reqs) != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Send GET_DATA_REQ to MGMTD for one or more config data item(s). - */ -enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - Mgmtd__YangGetDataReq *data_req[], - int num_reqs) -{ - struct mgmt_fe_client_ctx *client_ctx; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_fe_send_getdata_req(client_ctx, session_id, req_id, ds_id, - data_req, num_reqs) != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - -/* - * Send NOTIFY_REGISTER_REQ to MGMTD daemon. - */ -enum mgmt_result -mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - bool register_req, - Mgmtd__YangDataXPath *data_req[], int num_reqs) -{ - struct mgmt_fe_client_ctx *client_ctx; - - client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl; - if (!client_ctx) - return MGMTD_INVALID_PARAM; - - if (mgmt_fe_send_regnotify_req(client_ctx, session_id, req_id, ds_id, - register_req, data_req, num_reqs) != 0) - return MGMTD_INTERNAL_ERROR; - - return MGMTD_SUCCESS; -} - /* * Destroy library and cleanup everything. */ -void mgmt_fe_client_lib_destroy(void) +void mgmt_fe_client_destroy(struct mgmt_fe_client *client) { - struct mgmt_fe_client_ctx *client_ctx = &mgmt_fe_client_ctx; + struct mgmt_fe_client_session *session; MGMTD_FE_CLIENT_DBG("Destroying MGMTD Frontend Client '%s'", - client_ctx->client_params.name); + client->name); + + FOREACH_SESSION_IN_LIST (client, session) + mgmt_fe_destroy_client_session(client, session->client_id); + + msg_client_cleanup(&client->client); - mgmt_fe_destroy_client_sessions((uintptr_t)client_ctx); - msg_client_cleanup(&client_ctx->client); - memset(client_ctx, 0, sizeof(*client_ctx)); + XFREE(MTYPE_MGMTD_FE_CLIENT_NAME, client->name); + XFREE(MTYPE_MGMTD_FE_CLIENT, client); } diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h index 7ce6c5eef5..edf861746c 100644 --- a/lib/mgmt_fe_client.h +++ b/lib/mgmt_fe_client.h @@ -56,6 +56,9 @@ extern "C" { #define MGMTD_DS_OPERATIONAL MGMTD__DATASTORE_ID__OPERATIONAL_DS #define MGMTD_DS_MAX_ID MGMTD_DS_OPERATIONAL + 1 +struct mgmt_fe_client; + + /* * All the client specific information this library needs to * initialize itself, setup connection with MGMTD FrontEnd interface @@ -66,52 +69,52 @@ extern "C" { * to initialize the library (See mgmt_fe_client_lib_init for * more details). 
*/ -struct mgmt_fe_client_params { - char name[MGMTD_CLIENT_NAME_MAX_LEN]; - uintptr_t user_data; - unsigned long conn_retry_intvl_sec; - - void (*client_connect_notify)(uintptr_t lib_hndl, - uintptr_t user_data, - bool connected); - - void (*client_session_notify)(uintptr_t lib_hndl, - uintptr_t user_data, - uint64_t client_id, +struct mgmt_fe_client_cbs { + void (*client_connect_notify)(struct mgmt_fe_client *client, + uintptr_t user_data, bool connected); + + void (*client_session_notify)(struct mgmt_fe_client *client, + uintptr_t user_data, uint64_t client_id, bool create, bool success, uintptr_t session_id, - uintptr_t user_session_ctx); + uintptr_t user_session_client); - void (*lock_ds_notify)(uintptr_t lib_hndl, uintptr_t user_data, - uint64_t client_id, uintptr_t session_id, - uintptr_t user_session_ctx, uint64_t req_id, + void (*lock_ds_notify)(struct mgmt_fe_client *client, + uintptr_t user_data, uint64_t client_id, + uintptr_t session_id, + uintptr_t user_session_client, uint64_t req_id, bool lock_ds, bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any); - void (*set_config_notify)(uintptr_t lib_hndl, uintptr_t user_data, - uint64_t client_id, uintptr_t session_id, - uintptr_t user_session_ctx, uint64_t req_id, - bool success, Mgmtd__DatastoreId ds_id, + void (*set_config_notify)(struct mgmt_fe_client *client, + uintptr_t user_data, uint64_t client_id, + uintptr_t session_id, + uintptr_t user_session_client, + uint64_t req_id, bool success, + Mgmtd__DatastoreId ds_id, char *errmsg_if_any); - void (*commit_config_notify)( - uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id, - uintptr_t session_id, uintptr_t user_session_ctx, - uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id, - Mgmtd__DatastoreId dst_ds_id, bool validate_only, - char *errmsg_if_any); - - enum mgmt_result (*get_data_notify)( - uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id, - uintptr_t session_id, uintptr_t user_session_ctx, - uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id, - Mgmtd__YangData **yang_data, size_t num_data, int next_key, - char *errmsg_if_any); - - enum mgmt_result (*data_notify)( - uint64_t client_id, uint64_t session_id, uintptr_t user_data, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - Mgmtd__YangData **yang_data, size_t num_data); + void (*commit_config_notify)(struct mgmt_fe_client *client, + uintptr_t user_data, uint64_t client_id, + uintptr_t session_id, + uintptr_t user_session_client, + uint64_t req_id, bool success, + Mgmtd__DatastoreId src_ds_id, + Mgmtd__DatastoreId dst_ds_id, + bool validate_only, char *errmsg_if_any); + + int (*get_data_notify)(struct mgmt_fe_client *client, + uintptr_t user_data, uint64_t client_id, + uintptr_t session_id, + uintptr_t user_session_client, uint64_t req_id, + bool success, Mgmtd__DatastoreId ds_id, + Mgmtd__YangData **yang_data, size_t num_data, + int next_key, char *errmsg_if_any); + + int (*data_notify)(uint64_t client_id, uint64_t session_id, + uintptr_t user_data, uint64_t req_id, + Mgmtd__DatastoreId ds_id, + Mgmtd__YangData **yang_data, size_t num_data); }; extern struct debug mgmt_dbg_fe_client; @@ -139,17 +142,18 @@ extern struct debug mgmt_dbg_fe_client; * Thread master. 
* * Returns: - * Frontend client lib handler (nothing but address of mgmt_fe_client_ctx) + * Frontend client lib handler (nothing but address of mgmt_fe_client) */ -extern uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params, - struct event_loop *master_thread); +extern struct mgmt_fe_client * +mgmt_fe_client_create(const char *client_name, struct mgmt_fe_client_cbs *cbs, + uintptr_t user_data, struct event_loop *event_loop); /* * Initialize library vty (adds debug support). * - * This call should be added to your component when enabling other vty code to - * enable mgmtd client debugs. When adding, one needs to also add a their - * component in `xref2vtysh.py` as well. + * This call should be added to your component when enabling other vty + * code to enable mgmtd client debugs. When adding, one needs to also + * add a their component in `xref2vtysh.py` as well. */ extern void mgmt_fe_client_lib_vty_init(void); @@ -167,15 +171,15 @@ extern void mgmt_debug_fe_client_show_debug(struct vty *vty); * client_id * Unique identifier of client. * - * user_ctx + * user_client * Client context. * * Returns: * MGMTD_SUCCESS on success, MGMTD_* otherwise. */ -extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl, - uint64_t client_id, - uintptr_t user_ctx); +extern enum mgmt_result +mgmt_fe_create_client_session(struct mgmt_fe_client *client, uint64_t client_id, + uintptr_t user_client); /* * Delete an existing Session for a Frontend Client connection. @@ -187,10 +191,11 @@ extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl, * Unique identifier of client. * * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. + * 0 on success, otherwise msg_conn_send_msg() return values. */ -extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl, - uint64_t client_id); +extern enum mgmt_result +mgmt_fe_destroy_client_session(struct mgmt_fe_client *client, + uint64_t client_id); /* * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS. @@ -211,11 +216,11 @@ extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl, * TRUE for lock request, FALSE for unlock request. * * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. + * 0 on success, otherwise msg_conn_send_msg() return values. */ -extern enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, - Mgmtd__DatastoreId ds_id, bool lock_ds); +extern int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId ds_id, bool lock_ds); /* * Send SET_CONFIG_REQ to MGMTD for one or more config data(s). @@ -245,13 +250,15 @@ extern enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uint64_t session_id, * Destination Datastore ID where data needs to be set. * * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. + * 0 on success, otherwise msg_conn_send_msg() return values. */ -extern enum mgmt_result -mgmt_fe_set_config_data(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - Mgmtd__YangCfgDataReq **config_req, int num_req, - bool implicit_commit, Mgmtd__DatastoreId dst_ds_id); + +extern int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId ds_id, + Mgmtd__YangCfgDataReq **config_req, + int num_req, bool implicit_commit, + Mgmtd__DatastoreId dst_ds_id); /* * Send SET_COMMMIT_REQ to MGMTD for one or more config data(s). 
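For illustration only (this snippet is not part of the patch): a minimal sketch of how a frontend caller might drive a config change through the renamed send APIs above. Both calls now return 0 on success (a msg_conn_send_msg() error code otherwise), and the actual outcomes arrive asynchronously via the set_config_notify and commit_config_notify callbacks in struct mgmt_fe_client_cbs. The wrapper name and the fe_client/session_id/req_id/cfgreq variables are assumptions for illustration; the lib/vty.c hunks further down in this diff perform the equivalent calls for real.

/* Minimal sketch, assuming an established frontend session and a prepared
 * array of Mgmtd__YangCfgDataReq; all names here are illustrative only. */
static int example_setcfg_then_commit(struct mgmt_fe_client *fe_client,
				      uint64_t session_id, uint64_t req_id,
				      Mgmtd__YangCfgDataReq **cfgreq,
				      int num_req)
{
	/* Stage the changes in the candidate datastore (no implicit commit). */
	if (mgmt_fe_send_setcfg_req(fe_client, session_id, req_id,
				    MGMTD_DS_CANDIDATE, cfgreq, num_req,
				    false, MGMTD_DS_RUNNING))
		return -1;

	/* Ask mgmtd to commit candidate to running; the result is reported
	 * later through the commit_config_notify callback. */
	if (mgmt_fe_send_commitcfg_req(fe_client, session_id, req_id + 1,
				       MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING,
				       false /* validate_only */,
				       false /* abort */))
		return -1;

	return 0;
}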
@@ -278,13 +285,13 @@ mgmt_fe_set_config_data(uintptr_t lib_hndl, uint64_t session_id, * TRUE if need to restore Src DS back to Dest DS, FALSE otherwise. * * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. + * 0 on success, otherwise msg_conn_send_msg() return values. */ -extern enum mgmt_result -mgmt_fe_commit_config_data(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId src_ds_id, - Mgmtd__DatastoreId dst_ds_id, bool validate_only, - bool abort); +extern int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId src_ds_id, + Mgmtd__DatastoreId dst_ds_id, + bool validate_only, bool abort); /* * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s). @@ -308,12 +315,13 @@ mgmt_fe_commit_config_data(uintptr_t lib_hndl, uint64_t session_id, * Number of get config requests. * * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. + * 0 on success, otherwise msg_conn_send_msg() return values. */ -extern enum mgmt_result -mgmt_fe_get_config_data(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - Mgmtd__YangGetDataReq **data_req, int num_reqs); +extern int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId ds_id, + Mgmtd__YangGetDataReq **data_req, + int num_reqs); /* * Send GET_DATA_REQ to MGMTD for one or more data item(s). @@ -321,11 +329,11 @@ mgmt_fe_get_config_data(uintptr_t lib_hndl, uint64_t session_id, * Similar to get config request but supports getting data * from operational ds aka backend clients directly. */ -extern enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, - uint64_t session_id, uint64_t req_id, - Mgmtd__DatastoreId ds_id, - Mgmtd__YangGetDataReq **data_req, - int num_reqs); +extern int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId ds_id, + Mgmtd__YangGetDataReq **data_req, + int num_reqs); /* * Send NOTIFY_REGISTER_REQ to MGMTD daemon. @@ -352,23 +360,24 @@ extern enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, * Number of data requests. * * Returns: - * MGMTD_SUCCESS on success, MGMTD_* otherwise. + * 0 on success, otherwise msg_conn_send_msg() return values. */ -extern enum mgmt_result -mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uint64_t session_id, - uint64_t req_id, Mgmtd__DatastoreId ds_id, - bool register_req, Mgmtd__YangDataXPath **data_req, - int num_reqs); +extern int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client, + uint64_t session_id, uint64_t req_id, + Mgmtd__DatastoreId ds_id, + bool register_req, + Mgmtd__YangDataXPath **data_req, + int num_reqs); /* * Destroy library and cleanup everything. */ -extern void mgmt_fe_client_lib_destroy(void); +extern void mgmt_fe_client_destroy(struct mgmt_fe_client *client); /* * Get count of open sessions. 
*/ -extern uint mgmt_fe_client_session_count(uintptr_t lib_hndl); +extern uint mgmt_fe_client_session_count(struct mgmt_fe_client *client); #ifdef __cplusplus } diff --git a/lib/northbound.c b/lib/northbound.c index 775f6ff92f..ef2344ee11 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -792,18 +792,19 @@ static void nb_update_candidate_changes(struct nb_config *candidate, LYD_TREE_DFS_BEGIN (root, dnode) { op = nb_lyd_diff_get_op(dnode); switch (op) { - case 'c': + case 'c': /* create */ nb_config_diff_created(dnode, seq, cfg_chgs); LYD_TREE_DFS_continue = 1; break; - case 'd': + case 'd': /* delete */ nb_config_diff_deleted(dnode, seq, cfg_chgs); LYD_TREE_DFS_continue = 1; break; - case 'r': + case 'r': /* replace */ nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq, dnode); break; + case 'n': /* none */ default: break; } @@ -68,7 +68,7 @@ enum vty_event { struct nb_config *vty_mgmt_candidate_config; -static uintptr_t mgmt_lib_hndl; +static struct mgmt_fe_client *mgmt_fe_client; static bool mgmt_fe_connected; static bool mgmt_candidate_ds_wr_locked; static uint64_t mgmt_client_id_next; @@ -1640,12 +1640,12 @@ struct vty *vty_new(void) new->max = VTY_BUFSIZ; new->pass_fd = -1; - if (mgmt_lib_hndl) { + if (mgmt_fe_client) { if (!mgmt_client_id_next) mgmt_client_id_next++; new->mgmt_client_id = mgmt_client_id_next++; if (mgmt_fe_create_client_session( - mgmt_lib_hndl, new->mgmt_client_id, + mgmt_fe_client, new->mgmt_client_id, (uintptr_t) new) != MGMTD_SUCCESS) zlog_err( "Failed to open a MGMTD Frontend session for VTY session %p!!", @@ -2217,6 +2217,8 @@ bool mgmt_vty_read_configs(void) line_num = 0; (void)config_from_file(vty, confp, &line_num); count++; + + fclose(confp); } snprintf(path, sizeof(path), "%s/mgmtd.conf", frr_sysconfdir); @@ -2240,6 +2242,8 @@ bool mgmt_vty_read_configs(void) line_num = 0; (void)config_from_file(vty, confp, &line_num); count++; + + fclose(confp); } vty->pending_allowed = false; @@ -2419,8 +2423,9 @@ void vty_close(struct vty *vty) vty->status = VTY_CLOSE; - if (mgmt_lib_hndl && vty->mgmt_session_id) { - mgmt_fe_destroy_client_session(mgmt_lib_hndl, + if (mgmt_fe_client && vty->mgmt_session_id) { + MGMTD_FE_CLIENT_DBG("closing vty session"); + mgmt_fe_destroy_client_session(mgmt_fe_client, vty->mgmt_client_id); vty->mgmt_session_id = 0; } @@ -3391,8 +3396,8 @@ void vty_init_vtysh(void) * functionality linked into it. This design choice was taken for efficiency. */ -static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data, - bool connected) +static void vty_mgmt_server_connected(struct mgmt_fe_client *client, + uintptr_t usr_data, bool connected) { MGMTD_FE_CLIENT_DBG("Got %sconnected %s MGMTD Frontend Server", !connected ? "dis: " : "", @@ -3403,7 +3408,7 @@ static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data, * The fe client library will delete all session on disconnect before * calling us. */ - assert(mgmt_fe_client_session_count(lib_hndl) == 0); + assert(mgmt_fe_client_session_count(client) == 0); mgmt_fe_connected = connected; @@ -3417,10 +3422,10 @@ static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data, /* * A session has successfully been created for a vty. 
*/ -static void vty_mgmt_session_notify(uintptr_t lib_hndl, uintptr_t usr_data, - uint64_t client_id, bool create, - bool success, uintptr_t session_id, - uintptr_t session_ctx) +static void vty_mgmt_session_notify(struct mgmt_fe_client *client, + uintptr_t usr_data, uint64_t client_id, + bool create, bool success, + uintptr_t session_id, uintptr_t session_ctx) { struct vty *vty; @@ -3444,8 +3449,9 @@ static void vty_mgmt_session_notify(uintptr_t lib_hndl, uintptr_t usr_data, } } -static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data, - uint64_t client_id, uintptr_t session_id, +static void vty_mgmt_ds_lock_notified(struct mgmt_fe_client *client, + uintptr_t usr_data, uint64_t client_id, + uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id, bool lock_ds, bool success, Mgmtd__DatastoreId ds_id, @@ -3469,7 +3475,7 @@ static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data, } static void vty_mgmt_set_config_result_notified( - uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id, + struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id, uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any) { @@ -3493,7 +3499,7 @@ static void vty_mgmt_set_config_result_notified( } static void vty_mgmt_commit_config_result_notified( - uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id, + struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id, uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id, Mgmtd__DatastoreId dst_ds_id, bool validate_only, char *errmsg_if_any) @@ -3520,8 +3526,8 @@ static void vty_mgmt_commit_config_result_notified( vty_mgmt_resume_response(vty, success); } -static enum mgmt_result vty_mgmt_get_data_result_notified( - uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id, +static int vty_mgmt_get_data_result_notified( + struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id, uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id, Mgmtd__YangData **yang_data, size_t num_data, int next_key, char *errmsg_if_any) @@ -3538,7 +3544,7 @@ static enum mgmt_result vty_mgmt_get_data_result_notified( vty_out(vty, "ERROR: GET_DATA request failed, Error: %s\n", errmsg_if_any ? 
errmsg_if_any : "Unknown"); vty_mgmt_resume_response(vty, success); - return MGMTD_INTERNAL_ERROR; + return -1; } MGMTD_FE_CLIENT_DBG("GET_DATA request succeeded, client 0x%" PRIx64 @@ -3559,10 +3565,10 @@ static enum mgmt_result vty_mgmt_get_data_result_notified( vty_mgmt_resume_response(vty, success); } - return MGMTD_SUCCESS; + return 0; } -static struct mgmt_fe_client_params client_params = { +static struct mgmt_fe_client_cbs mgmt_cbs = { .client_connect_notify = vty_mgmt_server_connected, .client_session_notify = vty_mgmt_session_notify, .lock_ds_notify = vty_mgmt_ds_lock_notified, @@ -3573,21 +3579,19 @@ static struct mgmt_fe_client_params client_params = { void vty_init_mgmt_fe(void) { - if (!vty_master) { - zlog_err("Always call vty_mgmt_init_fe() after vty_init()!!"); - return; - } + char name[40]; - assert(!mgmt_lib_hndl); - snprintf(client_params.name, sizeof(client_params.name), "%s-%lld", - frr_get_progname(), (long long)getpid()); - mgmt_lib_hndl = mgmt_fe_client_lib_init(&client_params, vty_master); - assert(mgmt_lib_hndl); + assert(vty_master); + assert(!mgmt_fe_client); + snprintf(name, sizeof(name), "vty-%s-%ld", frr_get_progname(), + (long)getpid()); + mgmt_fe_client = mgmt_fe_client_create(name, &mgmt_cbs, 0, vty_master); + assert(mgmt_fe_client); } bool vty_mgmt_fe_enabled(void) { - return mgmt_lib_hndl && mgmt_fe_connected; + return mgmt_fe_client && mgmt_fe_connected; } bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty) @@ -3598,13 +3602,11 @@ bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty) int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id, bool lock) { - enum mgmt_result ret; - - if (mgmt_lib_hndl && vty->mgmt_session_id) { + if (mgmt_fe_client && vty->mgmt_session_id) { vty->mgmt_req_id++; - ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id, - vty->mgmt_req_id, ds_id, lock); - if (ret != MGMTD_SUCCESS) { + if (mgmt_fe_send_lockds_req(mgmt_fe_client, + vty->mgmt_session_id, + vty->mgmt_req_id, ds_id, lock)) { zlog_err("Failed sending %sLOCK-DS-REQ req-id %" PRIu64, lock ? "" : "UN", vty->mgmt_req_id); vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!\n", @@ -3641,7 +3643,7 @@ int vty_mgmt_send_config_data(struct vty *vty) } - if (mgmt_lib_hndl && vty->mgmt_client_id && !vty->mgmt_session_id) { + if (mgmt_fe_client && vty->mgmt_client_id && !vty->mgmt_session_id) { /* * We are connected to mgmtd but we do not yet have an * established session. 
this means we need to send any changes @@ -3652,7 +3654,7 @@ int vty_mgmt_send_config_data(struct vty *vty) return 0; } - if (mgmt_lib_hndl && vty->mgmt_session_id) { + if (mgmt_fe_client && vty->mgmt_session_id) { cnt = 0; for (indx = 0; indx < vty->num_cfg_changes; indx++) { mgmt_yang_data_init(&cfg_data[cnt]); @@ -3701,8 +3703,8 @@ int vty_mgmt_send_config_data(struct vty *vty) vty->mgmt_req_id++; implicit_commit = vty_needs_implicit_commit(vty); - if (cnt && mgmt_fe_set_config_data( - mgmt_lib_hndl, vty->mgmt_session_id, + if (cnt && mgmt_fe_send_setcfg_req( + mgmt_fe_client, vty->mgmt_session_id, vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq, cnt, implicit_commit, MGMTD_DS_RUNNING) != MGMTD_SUCCESS) { @@ -3720,15 +3722,12 @@ int vty_mgmt_send_config_data(struct vty *vty) int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort) { - enum mgmt_result ret; - - if (mgmt_lib_hndl && vty->mgmt_session_id) { + if (mgmt_fe_client && vty->mgmt_session_id) { vty->mgmt_req_id++; - ret = mgmt_fe_commit_config_data( - mgmt_lib_hndl, vty->mgmt_session_id, vty->mgmt_req_id, - MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only, - abort); - if (ret != MGMTD_SUCCESS) { + if (mgmt_fe_send_commitcfg_req( + mgmt_fe_client, vty->mgmt_session_id, + vty->mgmt_req_id, MGMTD_DS_CANDIDATE, + MGMTD_DS_RUNNING, validate_only, abort)) { zlog_err("Failed sending COMMIT-REQ req-id %" PRIu64, vty->mgmt_req_id); vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!\n"); @@ -3745,7 +3744,6 @@ int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort) int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore, const char **xpath_list, int num_req) { - enum mgmt_result ret; Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES]; Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES]; Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES]; @@ -3762,11 +3760,9 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore, get_req[i].data = &yang_data[i]; getreq[i] = &get_req[i]; } - ret = mgmt_fe_get_config_data(mgmt_lib_hndl, vty->mgmt_session_id, - vty->mgmt_req_id, datastore, getreq, - num_req); - - if (ret != MGMTD_SUCCESS) { + if (mgmt_fe_send_getcfg_req(mgmt_fe_client, vty->mgmt_session_id, + vty->mgmt_req_id, datastore, getreq, + num_req)) { zlog_err( "Failed to send GET-CONFIG to MGMTD for req-id %" PRIu64 ".", @@ -3783,7 +3779,6 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore, int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore, const char **xpath_list, int num_req) { - enum mgmt_result ret; Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES]; Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES]; Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES]; @@ -3800,10 +3795,9 @@ int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore, get_req[i].data = &yang_data[i]; getreq[i] = &get_req[i]; } - ret = mgmt_fe_get_data(mgmt_lib_hndl, vty->mgmt_session_id, - vty->mgmt_req_id, datastore, getreq, num_req); - - if (ret != MGMTD_SUCCESS) { + if (mgmt_fe_send_getdata_req(mgmt_fe_client, vty->mgmt_session_id, + vty->mgmt_req_id, datastore, getreq, + num_req)) { zlog_err("Failed to send GET-DATA to MGMTD for req-id %" PRIu64 ".", vty->mgmt_req_id); @@ -3862,9 +3856,9 @@ void vty_terminate(void) { struct vty *vty; - if (mgmt_lib_hndl) { - mgmt_fe_client_lib_destroy(); - mgmt_lib_hndl = 0; + if (mgmt_fe_client) { + mgmt_fe_client_destroy(mgmt_fe_client); + mgmt_fe_client = 0; } memset(vty_cwd, 0x00, sizeof(vty_cwd)); diff --git 
a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c index 2d01f8ecad..e4a62951d2 100644 --- a/mgmtd/mgmt_be_adapter.c +++ b/mgmtd/mgmt_be_adapter.c @@ -564,8 +564,8 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter, return 0; } -static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id, bool create) +int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter, + uint64_t txn_id, bool create) { Mgmtd__BeMessage be_msg; Mgmtd__BeTxnReq txn_req; @@ -584,11 +584,10 @@ static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter, return mgmt_be_adapter_send_msg(adapter, &be_msg); } -static int -mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id, uint64_t batch_id, - Mgmtd__YangCfgDataReq **cfgdata_reqs, - size_t num_reqs, bool end_of_data) +int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter, + uint64_t txn_id, uint64_t batch_id, + Mgmtd__YangCfgDataReq **cfgdata_reqs, + size_t num_reqs, bool end_of_data) { Mgmtd__BeMessage be_msg; Mgmtd__BeCfgDataCreateReq cfgdata_req; @@ -612,8 +611,8 @@ mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter, return mgmt_be_adapter_send_msg(adapter, &be_msg); } -static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id) +int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter, + uint64_t txn_id) { Mgmtd__BeMessage be_msg; Mgmtd__BeCfgDataApplyReq apply_req; @@ -834,35 +833,6 @@ int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter, return 0; } -int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id) -{ - return mgmt_be_send_txn_req(adapter, txn_id, true); -} - -int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id) -{ - return mgmt_be_send_txn_req(adapter, txn_id, false); -} - -int mgmt_be_send_cfg_data_create_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id, uint64_t batch_id, - struct mgmt_be_cfgreq *cfg_req, - bool end_of_data) -{ - return mgmt_be_send_cfgdata_create_req( - adapter, txn_id, batch_id, cfg_req->cfgdata_reqs, - cfg_req->num_reqs, end_of_data); -} - -extern int -mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id) -{ - return mgmt_be_send_cfgapply_req(adapter, txn_id); -} - void mgmt_be_get_subscr_info_for_xpath( const char *xpath, struct mgmt_be_client_subscr_info *subscr_info) { diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h index 8f4eef5fb3..e1676e63af 100644 --- a/mgmtd/mgmt_be_adapter.h +++ b/mgmtd/mgmt_be_adapter.h @@ -115,13 +115,9 @@ mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter, struct mgmt_ds_ctx *ds_ctx, struct nb_config_cbs **cfg_chgs); -/* Create a transaction. */ -extern int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id); - -/* Destroy a transaction. */ -extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id); +/* Create/destroy a transaction. */ +extern int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter, + uint64_t txn_id, bool create); /* * Send config data create request to backend client. @@ -135,8 +131,11 @@ extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter, * batch_id * Request batch ID. * - * cfg_req - * Config data request. + * cfgdata_reqs + * An array of pointer to Mgmtd__YangCfgDataReq. + * + * num_reqs + * Length of the cfgdata_reqs array. 
* * end_of_data * TRUE if the data from last batch, FALSE otherwise. @@ -144,37 +143,15 @@ extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter, * Returns: * 0 on success, -1 on failure. */ -extern int mgmt_be_send_cfg_data_create_req( - struct mgmt_be_client_adapter *adapter, uint64_t txn_id, - uint64_t batch_id, struct mgmt_be_cfgreq *cfg_req, bool end_of_data); - -/* - * Send config validate request to backend client. - * - * adaptr - * Backend adapter information. - * - * txn_id - * Unique transaction identifier. - * - * batch_ids - * List of request batch IDs. - * - * num_batch_ids - * Number of batch ids. - * - * Returns: - * 0 on success, -1 on failure. - */ -extern int -mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id, uint64_t batch_ids[], - size_t num_batch_ids); +extern int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter, + uint64_t txn_id, uint64_t batch_id, + Mgmtd__YangCfgDataReq **cfgdata_reqs, + size_t num_reqs, bool end_of_data); /* * Send config apply request to backend client. * - * adaptr + * adapter * Backend adapter information. * * txn_id @@ -183,9 +160,8 @@ mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter, * Returns: * 0 on success, -1 on failure. */ -extern int -mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter, - uint64_t txn_id); +extern int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter, + uint64_t txn_id); /* * Dump backend adapter status to vty. diff --git a/mgmtd/mgmt_history.h b/mgmtd/mgmt_history.h index d3f7958952..5d9b662694 100644 --- a/mgmtd/mgmt_history.h +++ b/mgmtd/mgmt_history.h @@ -74,9 +74,11 @@ mgmt_time_to_string(struct timespec *tv, bool long_fmt, char *buffer, size_t sz) if (long_fmt) { n = strftime(buffer, sz, MGMT_LONG_TIME_FMT, &tm); + assert(n < sz); snprintf(&buffer[n], sz - n, ",%09lu", tv->tv_nsec); } else { n = strftime(buffer, sz, MGMT_SHORT_TIME_FMT, &tm); + assert(n < sz); snprintf(&buffer[n], sz - n, "%09lu", tv->tv_nsec); } diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c index bf59224338..588693b7e3 100644 --- a/mgmtd/mgmt_txn.c +++ b/mgmtd/mgmt_txn.c @@ -635,7 +635,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread) txn->session_id); FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) { - error = false; assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG); ds_ctx = txn_req->req.set_cfg->ds_ctx; if (!ds_ctx) { @@ -644,7 +643,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread) txn_req->req.set_cfg->ds_id, txn_req->req_id, MGMTD_INTERNAL_ERROR, "No such datastore!", txn_req->req.set_cfg->implicit_commit); - error = true; goto mgmt_txn_process_set_cfg_done; } @@ -656,7 +654,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread) MGMTD_INTERNAL_ERROR, "Unable to retrieve DS Config Tree!", txn_req->req.set_cfg->implicit_commit); - error = true; goto mgmt_txn_process_set_cfg_done; } @@ -713,7 +710,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread) "Failed to send SET_CONFIG_REPLY txn-id %" PRIu64 " session-id: %" PRIu64, txn->txn_id, txn->session_id); - error = true; } mgmt_txn_process_set_cfg_done: @@ -1022,7 +1018,7 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req, (void)mgmt_txn_send_commit_cfg_reply( txn_req->txn, MGMTD_INTERNAL_ERROR, "Internal error! 
Could not get Xpath from Ds node!"); - goto mgmt_txn_create_config_batches_failed; + return -1; } value = (char *)lyd_get_value(chg->cb.dnode); @@ -1122,7 +1118,6 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req, } free(xpath); - xpath = NULL; } cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs; @@ -1130,18 +1125,11 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req, (void)mgmt_txn_send_commit_cfg_reply( txn_req->txn, MGMTD_NO_CFG_CHANGES, "No changes found to commit!"); - goto mgmt_txn_create_config_batches_failed; + return -1; } cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE; return 0; - -mgmt_txn_create_config_batches_failed: - - if (xpath) - free(xpath); - - return -1; } static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn) @@ -1345,8 +1333,7 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn) FOREACH_MGMTD_BE_CLIENT_ID (id) { if (cmtcfg_req->subscr_info.xpath_subscr[id]) { adapter = mgmt_be_get_adapter_by_id(id); - if (mgmt_be_create_txn(adapter, txn->txn_id) - != 0) { + if (mgmt_be_send_txn_req(adapter, txn->txn_id, true)) { (void)mgmt_txn_send_commit_cfg_reply( txn, MGMTD_INTERNAL_ERROR, "Could not send TXN_CREATE to backend adapter"); @@ -1379,9 +1366,8 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn) return 0; } -static int -mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn, - struct mgmt_be_client_adapter *adapter) +static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn, + struct mgmt_be_client_adapter *adapter) { struct mgmt_commit_cfg_req *cmtcfg_req; struct mgmt_txn_be_cfg_batch *cfg_btch; @@ -1403,10 +1389,10 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn, cfg_req.cfgdata_reqs = cfg_btch->cfg_datap; cfg_req.num_reqs = cfg_btch->num_cfg_data; indx++; - if (mgmt_be_send_cfg_data_create_req( - adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req, - indx == num_batches ? true : false) - != 0) { + if (mgmt_be_send_cfgdata_req( + adapter, txn->txn_id, cfg_btch->batch_id, + cfg_req.cfgdata_reqs, cfg_req.num_reqs, + indx == num_batches ? true : false)) { (void)mgmt_txn_send_commit_cfg_reply( txn, MGMTD_INTERNAL_ERROR, "Internal Error! Could not send config data to backend!"); @@ -1426,7 +1412,7 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn, } /* - * This could ne the last Backend Client to send CFGDATA_CREATE_REQ to. + * This could be the last Backend Client to send CFGDATA_CREATE_REQ to. * Try moving the commit to next phase. */ mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req); @@ -1446,7 +1432,7 @@ mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn, cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg; if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id]) { adapter = mgmt_be_get_adapter_by_id(adapter->id); - (void)mgmt_be_destroy_txn(adapter, txn->txn_id); + (void)mgmt_be_send_txn_req(adapter, txn->txn_id, false); FOREACH_TXN_CFG_BATCH_IN_LIST ( &txn->commit_cfg_req->req.commit_cfg @@ -1519,8 +1505,7 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn) return -1; btch_list = &cmtcfg_req->curr_batches[id]; - if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id) - != 0) { + if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) { (void)mgmt_txn_send_commit_cfg_reply( txn, MGMTD_INTERNAL_ERROR, "Could not send CFG_APPLY_REQ to backend adapter"); @@ -2268,11 +2253,6 @@ uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type) return txn ? 
txn->txn_id : MGMTD_TXN_ID_NONE; } -bool mgmt_txn_id_is_valid(uint64_t txn_id) -{ - return mgmt_txn_id2ctx(txn_id) ? true : false; -} - void mgmt_destroy_txn(uint64_t *txn_id) { struct mgmt_txn_ctx *txn; @@ -2285,17 +2265,6 @@ void mgmt_destroy_txn(uint64_t *txn_id) *txn_id = MGMTD_TXN_ID_NONE; } -enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id) -{ - struct mgmt_txn_ctx *txn; - - txn = mgmt_txn_id2ctx(txn_id); - if (!txn) - return MGMTD_TXN_TYPE_NONE; - - return txn->type; -} - int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id, Mgmtd__DatastoreId ds_id, struct mgmt_ds_ctx *ds_ctx, @@ -2549,7 +2518,7 @@ int mgmt_txn_notify_be_cfgdata_reply( { struct mgmt_txn_ctx *txn; struct mgmt_txn_be_cfg_batch *cfg_btch; - struct mgmt_commit_cfg_req *cmtcfg_req = NULL; + struct mgmt_commit_cfg_req *cmtcfg_req; txn = mgmt_txn_id2ctx(txn_id); if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG) diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h index be781ab954..0718397138 100644 --- a/mgmtd/mgmt_txn.h +++ b/mgmtd/mgmt_txn.h @@ -101,16 +101,6 @@ extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type); extern void mgmt_destroy_txn(uint64_t *txn_id); /* - * Check if transaction is valid given an ID. - */ -extern bool mgmt_txn_id_is_valid(uint64_t txn_id); - -/* - * Returns the type of transaction given an ID. - */ -extern enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id); - -/* * Send set-config request to be processed later in transaction. * * txn_id diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c index ea059c4be6..0fb3d29e25 100644 --- a/ospf6d/ospf6_interface.c +++ b/ospf6d/ospf6_interface.c @@ -516,7 +516,6 @@ static int ospf6_interface_state_change(uint8_t next_state, OSPF6_NETWORK_LSA_EXECUTE(oi); OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi); OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(oi->area); - OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi); } else if (prev_state == OSPF6_INTERFACE_DR || next_state == OSPF6_INTERFACE_DR) { OSPF6_NETWORK_LSA_SCHEDULE(oi); diff --git a/ospfd/ospf_route.c b/ospfd/ospf_route.c index 75868056ad..cdb1eb0095 100644 --- a/ospfd/ospf_route.c +++ b/ospfd/ospf_route.c @@ -684,6 +684,8 @@ void ospf_intra_add_stub(struct route_table *rt, struct router_lsa_link *link, __func__); } } + if (rn->info) + ospf_route_free(rn->info); rn->info = or ; diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index b1beb45630..f26fd818b5 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -1762,3 +1762,61 @@ void pim_iface_init(void) if_zapi_callbacks(pim_ifp_create, pim_ifp_up, pim_ifp_down, pim_ifp_destroy); } + +static void pim_if_membership_clear(struct interface *ifp) +{ + struct pim_interface *pim_ifp; + + pim_ifp = ifp->info; + assert(pim_ifp); + + if (pim_ifp->pim_enable && pim_ifp->gm_enable) + return; + + pim_ifchannel_membership_clear(ifp); +} + +void pim_pim_interface_delete(struct interface *ifp) +{ + struct pim_interface *pim_ifp = ifp->info; + + if (!pim_ifp) + return; + + pim_ifp->pim_enable = false; + + pim_if_membership_clear(ifp); + + /* + * pim_sock_delete() removes all neighbors from + * pim_ifp->pim_neighbor_list. 
+ */ + pim_sock_delete(ifp, "pim unconfigured on interface"); + pim_upstream_nh_if_update(pim_ifp->pim, ifp); + + if (!pim_ifp->gm_enable) { + pim_if_addr_del_all(ifp); + pim_if_delete(ifp); + } +} + +void pim_gm_interface_delete(struct interface *ifp) +{ + struct pim_interface *pim_ifp = ifp->info; + + if (!pim_ifp) + return; + + pim_ifp->gm_enable = false; + + pim_if_membership_clear(ifp); + +#if PIM_IPV == 4 + igmp_sock_delete_all(ifp); +#else + gm_ifp_teardown(ifp); +#endif + + if (!pim_ifp->pim_enable) + pim_if_delete(ifp); +} diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h index 973840a753..0312f719d3 100644 --- a/pimd/pim_iface.h +++ b/pimd/pim_iface.h @@ -243,5 +243,7 @@ bool pim_if_is_vrf_device(struct interface *ifp); int pim_if_ifchannel_count(struct pim_interface *pim_ifp); void pim_iface_init(void); +void pim_pim_interface_delete(struct interface *ifp); +void pim_gm_interface_delete(struct interface *ifp); #endif /* PIM_IFACE_H */ diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c index 15078dd1ec..18a9fb7c6c 100644 --- a/pimd/pim_igmpv3.c +++ b/pimd/pim_igmpv3.c @@ -319,14 +319,6 @@ void igmp_source_free(struct gm_source *source) XFREE(MTYPE_PIM_IGMP_GROUP_SOURCE, source); } -static void source_channel_oil_detach(struct gm_source *source) -{ - if (source->source_channel_oil) { - pim_channel_oil_del(source->source_channel_oil, __func__); - source->source_channel_oil = NULL; - } -} - /* igmp_source_delete: stop forwarding, and delete the source igmp_source_forward_stop: stop forwarding, but keep the source @@ -355,6 +347,7 @@ void igmp_source_delete(struct gm_source *source) source_timer_off(group, source); igmp_source_forward_stop(source); + source->source_channel_oil = NULL; /* sanity check that forwarding has been disabled */ if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) { @@ -371,8 +364,6 @@ void igmp_source_delete(struct gm_source *source) /* warning only */ } - source_channel_oil_detach(source); - /* notice that listnode_delete() can't be moved into igmp_source_free() because the later is diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 3c4ab1d4cc..86c40d1800 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -45,20 +45,6 @@ MACRO_REQUIRE_SEMICOLON() #define yang_dnode_get_pimaddr yang_dnode_get_ipv4 #endif /* PIM_IPV != 6 */ -static void pim_if_membership_clear(struct interface *ifp) -{ - struct pim_interface *pim_ifp; - - pim_ifp = ifp->info; - assert(pim_ifp); - - if (pim_ifp->pim_enable && pim_ifp->gm_enable) { - return; - } - - pim_ifchannel_membership_clear(ifp); -} - /* * When PIM is disabled on interface, IGMPv3 local membership * information is not injected into PIM interface state. @@ -81,15 +67,17 @@ static void pim_if_membership_refresh(struct interface *ifp) pim_ifp = ifp->info; assert(pim_ifp); -#if PIM_IPV == 6 - gm_ifp = pim_ifp->mld; -#endif if (!pim_ifp->pim_enable) return; if (!pim_ifp->gm_enable) return; +#if PIM_IPV == 6 + gm_ifp = pim_ifp->mld; + if (!gm_ifp) + return; +#endif /* * First clear off membership from all PIM (S,G) entries on the * interface @@ -159,32 +147,6 @@ static int pim_cmd_interface_add(struct interface *ifp) return 1; } -static int pim_cmd_interface_delete(struct interface *ifp) -{ - struct pim_interface *pim_ifp = ifp->info; - - if (!pim_ifp) - return 1; - - pim_ifp->pim_enable = false; - - pim_if_membership_clear(ifp); - - /* - * pim_sock_delete() removes all neighbors from - * pim_ifp->pim_neighbor_list. 
- */ - pim_sock_delete(ifp, "pim unconfigured on interface"); - pim_upstream_nh_if_update(pim_ifp->pim, ifp); - - if (!pim_ifp->gm_enable) { - pim_if_addr_del_all(ifp); - pim_if_delete(ifp); - } - - return 1; -} - static int interface_pim_use_src_cmd_worker(struct interface *ifp, pim_addr source_addr, char *errmsg, size_t errmsg_len) { @@ -1571,12 +1533,7 @@ int lib_interface_pim_address_family_destroy(struct nb_cb_destroy_args *args) if (!pim_ifp) return NB_OK; - if (!pim_cmd_interface_delete(ifp)) { - snprintf(args->errmsg, args->errmsg_len, - "Unable to delete interface information %s", - ifp->name); - return NB_ERR_INCONSISTENCY; - } + pim_pim_interface_delete(ifp); } return NB_OK; @@ -1624,11 +1581,7 @@ int lib_interface_pim_address_family_pim_enable_modify(struct nb_cb_modify_args if (!pim_ifp) return NB_ERR_INCONSISTENCY; - if (!pim_cmd_interface_delete(ifp)) { - snprintf(args->errmsg, args->errmsg_len, - "Unable to delete interface information"); - return NB_ERR_INCONSISTENCY; - } + pim_pim_interface_delete(ifp); } break; } @@ -2563,7 +2516,6 @@ int lib_interface_gmp_address_family_create(struct nb_cb_create_args *args) int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args) { struct interface *ifp; - struct pim_interface *pim_ifp; switch (args->event) { case NB_EV_VALIDATE: @@ -2572,19 +2524,7 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args) break; case NB_EV_APPLY: ifp = nb_running_get_entry(args->dnode, NULL, true); - pim_ifp = ifp->info; - - if (!pim_ifp) - return NB_OK; - - pim_ifp->gm_enable = false; - - pim_if_membership_clear(ifp); - - pim_if_addr_del_all_igmp(ifp); - - if (!pim_ifp->pim_enable) - pim_if_delete(ifp); + pim_gm_interface_delete(ifp); } return NB_OK; @@ -2598,7 +2538,6 @@ int lib_interface_gmp_address_family_enable_modify( { struct interface *ifp; bool gm_enable; - struct pim_interface *pim_ifp; int mcast_if_count; const char *ifp_name; const struct lyd_node *if_dnode; @@ -2628,25 +2567,8 @@ int lib_interface_gmp_address_family_enable_modify( if (gm_enable) return pim_cmd_gm_start(ifp); - else { - pim_ifp = ifp->info; - - if (!pim_ifp) - return NB_ERR_INCONSISTENCY; - - pim_ifp->gm_enable = false; - - pim_if_membership_clear(ifp); - -#if PIM_IPV == 4 - pim_if_addr_del_all_igmp(ifp); -#else - gm_ifp_teardown(ifp); -#endif - - if (!pim_ifp->pim_enable) - pim_if_delete(ifp); - } + else + pim_gm_interface_delete(ifp); } return NB_OK; } diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c index 6ffea868d8..4081786c1e 100644 --- a/pimd/pim_tib.c +++ b/pimd/pim_tib.c @@ -163,4 +163,6 @@ void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg, per-interface (S,G) state. 
*/ pim_ifchannel_local_membership_del(oif, &sg); + + pim_channel_oil_del(*oilp, __func__); } diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index 4dec84b8fb..656df20cce 100644 --- a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -799,9 +799,33 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons %changelog -* Tue Feb 07 2023 Martin Winter <mwinter@opensourcerouting.org> - %{version} - -* Tue Feb 07 2023 Donatas Abraitis <donatas@opensourcerouting.org> - 8.5 +* Tue Jun 06 2023 Martin Winter <mwinter@opensourcerouting.org> - %{version} + +* Tue Jun 06 2023 Jafar Al-Gharaibeh <jafar@atcorp.com> - 9.0 + +* Fri Mar 10 2023 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.5 +- Major Highlights: +- Add support for per-VRF SRv6 SID +- Add BGP labeled-unicast Add-Path functionality +- Implementation of SNMP BGP4v2-MIB (IPv6 support) for better network management and monitoring +- Add BGP new command neighbor path-attribute discard +- Add BGP new command neighbor path-attribute treat-as-withdraw +- Implement L3 route-target auto/wildcard configuration +- Implement BGP ACCEPT_OWN Community Attribute (rfc7611) +- Implement The Accumulated IGP Metric Attribute for BGP (rfc7311) +- Implement graceful-shutdown command per neighbor +- Add BGP new command to configure TCP keepalives for a peer bgp tcp-keepalive +- Traffic control (TC) ZAPI implementation +- SRv6 uSID (microSID) implementation +- Start deprecating start-shell, ssh, and telnet commands due to security reasons +- Add VRRPv3 an ability to disable IPv4 pseudo-header checksum +- BFD integration for static routes +- Allow protocols to configure BFD sessions with automatic source selection +- Allow zero-length opaque LSAs for OSPF (rfc5250) +- Add ISIS new command set-overload-bit on-startup +- PIMv6 BSM support +- For a full list of new features and bug fixes, please refer to: +- https://frrouting.org/release/ * Tue Nov 01 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.4 - New BGP command (neighbor PEER soo) to configure SoO to prevent routing loops and suboptimal routing on dual-homed sites. diff --git a/staticd/static_main.c b/staticd/static_main.c index 464c42ecab..f6b7847602 100644 --- a/staticd/static_main.c +++ b/staticd/static_main.c @@ -53,7 +53,7 @@ struct option longopts[] = { { 0 } }; /* Master of threads. */ struct event_loop *master; -uintptr_t mgmt_lib_hndl; +struct mgmt_be_client *mgmt_be_client; static struct frr_daemon_info staticd_di; /* SIGHUP handler. */ @@ -71,7 +71,7 @@ static void sigint(void) /* Disable BFD events to avoid wasting processing. */ bfd_protocol_integration_set_shutdown(true); - mgmt_be_client_lib_destroy(); + mgmt_be_client_destroy(mgmt_be_client); static_vrf_terminate(); @@ -106,56 +106,6 @@ struct frr_signal_t static_signals[] = { }, }; -#if 0 -static void static_mgmt_be_client_connect(uintptr_t lib_hndl, - uintptr_t usr_data, bool connected) -{ - (void)usr_data; - - assert(lib_hndl == mgmt_lib_hndl); - - zlog_debug("Got %s %s MGMTD Backend Client Server", - connected ? "connected" : "disconnected", - connected ? "to" : "from"); - - /* unless we are subscribing to xpaths we don't need to do this */ - if (connected) - (void)mgmt_be_subscribe_yang_data(mgmt_lib_hndl, NULL, 0); -} - -static void -static_mgmt_txn_notify(uintptr_t lib_hndl, uintptr_t usr_data, - struct mgmt_be_client_txn_ctx *txn_ctx, - bool destroyed) -{ - zlog_debug("Got Txn %s Notify from MGMTD server", - destroyed ? 
"DESTROY" : "CREATE"); - - if (!destroyed) { - /* - * TODO: Allocate and install a private scratchpad for this - * transaction if required - */ - } else { - /* - * TODO: Uninstall and deallocate the private scratchpad for - * this transaction if installed earlier. - */ - } -} -#endif - -static struct mgmt_be_client_params mgmt_params = { - .name = "staticd", - .conn_retry_intvl_sec = 3, - /* - * instead of a connect routine maybe just put xpaths to subcribe to - * here - */ - .client_connect_notify = NULL, /* static_mgmt_be_client_connect, */ - .txn_notify = NULL, /* static_mgmt_txn_notify */ -}; - static const struct frr_yang_module_info *const staticd_yang_modules[] = { &frr_filter_info, &frr_interface_info, @@ -212,7 +162,7 @@ int main(int argc, char **argv, char **envp) static_vty_init(); /* Initialize MGMT backend functionalities */ - mgmt_lib_hndl = mgmt_be_client_lib_init(&mgmt_params, master); + mgmt_be_client = mgmt_be_client_create("staticd", NULL, 0, master); hook_register(routing_conf_event, routing_control_plane_protocols_name_validate); diff --git a/tests/topotests/babel_topo1/r1/babeld.conf b/tests/topotests/babel_topo1/r1/babeld.conf index 372d2edff1..4058362cc3 100644 --- a/tests/topotests/babel_topo1/r1/babeld.conf +++ b/tests/topotests/babel_topo1/r1/babeld.conf @@ -1,4 +1,3 @@ -log file eigrpd.log interface r1-eth0 babel hello-interval 1000 diff --git a/tests/topotests/babel_topo1/r2/babeld.conf b/tests/topotests/babel_topo1/r2/babeld.conf index 8a36dda5f8..bae4e59e0b 100644 --- a/tests/topotests/babel_topo1/r2/babeld.conf +++ b/tests/topotests/babel_topo1/r2/babeld.conf @@ -1,4 +1,3 @@ -log file eigrpd.log ! interface r2-eth0 babel hello-interval 1000 diff --git a/tests/topotests/babel_topo1/r3/babeld.conf b/tests/topotests/babel_topo1/r3/babeld.conf index 1e9dc261f5..bfda3622dd 100644 --- a/tests/topotests/babel_topo1/r3/babeld.conf +++ b/tests/topotests/babel_topo1/r3/babeld.conf @@ -1,4 +1,3 @@ -log file eigrpd.log ! 
interface r3-eth0 babel hello-interval 1000 diff --git a/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json b/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json new file mode 100644 index 0000000000..4156c6d0f7 --- /dev/null +++ b/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json @@ -0,0 +1,152 @@ +{ + "address_types": ["ipv4", "ipv6"], + "ipv4base": "192.168.0.0", + "ipv4mask": 30, + "ipv6base": "fd00::", + "ipv6mask": 64, + "link_ip_start":{"ipv4":"192.168.0.0", "v4mask":24, "ipv6":"fd00::", "v6mask":64}, + "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128}, + "routers": { + "r1": { + "links": { + "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"}, + "r2": {"ipv4": "auto", "ipv6": "auto"}, + "r3": {"ipv4": "auto", "ipv6": "auto"} + }, + "bgp": { + "local_as": "100", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": {"dest_link": {"r1": {}}}, + "r3": {"dest_link": {"r1": {}}} + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": {"dest_link": {"r1": { + "route_maps": [{ + "name": "rmap_global", + "direction": "in" + }] + }}}, + "r3": {"dest_link": {"r1": { + "route_maps": [{ + "name": "rmap_global", + "direction": "in" + }] + }}} + } + } + } + } + }, + "route_maps": { + "rmap_global": [{ + "action": "permit", + "set": { + "ipv6": { + "nexthop": "prefer-global" + } + } + }] + }, + "static_routes":[ + { + "network":"192.168.20.1/32", + "next_hop":"Null0" + }, + { + "network":"192:168:20::1/128", + "next_hop":"Null0" + }] + }, + "r2": { + "links": { + "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"}, + "r1": {"ipv4": "auto", "ipv6": "auto"}, + "r4": {"ipv4": "auto", "ipv6": "auto"} + }, + "bgp": { + "local_as": "200", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": {"dest_link": {"r2": {}}}, + "r4": {"dest_link": {"r2": {}}} + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": {"dest_link": {"r2": {}}}, + "r4": {"dest_link": {"r2": {}}} + } + } + } + } + } + }, + "r3": { + "links": { + "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"}, + "r1": {"ipv4": "auto", "ipv6": "auto"}, + "r4": {"ipv4": "auto", "ipv6": "auto"} + }, + "bgp": { + "local_as": "300", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r1": {"dest_link": {"r3": {}}}, + "r4": {"dest_link": {"r3": {}}} + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r1": {"dest_link": {"r3": {}}}, + "r4": {"dest_link": {"r3": {}}} + } + } + } + } + } + }, + "r4": { + "links": { + "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"}, + "r2": {"ipv4": "auto", "ipv6": "auto"}, + "r3": {"ipv4": "auto", "ipv6": "auto"} + }, + "bgp": { + "local_as": "400", + "address_family": { + "ipv4": { + "unicast": { + "neighbor": { + "r2": {"dest_link": {"r4": {}}}, + "r3": {"dest_link": {"r4": {}}} + } + } + }, + "ipv6": { + "unicast": { + "neighbor": { + "r2": {"dest_link": {"r4": {}}}, + "r3": {"dest_link": {"r4": {}}} + } + } + } + } + } + } + } +} diff --git a/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py b/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py new file mode 100644 index 0000000000..fb72f4331d --- /dev/null +++ b/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py @@ -0,0 +1,1118 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: ISC +# +# Copyright (c) 2023 by VMware, Inc. 
("VMware") +# +# +################################################################################ +# Following tests are performed to validate BGP always compare MED functionality +################################################################################ +""" +1. Verify the BGP always compare MED functionality in between eBGP Peers +2. Verify the BGP always compare MED functionality in between eBGP Peers with by changing different AD values +3. Verify the BGP always compare MED functionality in between eBGP Peers by changing MED values in middle routers +4. Verify that BGP Always compare MED functionality by restarting BGP, Zebra and FRR services and clear BGP and + shutdown BGP neighbor +5. Verify BGP always compare MED functionality by performing shut/noshut on the interfaces in between BGP neighbors +""" + +import os +import sys +import time +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) +sys.path.append(os.path.join(CWD, "../lib/")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib.topogen import Topogen, get_topogen + +from lib.common_config import ( + start_topology, + write_test_header, + create_static_routes, + write_test_footer, + reset_config_on_routers, + verify_rib, + step, + check_address_types, + check_router_status, + create_static_routes, + create_prefix_lists, + create_route_maps, + kill_router_daemons, + shutdown_bringup_interface, + stop_router, + start_router, + delete_route_maps, +) + +from lib.topolog import logger +from lib.bgp import verify_bgp_convergence, verify_bgp_rib, create_router_bgp, clear_bgp +from lib.topojson import build_config_from_json + +pytestmark = [pytest.mark.bgpd, pytest.mark.staticd] + +# Reading the data from JSON File for topology creation +topo = None + +# Global variables +ADDR_TYPES = check_address_types() +NETWORK1_1 = {"ipv4": "192.168.20.1/32", "ipv6": "192:168:20::1/128"} +NETWORK1_2 = {"ipv4": "192.168.30.1/32", "ipv6": "192:168:30::1/128"} +NETWORK1_3 = {"ipv4": "192.168.40.1/32", "ipv6": "192:168:40::1/128"} +NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"} + + +def setup_module(mod): + """ + Sets up the pytest environment + + * `mod`: module name + """ + + testsuite_run_time = time.asctime(time.localtime(time.time())) + logger.info("Testsuite start time: {}".format(testsuite_run_time)) + logger.info("=" * 40) + + logger.info("Running setup_module to create topology") + + # This function initiates the topology build with Topogen... + json_file = "{}/bgp_always_compare_med_topo1.json".format(CWD) + tgen = Topogen(json_file, mod.__name__) + global topo + topo = tgen.json_topo + # ... and here it calls Mininet initialization functions. 
+ + # Starting topology, create tmp files which are loaded to routers + # to start daemons and then start routers + start_topology(tgen) + + # Creating configuration from JSON + build_config_from_json(tgen, topo) + + global ADDR_TYPES + ADDR_TYPES = check_address_types() + + BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo) + assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format( + BGP_CONVERGENCE + ) + + logger.info("Running setup_module() done") + + +def teardown_module(): + """Teardown the pytest environment""" + + logger.info("Running teardown_module to delete topology") + + tgen = get_topogen() + + # Stop toplogy and Remove tmp files + tgen.stop_topology() + + logger.info( + "Testsuite end time: {}".format(time.asctime(time.localtime(time.time()))) + ) + logger.info("=" * 40) + + +########################################################################################################## +# +# Local API +# +########################################################################################################## + + +def initial_configuration(tgen, tc_name): + """ + API to do initial set of configuration + """ + + step( + "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config" + ) + + step("Configure static routes in R4") + for addr_type in ADDR_TYPES: + input_static_r4 = { + "r4": { + "static_routes": [ + { + "network": NETWORK1_1[addr_type], + "next_hop": NEXT_HOP_IP[addr_type], + } + ] + } + } + + logger.info("Configure static routes") + result = create_static_routes(tgen, input_static_r4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure redistribute static in R4") + input_static_redist_r4 = { + "r4": { + "bgp": { + "address_family": { + addr_type: { + "unicast": {"redistribute": [{"redist_type": "static"}]} + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_static_redist_r4) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for addr_type in ADDR_TYPES: + # Create prefix list + input_dict_23 = { + "r2": { + "prefix_lists": { + addr_type: { + "pf_ls_r2_{}".format(addr_type): [ + {"network": NETWORK1_1[addr_type], "action": "permit"} + ] + } + } + }, + "r3": { + "prefix_lists": { + "ipv4": { + "pf_ls_r3_{}".format(addr_type): [ + {"network": NETWORK1_1[addr_type], "action": "permit"} + ] + } + } + }, + } + result = create_prefix_lists(tgen, input_dict_23) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + # Create route map + input_dict_23 = { + "r2": { + "route_maps": { + "RMAP_MED_R2": [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_ls_r2_{}".format(addr_type) + } + }, + "set": {"med": 300}, + } + ] + } + }, + "r3": { + "route_maps": { + "RMAP_MED_R3": [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_ls_r3_{}".format(addr_type) + } + }, + "set": {"med": 200}, + } + ] + } + }, + } + result = create_route_maps(tgen, input_dict_23) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + input_dict_r2_r3 = { + "r2": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r2": { + "route_maps": [ + { + "name": "RMAP_MED_R2", + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + }, + "r3": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r1": { + "dest_link": { + "r3": { + 
"route_maps": [ + { + "name": "RMAP_MED_R3", + "direction": "out", + } + ] + } + } + } + } + } + } + } + } + }, + } + result = create_router_bgp(tgen, topo, input_dict_r2_r3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + +########################################################################################################## +# +# Testcases +# +########################################################################################################## + + +def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_p0(request): + """ + Verify the BGP always compare MED functionality in between eBGP Peers + """ + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + tgen = get_topogen() + if tgen.routers_have_failure(): + check_router_status(tgen) + reset_config_on_routers(tgen) + initial_configuration(tgen, tc_name) + + step( + "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config" + ) + step( + "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following " + "commands and verify that best path chosen by lowest MED value" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'multi-path as-path relax' command at R1.") + configure_bgp = { + "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}} + } + result = create_router_bgp(tgen, topo, configure_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'multi-path as-path relax' command, " + "its also chooses lowest MED to reach destination." 
+ ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2]) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'bgp always-compare-med' command at R1.") + input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}} + result = create_router_bgp(tgen, topo, input_dict_r1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove 'bgp always-compare-med' command at R1.") + input_dict_r1 = { + "r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": False}} + } + result = create_router_bgp(tgen, topo, input_dict_r1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step("Verify that 'bgp always-compare-med' command is removed") + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2]) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Remove 'multi-path as-path relax' command at R1") + configure_bgp = { + "r1": { + "bgp": { + "local_as": "100", + "bestpath": {"aspath": "multipath-relax", "delete": True}, + } + } + } + result = create_router_bgp(tgen, topo, configure_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step("Verify route selection after removing 'multi-path as-path relax' command") + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_by_changing_AD_values_p0( + request, +): + """ + Verify the BGP always compare MED functionality in between eBGP Peers with by 
changing different AD values. + """ + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + tgen = get_topogen() + if tgen.routers_have_failure(): + check_router_status(tgen) + reset_config_on_routers(tgen) + initial_configuration(tgen, tc_name) + + step( + "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config" + ) + step( + "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following " + "commands and verify that best path chosen by lowest MED value" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'bgp always-compare-med' command at R1.") + input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}} + result = create_router_bgp(tgen, topo, input_dict_r1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure AD value=100 at R2 and AD value=200 at R3 towards R1") + input_dict_1 = { + "r2": { + "bgp": { + "local_as": 200, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 100, "ibgp": 100, "local": 100} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 100, "ibgp": 100, "local": 100} + } + }, + }, + } + }, + "r3": { + "bgp": { + "local_as": 300, + "address_family": { + "ipv4": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + "ipv6": { + "unicast": { + "distance": {"ebgp": 200, "ibgp": 200, "local": 200} + } + }, + }, + } + }, + } + + result = create_router_bgp(tgen, topo, input_dict_1) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that inspite of AD values, always lowest MED value is getting " + "selected at destination router R1" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def 
test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_by_changing_MED_values_p1( + request, +): + """ + Verify the BGP always compare MED functionality in between eBGP Peers by changing MED values in middle routers + """ + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + tgen = get_topogen() + if tgen.routers_have_failure(): + check_router_status(tgen) + reset_config_on_routers(tgen) + initial_configuration(tgen, tc_name) + + step( + "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config" + ) + step( + "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following " + "commands and verify that best path chosen by lowest MED value" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'multi-path as-path relax' command at R1.") + configure_bgp = { + "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}} + } + result = create_router_bgp(tgen, topo, configure_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'multi-path as-path relax' command, " + "its also chooses lowest MED to reach destination." + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2]) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'bgp always-compare-med' command at R1.") + input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}} + result = create_router_bgp(tgen, topo, input_dict_r1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Change the MED value 150 in R2 router.") + input_dict = {"r2": {"route_maps": ["RMAP_MED_R2"]}} + result = delete_route_maps(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_dict_2 = { 
+ "r2": { + "route_maps": { + "RMAP_MED_R2": [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_ls_r2_{}".format(addr_type) + } + }, + "set": {"med": 150}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_2) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that after changing MED, its chooses lowest MED value path") + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Change the MED value 100 in R3 router.") + input_dict = {"r3": {"route_maps": ["RMAP_MED_R3"]}} + result = delete_route_maps(tgen, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_dict_3 = { + "r3": { + "route_maps": { + "RMAP_MED_R3": [ + { + "action": "permit", + "match": { + addr_type: { + "prefix_lists": "pf_ls_r3_{}".format(addr_type) + } + }, + "set": {"med": 100}, + } + ] + } + } + } + result = create_route_maps(tgen, input_dict_3) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify that after changing MED, its chooses lowest MED value path") + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_verify_bgp_always_compare_med_functionality_by_restarting_daemons_clear_bgp_shut_neighbors_p1( + request, +): + """ + Verify that BGP Always compare MED functionality by restarting BGP, Zebra and FRR services and clear BGP and shutdown BGP neighbor + """ + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + tgen = get_topogen() + if tgen.routers_have_failure(): + check_router_status(tgen) + reset_config_on_routers(tgen) + initial_configuration(tgen, tc_name) + + step( + "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config" + ) + step( + "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following " + "commands and verify that best path chosen by lowest MED value" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + 
step("Configure 'multi-path as-path relax' command at R1.") + configure_bgp = { + "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}} + } + result = create_router_bgp(tgen, topo, configure_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'multi-path as-path relax' command, " + "its also chooses lowest MED to reach destination." + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2]) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'bgp always-compare-med' command at R1.") + input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}} + result = create_router_bgp(tgen, topo, input_dict_r1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Restart the BGPd/Zebra/FRR service on R1") + for daemon in ["bgpd", "zebra", "frr"]: + if daemon == "frr": + stop_router(tgen, "r1") + start_router(tgen, "r1") + else: + kill_router_daemons(tgen, "r1", daemon) + + step( + "Verify after restarting dameons and frr services, its chooses lowest MED value path" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Clear bgp on R1") + clear_bgp(tgen, None, "r1") + + step("Verify after clearing BGP, its chooses lowest MED value path") + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Perform BGP neighborship shut/no shut") + for action, keyword in zip([True, False], ["shut", "noshut"]): + 
for addr_type in ADDR_TYPES: + input_dict = { + "r1": { + "bgp": { + "address_family": { + addr_type: { + "unicast": { + "neighbor": { + "r3": { + "dest_link": {"r1": {"shutdown": action}} + } + } + } + } + } + } + } + } + result = create_router_bgp(tgen, topo, input_dict) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Verify after {} BGP, its chooses lowest MED value path".format(keyword)) + if action: + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + else: + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +def test_verify_bgp_always_compare_med_functionality_by_shut_noshut_interfaces_bw_bgp_neighbors_p1( + request, +): + """ + Verify BGP always compare MED functionality by performing shut/noshut on the interfaces in between BGP neighbors + """ + + # test case name + tc_name = request.node.name + write_test_header(tc_name) + + tgen = get_topogen() + if tgen.routers_have_failure(): + check_router_status(tgen) + reset_config_on_routers(tgen) + initial_configuration(tgen, tc_name) + + step( + "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config" + ) + step( + "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following " + "commands and verify that best path chosen by lowest MED value" + ) + + result = verify_bgp_convergence(tgen, topo) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'multi-path as-path relax' command at R1.") + configure_bgp = { + "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}} + } + result = create_router_bgp(tgen, topo, configure_bgp) + assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'multi-path as-path relax' command, " + "its also chooses lowest MED to reach destination." 
+ ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0] + nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2]) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + step("Configure 'bgp always-compare-med' command at R1.") + input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}} + result = create_router_bgp(tgen, topo, input_dict_r1) + assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + + step( + "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path" + ) + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + for action, keyword in zip([False, True], ["Shut", "No Shut"]): + step( + "{} the interface on the link between R3 & R4 and R2 & R4 routers".format( + keyword + ) + ) + intf2_4 = topo["routers"]["r2"]["links"]["r4"]["interface"] + intf3_4 = topo["routers"]["r3"]["links"]["r4"]["interface"] + for dut, intf in zip(["r2", "r3"], [intf2_4, intf3_4]): + shutdown_bringup_interface(tgen, dut, intf, action) + + for addr_type in ADDR_TYPES: + input_static_r1 = { + "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]} + } + nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0] + + if action: + result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh) + assert result is True, "Testcase {} : Failed \n Error: {}".format( + tc_name, result + ) + + else: + result = verify_bgp_rib( + tgen, addr_type, "r1", input_static_r1, expected=False + ) + assert ( + result is not True + ), "Testcase {} :Failed \n Routes are still present in BGP table\n Error {}".format( + tc_name, result + ) + + result = verify_rib( + tgen, addr_type, "r1", input_static_r1, next_hop=nh, expected=False + ) + assert ( + result is not True + ), "Testcase {} :Failed \n Routes are still present in FIB \n Error {}".format( + tc_name, result + ) + + write_test_footer(tc_name) + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) diff --git a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py b/tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py index 593a8d6417..593a8d6417 100644 --- a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py +++ b/tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py index b78a2f1052..cb25d63a36 100755 --- a/tests/topotests/conftest.py 
+++ b/tests/topotests/conftest.py @@ -4,6 +4,7 @@ Topotest conftest.py file. """ # pylint: disable=consider-using-f-string +import contextlib import glob import logging import os @@ -12,6 +13,7 @@ import resource import subprocess import sys import time +from pathlib import Path import lib.fixtures import pytest @@ -41,6 +43,30 @@ except (AttributeError, ImportError): pass +# Remove this and use munet version when we move to pytest_asyncio +@contextlib.contextmanager +def chdir(ndir, desc=""): + odir = os.getcwd() + os.chdir(ndir) + if desc: + logging.debug("%s: chdir from %s to %s", desc, odir, ndir) + try: + yield + finally: + if desc: + logging.debug("%s: chdir back from %s to %s", desc, ndir, odir) + os.chdir(odir) + + +@contextlib.contextmanager +def log_handler(basename, logpath): + topolog.logstart(basename, logpath) + try: + yield + finally: + topolog.logfinish(basename, logpath) + + def pytest_addoption(parser): """ Add topology-only option to the topology tester. This option makes pytest @@ -272,6 +298,20 @@ def check_for_memleaks(): @pytest.fixture(autouse=True, scope="module") +def module_autouse(request): + basename = get_test_logdir(request.node.nodeid, True) + logdir = Path(topotest.g_pytest_config.option.rundir) / basename + logpath = logdir / "exec.log" + + subprocess.check_call("mkdir -p -m 1777 {}".format(logdir), shell=True) + + with log_handler(basename, logpath): + sdir = os.path.dirname(os.path.realpath(request.fspath)) + with chdir(sdir, "module autouse fixture"): + yield + + +@pytest.fixture(autouse=True, scope="module") def module_check_memtest(request): yield if request.config.option.valgrind_memleaks: @@ -282,14 +322,19 @@ def module_check_memtest(request): check_for_memleaks() -def pytest_runtest_logstart(nodeid, location): - # location is (filename, lineno, testname) - topolog.logstart(nodeid, location, topotest.g_pytest_config.option.rundir) - - -def pytest_runtest_logfinish(nodeid, location): - # location is (filename, lineno, testname) - topolog.logfinish(nodeid, location) +# +# Disable per test function logging as FRR CI system can't handle it. 
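The chdir and log_handler context managers above, together with the module_autouse fixture, pair per-module log capture with running each test module from its own source directory. A minimal standalone sketch of the same nesting pattern (names and paths here are illustrative, not part of the patch):

import contextlib
import logging
import os
import tempfile

@contextlib.contextmanager
def pushd(ndir):
    # Switch into ndir for the duration of the with-block, then restore.
    odir = os.getcwd()
    os.chdir(ndir)
    try:
        yield
    finally:
        os.chdir(odir)

@contextlib.contextmanager
def file_logging(path):
    # Stand-in for topolog.logstart()/logfinish(): attach a FileHandler to the
    # root logger on entry and detach it again on exit.
    handler = logging.FileHandler(path, mode="w")
    logging.getLogger().addHandler(handler)
    try:
        yield
    finally:
        logging.getLogger().removeHandler(handler)
        handler.close()

with tempfile.TemporaryDirectory() as logdir:
    with file_logging(os.path.join(logdir, "exec.log")):
        with pushd(logdir):
            logging.getLogger().warning("cwd while module runs: %s", os.getcwd())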
+# +# @pytest.fixture(autouse=True, scope="function") +# def function_autouse(request): +# # For tests we actually use the logdir name as the logfile base +# logbase = get_test_logdir(nodeid=request.node.nodeid, module=False) +# logbase = os.path.join(topotest.g_pytest_config.option.rundir, logbase) +# logpath = Path(logbase) +# path = Path(f"{logpath.parent}/exec-{logpath.name}.log") +# subprocess.check_call("mkdir -p -m 1777 {}".format(logpath.parent), shell=True) +# with log_handler(request.node.nodeid, path): +# yield @pytest.hookimpl(hookwrapper=True) @@ -340,8 +385,10 @@ def pytest_configure(config): os.environ["PYTEST_TOPOTEST_WORKER"] = "" is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no" is_worker = False + wname = "" else: - os.environ["PYTEST_TOPOTEST_WORKER"] = os.environ["PYTEST_XDIST_WORKER"] + wname = os.environ["PYTEST_XDIST_WORKER"] + os.environ["PYTEST_TOPOTEST_WORKER"] = wname is_xdist = True is_worker = True @@ -375,6 +422,16 @@ def pytest_configure(config): if not config.getoption("--log-file") and not config.getini("log_file"): config.option.log_file = os.path.join(rundir, "exec.log") + # Handle pytest-xdist each worker get's it's own top level log file + # `exec-worker-N.log` + if wname: + wname = wname.replace("gw", "worker-") + cpath = Path(config.option.log_file).absolute() + config.option.log_file = f"{cpath.parent}/{cpath.stem}-{wname}{cpath.suffix}" + elif is_xdist: + cpath = Path(config.option.log_file).absolute() + config.option.log_file = f"{cpath.parent}/{cpath.stem}-xdist{cpath.suffix}" + # Turn on live logging if user specified verbose and the config has a CLI level set if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"): if config.getoption("--log-cli-level", None) is None: @@ -433,6 +490,10 @@ def pytest_configure(config): @pytest.fixture(autouse=True, scope="session") def setup_session_auto(): + # Aligns logs nicely + logging.addLevelName(logging.WARNING, " WARN") + logging.addLevelName(logging.INFO, " INFO") + if "PYTEST_TOPOTEST_WORKER" not in os.environ: is_worker = False elif not os.environ["PYTEST_TOPOTEST_WORKER"]: diff --git a/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py b/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py index 85600beb0e..c81f63942b 100755 --- a/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py +++ b/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py @@ -38,6 +38,7 @@ import sys import pytest import json import tempfile +from copy import deepcopy from functools import partial # Save the Current Working Directory to find configuration files. 
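The pytest_configure() hunk above gives each pytest-xdist worker its own top-level log file named exec-worker-N.log. A small illustrative helper showing the same renaming (a hypothetical function, not part of the patch):

from pathlib import Path

def worker_log_file(log_file, wname):
    # "gw0" -> "worker-0"; /rundir/exec.log -> /rundir/exec-worker-0.log
    wname = wname.replace("gw", "worker-")
    cpath = Path(log_file).absolute()
    return f"{cpath.parent}/{cpath.stem}-{wname}{cpath.suffix}"

assert worker_log_file("/tmp/rundir/exec.log", "gw0") == "/tmp/rundir/exec-worker-0.log"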
@@ -111,8 +112,12 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.items(): - router.load_config( TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))) - router.load_config( TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))) + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) + ) tgen.start_router() @@ -130,6 +135,30 @@ def setup_testcase(msg): return tgen +def router_json_cmp_exact_filter(router, cmd, expected): + output = router.vtysh_cmd(cmd) + logger.info("{}: {}\n{}".format(router.name, cmd, output)) + + json_output = json.loads(output) + router_output = deepcopy(json_output) + + # filter out dynamic data from "show mpls table" + for label, data in json_output.items(): + if "1500" in label: + # filter out SR local labels + router_output.pop(label) + continue + nexthops = data.get("nexthops", []) + for i in range(len(nexthops)): + if "fe80::" in nexthops[i].get("nexthop"): + router_output.get(label).get("nexthops")[i].pop("nexthop") + elif "." in nexthops[i].get("nexthop"): + # IPv4, just checking the nexthop + router_output.get(label).get("nexthops")[i].pop("interface") + + return topotest.json_cmp(router_output, expected, exact=True) + + def router_compare_json_output(rname, command, reference): "Compare router JSON output" @@ -139,7 +168,9 @@ def router_compare_json_output(rname, command, reference): expected = json.loads(reference) # Run test function until we get an result. Wait at most 60 seconds. - test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected) + test_func = partial( + router_json_cmp_exact_filter, tgen.gears[rname], command, expected + ) _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) assert diff is None, assertmsg @@ -153,9 +184,13 @@ def router_compare_output(rname, command, reference): tgen = get_topogen() # Run test function until we get an result. Wait at most 60 seconds. 
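router_json_cmp_exact_filter() above strips data that varies between runs (link-local nexthops, dynamically allocated SR local labels) so an exact JSON compare stays meaningful. A self-contained illustration of that filtering on fabricated "show mpls table json"-style data:

import json
from copy import deepcopy

mpls = {
    "15000": {"nexthops": [{"nexthop": "fe80::1", "interface": "eth-rt2"}]},
    "16030": {"nexthops": [{"nexthop": "10.0.2.2", "interface": "eth-rt3"},
                           {"nexthop": "fe80::2", "interface": "eth-rt2"}]},
}
filtered = deepcopy(mpls)
for label, data in mpls.items():
    if "1500" in label:                        # dynamically allocated SR local label
        filtered.pop(label)
        continue
    for i, nh in enumerate(data.get("nexthops", [])):
        if "fe80::" in nh["nexthop"]:          # link-local address differs per run
            filtered[label]["nexthops"][i].pop("nexthop")
        elif "." in nh["nexthop"]:             # IPv4: only the nexthop matters
            filtered[label]["nexthops"][i].pop("interface")
print(json.dumps(filtered, indent=2))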
- test_func = partial(topotest.router_output_cmp, tgen.gears[rname], command, reference) + test_func = partial( + topotest.router_output_cmp, tgen.gears[rname], command, reference + ) result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5) - assertmsg = '{} command "{}" output mismatches the expected result:\n{}'.format(rname, command, diff) + assertmsg = '{} command "{}" output mismatches the expected result:\n{}'.format( + rname, command, diff + ) assert result, assertmsg @@ -176,11 +211,11 @@ def test_step1_mpls_lfib(): # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][1]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][1]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][1]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][1]["show_mpls_table.ref"] + ) # @@ -207,17 +242,18 @@ def test_step2_mpls_lfib(): router isis 1 flex-algo 203 no advertise-definition - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][2]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][2]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][2]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][2]["show_mpls_table.ref"] + ) # @@ -244,17 +280,18 @@ def test_step3_mpls_lfib(): router isis 1 flex-algo 203 no advertise-definition - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][3]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][3]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][3]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][3]["show_mpls_table.ref"] + ) # @@ -281,17 +318,18 @@ def test_step4_mpls_lfib(): router isis 1 flex-algo 203 advertise-definition - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][4]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][4]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][4]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][4]["show_mpls_table.ref"] + ) # @@ -319,17 +357,18 @@ def test_step5_mpls_lfib(): router isis 1 flex-algo 203 advertise-definition - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][5]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][5]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][5]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][5]["show_mpls_table.ref"] + ) # @@ -360,17 +399,18 @@ def test_step6_mpls_lfib(): router isis 1 flex-algo 203 no dataplane sr-mpls - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][6]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", 
outputs[rname][6]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][6]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][6]["show_mpls_table.ref"] + ) # @@ -400,17 +440,19 @@ def test_step7_mpls_lfib(): configure terminal router isis 1 no flex-algo 203 - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][7]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][7]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][7]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][7]["show_mpls_table.ref"] + ) + # # Step 8 @@ -440,7 +482,8 @@ def test_step8_mpls_lfib(): advertise-definition affinity exclude-any green dataplane sr-mpls - """) + """ + ) tgen.gears["rt2"].vtysh_cmd( """ @@ -450,7 +493,8 @@ def test_step8_mpls_lfib(): advertise-definition affinity exclude-any green dataplane sr-mpls - """) + """ + ) tgen.gears["rt3"].vtysh_cmd( """ @@ -458,17 +502,18 @@ def test_step8_mpls_lfib(): router isis 1 flex-algo 203 dataplane sr-mpls - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][8]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][8]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][8]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][8]["show_mpls_table.ref"] + ) # @@ -494,17 +539,18 @@ def test_step9_mpls_lfib(): router isis 1 no segment-routing prefix 1.1.1.1/32 algorithm 203 index 301 no segment-routing prefix 2001:db8:1000::1/128 algorithm 203 index 1301 - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][9]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][9]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][9]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][9]["show_mpls_table.ref"] + ) # @@ -530,17 +576,18 @@ def test_step10_mpls_lfib(): router isis 1 segment-routing prefix 1.1.1.1/32 algorithm 203 index 301 segment-routing prefix 2001:db8:1000::1/128 algorithm 203 index 1301 - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][10]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][10]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - outputs[rname][10]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][10]["show_mpls_table.ref"] + ) # @@ -565,17 +612,18 @@ def test_step11_mpls_lfib(): router isis 1 segment-routing prefix 1.1.1.1/32 algorithm 203 index 311 segment-routing prefix 2001:db8:1000::1/128 algorithm 203 index 1311 - """) + """ + ) # For Developers # tgen.mininet_cli() for rname in ["rt1", "rt2", "rt3"]: router_compare_output( - rname, "show isis flex-algo", - outputs[rname][11]["show_isis_flex_algo.ref"]) + rname, "show isis flex-algo", outputs[rname][11]["show_isis_flex_algo.ref"] + ) router_compare_json_output( - rname, "show mpls table json", - 
outputs[rname][11]["show_mpls_table.ref"]) + rname, "show mpls table json", outputs[rname][11]["show_mpls_table.ref"] + ) if __name__ == "__main__": diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py index 0bd9408c28..21d4567d6b 100644 --- a/tests/topotests/lib/bgp.py +++ b/tests/topotests/lib/bgp.py @@ -50,6 +50,7 @@ def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config "bgp": { "local_as": "200", "router_id": "22.22.22.22", + "bgp_always_compare_med": True, "graceful-restart": { "graceful-restart": True, "preserve-fw-state": True, @@ -343,6 +344,13 @@ def __create_bgp_global(tgen, input_dict, router, build=False): config_data.append(cmd) + if "bgp_always_compare_med" in bgp_data: + bgp_always_compare_med = bgp_data["bgp_always_compare_med"] + if bgp_always_compare_med == True: + config_data.append("bgp always-compare-med") + elif bgp_always_compare_med == False: + config_data.append("no bgp always-compare-med") + logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return config_data diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index a85b86668c..5d37b062ac 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -33,6 +33,7 @@ from lib.topogen import TopoRouter, get_topogen from lib.topolog import get_logger, logger from lib.topotest import frr_unicode, interface_set_status, version_cmp from lib import topotest +from munet.testing.util import pause_test FRRCFG_FILE = "frr_json.conf" FRRCFG_BKUP_FILE = "frr_json_initial.conf" @@ -2069,6 +2070,8 @@ def step(msg, reset=False): * ` msg` : Step message body. * `reset` : Reset step count to 1 when set to True. """ + if bool(topotest.g_pytest_config.get_option("--pause")): + pause_test("before :" + msg) _step = Stepper() _step(msg, reset) diff --git a/tests/topotests/lib/mcast-tester.py b/tests/topotests/lib/mcast-tester.py index 8a8251010c..5efbecd5e5 100755 --- a/tests/topotests/lib/mcast-tester.py +++ b/tests/topotests/lib/mcast-tester.py @@ -11,6 +11,7 @@ for the multicast group we subscribed to. import argparse import json +import ipaddress import os import socket import struct @@ -35,13 +36,16 @@ def interface_name_to_index(name): def multicast_join(sock, ifindex, group, port): "Joins a multicast group." - mreq = struct.pack( - "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex - ) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.bind((group, port)) - sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) + + if ip_version == 4: + mreq = group.packed + struct.pack("@II", socket.INADDR_ANY, ifindex) + opt = socket.IP_ADD_MEMBERSHIP + else: + mreq = group.packed + struct.pack("@I", ifindex) + opt = socket.IPV6_JOIN_GROUP + sock.bind((str(group), port)) + sock.setsockopt(ip_proto, opt, mreq) # @@ -50,15 +54,14 @@ def multicast_join(sock, ifindex, group, port): parser = argparse.ArgumentParser(description="Multicast RX utility") parser.add_argument("group", help="Multicast IP") parser.add_argument("interface", help="Interface name") +parser.add_argument("--port", type=int, default=1000, help="port to send to") +parser.add_argument("--ttl", type=int, default=16, help="TTL/hops for sending packets") parser.add_argument("--socket", help="Point to topotest UNIX socket") parser.add_argument( "--send", help="Transmit instead of join with interval", type=float, default=0 ) args = parser.parse_args() -ttl = 16 -port = 1000 - # Get interface index/validate. 
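The reworked multicast_join() above builds the membership request per address family: struct ip_mreqn for IPv4 (group, INADDR_ANY, ifindex) and struct ipv6_mreq for IPv6 (group, ifindex). A hedged, standalone sketch of the same packing (socket constant availability can differ by platform):

import ipaddress
import socket
import struct

def join_request(group_str, ifindex):
    # Return (level, option, packed mreq) mirroring the logic above.
    group = ipaddress.ip_address(group_str)
    if group.version == 4:
        mreq = group.packed + struct.pack("@II", socket.INADDR_ANY, ifindex)
        return socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq
    mreq = group.packed + struct.pack("@I", ifindex)
    return socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq

print(join_request("239.100.100.100", 2))
print(join_request("ffaa::1", 2))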
ifindex = interface_name_to_index(args.interface) if ifindex is None: @@ -85,7 +88,12 @@ else: # Set topotest socket non blocking so we can multiplex the main loop. toposock.setblocking(False) -msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) +args.group = ipaddress.ip_address(args.group) +ip_version = args.group.version +ip_family = socket.AF_INET if ip_version == 4 else socket.AF_INET6 +ip_proto = socket.IPPROTO_IP if ip_version == 4 else socket.IPPROTO_IPV6 + +msock = socket.socket(ip_family, socket.SOCK_DGRAM, socket.IPPROTO_UDP) if args.send > 0: # Prepare multicast bit in that interface. msock.setsockopt( @@ -93,12 +101,18 @@ if args.send > 0: 25, struct.pack("%ds" % len(args.interface), args.interface.encode("utf-8")), ) - # Set packets TTL. - msock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl)) + + # Set packets TTL/hops. + ttlopt = socket.IP_MULTICAST_TTL if ip_version == 4 else socket.IPV6_MULTICAST_HOPS + if ip_version == 4: + msock.setsockopt(ip_proto, ttlopt, struct.pack("B", args.ttl)) + else: + msock.setsockopt(ip_proto, ttlopt, struct.pack("I", args.ttl)) + # Block to ensure packet send. msock.setblocking(True) else: - multicast_join(msock, ifindex, args.group, port) + multicast_join(msock, ifindex, args.group, args.port) def should_exit(): @@ -120,7 +134,7 @@ def should_exit(): counter = 0 while not should_exit(): if args.send > 0: - msock.sendto(b"test %d" % counter, (args.group, port)) + msock.sendto(b"test %d" % counter, (str(args.group), args.port)) counter += 1 time.sleep(args.send) diff --git a/tests/topotests/lib/micronet_compat.py b/tests/topotests/lib/micronet_compat.py index d648a120ab..b348c85988 100644 --- a/tests/topotests/lib/micronet_compat.py +++ b/tests/topotests/lib/micronet_compat.py @@ -121,7 +121,7 @@ class Mininet(BaseMunet): g_mnet_inst = None - def __init__(self, rundir=None, pytestconfig=None): + def __init__(self, rundir=None, pytestconfig=None, logger=None): """ Create a Micronet. """ @@ -140,7 +140,7 @@ class Mininet(BaseMunet): # os.umask(0) super(Mininet, self).__init__( - pid=False, rundir=rundir, pytestconfig=pytestconfig + pid=False, rundir=rundir, pytestconfig=pytestconfig, logger=logger ) # From munet/munet/native.py diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py index e26bdb3af3..f69718a5bd 100644 --- a/tests/topotests/lib/pim.py +++ b/tests/topotests/lib/pim.py @@ -1,35 +1,35 @@ +# -*- coding: utf-8 eval: (blacken-mode 1) -*- # SPDX-License-Identifier: ISC # Copyright (c) 2019 by VMware, Inc. ("VMware") # Used Copyright (c) 2018 by Network Device Education Foundation, Inc. # ("NetDEF") in this file. 
import datetime +import functools import os import re import sys import traceback -import functools from copy import deepcopy from time import sleep -from lib import topotest - # Import common_config to use commomnly used APIs from lib.common_config import ( - create_common_configurations, HostApplicationHelper, InvalidCLIError, create_common_configuration, - InvalidCLIError, + create_common_configurations, + get_frr_ipv6_linklocal, retry, run_frr_cmd, validate_ip_address, - get_frr_ipv6_linklocal, ) from lib.micronet import get_exec_path from lib.topolog import logger from lib.topotest import frr_unicode +from lib import topotest + #### CWD = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 0e685a97b0..6ddd223e25 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -84,7 +84,7 @@ def get_exabgp_cmd(commander=None): """Return the command to use for ExaBGP version < 4.""" if commander is None: - commander = Commander("topogen") + commander = Commander("exabgp", logger=logging.getLogger("exabgp")) def exacmd_version_ok(exacmd): logger.debug("checking %s for exabgp < version 4", exacmd) @@ -107,7 +107,7 @@ def get_exabgp_cmd(commander=None): exacmd = py2_path + " -m exabgp" if exacmd_version_ok(exacmd): return exacmd - py2_path = commander.get_exec_path("python") + py2_path = commander.get_exec_path("python") if py2_path: exacmd = py2_path + " -m exabgp" if exacmd_version_ok(exacmd): @@ -209,7 +209,11 @@ class Topogen(object): # Mininet(Micronet) to build the actual topology. assert not inspect.isclass(topodef) - self.net = Mininet(rundir=self.logdir, pytestconfig=topotest.g_pytest_config) + self.net = Mininet( + rundir=self.logdir, + pytestconfig=topotest.g_pytest_config, + logger=topolog.get_logger("mu", log_level="debug"), + ) # Adjust the parent namespace topotest.fix_netns_limits(self.net) @@ -1090,8 +1094,9 @@ class TopoSwitch(TopoGear): # pylint: disable=too-few-public-methods def __init__(self, tgen, name, **params): + logger = topolog.get_logger(name, log_level="debug") super(TopoSwitch, self).__init__(tgen, name, **params) - tgen.net.add_switch(name) + tgen.net.add_switch(name, logger=logger) def __str__(self): gear = super(TopoSwitch, self).__str__() diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py index b501670789..aceb2cb031 100644 --- a/tests/topotests/lib/topolog.py +++ b/tests/topotests/lib/topolog.py @@ -15,13 +15,6 @@ This file defines our logging abstraction. import logging import os -import subprocess -import sys - -if sys.version_info[0] > 2: - pass -else: - pass try: from xdist import is_xdist_controller @@ -31,8 +24,6 @@ except ImportError: return False -BASENAME = "topolog" - # Helper dictionary to convert Topogen logging levels to Python's logging. 
DEBUG_TOPO2LOGGING = { "debug": logging.DEBUG, @@ -42,13 +33,43 @@ DEBUG_TOPO2LOGGING = { "error": logging.ERROR, "critical": logging.CRITICAL, } -FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s: %(name)s: %(message)s" +FORMAT = "%(asctime)s %(levelname)s: %(name)s: %(message)s" handlers = {} -logger = logging.getLogger("topolog") +logger = logging.getLogger("topo") + + +# Remove this and use munet version when we move to pytest_asyncio +def get_test_logdir(nodeid=None, module=False): + """Get log directory relative pathname.""" + xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "") + mode = os.getenv("PYTEST_XDIST_MODE", "no") + + # nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running + # may be missing "::testname" if module is True + if not nodeid: + nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0] + + cur_test = nodeid.replace("[", "_").replace("]", "_") + if module: + idx = cur_test.rfind("::") + path = cur_test if idx == -1 else cur_test[:idx] + testname = "" + else: + path, testname = cur_test.split("::") + testname = testname.replace("/", ".") + path = path[:-3].replace("/", ".") + # We use different logdir paths based on how xdist is running. + if mode == "each": + if module: + return os.path.join(path, "worker-logs", xdist_worker) + return os.path.join(path, testname, xdist_worker) + assert mode in ("no", "load", "loadfile", "loadscope"), f"Unknown dist mode {mode}" + return path if module else os.path.join(path, testname) -def set_handler(l, target=None): + +def set_handler(lg, target=None): if target is None: h = logging.NullHandler() else: @@ -59,106 +80,81 @@ def set_handler(l, target=None): h.setFormatter(logging.Formatter(fmt=FORMAT)) # Don't filter anything at the handler level h.setLevel(logging.DEBUG) - l.addHandler(h) + lg.addHandler(h) return h -def set_log_level(l, level): +def set_log_level(lg, level): "Set the logging level." # Messages sent to this logger only are created if this level or above. log_level = DEBUG_TOPO2LOGGING.get(level, level) - l.setLevel(log_level) + lg.setLevel(log_level) -def get_logger(name, log_level=None, target=None): - l = logging.getLogger("{}.{}".format(BASENAME, name)) +def reset_logger(lg): + while lg.handlers: + x = lg.handlers.pop() + x.close() + lg.removeHandler(x) - if log_level is not None: - set_log_level(l, log_level) - if target is not None: - set_handler(l, target) - - return l - - -# nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running - - -def get_test_logdir(nodeid=None): - """Get log directory relative pathname.""" - xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "") - mode = os.getenv("PYTEST_XDIST_MODE", "no") +def get_logger(name, log_level=None, target=None, reset=True): + lg = logging.getLogger(name) - if not nodeid: - nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0] + if reset: + reset_logger(lg) - cur_test = nodeid.replace("[", "_").replace("]", "_") - path, testname = cur_test.split("::") - path = path[:-3].replace("/", ".") + if log_level is not None: + set_log_level(lg, log_level) - # We use different logdir paths based on how xdist is running. 
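The relocated get_test_logdir() above maps a pytest nodeid to a relative log directory, with the module flag dropping the "::testname" component. The expected values below are derived from the logic shown, assuming PYTEST_XDIST_MODE is unset or "no"; they are not captured from a run:

from lib.topolog import get_test_logdir  # import path as used elsewhere in the tree

nodeid = "all_protocol_startup/test_all_protocol_startup.py::test_router_running"
print(get_test_logdir(nodeid))
# -> all_protocol_startup.test_all_protocol_startup/test_router_running
print(get_test_logdir(nodeid, module=True))
# -> all_protocol_startup.test_all_protocol_startup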
- if mode == "each": - return os.path.join(path, testname, xdist_worker) - elif mode == "load": - return os.path.join(path, testname) - else: - assert ( - mode == "no" or mode == "loadfile" or mode == "loadscope" - ), "Unknown dist mode {}".format(mode) + if target is not None: + set_handler(lg, target) - return path + return lg -def logstart(nodeid, location, rundir): +def logstart(nodeid, logpath): """Called from pytest before module setup.""" - - mode = os.getenv("PYTEST_XDIST_MODE", "no") worker = os.getenv("PYTEST_TOPOTEST_WORKER", "") + wstr = f" on worker {worker}" if worker else "" + handler_id = nodeid + worker + logpath = logpath.absolute() - # We only per-test log in the workers (or non-dist) - if not worker and mode != "no": - return + logging.debug("logstart: adding logging for %s%s at %s", nodeid, wstr, logpath) + root_logger = logging.getLogger() + handler = logging.FileHandler(logpath, mode="w") + handler.setFormatter(logging.Formatter(FORMAT)) - handler_id = nodeid + worker - assert handler_id not in handlers - - rel_log_dir = get_test_logdir(nodeid) - exec_log_dir = os.path.join(rundir, rel_log_dir) - subprocess.check_call( - "mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True - ) - exec_log_path = os.path.join(exec_log_dir, "exec.log") - - # Add test based exec log handler - h = set_handler(logger, exec_log_path) - handlers[handler_id] = h - - if worker: - logger.info( - "Logging on worker %s for %s into %s", worker, handler_id, exec_log_path - ) - else: - logger.info("Logging for %s into %s", handler_id, exec_log_path) + root_logger.addHandler(handler) + handlers[handler_id] = handler + logging.debug("logstart: added logging for %s%s at %s", nodeid, wstr, logpath) + return handler -def logfinish(nodeid, location): - """Called from pytest after module teardown.""" - # This function may not be called if pytest is interrupted. 
+def logfinish(nodeid, logpath): + """Called from pytest after module teardown.""" worker = os.getenv("PYTEST_TOPOTEST_WORKER", "") - handler_id = nodeid + worker + wstr = f" on worker {worker}" if worker else "" + + root_logger = logging.getLogger() - if handler_id in handlers: - # Remove test based exec log handler - if worker: - logger.info("Closing logs for %s", handler_id) + handler_id = nodeid + worker + if handler_id not in handlers: + logging.critical("can't find log handler to remove") + else: + logging.debug( + "logfinish: removing logging for %s%s at %s", nodeid, wstr, logpath + ) h = handlers[handler_id] - logger.removeHandler(handlers[handler_id]) + root_logger.removeHandler(h) h.flush() h.close() del handlers[handler_id] + logging.debug( + "logfinish: removed logging for %s%s at %s", nodeid, wstr, logpath + ) console_handler = set_handler(logger, None) diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py index 0e96921b7f..845d3e3b53 100644 --- a/tests/topotests/lib/topotest.py +++ b/tests/topotests/lib/topotest.py @@ -24,6 +24,7 @@ import subprocess import sys import tempfile import time +import logging from collections.abc import Mapping from copy import deepcopy @@ -38,7 +39,7 @@ g_pytest_config = None def get_logs_path(rundir): - logspath = topolog.get_test_logdir() + logspath = topolog.get_test_logdir(module=True) return os.path.join(rundir, logspath) @@ -1137,7 +1138,9 @@ def _sysctl_assure(commander, variable, value): def sysctl_atleast(commander, variable, min_value, raises=False): try: if commander is None: - commander = micronet.Commander("topotest") + logger = logging.getLogger("topotest") + commander = micronet.Commander("sysctl", logger=logger) + return _sysctl_atleast(commander, variable, min_value) except subprocess.CalledProcessError as error: logger.warning( @@ -1153,7 +1156,8 @@ def sysctl_atleast(commander, variable, min_value, raises=False): def sysctl_assure(commander, variable, value, raises=False): try: if commander is None: - commander = micronet.Commander("topotest") + logger = logging.getLogger("topotest") + commander = micronet.Commander("sysctl", logger=logger) return _sysctl_assure(commander, variable, value) except subprocess.CalledProcessError as error: logger.warning( diff --git a/tests/topotests/mgmt_startup/test_bigconf.py b/tests/topotests/mgmt_startup/test_bigconf.py index 465f646b6e..4f46c8fabd 100644 --- a/tests/topotests/mgmt_startup/test_bigconf.py +++ b/tests/topotests/mgmt_startup/test_bigconf.py @@ -42,8 +42,10 @@ def tgen(request): tgen = Topogen(topodef, request.module.__name__) tgen.start_topology() + prologue = open(f"{CWD}/r1/mgmtd.conf").read() + confpath = f"{tgen.gears['r1'].gearlogdir}/r1-late-big.conf" - start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath) + start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath, prologue) ROUTE_RANGE[0] = start ROUTE_RANGE[1] = end @@ -69,10 +71,10 @@ def test_staticd_latestart(tgen): check_vtysh_up(r1) logging.info("r1: vtysh connected after %ss", track.elapsed()) - result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=20) + result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60) assert result is None logging.info("r1: first route installed after %ss", track.elapsed()) - result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=20) + result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60) assert result is None logging.info("r1: last route installed after %ss", track.elapsed()) diff --git 
a/tests/topotests/mgmt_startup/test_late_bigconf.py b/tests/topotests/mgmt_startup/test_late_bigconf.py index ac7ac57cf8..0b5bf38d10 100644 --- a/tests/topotests/mgmt_startup/test_late_bigconf.py +++ b/tests/topotests/mgmt_startup/test_late_bigconf.py @@ -42,8 +42,10 @@ def tgen(request): tgen = Topogen(topodef, request.module.__name__) tgen.start_topology() + prologue = open(f"{CWD}/r1/mgmtd.conf").read() + confpath = f"{tgen.gears['r1'].gearlogdir}/r1-late-big.conf" - start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath) + start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath, prologue) ROUTE_RANGE[0] = start ROUTE_RANGE[1] = end @@ -68,15 +70,29 @@ def test_staticd_latestart(tgen): check_vtysh_up(r1) logging.info("r1: vtysh connected after %ss", track.elapsed()) - result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=20, expected=False) + result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60, expected=False) assert result is not None, "first route present and should not be" - result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=20, expected=False) + result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60, expected=False) assert result is not None, "last route present and should not be" step("Starting staticd") + t2 = Timeout(0) r1.startDaemons(["staticd"]) result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60) assert result is None, "first route not present and should be" - result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=20) + logging.info("r1: elapsed time for first route %ss", t2.elapsed()) + + count = 0 + ocount = 0 + while count < ROUTE_COUNT: + rc, o, e = r1.net.cmd_status("ip -o route | wc -l") + if not rc: + if count > ocount + 100: + ocount = count + logging.info("r1: elapsed time for %d routes %s", count, t2.elapsed()) + count = int(o) + + result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=1200) assert result is None, "last route not present and should be" + logging.info("r1: elapsed time for last route %ss", t2.elapsed()) diff --git a/tests/topotests/mgmt_startup/util.py b/tests/topotests/mgmt_startup/util.py index 87a2ad442e..e366351326 100644 --- a/tests/topotests/mgmt_startup/util.py +++ b/tests/topotests/mgmt_startup/util.py @@ -50,11 +50,13 @@ def get_ip_networks(super_prefix, count): return tuple(network.subnets(count_log2))[0:count] -def write_big_route_conf(super_prefix, count, confpath): +def write_big_route_conf(super_prefix, count, confpath, prologue=""): start = None end = None with open(confpath, "w+", encoding="ascii") as f: + if prologue: + f.write(prologue + "\n") for net in get_ip_networks(super_prefix, count): end = net if not start: diff --git a/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py b/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py index 2c4fb4e998..826d6e2941 100644 --- a/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py +++ b/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# -*- coding: utf-8 eval: (blacken-mode 1) -*- # SPDX-License-Identifier: ISC # # Copyright (c) 2023 by VMware, Inc. ("VMware") @@ -20,52 +20,31 @@ Following tests are covered: 5. Verify static MLD groups after removing and adding MLD config """ -import os import sys import time -import pytest - -# Save the Current Working Directory to find configuration files. 
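write_big_route_conf() now takes an optional prologue written ahead of the generated routes, which the mgmt_startup fixtures use to seed the daemon's base config. A hedged usage sketch (file paths and the import location are assumptions):

from util import write_big_route_conf  # module local to tests/topotests/mgmt_startup

prologue = open("r1/mgmtd.conf").read()
first, last = write_big_route_conf("10.0.0.0/8", 100, "/tmp/r1-late-big.conf", prologue)
print("first route:", first, "last route:", last)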
-CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, "../")) -sys.path.append(os.path.join(CWD, "../lib/")) - -# Required to instantiate the topology builder class. - -# pylint: disable=C0413 -# Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen -from re import search as re_search -from re import findall as findall +import pytest from lib.common_config import ( + reset_config_on_routers, start_topology, - write_test_header, - write_test_footer, step, - kill_router_daemons, - start_router_daemons, - reset_config_on_routers, - do_countdown, - apply_raw_config, - socat_send_pim6_traffic, + write_test_footer, + write_test_header, ) - from lib.pim import ( - create_pim_config, - verify_mroutes, - verify_upstream_iif, - verify_mld_groups, - clear_pim6_mroute, McastTesterHelper, - verify_pim_neighbors, create_mld_config, - verify_mld_groups, + create_pim_config, verify_local_mld_groups, + verify_mld_groups, + verify_mroutes, + verify_pim_neighbors, verify_pim_rp_info, + verify_upstream_iif, ) -from lib.topolog import logger +from lib.topogen import Topogen, get_topogen from lib.topojson import build_config_from_json +from lib.topolog import logger r1_r2_links = [] r1_r3_links = [] @@ -131,7 +110,7 @@ def setup_module(mod): logger.info("Running setup_module to create topology") # This function initiates the topology build with Topogen... - json_file = "{}/multicast_mld_local_join.json".format(CWD) + json_file = "multicast_mld_local_join.json" tgen = Topogen(json_file, mod.__name__) global topo topo = tgen.json_topo @@ -151,6 +130,9 @@ def setup_module(mod): result = verify_pim_neighbors(tgen, topo) assert result is True, " Verify PIM neighbor: Failed Error: {}".format(result) + global app_helper + app_helper = McastTesterHelper(tgen) + logger.info("Running setup_module() done") @@ -161,6 +143,8 @@ def teardown_module(): tgen = get_topogen() + app_helper.cleanup() + # Stop toplogy and Remove tmp files tgen.stop_topology() @@ -265,6 +249,8 @@ def test_mroute_with_mld_local_joins_p0(request): reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable the PIM on all the interfaces of R1, R2, R3, R4") step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected") step("Enable the MLD on R11 interfac of R1 and configure local mld groups") @@ -330,9 +316,7 @@ def test_mroute_with_mld_local_joins_p0(request): assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)") - intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0] - intf = topo["routers"]["i4"]["links"]["r4"]["interface"] - result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( @@ -458,6 +442,8 @@ def test_remove_add_mld_local_joins_p1(request): reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable the PIM on all the interfaces of R1, R2, R3, R4") step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected") step("Enable the MLD on R11 interfac of R1 and configure local mld groups") @@ -517,9 +503,7 @@ def test_remove_add_mld_local_joins_p1(request): assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)") - intf_ip = 
topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0] - intf = topo["routers"]["i4"]["links"]["r4"]["interface"] - result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( @@ -710,6 +694,8 @@ def test_remove_add_mld_config_with_local_joins_p1(request): reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable the PIM on all the interfaces of R1, R2, R3, R4") step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected") step("Enable the MLD on R11 interfac of R1 and configure local mld groups") @@ -759,9 +745,7 @@ def test_remove_add_mld_config_with_local_joins_p1(request): assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)") - intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0] - intf = topo["routers"]["i4"]["links"]["r4"]["interface"] - result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( diff --git a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py index 87b04b41be..aff623705c 100644 --- a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py +++ b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# -*- coding: utf-8 eval: (blacken-mode 1) -*- # SPDX-License-Identifier: ISC # @@ -30,61 +30,40 @@ should get update accordingly data traffic """ -import os +import datetime import sys -import json import time -import datetime -import pytest - -# Save the Current Working Directory to find configuration files. -CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, "../")) -sys.path.append(os.path.join(CWD, "../lib/")) - -# Required to instantiate the topology builder class. 
- -# pylint: disable=C0413 -# Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen +import pytest from lib.common_config import ( - start_topology, - write_test_header, - write_test_footer, - step, + get_frr_ipv6_linklocal, + required_linux_kernel_version, reset_config_on_routers, shutdown_bringup_interface, - start_router, - stop_router, - create_static_routes, - required_linux_kernel_version, - socat_send_mld_join, - socat_send_pim6_traffic, - get_frr_ipv6_linklocal, - kill_socat, + start_topology, + step, + write_test_footer, + write_test_header, ) -from lib.bgp import create_router_bgp from lib.pim import ( - create_pim_config, + McastTesterHelper, + clear_pim6_mroute, create_mld_config, + create_pim_config, + verify_mld_config, verify_mld_groups, + verify_mroute_summary, verify_mroutes, - clear_pim6_interface_traffic, - verify_upstream_iif, - clear_pim6_mroute, verify_pim_interface_traffic, - verify_pim_state, - McastTesterHelper, verify_pim_join, - verify_mroute_summary, verify_pim_nexthop, + verify_pim_state, verify_sg_traffic, - verify_mld_config, + verify_upstream_iif, ) - -from lib.topolog import logger +from lib.topogen import Topogen, get_topogen from lib.topojson import build_config_from_json +from lib.topolog import logger # Global variables GROUP_RANGE = "ff00::/8" @@ -141,8 +120,7 @@ def setup_module(mod): logger.info("Running setup_module to create topology") - testdir = os.path.dirname(os.path.realpath(__file__)) - json_file = "{}/multicast_pim6_sm_topo1.json".format(testdir) + json_file = "multicast_pim6_sm_topo1.json" tgen = Topogen(json_file, mod.__name__) global topo topo = tgen.json_topo @@ -159,6 +137,9 @@ def setup_module(mod): # Creating configuration from JSON build_config_from_json(tgen, tgen.json_topo) + global app_helper + app_helper = McastTesterHelper(tgen) + logger.info("Running setup_module() done") @@ -169,8 +150,7 @@ def teardown_module(): tgen = get_topogen() - # Clean up socat - kill_socat(tgen) + app_helper.cleanup() # Stop toplogy and Remove tmp files tgen.stop_topology() @@ -296,6 +276,8 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -334,9 +316,7 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request): step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) source = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] @@ -375,11 +355,7 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request): ) step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( @@ -532,11 +508,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request): assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", _MLD_JOIN_RANGE, intf, intf_ip - ) + result = app_helper.run_join("i1", _MLD_JOIN_RANGE, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("verify MLD joins received on r1") @@ -546,9 +518,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request): assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", _MLD_JOIN_RANGE, intf) + result = app_helper.run_traffic("i2", _MLD_JOIN_RANGE, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( @@ -561,11 +531,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request): result = create_mld_config(tgen, topo, input_dict) assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) - i5_r5 = topo["routers"]["i5"]["links"]["r5"]["interface"] - intf_ip = topo["routers"]["i5"]["links"]["r5"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i5", "UDP6-RECV", _MLD_JOIN_RANGE, i5_r5, intf_ip - ) + result = app_helper.run_join("i5", _MLD_JOIN_RANGE, "r5") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("FRR1 has 10 (*.G) and 10 (S,G) verify using 'show ipv6 mroute'") @@ -682,6 +648,8 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -708,11 +676,7 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request): step("Enable mld on FRR1 interface and send mld join ") step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("verify mld groups received on R1") @@ -722,9 +686,7 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request): assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) step("Send multicast traffic from FRR3 to ffaa::1-5 receivers") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("shut the direct link to R1 ") @@ -841,6 +803,8 @@ def test_verify_mroute_when_RP_unreachable_p1(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -868,17 +832,11 @@ def test_verify_mroute_when_RP_unreachable_p1(request): step("Enable mld on FRR1 interface and send mld join ffaa::1-5") step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("Send multicast traffic from FRR3 to ffaa::1-5 receivers") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("Configure one MLD interface on FRR3 node and send MLD" " join (ffcc::1)") @@ -888,11 +846,7 @@ def test_verify_mroute_when_RP_unreachable_p1(request): assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i8"]["links"]["r3"]["interface"] - intf_ip = topo["routers"]["i8"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i8", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i8", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("verify MLD groups received ") @@ -975,16 +929,14 @@ def test_modify_mld_query_timer_p0(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i8"]["links"]["r3"]["interface"] - intf_ip = topo["routers"]["i8"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i8", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i8", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("Enable MLD on receiver interface") @@ -1023,9 +975,7 @@ def test_modify_mld_query_timer_p0(request): assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) step("Send multicast traffic from FRR3 to ffaa::1-5 receivers") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( @@ -1158,17 +1108,15 @@ def test_modify_mld_max_query_response_timer_p0(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. if tgen.routers_have_failure(): pytest.skip(tgen.errors) step("Enable mld on FRR1 interface and send MLD join") step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"] @@ -1214,9 +1162,7 @@ def test_modify_mld_max_query_response_timer_p0(request): assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) step("Send multicast traffic from FRR3 to ffaa::1-5 receivers") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( @@ -1431,6 +1377,8 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -1438,9 +1386,7 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request): step("send multicast traffic for group range ffaa::1-5") step("Send multicast traffic from FRR3 to ffaa::1-5 receivers") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("Configure static RP for group (ffaa::1) on r5") @@ -1464,11 +1410,7 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request): step("Enable mld on FRR1 interface and send MLD join") step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( diff --git a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py index 788a839918..767264a7c0 100644 --- a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py +++ b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# -*- coding: utf-8 eval: (blacken-mode 1) -*- # SPDX-License-Identifier: ISC # @@ -21,61 +21,31 @@ PIM nbr and mroute from FRR node different """ -import os import sys -import json import time -import datetime -import pytest - -# Save the Current Working Directory to find configuration files. -CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, "../")) -sys.path.append(os.path.join(CWD, "../lib/")) - -# Required to instantiate the topology builder class. 
- -# pylint: disable=C0413 -# Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen +import pytest from lib.common_config import ( - start_topology, - write_test_header, - write_test_footer, - step, + required_linux_kernel_version, reset_config_on_routers, shutdown_bringup_interface, - start_router, - stop_router, - create_static_routes, - required_linux_kernel_version, - socat_send_mld_join, - socat_send_pim6_traffic, - get_frr_ipv6_linklocal, - kill_socat, + start_topology, + step, + write_test_footer, + write_test_header, ) -from lib.bgp import create_router_bgp from lib.pim import ( + McastTesterHelper, + clear_pim6_mroute, create_pim_config, - create_mld_config, - verify_mld_groups, verify_mroutes, - clear_pim6_interface_traffic, - verify_upstream_iif, - clear_pim6_mroute, verify_pim_interface_traffic, - verify_pim_state, - McastTesterHelper, - verify_pim_join, - verify_mroute_summary, - verify_pim_nexthop, verify_sg_traffic, - verify_mld_config, + verify_upstream_iif, ) - -from lib.topolog import logger +from lib.topogen import Topogen, get_topogen from lib.topojson import build_config_from_json +from lib.topolog import logger # Global variables GROUP_RANGE = "ff00::/8" @@ -114,6 +84,16 @@ ASSERT_MSG = "Testcase {} : Failed Error: {}" pytestmark = [pytest.mark.pim6d] +@pytest.fixture(scope="function") +def app_helper(): + # helper = McastTesterHelper(get_topogen()) + # yield helepr + # helper.cleanup() + # Even better use contextmanager functionality: + with McastTesterHelper(get_topogen()) as ah: + yield ah + + def setup_module(mod): """ Sets up the pytest environment @@ -132,8 +112,7 @@ def setup_module(mod): logger.info("Running setup_module to create topology") - testdir = os.path.dirname(os.path.realpath(__file__)) - json_file = "{}/multicast_pim6_sm_topo1.json".format(testdir) + json_file = "multicast_pim6_sm_topo1.json" tgen = Topogen(json_file, mod.__name__) global topo topo = tgen.json_topo @@ -160,9 +139,6 @@ def teardown_module(): tgen = get_topogen() - # Clean up socat - kill_socat(tgen) - # Stop toplogy and Remove tmp files tgen.stop_topology() @@ -225,7 +201,7 @@ def verify_state_incremented(state_before, state_after): ##################################################### -def test_clear_mroute_and_verify_multicast_data_p0(request): +def test_clear_mroute_and_verify_multicast_data_p0(request, app_helper): """ Verify (*,G) and (S,G) entry populated again after clear the PIM nbr and mroute from FRR node @@ -237,6 +213,8 @@ def test_clear_mroute_and_verify_multicast_data_p0(request): # Creating configuration from JSON reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + # Don"t run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) @@ -266,18 +244,12 @@ def test_clear_mroute_and_verify_multicast_data_p0(request): ) step("send mld join (ffaa::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip - ) + result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5") - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf) + result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("Clear the mroute on r1, wait for 5 sec") @@ -457,7 +429,9 @@ def test_clear_mroute_and_verify_multicast_data_p0(request): write_test_footer(tc_name) -def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request): +def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0( + request, app_helper +): """ Verify SPT switchover working when RPT and SPT path is different @@ -498,11 +472,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request): assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) step("send mld join (ffbb::1-5, ffcc::1-5) to R1") - intf = topo["routers"]["i1"]["links"]["r1"]["interface"] - intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "i1", "UDP6-RECV", _MLD_JOIN_RANGE, intf, intf_ip - ) + result = app_helper.run_join("i1", _MLD_JOIN_RANGE, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("registerRx and registerStopTx value before traffic sent") @@ -518,9 +488,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request): step( "Send multicast traffic from FRR3 to all the receivers" "ffbb::1-5 , ffcc::1-5" ) - intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0] - intf = topo["routers"]["i2"]["links"]["r3"]["interface"] - result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", _MLD_JOIN_RANGE, intf) + result = app_helper.run_traffic("i2", _MLD_JOIN_RANGE, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step( diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py index 977cd477c8..23326337d6 100755 --- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py +++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# -*- coding: utf-8 eval: (blacken-mode 1) -*- # SPDX-License-Identifier: ISC # @@ -41,57 +41,36 @@ Test steps 8. Verify PIM6 join send towards the higher preferred RP 9. Verify PIM6 prune send towards the lower preferred RP """ - -import os import sys -import json import time -import pytest - -# Save the Current Working Directory to find configuration files. 
-CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, "../")) -sys.path.append(os.path.join(CWD, "../lib/")) - -# Required to instantiate the topology builder class. - -# pylint: disable=C0413 -# Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen +import pytest from lib.common_config import ( - start_topology, - write_test_header, - write_test_footer, + check_router_status, reset_config_on_routers, - step, shutdown_bringup_interface, - kill_router_daemons, - start_router_daemons, - create_static_routes, - check_router_status, - socat_send_mld_join, - socat_send_pim6_traffic, - kill_socat, + start_topology, + step, + write_test_footer, + write_test_header, ) from lib.pim import ( + McastTesterHelper, + clear_pim6_interface_traffic, create_pim_config, - verify_upstream_iif, + get_pim6_interface_traffic, verify_join_state_and_timer, + verify_mld_groups, verify_mroutes, - verify_pim_neighbors, + verify_pim6_neighbors, verify_pim_interface_traffic, verify_pim_rp_info, verify_pim_state, - clear_pim6_interface_traffic, - clear_pim6_mroute, - verify_pim6_neighbors, - get_pim6_interface_traffic, - clear_pim6_interfaces, - verify_mld_groups, + verify_upstream_iif, ) +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_config_from_json, build_topo_from_json from lib.topolog import logger -from lib.topojson import build_topo_from_json, build_config_from_json # Global variables GROUP_RANGE_1 = "ff08::/64" @@ -141,7 +120,7 @@ def setup_module(mod): logger.info("Running setup_module to create topology") # This function initiates the topology build with Topogen... - json_file = "{}/multicast_pim6_static_rp.json".format(CWD) + json_file = "multicast_pim6_static_rp.json" tgen = Topogen(json_file, mod.__name__) global TOPO TOPO = tgen.json_topo @@ -163,6 +142,9 @@ def setup_module(mod): result = verify_pim6_neighbors(tgen, TOPO) assert result is True, "setup_module :Failed \n Error:" " {}".format(result) + global app_helper + app_helper = McastTesterHelper(tgen) + logger.info("Running setup_module() done") @@ -172,8 +154,7 @@ def teardown_module(): logger.info("Running teardown_module to delete topology") tgen = get_topogen() - # Clean up socat - kill_socat(tgen) + app_helper.cleanup() # Stop toplogy and Remove tmp files tgen.stop_topology() @@ -260,6 +241,8 @@ def test_pim6_add_delete_static_RP_p0(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Shut link b/w R1 and R3 and R1 and R4 as per testcase topology") intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"] intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"] @@ -313,11 +296,7 @@ def test_pim6_add_delete_static_RP_p0(request): ) step("send mld join {} to R1".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -457,6 +436,8 @@ def test_pim6_SPT_RPT_path_same_p1(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Shut link b/w R1->R3, R1->R4 and R3->R1, R3->R4 as per " "testcase topology") intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"] 
intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"] @@ -494,11 +475,7 @@ def test_pim6_SPT_RPT_path_same_p1(request): step( "Enable MLD on r1 interface and send MLD join {} to R1".format(GROUP_ADDRESS_1) ) - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -508,9 +485,8 @@ def test_pim6_SPT_RPT_path_same_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("Send multicast traffic from R5") - intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"] SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf) + result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r2: Verify RP info") @@ -630,6 +606,8 @@ def test_pim6_RP_configured_as_LHR_p1(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable MLD on r1 interface") step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers") @@ -665,11 +643,7 @@ def test_pim6_RP_configured_as_LHR_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("send mld join {} to R1".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -679,9 +653,8 @@ def test_pim6_RP_configured_as_LHR_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"] SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf) + result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify (*, G) upstream IIF interface") @@ -762,6 +735,8 @@ def test_pim6_RP_configured_as_FHR_p1(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable MLD on r1 interface") step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers") step("r3: Configure r3(FHR) as RP") @@ -792,11 +767,7 @@ def test_pim6_RP_configured_as_FHR_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("send mld join {} to R1".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -806,9 +777,8 @@ def 
test_pim6_RP_configured_as_FHR_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"] SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf) + result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify (*, G) upstream IIF interface") @@ -890,6 +860,8 @@ def test_pim6_SPT_RPT_path_different_p1(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable MLD on r1 interface") step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers") step("r2: Configure r2 as RP") @@ -921,11 +893,7 @@ def test_pim6_SPT_RPT_path_different_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("send mld join {} to R1".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -935,9 +903,8 @@ def test_pim6_SPT_RPT_path_different_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1)) - intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"] SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf) + result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify (*, G) upstream IIF interface") @@ -1060,6 +1027,8 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable MLD on r1 interface") step("Enable the PIM66 on all the interfaces of r1, r2, r3 and r4 routers") step( @@ -1109,11 +1078,7 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request): ) step("r0: send mld join {} to R1".format(GROUP_ADDRESS_3)) - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_3, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_3, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py index a61164baa2..39497e91ed 100755 --- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py +++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# -*- coding: utf-8 eval: (blacken-mode 1) -*- # SPDX-License-Identifier: ISC # @@ -33,55 +33,31 @@ Test steps import os import sys -import json import time -import pytest - -# Save the Current Working 
Directory to find configuration files. -CWD = os.path.dirname(os.path.realpath(__file__)) -sys.path.append(os.path.join(CWD, "../")) -sys.path.append(os.path.join(CWD, "../lib/")) - -# Required to instantiate the topology builder class. - -# pylint: disable=C0413 -# Import topogen and topotest helpers -from lib.topogen import Topogen, get_topogen +import pytest from lib.common_config import ( - start_topology, - write_test_header, - write_test_footer, + create_debug_log_config, reset_config_on_routers, - step, shutdown_bringup_interface, - kill_router_daemons, - start_router_daemons, - create_static_routes, - check_router_status, - socat_send_mld_join, - socat_send_pim6_traffic, - kill_socat, - create_debug_log_config, + start_topology, + step, + write_test_footer, + write_test_header, ) from lib.pim import ( + McastTesterHelper, create_pim_config, - verify_upstream_iif, verify_join_state_and_timer, + verify_mld_groups, verify_mroutes, - verify_pim_neighbors, - verify_pim_interface_traffic, - verify_pim_rp_info, - verify_pim_state, - clear_pim6_interface_traffic, - clear_pim6_mroute, verify_pim6_neighbors, - get_pim6_interface_traffic, - clear_pim6_interfaces, - verify_mld_groups, + verify_pim_rp_info, + verify_upstream_iif, ) +from lib.topogen import Topogen, get_topogen +from lib.topojson import build_config_from_json, build_topo_from_json from lib.topolog import logger -from lib.topojson import build_topo_from_json, build_config_from_json # Global variables GROUP_RANGE_1 = "ff08::/64" @@ -145,7 +121,7 @@ def setup_module(mod): logger.info("Running setup_module to create topology") # This function initiates the topology build with Topogen... - json_file = "{}/multicast_pim6_static_rp.json".format(CWD) + json_file = "multicast_pim6_static_rp.json" tgen = Topogen(json_file, mod.__name__) global TOPO TOPO = tgen.json_topo @@ -167,6 +143,9 @@ def setup_module(mod): result = verify_pim6_neighbors(tgen, TOPO) assert result is True, "setup_module :Failed \n Error:" " {}".format(result) + global app_helper + app_helper = McastTesterHelper(tgen) + logger.info("Running setup_module() done") @@ -176,8 +155,7 @@ def teardown_module(): logger.info("Running teardown_module to delete topology") tgen = get_topogen() - # Clean up socat - kill_socat(tgen) + app_helper.cleanup() # Stop toplogy and Remove tmp files tgen.stop_topology() @@ -265,6 +243,8 @@ def test_pim6_multiple_groups_same_RP_address_p2(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + input_dict = { "r1": {"debug": {"log_file": "r1_debug.log", "enable": ["pim6d"]}}, "r2": {"debug": {"log_file": "r2_debug.log", "enable": ["pim6d"]}}, @@ -305,10 +285,7 @@ def test_pim6_multiple_groups_same_RP_address_p2(request): group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2 step("r0: Send MLD join for 10 groups") intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip - ) + result = app_helper.run_join("r0", group_address_list, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -318,9 +295,8 @@ def test_pim6_multiple_groups_same_RP_address_p2(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("r5: Send multicast traffic for group {}".format(group_address_list)) - intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"] 
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf) + result = app_helper.run_traffic("r5", group_address_list, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify (*, G) upstream IIF interface") @@ -593,6 +569,8 @@ def test_pim6_multiple_groups_different_RP_address_p2(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable MLD on r1 interface") step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers") step("r2: Configure r2 as RP") @@ -646,11 +624,7 @@ def test_pim6_multiple_groups_different_RP_address_p2(request): group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2 step("r0: Send MLD join for 10 groups") - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip - ) + result = app_helper.run_join("r0", group_address_list, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") @@ -660,9 +634,8 @@ def test_pim6_multiple_groups_different_RP_address_p2(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("r5: Send multicast traffic for group {}".format(group_address_list)) - intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"] SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0] - result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf) + result = app_helper.run_traffic("r5", group_address_list, "r3") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify (*, G) upstream IIF interface") @@ -1189,6 +1162,8 @@ def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request): step("Creating configuration from JSON") reset_config_on_routers(tgen) + app_helper.stop_all_hosts() + step("Enable MLD on r1 interface") step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers") step("r2: Configure r2 as RP") @@ -1220,11 +1195,7 @@ def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request): assert result is True, ASSERT_MSG.format(tc_name, result) step("r0: Send MLD join") - intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"] - intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0] - result = socat_send_mld_join( - tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip - ) + result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1") assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result) step("r1: Verify MLD groups") diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 0e0aec9839..c2be9f78eb 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -1480,12 +1480,17 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del): lines_to_add_to_del.append((tmp_ctx_keys, line)) for (ctx_keys, line) in lines_to_del_to_del: - if line is not None: + try: lines_to_del.remove((ctx_keys, line)) + except ValueError: + pass for (ctx_keys, line) in lines_to_add_to_del: - if line is not None: + try: lines_to_add.remove((ctx_keys, line)) + except ValueError: + pass + return (lines_to_add, lines_to_del) diff --git a/zebra/rib.h b/zebra/rib.h index a56bb05d68..65cc1ffab9 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -465,6 +465,13 @@ extern uint8_t 
 route_distance(int type);

 extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
					    bool rt_delete);
+/*
+ * rib_find_rn_from_ctx
+ *
+ * Returns a route_node with an incremented lock for the appropriate
+ * table and prefix specified by the context.  The caller should
+ * unlock the node when done.
+ */
 extern struct route_node *
 rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx);
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 4c6c336d41..4bc9f4acfa 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -801,11 +801,17 @@ int zsend_route_notify_owner(const struct route_node *rn,
 int zsend_route_notify_owner_ctx(const struct zebra_dplane_ctx *ctx,
				 enum zapi_route_notify_owner note)
 {
-	return (route_notify_internal(
-		rib_find_rn_from_ctx(ctx), dplane_ctx_get_type(ctx),
-		dplane_ctx_get_instance(ctx), dplane_ctx_get_vrf(ctx),
-		dplane_ctx_get_table(ctx), note, dplane_ctx_get_afi(ctx),
-		dplane_ctx_get_safi(ctx)));
+	int result;
+	struct route_node *rn = rib_find_rn_from_ctx(ctx);
+
+	result = route_notify_internal(
+		rn, dplane_ctx_get_type(ctx), dplane_ctx_get_instance(ctx),
+		dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx), note,
+		dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx));
+
+	route_unlock_node(rn);
+
+	return result;
 }

 static void zread_route_notify_request(ZAPI_HANDLER_ARGS)
diff --git a/zebra/zebra_mlag.c b/zebra/zebra_mlag.c
index 6713dbc967..7715eab0a8 100644
--- a/zebra/zebra_mlag.c
+++ b/zebra/zebra_mlag.c
@@ -338,8 +338,6 @@ static void zebra_mlag_post_data_from_main_thread(struct event *thread)
		}
	}
-	stream_free(s);
-	return;

 stream_failure:
	stream_free(s);
	if (zebra_s)
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 6abd49310c..d2367007cf 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -507,8 +507,6 @@ static void zserv_process_messages(struct event *thread)
		stream_fifo_push(cache, msg);
	}
-	msg = NULL;
-
	/* Need to reschedule processing work if there are still
	 * packets in the fifo. */
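
The receiver/sender plumbing in the pim6 topotests above now goes through McastTesterHelper instead of hand-rolled socat calls: the tests pass host and router names and the helper works out interface and address details itself. Only the calls that appear in the diff (run_join, run_traffic, stop_all_hosts, cleanup, and use as a context manager) are assumed here; the class below is a hypothetical stand-in sketching the shape the tests rely on, not the real lib.pim code.

import pytest


class StubMcastHelper:
    """Hypothetical stand-in mirroring the interface the converted tests use."""

    def __init__(self, tgen):
        self.tgen = tgen      # topology handle (get_topogen() in the real tests)
        self.active = []      # hosts with a joiner/sender currently running

    def run_join(self, host, groups, join_towards):
        # would start an MLD joiner on `host` on its link towards `join_towards`
        self.active.append(host)
        return True

    def run_traffic(self, host, groups, bind_towards):
        # would start a multicast sender on `host` towards `bind_towards`
        self.active.append(host)
        return True

    def stop_all_hosts(self):
        self.active.clear()   # called at the top of each test to kill leftovers

    def cleanup(self):
        self.stop_all_hosts()

    def __enter__(self):      # context-manager support, so a fixture can simply
        return self           # `with StubMcastHelper(...) as ah: yield ah`

    def __exit__(self, exc_type, exc, tb):
        self.cleanup()        # cleanup runs even if the test body raised
        return False


@pytest.fixture(scope="function")
def app_helper():
    with StubMcastHelper(tgen=None) as ah:
        yield ah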
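
In the frr-reload.py hunk earlier in this diff, the `if line is not None` guard around list.remove() becomes a try/except ValueError, so an entry that was already removed, or never queued, no longer aborts the comparison. A minimal, self-contained sketch of that pattern follows; the function and variable names are illustrative, not taken from the tool.

def drop_entries(pending, to_drop):
    """Remove each (ctx_keys, line) tuple in to_drop from pending.

    list.remove() raises ValueError when the element is absent, for example
    when the same tuple was queued for removal twice; tolerating that keeps
    the diff computation going instead of crashing.
    """
    for entry in to_drop:
        try:
            pending.remove(entry)
        except ValueError:
            pass  # already gone (duplicate or never present)
    return pending


if __name__ == "__main__":
    pending = [(("router bgp 65001",), "neighbor 192.0.2.1 shutdown"),
               (("interface eth0",), None)]
    # to_drop holds every pending entry plus one duplicate
    to_drop = pending + [(("router bgp 65001",), "neighbor 192.0.2.1 shutdown")]
    print(drop_entries(pending, to_drop))  # -> []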
