summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bfdd/bfd.h14
-rw-r--r--bfdd/bfd_packet.c38
-rw-r--r--bfdd/bfdd_nb.c2
-rw-r--r--bfdd/bfdd_nb.h3
-rw-r--r--bfdd/bfdd_nb_config.c38
-rw-r--r--bgpd/bgp_attr.c4
-rw-r--r--bgpd/bgp_clist.c81
-rw-r--r--bgpd/bgp_clist.h3
-rw-r--r--bgpd/bgp_flowspec.c7
-rw-r--r--bgpd/bgp_fsm.c6
-rw-r--r--bgpd/bgp_mplsvpn.c13
-rw-r--r--bgpd/bgp_nht.c6
-rw-r--r--bgpd/bgp_route.c14
-rw-r--r--bgpd/bgp_route.h3
-rw-r--r--bgpd/bgp_updgrp.c1
-rw-r--r--bgpd/bgp_updgrp_adv.c77
-rw-r--r--bgpd/bgp_vty.c77
-rw-r--r--bgpd/bgpd.c11
-rw-r--r--bgpd/bgpd.h1
-rw-r--r--configure.ac2
-rw-r--r--debian/changelog10
-rw-r--r--doc/developer/topotests.rst64
-rw-r--r--doc/developer/workflow.rst18
-rw-r--r--doc/user/bgp.rst14
-rw-r--r--eigrpd/eigrp_update.c3
-rw-r--r--isisd/isis_spf.c28
-rw-r--r--ldpd/hello.c103
-rw-r--r--ldpd/init.c91
-rw-r--r--ldpd/l2vpn.c72
-rw-r--r--ldpd/ldp_zebra.c15
-rw-r--r--ldpd/notification.c73
-rw-r--r--ldpd/socket.c42
-rw-r--r--lib/command.c9
-rw-r--r--lib/if.c1
-rw-r--r--lib/libfrr.c15
-rw-r--r--lib/libfrr.h7
-rw-r--r--lib/mgmt_be_client.c638
-rw-r--r--lib/mgmt_be_client.h145
-rw-r--r--lib/mgmt_fe_client.c898
-rw-r--r--lib/mgmt_fe_client.h192
-rw-r--r--lib/mgmt_msg.c532
-rw-r--r--lib/mgmt_msg.h156
-rw-r--r--lib/northbound.c7
-rw-r--r--lib/northbound_cli.c7
-rw-r--r--lib/vty.c453
-rw-r--r--lib/vty.h33
-rw-r--r--mgmtd/mgmt.c32
-rw-r--r--mgmtd/mgmt.h5
-rw-r--r--mgmtd/mgmt_be_adapter.c875
-rw-r--r--mgmtd/mgmt_be_adapter.h118
-rw-r--r--mgmtd/mgmt_be_server.c150
-rw-r--r--mgmtd/mgmt_be_server.h20
-rw-r--r--mgmtd/mgmt_defines.h2
-rw-r--r--mgmtd/mgmt_ds.c166
-rw-r--r--mgmtd/mgmt_ds.h44
-rw-r--r--mgmtd/mgmt_fe_adapter.c589
-rw-r--r--mgmtd/mgmt_fe_adapter.h21
-rw-r--r--mgmtd/mgmt_fe_server.c150
-rw-r--r--mgmtd/mgmt_fe_server.h20
-rw-r--r--mgmtd/mgmt_history.h2
-rw-r--r--mgmtd/mgmt_main.c24
-rw-r--r--mgmtd/mgmt_memory.c21
-rw-r--r--mgmtd/mgmt_memory.h1
-rw-r--r--mgmtd/mgmt_txn.c464
-rw-r--r--mgmtd/mgmt_txn.h10
-rw-r--r--mgmtd/mgmt_vty.c34
-rw-r--r--mgmtd/subdir.am12
-rw-r--r--ospf6d/ospf6_gr.c3
-rw-r--r--ospf6d/ospf6_gr.h2
-rw-r--r--ospf6d/ospf6_interface.c1
-rw-r--r--ospfd/ospf_gr.c2
-rw-r--r--ospfd/ospf_gr.h2
-rw-r--r--ospfd/ospf_route.c2
-rw-r--r--pimd/pim_cmd_common.c4
-rw-r--r--pimd/pim_iface.c58
-rw-r--r--pimd/pim_iface.h2
-rw-r--r--pimd/pim_igmpv3.c11
-rw-r--r--pimd/pim_nb_config.c100
-rw-r--r--pimd/pim_tib.c2
-rw-r--r--redhat/frr.spec.in30
-rw-r--r--ripd/subdir.am1
-rw-r--r--staticd/static_main.c52
-rw-r--r--tests/helpers/c/main.c2
-rwxr-xr-xtests/topotests/analyze.py329
-rw-r--r--tests/topotests/babel_topo1/r1/babeld.conf1
-rw-r--r--tests/topotests/babel_topo1/r2/babeld.conf1
-rw-r--r--tests/topotests/babel_topo1/r3/babeld.conf1
-rw-r--r--tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json152
-rw-r--r--tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py1118
-rw-r--r--tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py11
-rw-r--r--tests/topotests/bgp_default_originate_timer/__init__.py0
-rw-r--r--tests/topotests/bgp_default_originate_timer/r1/bgpd.conf18
-rw-r--r--tests/topotests/bgp_default_originate_timer/r1/zebra.conf7
-rw-r--r--tests/topotests/bgp_default_originate_timer/r2/bgpd.conf6
-rw-r--r--tests/topotests/bgp_default_originate_timer/r2/zebra.conf4
-rw-r--r--tests/topotests/bgp_default_originate_timer/r3/bgpd.conf12
-rw-r--r--tests/topotests/bgp_default_originate_timer/r3/zebra.conf7
-rw-r--r--tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py123
-rw-r--r--tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py (renamed from tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py)0
-rw-r--r--tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py3
-rwxr-xr-xtests/topotests/conftest.py79
-rwxr-xr-xtests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py170
-rw-r--r--tests/topotests/lib/bgp.py8
-rw-r--r--tests/topotests/lib/common_config.py3
-rwxr-xr-xtests/topotests/lib/mcast-tester.py42
-rw-r--r--tests/topotests/lib/micronet_compat.py4
-rw-r--r--tests/topotests/lib/pim.py12
-rw-r--r--tests/topotests/lib/topogen.py27
-rw-r--r--tests/topotests/lib/topolog.py160
-rw-r--r--tests/topotests/lib/topotest.py51
-rw-r--r--tests/topotests/mgmt_startup/r1/mgmtd.conf13
-rw-r--r--tests/topotests/mgmt_startup/r1/zebra.conf7
-rw-r--r--tests/topotests/mgmt_startup/r2/staticd.conf7
-rw-r--r--tests/topotests/mgmt_startup/r2/zebra.conf12
-rw-r--r--tests/topotests/mgmt_startup/r3/zebra.conf18
-rw-r--r--tests/topotests/mgmt_startup/r4/frr.conf21
-rw-r--r--tests/topotests/mgmt_startup/test_bigconf.py80
-rw-r--r--tests/topotests/mgmt_startup/test_config.py109
-rw-r--r--tests/topotests/mgmt_startup/test_late_bigconf.py98
-rw-r--r--tests/topotests/mgmt_startup/test_late_uniconf.py44
-rw-r--r--tests/topotests/mgmt_startup/test_latestart.py45
-rw-r--r--tests/topotests/mgmt_startup/util.py98
-rw-r--r--tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py70
-rw-r--r--tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py160
-rw-r--r--tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py98
-rwxr-xr-xtests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py119
-rwxr-xr-xtests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py87
-rw-r--r--tests/topotests/ospfapi/test_ospf_clientapi.py63
-rwxr-xr-xtools/frr-reload.py9
-rw-r--r--vtysh/vtysh_user.c15
-rw-r--r--zebra/interface.c3
-rw-r--r--zebra/rib.h7
-rw-r--r--zebra/zapi_msg.c16
-rw-r--r--zebra/zebra_evpn_mh.c22
-rw-r--r--zebra/zebra_evpn_mh.h2
-rw-r--r--zebra/zebra_mlag.c2
-rw-r--r--zebra/zebra_vxlan.c4
-rw-r--r--zebra/zserv.c2
138 files changed, 6090 insertions, 4551 deletions
diff --git a/bfdd/bfd.h b/bfdd/bfd.h
index 5451e66c23..69529aba17 100644
--- a/bfdd/bfd.h
+++ b/bfdd/bfd.h
@@ -32,6 +32,11 @@ DECLARE_MGROUP(BFDD);
DECLARE_MTYPE(BFDD_CONTROL);
DECLARE_MTYPE(BFDD_NOTIFICATION);
+/* bfd Authentication Type. */
+#define BFD_AUTH_NULL 0
+#define BFD_AUTH_SIMPLE 1
+#define BFD_AUTH_CRYPTOGRAPHIC 2
+
struct bfd_timers {
uint32_t desired_min_tx;
uint32_t required_min_rx;
@@ -61,6 +66,15 @@ struct bfd_pkt {
};
/*
+ * Format of authentication.
+ */
+struct bfd_auth {
+ uint8_t type;
+ uint8_t length;
+};
+
+
+/*
* Format of Echo packet.
*/
struct bfd_echo_pkt {
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index ea7a1038ae..0c72ee7581 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -768,6 +768,37 @@ static void cp_debug(bool mhop, struct sockaddr_any *peer,
mhop ? "yes" : "no", peerstr, localstr, portstr, vrfstr);
}
+static bool bfd_check_auth(const struct bfd_session *bfd,
+			   const struct bfd_pkt *cp)
+{
+	if (CHECK_FLAG(cp->flags, BFD_ABIT)) {
+		/* RFC5880 4.1: Authentication Section is present. */
+		struct bfd_auth *auth = (struct bfd_auth *)(cp + 1);
+
+		/* Validate lengths BEFORE reading any auth fields. */
+		if (cp->len < BFD_PKT_LEN + sizeof(struct bfd_auth))
+			return false;
+
+		if (cp->len < BFD_PKT_LEN + auth->length)
+			return false;
+
+		/* Auth Type is a single octet: no ntohs() conversion. */
+		switch (auth->type) {
+		case BFD_AUTH_NULL:
+			return false;
+		case BFD_AUTH_SIMPLE:
+			/* RFC5880 6.7: To be finished. */
+			return false;
+		case BFD_AUTH_CRYPTOGRAPHIC:
+			/* RFC5880 6.7: To be finished. */
+			return false;
+		default:
+			/* RFC5880 6.7: To be finished. */
+			return false;
+		}
+	}
+	return true;
+}
+
void bfd_recv_cb(struct event *t)
{
int sd = EVENT_FD(t);
@@ -932,6 +963,13 @@ void bfd_recv_cb(struct event *t)
bfd->discrs.remote_discr = ntohl(cp->discrs.my_discr);
+ /* Check authentication. */
+ if (!bfd_check_auth(bfd, cp)) {
+ cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
+ "Authentication failed");
+ return;
+ }
+
/* Save remote diagnostics before state switch. */
bfd->remote_diag = cp->diag & BFD_DIAGMASK;
diff --git a/bfdd/bfdd_nb.c b/bfdd/bfdd_nb.c
index 7135c50763..114fbc2bdd 100644
--- a/bfdd/bfdd_nb.c
+++ b/bfdd/bfdd_nb.c
@@ -74,7 +74,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.xpath = "/frr-bfdd:bfdd/bfd/profile/minimum-ttl",
.cbs = {
.modify = bfdd_bfd_profile_minimum_ttl_modify,
- .destroy = bfdd_bfd_profile_minimum_ttl_destroy,
.cli_show = bfd_cli_show_minimum_ttl,
}
},
@@ -361,7 +360,6 @@ const struct frr_yang_module_info frr_bfdd_info = {
.xpath = "/frr-bfdd:bfdd/bfd/sessions/multi-hop/minimum-ttl",
.cbs = {
.modify = bfdd_bfd_sessions_multi_hop_minimum_ttl_modify,
- .destroy = bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy,
.cli_show = bfd_cli_show_minimum_ttl,
}
},
diff --git a/bfdd/bfdd_nb.h b/bfdd/bfdd_nb.h
index 7a0e724d28..b5b00b57e4 100644
--- a/bfdd/bfdd_nb.h
+++ b/bfdd/bfdd_nb.h
@@ -25,7 +25,6 @@ int bfdd_bfd_profile_required_receive_interval_modify(
int bfdd_bfd_profile_administrative_down_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_passive_mode_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args);
-int bfdd_bfd_profile_minimum_ttl_destroy(struct nb_cb_destroy_args *args);
int bfdd_bfd_profile_echo_mode_modify(struct nb_cb_modify_args *args);
int bfdd_bfd_profile_desired_echo_transmission_interval_modify(
struct nb_cb_modify_args *args);
@@ -128,8 +127,6 @@ int bfdd_bfd_sessions_multi_hop_administrative_down_modify(
struct nb_cb_modify_args *args);
int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify(
struct nb_cb_modify_args *args);
-int bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy(
- struct nb_cb_destroy_args *args);
struct yang_data *
bfdd_bfd_sessions_multi_hop_stats_local_discriminator_get_elem(
struct nb_cb_get_elem_args *args);
diff --git a/bfdd/bfdd_nb_config.c b/bfdd/bfdd_nb_config.c
index e4e97404d8..8cf2f0a6f1 100644
--- a/bfdd/bfdd_nb_config.c
+++ b/bfdd/bfdd_nb_config.c
@@ -423,20 +423,6 @@ int bfdd_bfd_profile_minimum_ttl_modify(struct nb_cb_modify_args *args)
return NB_OK;
}
-int bfdd_bfd_profile_minimum_ttl_destroy(struct nb_cb_destroy_args *args)
-{
- struct bfd_profile *bp;
-
- if (args->event != NB_EV_APPLY)
- return NB_OK;
-
- bp = nb_running_get_entry(args->dnode, NULL, true);
- bp->minimum_ttl = BFD_DEF_MHOP_TTL;
- bfd_profile_update(bp);
-
- return NB_OK;
-}
-
/*
* XPath: /frr-bfdd:bfdd/bfd/profile/echo-mode
*/
@@ -859,27 +845,3 @@ int bfdd_bfd_sessions_multi_hop_minimum_ttl_modify(
return NB_OK;
}
-
-int bfdd_bfd_sessions_multi_hop_minimum_ttl_destroy(
- struct nb_cb_destroy_args *args)
-{
- struct bfd_session *bs;
-
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- return NB_OK;
-
- case NB_EV_APPLY:
- break;
-
- case NB_EV_ABORT:
- return NB_OK;
- }
-
- bs = nb_running_get_entry(args->dnode, NULL, true);
- bs->peer_profile.minimum_ttl = BFD_DEF_MHOP_TTL;
- bfd_session_apply(bs);
-
- return NB_OK;
-}
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index d5223a1e6e..ec9f12d61a 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -4682,6 +4682,10 @@ bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *peer,
* there! (JK)
* Folks, talk to me: what is reasonable here!?
*/
+
+ /* Make sure dup aspath before the modification */
+ if (aspath == attr->aspath)
+ aspath = aspath_dup(attr->aspath);
aspath = aspath_delete_confed_seq(aspath);
stream_putc(s,
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c
index 1d2ba3bf58..f3c308afb9 100644
--- a/bgpd/bgp_clist.c
+++ b/bgpd/bgp_clist.c
@@ -659,9 +659,6 @@ bool community_list_match(struct community *com, struct community_list *list)
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == COMMUNITY_LIST_STANDARD) {
if (community_include(entry->u.com, COMMUNITY_INTERNET))
return entry->direct == COMMUNITY_PERMIT;
@@ -681,9 +678,6 @@ bool lcommunity_list_match(struct lcommunity *lcom, struct community_list *list)
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) {
if (lcommunity_match(lcom, entry->u.lcom))
return entry->direct == COMMUNITY_PERMIT;
@@ -705,9 +699,6 @@ bool lcommunity_list_exact_match(struct lcommunity *lcom,
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == LARGE_COMMUNITY_LIST_STANDARD) {
if (lcommunity_cmp(lcom, entry->u.lcom))
return entry->direct == COMMUNITY_PERMIT;
@@ -724,9 +715,6 @@ bool ecommunity_list_match(struct ecommunity *ecom, struct community_list *list)
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == EXTCOMMUNITY_LIST_STANDARD) {
if (ecommunity_match(ecom, entry->u.ecom))
return entry->direct == COMMUNITY_PERMIT;
@@ -746,9 +734,6 @@ bool community_list_exact_match(struct community *com,
struct community_entry *entry;
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any)
- return entry->direct == COMMUNITY_PERMIT;
-
if (entry->style == COMMUNITY_LIST_STANDARD) {
if (community_include(entry->u.com, COMMUNITY_INTERNET))
return entry->direct == COMMUNITY_PERMIT;
@@ -781,28 +766,18 @@ struct community *community_list_match_delete(struct community *com,
val = community_val_get(com, i);
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any) {
+ if ((entry->style == COMMUNITY_LIST_STANDARD) &&
+ (community_include(entry->u.com,
+ COMMUNITY_INTERNET) ||
+ community_include(entry->u.com, val))) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
}
break;
- }
-
- else if ((entry->style == COMMUNITY_LIST_STANDARD)
- && (community_include(entry->u.com,
- COMMUNITY_INTERNET)
- || community_include(entry->u.com, val))) {
- if (entry->direct == COMMUNITY_PERMIT) {
- com_index_to_delete[delete_index] = i;
- delete_index++;
- }
- break;
- }
-
- else if ((entry->style == COMMUNITY_LIST_EXPANDED)
- && community_regexp_include(entry->reg, com,
- i)) {
+ } else if ((entry->style == COMMUNITY_LIST_EXPANDED) &&
+ community_regexp_include(entry->reg, com,
+ i)) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
@@ -836,12 +811,6 @@ static bool community_list_dup_check(struct community_list *list,
if (entry->direct != new->direct)
continue;
- if (entry->any != new->any)
- continue;
-
- if (entry->any)
- return true;
-
switch (entry->style) {
case COMMUNITY_LIST_STANDARD:
if (community_cmp(entry->u.com, new->u.com))
@@ -899,20 +868,17 @@ int community_list_set(struct community_list_handler *ch, const char *name,
}
}
- if (str) {
- if (style == COMMUNITY_LIST_STANDARD)
- com = community_str2com(str);
- else
- regex = bgp_regcomp(str);
+ if (style == COMMUNITY_LIST_STANDARD)
+ com = community_str2com(str);
+ else
+ regex = bgp_regcomp(str);
- if (!com && !regex)
- return COMMUNITY_LIST_ERR_MALFORMED_VAL;
- }
+ if (!com && !regex)
+ return COMMUNITY_LIST_ERR_MALFORMED_VAL;
entry = community_entry_new();
entry->direct = direct;
entry->style = style;
- entry->any = (str ? false : true);
entry->u.com = com;
entry->reg = regex;
entry->seq = seqnum;
@@ -989,16 +955,8 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom,
for (i = 0; i < lcom->size; i++) {
ptr = lcom->val + (i * LCOMMUNITY_SIZE);
for (entry = list->head; entry; entry = entry->next) {
- if (entry->any) {
- if (entry->direct == COMMUNITY_PERMIT) {
- com_index_to_delete[delete_index] = i;
- delete_index++;
- }
- break;
- }
-
- else if ((entry->style == LARGE_COMMUNITY_LIST_STANDARD)
- && lcommunity_include(entry->u.lcom, ptr)) {
+ if ((entry->style == LARGE_COMMUNITY_LIST_STANDARD) &&
+ lcommunity_include(entry->u.lcom, ptr)) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
@@ -1006,9 +964,10 @@ struct lcommunity *lcommunity_list_match_delete(struct lcommunity *lcom,
break;
}
- else if ((entry->style == LARGE_COMMUNITY_LIST_EXPANDED)
- && lcommunity_regexp_include(entry->reg, lcom,
- i)) {
+ else if ((entry->style ==
+ LARGE_COMMUNITY_LIST_EXPANDED) &&
+ lcommunity_regexp_include(entry->reg, lcom,
+ i)) {
if (entry->direct == COMMUNITY_PERMIT) {
com_index_to_delete[delete_index] = i;
delete_index++;
@@ -1127,7 +1086,6 @@ int lcommunity_list_set(struct community_list_handler *ch, const char *name,
entry = community_entry_new();
entry->direct = direct;
entry->style = style;
- entry->any = (str ? false : true);
entry->u.lcom = lcom;
entry->reg = regex;
entry->seq = seqnum;
@@ -1248,7 +1206,6 @@ int extcommunity_list_set(struct community_list_handler *ch, const char *name,
entry = community_entry_new();
entry->direct = direct;
entry->style = style;
- entry->any = false;
if (ecom)
entry->config = ecommunity_ecom2str(
ecom, ECOMMUNITY_FORMAT_COMMUNITY_LIST, 0);
diff --git a/bgpd/bgp_clist.h b/bgpd/bgp_clist.h
index 7a9b28038c..8e5d637bab 100644
--- a/bgpd/bgp_clist.h
+++ b/bgpd/bgp_clist.h
@@ -65,9 +65,6 @@ struct community_entry {
/* Standard or expanded. */
uint8_t style;
- /* Any match. */
- bool any;
-
/* Sequence number. */
int64_t seq;
diff --git a/bgpd/bgp_flowspec.c b/bgpd/bgp_flowspec.c
index 70bdbaf035..6165bf892e 100644
--- a/bgpd/bgp_flowspec.c
+++ b/bgpd/bgp_flowspec.c
@@ -189,13 +189,16 @@ int bgp_nlri_parse_flowspec(struct peer *peer, struct attr *attr,
zlog_info("%s", local_string);
}
/* Process the route. */
- if (!withdraw)
+ if (!withdraw) {
bgp_update(peer, &p, 0, attr, afi, safi,
ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
NULL, 0, 0, NULL);
- else
+ } else {
bgp_withdraw(peer, &p, 0, afi, safi, ZEBRA_ROUTE_BGP,
BGP_ROUTE_NORMAL, NULL, NULL, 0, NULL);
+ }
+
+ XFREE(MTYPE_TMP, temp);
}
return BGP_NLRI_PARSE_OK;
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index a289d3d67a..ad6906d092 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -2656,12 +2656,12 @@ int bgp_event_update(struct peer *peer, enum bgp_fsm_events event)
ret != BGP_FSM_FAILURE_AND_DELETE) {
flog_err(
EC_BGP_FSM,
- "%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d",
+ "%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d, last reset: %s",
peer->host, bgp_event_str[peer->cur_event],
lookup_msg(bgp_status_msg, peer->status, NULL),
bgp_event_str[peer->last_event],
- bgp_event_str[peer->last_major_event],
- peer->fd);
+ bgp_event_str[peer->last_major_event], peer->fd,
+ peer_down_str[peer->last_reset]);
bgp_stop(peer);
bgp_fsm_change_status(peer, Idle);
bgp_timer_set(peer);
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index ecc84533b0..dc9bd3cff5 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1467,13 +1467,12 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
/* Unlink from any existing nexthop cache. Free the entry if unused.
*/
bgp_mplsvpn_path_nh_label_unlink(pi);
- if (blnc) {
- /* updates NHT pi list reference */
- LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
- pi->label_nexthop_cache = blnc;
- pi->label_nexthop_cache->path_count++;
- blnc->last_update = monotime(NULL);
- }
+
+ /* updates NHT pi list reference */
+ LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
+ pi->label_nexthop_cache = blnc;
+ pi->label_nexthop_cache->path_count++;
+ blnc->last_update = monotime(NULL);
/* then add or update the selected nexthop */
if (!blnc->nh)
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 83a7fbd412..636ea06a20 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -551,7 +551,7 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
char bnc_buf[BNC_FLAG_DUMP_SIZE];
zlog_debug(
- "%s(%u): Rcvd NH update %pFX(%u)%u) - metric %d/%d #nhops %d/%d flags %s",
+ "%s(%u): Rcvd NH update %pFX(%u)(%u) - metric %d/%d #nhops %d/%d flags %s",
bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix,
bnc->ifindex_ipv6_ll, bnc->srte_color, nhr->metric,
bnc->metric, nhr->nexthop_num, bnc->nexthop_num,
@@ -884,7 +884,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
if (!bnc_nhc) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache",
+ "parse nexthop update %pFX(%u)(%s): bnc info not found for nexthop cache",
&nhr.prefix, nhr.srte_color, bgp->name_pretty);
} else
bgp_process_nexthop_update(bnc_nhc, &nhr, false);
@@ -895,7 +895,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
if (!bnc_import) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check",
+ "parse nexthop update %pFX(%u)(%s): bnc info not found for import check",
&nhr.prefix, nhr.srte_color, bgp->name_pretty);
} else
bgp_process_nexthop_update(bnc_import, &nhr, true);
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 85f08bbed5..7737738d28 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2894,20 +2894,16 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
*/
void subgroup_process_announce_selected(struct update_subgroup *subgrp,
struct bgp_path_info *selected,
- struct bgp_dest *dest,
- uint32_t addpath_tx_id)
+ struct bgp_dest *dest, afi_t afi,
+ safi_t safi, uint32_t addpath_tx_id)
{
const struct prefix *p;
struct peer *onlypeer;
struct attr attr;
- afi_t afi;
- safi_t safi;
struct bgp *bgp;
bool advertise;
p = bgp_dest_get_prefix(dest);
- afi = SUBGRP_AFI(subgrp);
- safi = SUBGRP_SAFI(subgrp);
bgp = SUBGRP_INST(subgrp);
onlypeer = ((SUBGRP_PCOUNT(subgrp) == 1) ? (SUBGRP_PFIRST(subgrp))->peer
: NULL);
@@ -3292,14 +3288,12 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
if (old_select || new_select) {
bgp_bump_version(dest);
- if (!bgp->t_rmap_def_originate_eval) {
- bgp_lock(bgp);
+ if (!bgp->t_rmap_def_originate_eval)
event_add_timer(
bm->master,
update_group_refresh_default_originate_route_map,
- bgp, RMAP_DEFAULT_ORIGINATE_EVAL_TIMER,
+ bgp, bgp->rmap_def_originate_eval_timer,
&bgp->t_rmap_def_originate_eval);
- }
}
if (old_select)
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index fbdd5fae7d..9bd9e48e22 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -813,7 +813,8 @@ extern void bgp_notify_conditional_adv_scanner(struct update_subgroup *subgrp);
extern void subgroup_process_announce_selected(struct update_subgroup *subgrp,
struct bgp_path_info *selected,
- struct bgp_dest *dest,
+ struct bgp_dest *dest, afi_t afi,
+ safi_t safi,
uint32_t addpath_tx_id);
extern bool subgroup_announce_check(struct bgp_dest *dest,
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index 0b1e54916a..a642be935d 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -2106,7 +2106,6 @@ void update_group_refresh_default_originate_route_map(struct event *thread)
update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
reason);
EVENT_OFF(bgp->t_rmap_def_originate_eval);
- bgp_unlock(bgp);
}
/*
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index e4bc0176d1..33617811cf 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -114,8 +114,9 @@ static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
}
if (!pi) {
- subgroup_process_announce_selected(
- subgrp, NULL, ctx->dest, adj->addpath_tx_id);
+ subgroup_process_announce_selected(subgrp, NULL,
+ ctx->dest, afi, safi,
+ adj->addpath_tx_id);
}
}
}
@@ -161,7 +162,8 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
continue;
subgroup_process_announce_selected(
- subgrp, pi, ctx->dest,
+ subgrp, pi, ctx->dest, afi,
+ safi,
bgp_addpath_id_for_peer(
peer, afi, safi,
&pi->tx_addpath));
@@ -173,7 +175,8 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
*/
if (ctx->pi)
subgroup_process_announce_selected(
- subgrp, ctx->pi, ctx->dest,
+ subgrp, ctx->pi, ctx->dest, afi,
+ safi,
bgp_addpath_id_for_peer(
peer, afi, safi,
&ctx->pi->tx_addpath));
@@ -182,7 +185,8 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
else {
if (ctx->pi) {
subgroup_process_announce_selected(
- subgrp, ctx->pi, ctx->dest,
+ subgrp, ctx->pi, ctx->dest, afi,
+ safi,
bgp_addpath_id_for_peer(
peer, afi, safi,
&ctx->pi->tx_addpath));
@@ -196,7 +200,8 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
if (adj->subgroup == subgrp) {
subgroup_process_announce_selected(
subgrp, NULL,
- ctx->dest,
+ ctx->dest, afi,
+ safi,
adj->addpath_tx_id);
}
}
@@ -653,19 +658,15 @@ void subgroup_announce_table(struct update_subgroup *subgrp,
{
struct bgp_dest *dest;
struct bgp_path_info *ri;
- struct attr attr;
struct peer *peer;
afi_t afi;
safi_t safi;
safi_t safi_rib;
bool addpath_capable;
- struct bgp *bgp;
- bool advertise;
peer = SUBGRP_PEER(subgrp);
afi = SUBGRP_AFI(subgrp);
safi = SUBGRP_SAFI(subgrp);
- bgp = SUBGRP_INST(subgrp);
addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);
if (safi == SAFI_LABELED_UNICAST)
@@ -685,55 +686,27 @@ void subgroup_announce_table(struct update_subgroup *subgrp,
SET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
- const struct prefix *dest_p = bgp_dest_get_prefix(dest);
-
- /* Check if the route can be advertised */
- advertise = bgp_check_advertise(bgp, dest);
-
for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next) {
if (!bgp_check_selected(ri, peer, addpath_capable, afi,
safi_rib))
continue;
- if (subgroup_announce_check(dest, ri, subgrp, dest_p,
- &attr, NULL)) {
- /* Check if route can be advertised */
- if (advertise) {
- if (!bgp_check_withdrawal(bgp, dest)) {
- struct attr *adv_attr =
- bgp_attr_intern(&attr);
+ /* If default originate is enabled for
+ * the peer, do not send explicit
+ * withdraw. This will prevent deletion
+ * of default route advertised through
+ * default originate
+ */
+ if (CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_DEFAULT_ORIGINATE) &&
+ is_default_prefix(bgp_dest_get_prefix(dest)))
+ break;
- bgp_adj_out_set_subgroup(
- dest, subgrp, adv_attr,
- ri);
- } else
- bgp_adj_out_unset_subgroup(
- dest, subgrp, 1,
- bgp_addpath_id_for_peer(
- peer, afi,
- safi_rib,
- &ri->tx_addpath));
- }
- } else {
- /* If default originate is enabled for
- * the peer, do not send explicit
- * withdraw. This will prevent deletion
- * of default route advertised through
- * default originate
- */
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_DEFAULT_ORIGINATE) &&
- is_default_prefix(
- bgp_dest_get_prefix(dest)))
- break;
-
- bgp_adj_out_unset_subgroup(
- dest, subgrp, 1,
- bgp_addpath_id_for_peer(
- peer, afi, safi_rib,
- &ri->tx_addpath));
- }
+ subgroup_process_announce_selected(
+ subgrp, ri, dest, afi, safi_rib,
+ bgp_addpath_id_for_peer(peer, afi, safi_rib,
+ &ri->tx_addpath));
}
}
UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 7ef9db9f0d..6d7b745713 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -7959,6 +7959,26 @@ DEFPY (bgp_condadv_period,
return CMD_SUCCESS;
}
+DEFPY (bgp_def_originate_eval,
+ bgp_def_originate_eval_cmd,
+ "[no$no] bgp default-originate timer (0-3600)$timer",
+ NO_STR
+ BGP_STR
+ "Control default-originate\n"
+ "Set period to rescan BGP table to check if default-originate condition is met\n"
+ "Period between BGP table scans, in seconds; default 5\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+ bgp->rmap_def_originate_eval_timer =
+ no ? RMAP_DEFAULT_ORIGINATE_EVAL_TIMER : timer;
+
+ if (bgp->t_rmap_def_originate_eval)
+ EVENT_OFF(bgp->t_rmap_def_originate_eval);
+
+ return CMD_SUCCESS;
+}
+
DEFPY (neighbor_advertise_map,
neighbor_advertise_map_cmd,
"[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor advertise-map RMAP_NAME$advertise_str <exist-map|non-exist-map>$exist RMAP_NAME$condition_str",
@@ -9199,6 +9219,8 @@ DEFPY(af_label_vpn_export_allocation_mode,
bool old_per_nexthop, new_per_nexthop;
afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
old_per_nexthop = !!CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
@@ -18620,6 +18642,12 @@ int bgp_config_write(struct vty *vty)
" bgp conditional-advertisement timer %u\n",
bgp->condition_check_period);
+ /* default-originate timer configuration */
+ if (bgp->rmap_def_originate_eval_timer !=
+ RMAP_DEFAULT_ORIGINATE_EVAL_TIMER)
+ vty_out(vty, " bgp default-originate timer %u\n",
+ bgp->rmap_def_originate_eval_timer);
+
/* peer-group */
for (ALL_LIST_ELEMENTS(bgp->group, node, nnode, group)) {
bgp_config_write_peer_global(vty, bgp, group->conf);
@@ -20248,6 +20276,9 @@ void bgp_vty_init(void)
install_element(BGP_VPNV4_NODE, &neighbor_advertise_map_cmd);
install_element(BGP_VPNV6_NODE, &neighbor_advertise_map_cmd);
+ /* bgp default-originate timer */
+ install_element(BGP_NODE, &bgp_def_originate_eval_cmd);
+
/* neighbor maximum-prefix-out commands. */
install_element(BGP_NODE, &neighbor_maximum_prefix_out_cmd);
install_element(BGP_NODE, &no_neighbor_maximum_prefix_out_cmd);
@@ -20663,6 +20694,7 @@ DEFUN (community_list_standard,
argv_find(argv, argc, "AA:NN", &idx);
char *str = argv_concat(argv, argc, idx);
+ assert(str);
int ret = community_list_set(bgp_clist, cl_name_or_number, str, seq,
direct, style);
@@ -20775,6 +20807,7 @@ DEFUN (community_list_expanded_all,
argv_find(argv, argc, "AA:NN", &idx);
char *str = argv_concat(argv, argc, idx);
+ assert(str);
int ret = community_list_set(bgp_clist, cl_name_or_number, str, seq,
direct, style);
@@ -20859,16 +20892,13 @@ static const char *community_list_config_str(struct community_entry *entry)
{
const char *str;
- if (entry->any)
- str = "";
- else {
- if (entry->style == COMMUNITY_LIST_STANDARD)
- str = community_str(entry->u.com, false, false);
- else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD)
- str = lcommunity_str(entry->u.lcom, false, false);
- else
- str = entry->config;
- }
+ if (entry->style == COMMUNITY_LIST_STANDARD)
+ str = community_str(entry->u.com, false, false);
+ else if (entry->style == LARGE_COMMUNITY_LIST_STANDARD)
+ str = lcommunity_str(entry->u.lcom, false, false);
+ else
+ str = entry->config;
+
return str;
}
@@ -20891,13 +20921,8 @@ static void community_list_show(struct vty *vty, struct community_list *list)
: "expanded",
list->name);
}
- if (entry->any)
- vty_out(vty, " %s\n",
- community_direct_str(entry->direct));
- else
- vty_out(vty, " %s %s\n",
- community_direct_str(entry->direct),
- community_list_config_str(entry));
+ vty_out(vty, " %s %s\n", community_direct_str(entry->direct),
+ community_list_config_str(entry));
}
}
@@ -21256,13 +21281,8 @@ static void lcommunity_list_show(struct vty *vty, struct community_list *list)
: "expanded",
list->name);
}
- if (entry->any)
- vty_out(vty, " %s\n",
- community_direct_str(entry->direct));
- else
- vty_out(vty, " %s %s\n",
- community_direct_str(entry->direct),
- community_list_config_str(entry));
+ vty_out(vty, " %s %s\n", community_direct_str(entry->direct),
+ community_list_config_str(entry));
}
}
@@ -21558,13 +21578,8 @@ static void extcommunity_list_show(struct vty *vty, struct community_list *list)
: "expanded",
list->name);
}
- if (entry->any)
- vty_out(vty, " %s\n",
- community_direct_str(entry->direct));
- else
- vty_out(vty, " %s %s\n",
- community_direct_str(entry->direct),
- community_list_config_str(entry));
+ vty_out(vty, " %s %s\n", community_direct_str(entry->direct),
+ community_list_config_str(entry));
}
}
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 99c6a46e7c..ba2985d304 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -3330,6 +3330,7 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp_addpath_init_bgp_data(&bgp->tx_addpath);
bgp->fast_convergence = false;
bgp->llgr_stale_time = BGP_DEFAULT_LLGR_STALE_TIME;
+ bgp->rmap_def_originate_eval_timer = RMAP_DEFAULT_ORIGINATE_EVAL_TIMER;
#ifdef ENABLE_BGP_VNC
if (inst_type != BGP_INSTANCE_TYPE_VRF) {
@@ -3703,11 +3704,8 @@ void bgp_instance_down(struct bgp *bgp)
struct listnode *next;
/* Stop timers. */
- if (bgp->t_rmap_def_originate_eval) {
+ if (bgp->t_rmap_def_originate_eval)
EVENT_OFF(bgp->t_rmap_def_originate_eval);
- bgp_unlock(bgp); /* TODO - This timer is started with a lock -
- why? */
- }
/* Bring down peers, so corresponding routes are purged. */
for (ALL_LIST_ELEMENTS(bgp->peer, node, next, peer)) {
@@ -3810,11 +3808,8 @@ int bgp_delete(struct bgp *bgp)
vpn_leak_zebra_vrf_label_withdraw(bgp, AFI_IP6);
/* Stop timers. */
- if (bgp->t_rmap_def_originate_eval) {
+ if (bgp->t_rmap_def_originate_eval)
EVENT_OFF(bgp->t_rmap_def_originate_eval);
- bgp_unlock(bgp); /* TODO - This timer is started with a lock -
- why? */
- }
/* Inform peers we're going down. */
for (ALL_LIST_ELEMENTS(bgp->peer, node, next, peer))
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 9cb1d51088..ecd122fee2 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -601,6 +601,7 @@ struct bgp {
/* timer to re-evaluate neighbor default-originate route-maps */
struct event *t_rmap_def_originate_eval;
+ uint16_t rmap_def_originate_eval_timer;
#define RMAP_DEFAULT_ORIGINATE_EVAL_TIMER 5
/* BGP distance configuration. */
diff --git a/configure.ac b/configure.ac
index 0120c517c6..47ee44a7df 100644
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ([2.69])
-AC_INIT([frr], [9.0-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [9.1-dev], [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST([PACKAGE_URL])
PACKAGE_FULLNAME="FRRouting"
diff --git a/debian/changelog b/debian/changelog
index 008a97c7d5..5c0429d69d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,14 +1,14 @@
-frr (9.0~dev-1) UNRELEASED; urgency=medium
+frr (9.1~dev-1) UNRELEASED; urgency=medium
- * FRR Dev 9.0
+ * FRR Dev 9.1
- -- Donatas Abraitis <donatas@opensourcerouting.org> Tue, 07 Feb 2023 16:00:00 +0500
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Tue, 06 Jun 2023 12:00:00 -0600
-frr (8.5-1) UNRELEASED; urgency=medium
+frr (8.5-0) unstable; urgency=medium
* New upstream release FRR 8.5
- -- Donatas Abraitis <donatas@opensourcerouting.org> Tue, 07 Feb 2023 16:00:00 +0500
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Fri, 10 Mar 2023 02:00:00 -0600
frr (8.4.2-1) unstable; urgency=medium
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index f44cf9df98..773691e698 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -196,13 +196,15 @@ Analyze Test Results (``analyze.py``)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
By default router and execution logs are saved in ``/tmp/topotests`` and an XML
-results file is saved in ``/tmp/topotests.xml``. An analysis tool ``analyze.py``
-is provided to archive and analyze these results after the run completes.
+results file is saved in ``/tmp/topotests/topotests.xml``. An analysis tool
+``analyze.py`` is provided to archive and analyze these results after the run
+completes.
After the test run completes one should pick an archive directory to store the
results in and pass this value to ``analyze.py``. On first execution the results
-are copied to that directory from ``/tmp``, and subsequent runs use that
-directory for analyzing the results. Below is an example of this which also
+are moved to that directory from ``/tmp/topotests``. Subsequent runs of
+``analyze.py`` with the same args will use that directory's contents instead
+of copying any new results from ``/tmp``. Below is an example of this which also
shows the default behavior which is to display all failed and errored tests in
the run.
@@ -214,7 +216,7 @@ the run.
bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py::test_BGP_GR_10_p2
bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_routingTable
-Here we see that 4 tests have failed. We an dig deeper by displaying the
+Here we see that 4 tests have failed. We can dig deeper by displaying the
captured logs and errors. First let's redisplay the results enumerated by adding
the ``-E`` flag
@@ -249,9 +251,11 @@ the number of the test we are interested in along with ``--errmsg`` option.
assert False
-Now to look at the full text of the error for a failed test we use ``-T N``
-where N is the number of the test we are interested in along with ``--errtext``
-option.
+Now to look at the error text for a failed test we can use ``-T RANGES`` where
+``RANGES`` can be a number (e.g., ``5``), a range (e.g., ``0-10``), or a comma
+separated list of numbers and ranges (e.g., ``5,10-20,30``) of the test cases we
+are interested in along with the ``--errtext`` option. In the example below we'll
+select the first failed test case.
.. code:: shell
@@ -277,8 +281,8 @@ option.
[...]
To look at the full capture for a test including the stdout and stderr which
-includes full debug logs, just use the ``-T N`` option without the ``--errmsg``
-or ``--errtext`` options.
+includes full debug logs, use ``--full`` option, or specify a ``-T RANGES`` without
+specifying ``--errmsg`` or ``--errtext``.
.. code:: shell
@@ -298,6 +302,46 @@ or ``--errtext`` options.
--------------------------------- Captured Out ---------------------------------
system-err: --------------------------------- Captured Err ---------------------------------
+Filtered results
+""""""""""""""""
+
+There are 4 types of test results, [e]rrored, [f]ailed, [p]assed, and
+[s]kipped. One can select the set of results to show with the ``-S`` or
+``--select`` flags along with the letters for each type (i.e., ``-S efps``
+would select all results). By default ``analyze.py`` will use ``-S ef`` (i.e.,
+[e]rrors and [f]ailures) unless the ``--search`` filter is given in which case
+the default is to search all results (i.e., ``-S efps``).
+
+One can find all results which contain a ``REGEXP``. To filter results using a
+regular expression use the ``--search REGEXP`` option. In this case, by default,
+all result types will be searched for a match against the given ``REGEXP``. If a
+test result output contains a match it is selected into the set of results to show.
+
+An example of using ``--search`` would be to search all tests results for some
+log message, perhaps a warning or error.
+
+Using XML Results File from CI
+""""""""""""""""""""""""""""""
+
+``analyze.py`` actually only needs the ``topotests.xml`` file to run. This is
+very useful for analyzing a CI run failure where one only needs to download the
+``topotests.xml`` artifact from the run and then pass that to ``analyze.py``
+with the ``-r`` or ``--results`` option.
+
+For local runs if you wish to simply copy the ``topotests.xml`` file (leaving
+the log files where they are), you can pass the ``-a`` (or ``--save-xml``)
+option instead of the ``-A`` (or ``--save``) option.
+
+Analyze Results from a Container Run
+""""""""""""""""""""""""""""""""""""
+
+``analyze.py`` can also be used with ``docker`` or ``podman`` containers.
+Everything works exactly as with a host run except that you specify the name of
+the container, or the container-id, using the ``-C`` or ``--container`` option.
+``analyze.py`` will then use the results inside that container's
+``/tmp/topotests`` directory. It will extract and save those results when you
+pass the ``-A`` or ``-a`` options just as with the host results.
+
Execute single test
^^^^^^^^^^^^^^^^^^^
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 28cf5d0ab1..65befaccba 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -166,15 +166,15 @@ as early as possible, i.e. the first 2-week window.
For reference, the expected release schedule according to the above is:
-+---------+------------+------------+------------+------------+------------+
-| Release | 2023-03-07 | 2023-07-04 | 2023-10-31 | 2024-02-27 | 2024-06-25 |
-+---------+------------+------------+------------+------------+------------+
-| RC | 2023-02-21 | 2023-06-20 | 2023-10-17 | 2024-02-13 | 2024-06-11 |
-+---------+------------+------------+------------+------------+------------+
-| dev/X.Y | 2023-02-07 | 2023-06-06 | 2023-10-03 | 2024-01-30 | 2024-05-28 |
-+---------+------------+------------+------------+------------+------------+
-| freeze | 2023-01-24 | 2023-05-23 | 2023-09-19 | 2024-01-16 | 2024-05-14 |
-+---------+------------+------------+------------+------------+------------+
++---------+------------+------------+------------+
+| Release | 2023-07-04 | 2023-10-31 | 2024-02-27 |
++---------+------------+------------+------------+
+| RC | 2023-06-20 | 2023-10-17 | 2024-02-13 |
++---------+------------+------------+------------+
+| dev/X.Y | 2023-06-06 | 2023-10-03 | 2024-01-30 |
++---------+------------+------------+------------+
+| freeze | 2023-05-23 | 2023-09-19 | 2024-01-16 |
++---------+------------+------------+------------+
Here is the hint on how to get the dates easily:
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index f045ca239e..a2585f3a57 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -455,7 +455,7 @@ Administrative Distance Metrics
.. _bgp-requires-policy:
Require policy on EBGP
--------------------------------
+----------------------
.. clicmd:: bgp ebgp-requires-policy
@@ -1729,6 +1729,12 @@ Configuring Peers
and will not be displayed as part of a `show run`. The no form
of the command turns off this ability.
+.. clicmd:: bgp default-originate timer (0-3600)
+
+ Set the period to rerun the default-originate route-map scanner process. The
+ default is 5 seconds. With a full routing table, it might be useful to increase
+ this setting to avoid scanning the whole BGP table aggressively.
+
.. clicmd:: bgp default ipv4-unicast
This command allows the user to specify that the IPv4 Unicast address
@@ -3882,6 +3888,12 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
Total number of neighbors 1
exit1#
+If PfxRcd and/or PfxSnt is shown as ``(Policy)``, that means that the EBGP
+default policy is turned on, but you don't have any filters applied for
+incoming/outgoing directions.
+
+.. seealso:: :ref:`bgp-requires-policy`
+
.. clicmd:: show bgp [afi] [safi] [all] [wide|json]
.. clicmd:: show bgp vrfs [<VRFNAME$vrf_name>] [json]
diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c
index 2237a611e8..a056267bf7 100644
--- a/eigrpd/eigrp_update.c
+++ b/eigrpd/eigrp_update.c
@@ -842,9 +842,6 @@ static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr)
eigrp_fsm_event(&fsm_msg);
}
- /* NULL the pointer */
- dest_addr = NULL;
-
/* delete processed prefix from list */
listnode_delete(prefixes, pe);
diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c
index de467c8262..e114467e07 100644
--- a/isisd/isis_spf.c
+++ b/isisd/isis_spf.c
@@ -1843,6 +1843,9 @@ void isis_run_spf(struct isis_spftree *spftree)
struct timeval time_end;
struct isis_mt_router_info *mt_router_info;
uint16_t mtid = 0;
+#ifndef FABRICD
+ bool flex_algo_enabled;
+#endif /* ifndef FABRICD */
/* Get time that can't roll backwards. */
monotime(&time_start);
@@ -1885,16 +1888,27 @@ void isis_run_spf(struct isis_spftree *spftree)
* not supported by the node, it MUST stop participating in such
* Flexible-Algorithm.
*/
- if (flex_algo_id_valid(spftree->algorithm) &&
- !flex_algo_get_state(spftree->area->flex_algos,
- spftree->algorithm)) {
- if (!CHECK_FLAG(spftree->flags, F_SPFTREE_DISABLED)) {
- isis_spftree_clear(spftree);
- SET_FLAG(spftree->flags, F_SPFTREE_DISABLED);
+ if (flex_algo_id_valid(spftree->algorithm)) {
+ flex_algo_enabled = isis_flex_algo_elected_supported(
+ spftree->algorithm, spftree->area);
+ if (flex_algo_enabled !=
+ flex_algo_get_state(spftree->area->flex_algos,
+ spftree->algorithm)) {
+ /* actual state is inconsistent with local LSP */
lsp_regenerate_schedule(spftree->area,
spftree->area->is_type, 0);
+ goto out;
+ }
+ if (!flex_algo_enabled) {
+ if (!CHECK_FLAG(spftree->flags, F_SPFTREE_DISABLED)) {
+ isis_spftree_clear(spftree);
+ SET_FLAG(spftree->flags, F_SPFTREE_DISABLED);
+ lsp_regenerate_schedule(spftree->area,
+ spftree->area->is_type,
+ 0);
+ }
+ goto out;
}
- goto out;
}
#endif /* ifndef FABRICD */
diff --git a/ldpd/hello.c b/ldpd/hello.c
index 83c0b2f8ca..0b07f24b45 100644
--- a/ldpd/hello.c
+++ b/ldpd/hello.c
@@ -41,8 +41,8 @@ send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr)
/* multicast destination address */
switch (af) {
case AF_INET:
- if (!(leconf->ipv4.flags & F_LDPD_AF_NO_GTSM))
- flags |= F_HELLO_GTSM;
+ if (!CHECK_FLAG(leconf->ipv4.flags, F_LDPD_AF_NO_GTSM))
+ SET_FLAG(flags, F_HELLO_GTSM);
dst.v4 = global.mcast_addr_v4;
break;
case AF_INET6:
@@ -56,9 +56,11 @@ send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr)
af = tnbr->af;
holdtime = tnbr_get_hello_holdtime(tnbr);
flags = F_HELLO_TARGETED;
- if ((tnbr->flags & F_TNBR_CONFIGURED) || tnbr->pw_count
- || tnbr->rlfa_count)
+ if (CHECK_FLAG(tnbr->flags, F_TNBR_CONFIGURED) ||
+ tnbr->pw_count ||
+ tnbr->rlfa_count)
flags |= F_HELLO_REQ_TARG;
+
fd = (ldp_af_global_get(&global, af))->ldp_edisc_socket;
/* unicast destination address */
@@ -88,10 +90,10 @@ send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr)
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_HELLO, size);
- err |= gen_hello_prms_tlv(buf, holdtime, flags);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_HELLO, size));
+ SET_FLAG(err, gen_hello_prms_tlv(buf, holdtime, flags));
/*
* RFC 7552 - Section 6.1:
@@ -101,19 +103,19 @@ send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr)
*/
switch (af) {
case AF_INET:
- err |= gen_opt4_hello_prms_tlv(buf, TLV_TYPE_IPV4TRANSADDR,
- leconf->ipv4.trans_addr.v4.s_addr);
+ SET_FLAG(err, gen_opt4_hello_prms_tlv(buf, TLV_TYPE_IPV4TRANSADDR,
+ leconf->ipv4.trans_addr.v4.s_addr));
break;
case AF_INET6:
- err |= gen_opt16_hello_prms_tlv(buf, TLV_TYPE_IPV6TRANSADDR,
- leconf->ipv6.trans_addr.v6.s6_addr);
+ SET_FLAG(err, gen_opt16_hello_prms_tlv(buf, TLV_TYPE_IPV6TRANSADDR,
+ leconf->ipv6.trans_addr.v6.s6_addr));
break;
default:
fatalx("send_hello: unknown af");
}
- err |= gen_opt4_hello_prms_tlv(buf, TLV_TYPE_CONFIG,
- htonl(global.conf_seqnum));
+ SET_FLAG(err, gen_opt4_hello_prms_tlv(buf, TLV_TYPE_CONFIG,
+ htonl(global.conf_seqnum)));
/*
* RFC 7552 - Section 6.1.1:
@@ -121,7 +123,7 @@ send_hello(enum hello_type type, struct iface_af *ia, struct tnbr *tnbr)
* MUST include the Dual-Stack capability TLV in all of its LDP Hellos".
*/
if (ldp_is_dual_stack(leconf))
- err |= gen_ds_hello_prms_tlv(buf, leconf->trans_pref);
+ SET_FLAG(err, gen_ds_hello_prms_tlv(buf, leconf->trans_pref));
if (err) {
ibuf_free(buf);
@@ -169,8 +171,7 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
r = tlv_decode_hello_prms(buf, len, &holdtime, &flags);
if (r == -1) {
- log_debug("%s: lsr-id %pI4: failed to decode params", __func__,
- &lsr_id);
+ log_debug("%s: lsr-id %pI4: failed to decode params", __func__, &lsr_id);
return;
}
/* safety checks */
@@ -179,14 +180,12 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
__func__, &lsr_id, holdtime);
return;
}
- if (multicast && (flags & F_HELLO_TARGETED)) {
- log_debug("%s: lsr-id %pI4: multicast targeted hello", __func__,
- &lsr_id);
+ if (multicast && CHECK_FLAG(flags, F_HELLO_TARGETED)) {
+ log_debug("%s: lsr-id %pI4: multicast targeted hello", __func__, &lsr_id);
return;
}
- if (!multicast && !((flags & F_HELLO_TARGETED))) {
- log_debug("%s: lsr-id %pI4: unicast link hello", __func__,
- &lsr_id);
+ if (!multicast && !CHECK_FLAG(flags, F_HELLO_TARGETED)) {
+ log_debug("%s: lsr-id %pI4: unicast link hello", __func__, &lsr_id);
return;
}
buf += r;
@@ -204,10 +203,10 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
__func__, &lsr_id);
return;
}
- ds_tlv = (tlvs_rcvd & F_HELLO_TLV_RCVD_DS) ? 1 : 0;
+ ds_tlv = CHECK_FLAG(tlvs_rcvd, F_HELLO_TLV_RCVD_DS) ? 1 : 0;
/* implicit transport address */
- if (!(tlvs_rcvd & F_HELLO_TLV_RCVD_ADDR))
+ if (!CHECK_FLAG(tlvs_rcvd, F_HELLO_TLV_RCVD_ADDR))
trans_addr = *src;
if (bad_addr(af, &trans_addr)) {
log_debug("%s: lsr-id %pI4: invalid transport address %s",
@@ -223,7 +222,7 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
* (i.e., MUST discard the targeted Hello if it failed the
* check)".
*/
- if (flags & F_HELLO_TARGETED) {
+ if (CHECK_FLAG(flags, F_HELLO_TARGETED)) {
log_debug("%s: lsr-id %pI4: invalid targeted hello transport address %s", __func__, &lsr_id,
log_addr(af, &trans_addr));
return;
@@ -232,7 +231,7 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
}
memset(&source, 0, sizeof(source));
- if (flags & F_HELLO_TARGETED) {
+ if (CHECK_FLAG(flags, F_HELLO_TARGETED)) {
/*
* RFC 7552 - Section 5.2:
* "The link-local IPv6 addresses MUST NOT be used as the
@@ -247,26 +246,27 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
tnbr = tnbr_find(leconf, af, src);
/* remove the dynamic tnbr if the 'R' bit was cleared */
- if (tnbr && (tnbr->flags & F_TNBR_DYNAMIC) &&
- !((flags & F_HELLO_REQ_TARG))) {
- tnbr->flags &= ~F_TNBR_DYNAMIC;
+ if (tnbr &&
+ CHECK_FLAG(tnbr->flags, F_TNBR_DYNAMIC) &&
+ !CHECK_FLAG(flags, F_HELLO_REQ_TARG)) {
+ UNSET_FLAG(tnbr->flags, F_TNBR_DYNAMIC);
tnbr = tnbr_check(leconf, tnbr);
}
if (!tnbr) {
struct ldpd_af_conf *af_conf;
- if (!(flags & F_HELLO_REQ_TARG))
+ if (!CHECK_FLAG(flags, F_HELLO_REQ_TARG))
return;
af_conf = ldp_af_conf_get(leconf, af);
- if (!(af_conf->flags & F_LDPD_AF_THELLO_ACCEPT))
+ if (!CHECK_FLAG(af_conf->flags, F_LDPD_AF_THELLO_ACCEPT))
return;
if (ldpe_acl_check(af_conf->acl_thello_accept_from, af,
src, (af == AF_INET) ? 32 : 128) != FILTER_PERMIT)
return;
tnbr = tnbr_new(af, src);
- tnbr->flags |= F_TNBR_DYNAMIC;
+ SET_FLAG(tnbr->flags, F_TNBR_DYNAMIC);
tnbr_update(tnbr);
RB_INSERT(tnbr_head, &leconf->tnbr_tree, tnbr);
}
@@ -308,8 +308,7 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
*/
log_debug("%s: lsr-id %pI4: remote transport preference does not match the local preference", __func__, &lsr_id);
if (nbr)
- session_shutdown(nbr, S_TRANS_MISMTCH, msg->id,
- msg->type);
+ session_shutdown(nbr, S_TRANS_MISMTCH, msg->id, msg->type);
if (adj)
adj_del(adj, S_SHUTDOWN);
return;
@@ -323,15 +322,13 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
switch (af) {
case AF_INET:
if (nbr_adj_count(nbr, AF_INET6) > 0) {
- session_shutdown(nbr, S_DS_NONCMPLNCE,
- msg->id, msg->type);
+ session_shutdown(nbr, S_DS_NONCMPLNCE, msg->id, msg->type);
return;
}
break;
case AF_INET6:
if (nbr_adj_count(nbr, AF_INET) > 0) {
- session_shutdown(nbr, S_DS_NONCMPLNCE,
- msg->id, msg->type);
+ session_shutdown(nbr, S_DS_NONCMPLNCE, msg->id, msg->type);
return;
}
break;
@@ -384,16 +381,15 @@ recv_hello(struct in_addr lsr_id, struct ldp_msg *msg, int af,
/* dynamic LDPv4 GTSM negotiation as per RFC 6720 */
if (nbr) {
- if (flags & F_HELLO_GTSM)
- nbr->flags |= F_NBR_GTSM_NEGOTIATED;
+ if (CHECK_FLAG(flags, F_HELLO_GTSM))
+ SET_FLAG(nbr->flags, F_NBR_GTSM_NEGOTIATED);
else
- nbr->flags &= ~F_NBR_GTSM_NEGOTIATED;
+ UNSET_FLAG(nbr->flags, F_NBR_GTSM_NEGOTIATED);
}
/* update neighbor's configuration sequence number */
if (nbr && (tlvs_rcvd & F_HELLO_TLV_RCVD_CONF)) {
- if (conf_seqnum > nbr->conf_seqnum &&
- nbr_pending_idtimer(nbr))
+ if (conf_seqnum > nbr->conf_seqnum && nbr_pending_idtimer(nbr))
nbr_stop_idtimer(nbr);
nbr->conf_seqnum = conf_seqnum;
}
@@ -465,7 +461,7 @@ gen_opt16_hello_prms_tlv(struct ibuf *buf, uint16_t type, uint8_t *value)
static int
gen_ds_hello_prms_tlv(struct ibuf *buf, uint32_t value)
{
- if (leconf->flags & F_LDPD_DS_CISCO_INTEROP)
+ if (CHECK_FLAG(leconf->flags, F_LDPD_DS_CISCO_INTEROP))
value = htonl(value);
else
value = htonl(value << 28);
@@ -533,26 +529,26 @@ tlv_decode_opt_hello_prms(char *buf, uint16_t len, int *tlvs_rcvd, int af,
return (-1);
if (af != AF_INET)
return (-1);
- if (*tlvs_rcvd & F_HELLO_TLV_RCVD_ADDR)
+ if (CHECK_FLAG(*tlvs_rcvd, F_HELLO_TLV_RCVD_ADDR))
break;
memcpy(&addr->v4, buf, sizeof(addr->v4));
- *tlvs_rcvd |= F_HELLO_TLV_RCVD_ADDR;
+ SET_FLAG(*tlvs_rcvd, F_HELLO_TLV_RCVD_ADDR);
break;
case TLV_TYPE_IPV6TRANSADDR:
if (tlv_len != sizeof(addr->v6))
return (-1);
if (af != AF_INET6)
return (-1);
- if (*tlvs_rcvd & F_HELLO_TLV_RCVD_ADDR)
+ if (CHECK_FLAG(*tlvs_rcvd, F_HELLO_TLV_RCVD_ADDR))
break;
memcpy(&addr->v6, buf, sizeof(addr->v6));
- *tlvs_rcvd |= F_HELLO_TLV_RCVD_ADDR;
+ SET_FLAG(*tlvs_rcvd, F_HELLO_TLV_RCVD_ADDR);
break;
case TLV_TYPE_CONFIG:
if (tlv_len != sizeof(uint32_t))
return (-1);
memcpy(conf_number, buf, sizeof(uint32_t));
- *tlvs_rcvd |= F_HELLO_TLV_RCVD_CONF;
+ SET_FLAG(*tlvs_rcvd, F_HELLO_TLV_RCVD_CONF);
break;
case TLV_TYPE_DUALSTACK:
if (tlv_len != sizeof(uint32_t))
@@ -566,19 +562,18 @@ tlv_decode_opt_hello_prms(char *buf, uint16_t len, int *tlvs_rcvd, int af,
if (!ldp_is_dual_stack(leconf))
break;
/* Shame on you, Cisco! */
- if (leconf->flags & F_LDPD_DS_CISCO_INTEROP) {
- memcpy(trans_pref, buf + sizeof(uint16_t),
- sizeof(uint16_t));
+ if (CHECK_FLAG(leconf->flags, F_LDPD_DS_CISCO_INTEROP)) {
+ memcpy(trans_pref, buf + sizeof(uint16_t), sizeof(uint16_t));
*trans_pref = ntohs(*trans_pref);
} else {
memcpy(trans_pref, buf , sizeof(uint16_t));
*trans_pref = ntohs(*trans_pref) >> 12;
}
- *tlvs_rcvd |= F_HELLO_TLV_RCVD_DS;
+ SET_FLAG(*tlvs_rcvd, F_HELLO_TLV_RCVD_DS);
break;
default:
/* if unknown flag set, ignore TLV */
- if (!(ntohs(tlv.type) & UNKNOWN_FLAG))
+ if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG))
return (-1);
break;
}
diff --git a/ldpd/init.c b/ldpd/init.c
index 15d653b747..f0cb98e5c0 100644
--- a/ldpd/init.c
+++ b/ldpd/init.c
@@ -31,13 +31,13 @@ send_init(struct nbr *nbr)
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_INIT, size);
- err |= gen_init_prms_tlv(buf, nbr);
- err |= gen_cap_dynamic_tlv(buf);
- err |= gen_cap_twcard_tlv(buf, 1);
- err |= gen_cap_unotif_tlv(buf, 1);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_INIT, size));
+ SET_FLAG(err, gen_init_prms_tlv(buf, nbr));
+ SET_FLAG(err, gen_cap_dynamic_tlv(buf));
+ SET_FLAG(err, gen_cap_twcard_tlv(buf, 1));
+ SET_FLAG(err, gen_cap_unotif_tlv(buf, 1));
if (err) {
ibuf_free(buf);
return;
@@ -121,62 +121,56 @@ recv_init(struct nbr *nbr, char *buf, uint16_t len)
return (-1);
case TLV_TYPE_DYNAMIC_CAP:
if (tlv_len != CAP_TLV_DYNAMIC_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_DYNAMIC) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_DYNAMIC)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_DYNAMIC;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_DYNAMIC);
- nbr->flags |= F_NBR_CAP_DYNAMIC;
+ SET_FLAG(nbr->flags, F_NBR_CAP_DYNAMIC);
log_debug("%s: lsr-id %pI4 announced the Dynamic Capability Announcement capability", __func__,
&nbr->id);
break;
case TLV_TYPE_TWCARD_CAP:
if (tlv_len != CAP_TLV_TWCARD_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_TWCARD) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_TWCARD;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD);
- nbr->flags |= F_NBR_CAP_TWCARD;
+ SET_FLAG(nbr->flags, F_NBR_CAP_TWCARD);
log_debug("%s: lsr-id %pI4 announced the Typed Wildcard FEC capability", __func__, &nbr->id);
break;
case TLV_TYPE_UNOTIF_CAP:
if (tlv_len != CAP_TLV_UNOTIF_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_UNOTIF) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_UNOTIF;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF);
- nbr->flags |= F_NBR_CAP_UNOTIF;
+ SET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF);
log_debug("%s: lsr-id %pI4 announced the Unrecognized Notification capability", __func__,
&nbr->id);
break;
default:
- if (!(ntohs(tlv.type) & UNKNOWN_FLAG))
+ if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG))
send_notification_rtlvs(nbr, S_UNSSUPORTDCAP,
msg.id, msg.type, tlv_type, tlv_len, buf);
/* ignore unknown tlv */
@@ -217,16 +211,16 @@ send_capability(struct nbr *nbr, uint16_t capability, int enable)
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_CAPABILITY, size);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_CAPABILITY, size));
switch (capability) {
case TLV_TYPE_TWCARD_CAP:
- err |= gen_cap_twcard_tlv(buf, enable);
+ SET_FLAG(err, gen_cap_twcard_tlv(buf, enable));
break;
case TLV_TYPE_UNOTIF_CAP:
- err |= gen_cap_unotif_tlv(buf, enable);
+ SET_FLAG(err, gen_cap_unotif_tlv(buf, enable));
break;
case TLV_TYPE_DYNAMIC_CAP:
/*
@@ -288,52 +282,47 @@ recv_capability(struct nbr *nbr, char *buf, uint16_t len)
switch (tlv_type) {
case TLV_TYPE_TWCARD_CAP:
if (tlv_len != CAP_TLV_TWCARD_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_TWCARD) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_TWCARD;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_TWCARD);
memcpy(&reserved, buf, sizeof(reserved));
enable = reserved & STATE_BIT;
if (enable)
- nbr->flags |= F_NBR_CAP_TWCARD;
+ SET_FLAG(nbr->flags, F_NBR_CAP_TWCARD);
else
- nbr->flags &= ~F_NBR_CAP_TWCARD;
+ UNSET_FLAG(nbr->flags, F_NBR_CAP_TWCARD);
log_debug("%s: lsr-id %pI4 %s the Typed Wildcard FEC capability", __func__, &nbr->id,
(enable) ? "announced" : "withdrew");
break;
case TLV_TYPE_UNOTIF_CAP:
if (tlv_len != CAP_TLV_UNOTIF_LEN) {
- session_shutdown(nbr, S_BAD_TLV_LEN, msg.id,
- msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
- if (caps_rcvd & F_CAP_TLV_RCVD_UNOTIF) {
- session_shutdown(nbr, S_BAD_TLV_VAL, msg.id,
- msg.type);
+ if (CHECK_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF)) {
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
- caps_rcvd |= F_CAP_TLV_RCVD_UNOTIF;
+ SET_FLAG(caps_rcvd, F_CAP_TLV_RCVD_UNOTIF);
memcpy(&reserved, buf, sizeof(reserved));
enable = reserved & STATE_BIT;
if (enable)
- nbr->flags |= F_NBR_CAP_UNOTIF;
+ SET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF);
else
- nbr->flags &= ~F_NBR_CAP_UNOTIF;
+ UNSET_FLAG(nbr->flags, F_NBR_CAP_UNOTIF);
log_debug("%s: lsr-id %pI4 %s the Unrecognized Notification capability", __func__,
- &nbr->id, (enable) ? "announced" :
- "withdrew");
+ &nbr->id, (enable) ? "announced" : "withdrew");
break;
case TLV_TYPE_DYNAMIC_CAP:
/*
@@ -346,7 +335,7 @@ recv_capability(struct nbr *nbr, char *buf, uint16_t len)
*/
/* FALLTHROUGH */
default:
- if (!(ntohs(tlv.type) & UNKNOWN_FLAG))
+ if (!CHECK_FLAG(ntohs(tlv.type), UNKNOWN_FLAG))
send_notification_rtlvs(nbr, S_UNSSUPORTDCAP,
msg.id, msg.type, tlv_type, tlv_len, buf);
/* ignore unknown tlv */
diff --git a/ldpd/l2vpn.c b/ldpd/l2vpn.c
index 4664b1f894..ce038acdcb 100644
--- a/ldpd/l2vpn.c
+++ b/ldpd/l2vpn.c
@@ -161,7 +161,7 @@ l2vpn_if_update(struct l2vpn_if *lif)
fec.type = MAP_TYPE_PWID;
fec.fec.pwid.type = l2vpn->pw_type;
fec.fec.pwid.group_id = 0;
- fec.flags |= F_MAP_PW_ID;
+ SET_FLAG(fec.flags, F_MAP_PW_ID);
fec.fec.pwid.pwid = pw->pwid;
send_mac_withdrawal(nbr, &fec, lif->mac);
@@ -274,17 +274,17 @@ l2vpn_pw_reset(struct l2vpn_pw *pw)
pw->local_status = PW_FORWARDING;
pw->remote_status = PW_NOT_FORWARDING;
- if (pw->flags & F_PW_CWORD_CONF)
- pw->flags |= F_PW_CWORD;
+ if (CHECK_FLAG(pw->flags, F_PW_CWORD_CONF))
+ SET_FLAG(pw->flags, F_PW_CWORD);
else
- pw->flags &= ~F_PW_CWORD;
+ UNSET_FLAG(pw->flags, F_PW_CWORD);
- if (pw->flags & F_PW_STATUSTLV_CONF)
- pw->flags |= F_PW_STATUSTLV;
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV_CONF))
+ SET_FLAG(pw->flags, F_PW_STATUSTLV);
else
- pw->flags &= ~F_PW_STATUSTLV;
+ UNSET_FLAG(pw->flags, F_PW_STATUSTLV);
- if (pw->flags & F_PW_STATUSTLV_CONF) {
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV_CONF)) {
struct fec_node *fn;
struct fec fec;
l2vpn_pw_fec(pw, &fec);
@@ -300,8 +300,7 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
{
/* check for a remote label */
if (fnh->remote_label == NO_LABEL) {
- log_warnx("%s: pseudowire %s: no remote label", __func__,
- pw->ifname);
+ log_warnx("%s: pseudowire %s: no remote label", __func__, pw->ifname);
pw->reason = F_PW_NO_REMOTE_LABEL;
return (0);
}
@@ -315,10 +314,9 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
}
/* check pw status if applicable */
- if ((pw->flags & F_PW_STATUSTLV) &&
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV) &&
pw->remote_status != PW_FORWARDING) {
- log_warnx("%s: pseudowire %s: remote end is down", __func__,
- pw->ifname);
+ log_warnx("%s: pseudowire %s: remote end is down", __func__, pw->ifname);
pw->reason = F_PW_REMOTE_NOT_FWD;
return (0);
}
@@ -345,34 +343,34 @@ l2vpn_pw_negotiate(struct lde_nbr *ln, struct fec_node *fn, struct map *map)
/* RFC4447 - Section 6.2: control word negotiation */
if (fec_find(&ln->sent_map, &fn->fec)) {
- if ((map->flags & F_MAP_PW_CWORD) &&
- !(pw->flags & F_PW_CWORD_CONF)) {
+ if (CHECK_FLAG(map->flags, F_MAP_PW_CWORD) &&
+ !CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) {
/* ignore the received label mapping */
return (1);
- } else if (!(map->flags & F_MAP_PW_CWORD) &&
- (pw->flags & F_PW_CWORD_CONF)) {
+ } else if (!CHECK_FLAG(map->flags, F_MAP_PW_CWORD) &&
+ CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) {
/* append a "Wrong C-bit" status code */
st.status_code = S_WRONG_CBIT;
st.msg_id = map->msg_id;
st.msg_type = htons(MSG_TYPE_LABELMAPPING);
lde_send_labelwithdraw(ln, fn, NULL, &st);
- pw->flags &= ~F_PW_CWORD;
+ UNSET_FLAG(pw->flags, F_PW_CWORD);
lde_send_labelmapping(ln, fn, 1);
}
- } else if (map->flags & F_MAP_PW_CWORD) {
- if (pw->flags & F_PW_CWORD_CONF)
- pw->flags |= F_PW_CWORD;
+ } else if (CHECK_FLAG(map->flags, F_MAP_PW_CWORD)) {
+ if (CHECK_FLAG(pw->flags, F_PW_CWORD_CONF))
+ SET_FLAG(pw->flags, F_PW_CWORD);
else
/* act as if no label mapping had been received */
return (1);
} else
- pw->flags &= ~F_PW_CWORD;
+ UNSET_FLAG(pw->flags, F_PW_CWORD);
/* RFC4447 - Section 5.4.3: pseudowire status negotiation */
if (fec_find(&ln->recv_map, &fn->fec) == NULL &&
- !(map->flags & F_MAP_PW_STATUS))
- pw->flags &= ~F_PW_STATUSTLV;
+ !CHECK_FLAG(map->flags, F_MAP_PW_STATUS))
+ UNSET_FLAG(pw->flags, F_PW_STATUSTLV);
return (0);
}
@@ -385,12 +383,11 @@ l2vpn_send_pw_status(struct lde_nbr *ln, uint32_t status, struct fec *fec)
memset(&nm, 0, sizeof(nm));
nm.status_code = S_PW_STATUS;
nm.pw_status = status;
- nm.flags |= F_NOTIF_PW_STATUS;
+ SET_FLAG(nm.flags, F_NOTIF_PW_STATUS);
lde_fec2map(fec, &nm.fec);
- nm.flags |= F_NOTIF_FEC;
+ SET_FLAG(nm.flags, F_NOTIF_FEC);
- lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm,
- sizeof(nm));
+ lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, sizeof(nm));
}
void
@@ -402,14 +399,13 @@ l2vpn_send_pw_status_wcard(struct lde_nbr *ln, uint32_t status,
memset(&nm, 0, sizeof(nm));
nm.status_code = S_PW_STATUS;
nm.pw_status = status;
- nm.flags |= F_NOTIF_PW_STATUS;
+ SET_FLAG(nm.flags, F_NOTIF_PW_STATUS);
nm.fec.type = MAP_TYPE_PWID;
nm.fec.fec.pwid.type = pw_type;
nm.fec.fec.pwid.group_id = group_id;
- nm.flags |= F_NOTIF_FEC;
+ SET_FLAG(nm.flags, F_NOTIF_FEC);
- lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm,
- sizeof(nm));
+ lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, sizeof(nm));
}
void
@@ -421,7 +417,7 @@ l2vpn_recv_pw_status(struct lde_nbr *ln, struct notify_msg *nm)
struct l2vpn_pw *pw;
if (nm->fec.type == MAP_TYPE_TYPED_WCARD ||
- !(nm->fec.flags & F_MAP_PW_ID)) {
+ !CHECK_FLAG(nm->fec.flags, F_MAP_PW_ID)) {
l2vpn_recv_pw_status_wcard(ln, nm);
return;
}
@@ -540,7 +536,7 @@ l2vpn_pw_status_update(struct zapi_pw_status *zpw)
if (ln == NULL)
return (0);
l2vpn_pw_fec(pw, &fec);
- if (pw->flags & F_PW_STATUSTLV)
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV))
l2vpn_send_pw_status(ln, local_status, &fec);
else {
struct fec_node *fn;
@@ -611,8 +607,7 @@ l2vpn_binding_ctl(pid_t pid)
pwctl.local_label = fn->local_label;
pwctl.local_gid = 0;
pwctl.local_ifmtu = pw->l2vpn->mtu;
- pwctl.local_cword = (pw->flags & F_PW_CWORD_CONF) ?
- 1 : 0;
+ pwctl.local_cword = CHECK_FLAG(pw->flags, F_PW_CWORD_CONF) ? 1 : 0;
pwctl.reason = pw->reason;
} else
pwctl.local_label = NO_LABEL;
@@ -624,11 +619,10 @@ l2vpn_binding_ctl(pid_t pid)
if (me) {
pwctl.remote_label = me->map.label;
pwctl.remote_gid = me->map.fec.pwid.group_id;
- if (me->map.flags & F_MAP_PW_IFMTU)
+ if (CHECK_FLAG(me->map.flags, F_MAP_PW_IFMTU))
pwctl.remote_ifmtu = me->map.fec.pwid.ifmtu;
if (pw)
- pwctl.remote_cword = (pw->flags & F_PW_CWORD) ?
- 1 : 0;
+ pwctl.remote_cword = CHECK_FLAG(pw->flags, F_PW_CWORD) ? 1 : 0;
lde_imsg_compose_ldpe(IMSG_CTL_SHOW_L2VPN_BINDING,
0, pid, &pwctl, sizeof(pwctl));
diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c
index e3ace30582..2010829035 100644
--- a/ldpd/ldp_zebra.c
+++ b/ldpd/ldp_zebra.c
@@ -22,8 +22,7 @@
#include "ldp_debug.h"
static void ifp2kif(struct interface *, struct kif *);
-static void ifc2kaddr(struct interface *, struct connected *,
- struct kaddr *);
+static void ifc2kaddr(struct interface *, struct connected *, struct kaddr *);
static int ldp_zebra_send_mpls_labels(int, struct kroute *);
static int ldp_router_id_update(ZAPI_CALLBACK_ARGS);
static int ldp_interface_address_add(ZAPI_CALLBACK_ARGS);
@@ -295,8 +294,7 @@ kmpw_add(struct zapi_pw *zpw)
debug_zebra_out("pseudowire %s nexthop %s (add)",
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop));
- return zebra_send_pw(zclient, ZEBRA_PW_ADD, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_ADD, zpw) == ZCLIENT_SEND_FAILURE;
}
int
@@ -305,8 +303,7 @@ kmpw_del(struct zapi_pw *zpw)
debug_zebra_out("pseudowire %s nexthop %s (del)",
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop));
- return zebra_send_pw(zclient, ZEBRA_PW_DELETE, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_DELETE, zpw) == ZCLIENT_SEND_FAILURE;
}
int
@@ -316,8 +313,7 @@ kmpw_set(struct zapi_pw *zpw)
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop),
zpw->local_label, zpw->remote_label);
- return zebra_send_pw(zclient, ZEBRA_PW_SET, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_SET, zpw) == ZCLIENT_SEND_FAILURE;
}
int
@@ -326,8 +322,7 @@ kmpw_unset(struct zapi_pw *zpw)
debug_zebra_out("pseudowire %s nexthop %s (unset)",
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop));
- return zebra_send_pw(zclient, ZEBRA_PW_UNSET, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_UNSET, zpw) == ZCLIENT_SEND_FAILURE;
}
void
diff --git a/ldpd/notification.c b/ldpd/notification.c
index af5bb267d7..1709098d09 100644
--- a/ldpd/notification.c
+++ b/ldpd/notification.c
@@ -25,28 +25,28 @@ send_notification_full(struct tcp_conn *tcp, struct notify_msg *nm)
/* calculate size */
size = LDP_HDR_SIZE + LDP_MSG_SIZE + STATUS_SIZE;
- if (nm->flags & F_NOTIF_PW_STATUS)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS))
size += PW_STATUS_TLV_SIZE;
- if (nm->flags & F_NOTIF_FEC)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_FEC))
size += len_fec_tlv(&nm->fec);
- if (nm->flags & F_NOTIF_RETURNED_TLVS)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_RETURNED_TLVS))
size += TLV_HDR_SIZE * 2 + nm->rtlvs.length;
if ((buf = ibuf_open(size)) == NULL)
fatal(__func__);
- err |= gen_ldp_hdr(buf, size);
+ SET_FLAG(err, gen_ldp_hdr(buf, size));
size -= LDP_HDR_SIZE;
- err |= gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size);
- err |= gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type);
+ SET_FLAG(err, gen_msg_hdr(buf, MSG_TYPE_NOTIFICATION, size));
+ SET_FLAG(err, gen_status_tlv(buf, nm->status_code, nm->msg_id, nm->msg_type));
/* optional tlvs */
- if (nm->flags & F_NOTIF_PW_STATUS)
- err |= gen_pw_status_tlv(buf, nm->pw_status);
- if (nm->flags & F_NOTIF_FEC)
- err |= gen_fec_tlv(buf, &nm->fec);
- if (nm->flags & F_NOTIF_RETURNED_TLVS)
- err |= gen_returned_tlvs(buf, nm->rtlvs.type, nm->rtlvs.length,
- nm->rtlvs.data);
+ if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS))
+ SET_FLAG(err, gen_pw_status_tlv(buf, nm->pw_status));
+ if (CHECK_FLAG(nm->flags, F_NOTIF_FEC))
+ SET_FLAG(err, gen_fec_tlv(buf, &nm->fec));
+ if (CHECK_FLAG(nm->flags, F_NOTIF_RETURNED_TLVS))
+ SET_FLAG(err, gen_returned_tlvs(buf, nm->rtlvs.type, nm->rtlvs.length,
+ nm->rtlvs.data));
if (err) {
ibuf_free(buf);
return;
@@ -121,7 +121,7 @@ send_notification_rtlvs(struct nbr *nbr, uint32_t status_code, uint32_t msg_id,
nm.rtlvs.type = tlv_type;
nm.rtlvs.length = tlv_len;
nm.rtlvs.data = tlv_data;
- nm.flags |= F_NOTIF_RETURNED_TLVS;
+ SET_FLAG(nm.flags, F_NOTIF_RETURNED_TLVS);
}
send_notification_full(nbr->tcp, &nm);
@@ -189,13 +189,12 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
break;
case TLV_TYPE_PW_STATUS:
if (tlv_len != 4) {
- session_shutdown(nbr, S_BAD_TLV_LEN,
- msg.id, msg.type);
+ session_shutdown(nbr, S_BAD_TLV_LEN, msg.id, msg.type);
return (-1);
}
nm.pw_status = ntohl(*(uint32_t *)buf);
- nm.flags |= F_NOTIF_PW_STATUS;
+ SET_FLAG(nm.flags, F_NOTIF_PW_STATUS);
break;
case TLV_TYPE_FEC:
if ((tlen = tlv_decode_fec_elm(nbr, &msg, buf,
@@ -203,12 +202,11 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
return (-1);
/* allow only one fec element */
if (tlen != tlv_len) {
- session_shutdown(nbr, S_BAD_TLV_VAL,
- msg.id, msg.type);
+ session_shutdown(nbr, S_BAD_TLV_VAL, msg.id, msg.type);
leconf->stats.bad_tlv_len++;
return (-1);
}
- nm.flags |= F_NOTIF_FEC;
+ SET_FLAG(nm.flags, F_NOTIF_FEC);
break;
default:
if (!(ntohs(tlv.type) & UNKNOWN_FLAG)) {
@@ -226,9 +224,8 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
/* sanity checks */
switch (nm.status_code) {
case S_PW_STATUS:
- if (!(nm.flags & (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) {
- send_notification(nbr->tcp, S_MISS_MSG,
- msg.id, msg.type);
+ if (!CHECK_FLAG(nm.flags, (F_NOTIF_PW_STATUS|F_NOTIF_FEC))) {
+ send_notification(nbr->tcp, S_MISS_MSG, msg.id, msg.type);
return (-1);
}
@@ -236,20 +233,17 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
case MAP_TYPE_PWID:
break;
default:
- send_notification(nbr->tcp, S_BAD_TLV_VAL,
- msg.id, msg.type);
+ send_notification(nbr->tcp, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
break;
case S_ENDOFLIB:
- if (!(nm.flags & F_NOTIF_FEC)) {
- send_notification(nbr->tcp, S_MISS_MSG,
- msg.id, msg.type);
+ if (!CHECK_FLAG(nm.flags, F_NOTIF_FEC)) {
+ send_notification(nbr->tcp, S_MISS_MSG, msg.id, msg.type);
return (-1);
}
if (nm.fec.type != MAP_TYPE_TYPED_WCARD) {
- send_notification(nbr->tcp, S_BAD_TLV_VAL,
- msg.id, msg.type);
+ send_notification(nbr->tcp, S_BAD_TLV_VAL, msg.id, msg.type);
return (-1);
}
break;
@@ -259,7 +253,7 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
log_msg_notification(0, nbr, &nm);
- if (st.status_code & htonl(STATUS_FATAL)) {
+ if (CHECK_FLAG(st.status_code, htonl(STATUS_FATAL))) {
if (nbr->state == NBR_STA_OPENSENT)
nbr_start_idtimer(nbr);
@@ -269,11 +263,9 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
* initialization, it SHOULD transmit a Shutdown message and
* then close the transport connection".
*/
- if (nbr->state != NBR_STA_OPER &&
- nm.status_code == S_SHUTDOWN) {
+ if (nbr->state != NBR_STA_OPER && nm.status_code == S_SHUTDOWN) {
leconf->stats.session_attempts++;
- send_notification(nbr->tcp, S_SHUTDOWN,
- msg.id, msg.type);
+ send_notification(nbr->tcp, S_SHUTDOWN, msg.id, msg.type);
}
leconf->stats.shutdown_rcv_notify++;
@@ -287,8 +279,7 @@ recv_notification(struct nbr *nbr, char *buf, uint16_t len)
switch (nm.status_code) {
case S_PW_STATUS:
case S_ENDOFLIB:
- ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0,
- &nm, sizeof(nm));
+ ldpe_imsg_compose_lde(IMSG_NOTIFICATION, nbr->peerid, 0, &nm, sizeof(nm));
break;
case S_NO_HELLO:
leconf->stats.session_rejects_hello++;
@@ -361,8 +352,8 @@ gen_returned_tlvs(struct ibuf *buf, uint16_t type, uint16_t length,
tlv.length = htons(length);
err = ibuf_add(buf, &rtlvs, sizeof(rtlvs));
- err |= ibuf_add(buf, &tlv, sizeof(tlv));
- err |= ibuf_add(buf, tlv_data, length);
+ SET_FLAG(err, ibuf_add(buf, &tlv, sizeof(tlv)));
+ SET_FLAG(err, ibuf_add(buf, tlv_data, length));
return (err);
}
@@ -378,9 +369,9 @@ log_msg_notification(int out, struct nbr *nbr, struct notify_msg *nm)
debug_msg(out, "notification: lsr-id %pI4, status %s",
&nbr->id, status_code_name(nm->status_code));
- if (nm->flags & F_NOTIF_FEC)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_FEC))
debug_msg(out, "notification: fec %s", log_map(&nm->fec));
- if (nm->flags & F_NOTIF_PW_STATUS)
+ if (CHECK_FLAG(nm->flags, F_NOTIF_PW_STATUS))
debug_msg(out, "notification: pw-status %s",
(nm->pw_status == PW_FORWARDING) ? "forwarding" : "not forwarding");
}
diff --git a/ldpd/socket.c b/ldpd/socket.c
index ec6d8be3d5..6b7e475d7f 100644
--- a/ldpd/socket.c
+++ b/ldpd/socket.c
@@ -89,8 +89,7 @@ ldp_create_socket(int af, enum socket_type type)
return (-1);
}
if (type == LDP_SOCKET_DISC) {
- if (sock_set_ipv4_mcast_ttl(fd,
- IP_DEFAULT_MULTICAST_TTL) == -1) {
+ if (sock_set_ipv4_mcast_ttl(fd, IP_DEFAULT_MULTICAST_TTL) == -1) {
close(fd);
return (-1);
}
@@ -141,7 +140,7 @@ ldp_create_socket(int af, enum socket_type type)
close(fd);
return (-1);
}
- if (!(ldpd_conf->ipv6.flags & F_LDPD_AF_NO_GTSM)) {
+ if (!CHECK_FLAG(ldpd_conf->ipv6.flags, F_LDPD_AF_NO_GTSM)) {
/* ignore any possible error */
sock_set_ipv6_minhopcount(fd, 255);
}
@@ -171,8 +170,7 @@ ldp_create_socket(int af, enum socket_type type)
#ifdef __OpenBSD__
opt = 1;
- if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt,
- sizeof(opt)) == -1) {
+ if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt, sizeof(opt)) == -1) {
if (errno == ENOPROTOOPT) { /* system w/o md5sig */
log_warnx("md5sig not available, disabling");
sysdep.no_md5sig = 1;
@@ -196,7 +194,7 @@ sock_set_nonblock(int fd)
if ((flags = fcntl(fd, F_GETFL, 0)) == -1)
fatal("fcntl F_GETFL");
- flags |= O_NONBLOCK;
+ SET_FLAG(flags, O_NONBLOCK);
if (fcntl(fd, F_SETFL, flags) == -1)
fatal("fcntl F_SETFL");
@@ -210,7 +208,7 @@ sock_set_cloexec(int fd)
if ((flags = fcntl(fd, F_GETFD, 0)) == -1)
fatal("fcntl F_GETFD");
- flags |= FD_CLOEXEC;
+ SET_FLAG(flags, FD_CLOEXEC);
if (fcntl(fd, F_SETFD, flags) == -1)
fatal("fcntl F_SETFD");
@@ -222,16 +220,14 @@ sock_set_recvbuf(int fd)
int bsize;
bsize = 65535;
- while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize,
- sizeof(bsize)) == -1)
+ while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize, sizeof(bsize)) == -1)
bsize /= 2;
}
int
sock_set_reuse(int fd, int enable)
{
- if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable,
- sizeof(int)) < 0) {
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int)) < 0) {
log_warn("%s: error setting SO_REUSEADDR", __func__);
return (-1);
}
@@ -244,8 +240,7 @@ sock_set_bindany(int fd, int enable)
{
#ifdef HAVE_SO_BINDANY
frr_with_privs(&ldpd_privs) {
- if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable,
- sizeof(int)) < 0) {
+ if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable, sizeof(int)) < 0) {
log_warn("%s: error setting SO_BINDANY", __func__);
return (-1);
}
@@ -259,8 +254,7 @@ sock_set_bindany(int fd, int enable)
return (0);
#elif defined(IP_BINDANY)
frr_with_privs(&ldpd_privs) {
- if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int))
- < 0) {
+ if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int)) < 0) {
log_warn("%s: error setting IP_BINDANY", __func__);
return (-1);
}
@@ -343,10 +337,8 @@ sock_set_ipv4_ucast_ttl(int fd, int ttl)
int
sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl)
{
- if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL,
- (char *)&ttl, sizeof(ttl)) < 0) {
- log_warn("%s: error setting IP_MULTICAST_TTL to %d",
- __func__, ttl);
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, (char *)&ttl, sizeof(ttl)) < 0) {
+ log_warn("%s: error setting IP_MULTICAST_TTL to %d", __func__, ttl);
return (-1);
}
@@ -358,8 +350,7 @@ sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl)
int
sock_set_ipv4_pktinfo(int fd, int enable)
{
- if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable,
- sizeof(enable)) < 0) {
+ if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable, sizeof(enable)) < 0) {
log_warn("%s: error setting IP_PKTINFO", __func__);
return (-1);
}
@@ -370,8 +361,7 @@ sock_set_ipv4_pktinfo(int fd, int enable)
int
sock_set_ipv4_recvdstaddr(int fd, int enable)
{
- if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable,
- sizeof(enable)) < 0) {
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable, sizeof(enable)) < 0) {
log_warn("%s: error setting IP_RECVDSTADDR", __func__);
return (-1);
}
@@ -409,8 +399,7 @@ sock_set_ipv4_mcast_loop(int fd)
int
sock_set_ipv6_dscp(int fd, int dscp)
{
- if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp,
- sizeof(dscp)) < 0) {
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp, sizeof(dscp)) < 0) {
log_warn("%s: error setting IPV6_TCLASS", __func__);
return (-1);
}
@@ -421,8 +410,7 @@ sock_set_ipv6_dscp(int fd, int dscp)
int
sock_set_ipv6_pktinfo(int fd, int enable)
{
- if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable,
- sizeof(enable)) < 0) {
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable, sizeof(enable)) < 0) {
log_warn("%s: error setting IPV6_RECVPKTINFO", __func__);
return (-1);
}
diff --git a/lib/command.c b/lib/command.c
index 7a7ce3f5dc..0995637219 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -735,9 +735,13 @@ char *cmd_variable_comp2str(vector comps, unsigned short cols)
char *item = vector_slot(comps, j);
itemlen = strlen(item);
- if (cs + itemlen + AUTOCOMP_INDENT + 3 >= bsz)
- buf = XREALLOC(MTYPE_TMP, buf, (bsz *= 2));
+ size_t next_sz = cs + itemlen + AUTOCOMP_INDENT + 3;
+ if (next_sz > bsz) {
+ /* Make sure the buf size is large enough */
+ bsz = next_sz;
+ buf = XREALLOC(MTYPE_TMP, buf, bsz);
+ }
if (lc + itemlen + 1 >= cols) {
cs += snprintf(&buf[cs], bsz - cs, "\n%*s",
AUTOCOMP_INDENT, "");
@@ -1283,6 +1287,7 @@ int command_config_read_one_line(struct vty *vty,
memcpy(ve->error_buf, vty->buf, VTY_BUFSIZ);
ve->line_num = line_num;
+ ve->cmd_ret = ret;
if (!vty->error)
vty->error = list_new();
diff --git a/lib/if.c b/lib/if.c
index 08d8918742..6f567861d1 100644
--- a/lib/if.c
+++ b/lib/if.c
@@ -1028,6 +1028,7 @@ void if_terminate(struct vrf *vrf)
if (ifp->node) {
ifp->node->info = NULL;
route_unlock_node(ifp->node);
+ ifp->node = NULL;
}
if_delete(&ifp);
}
diff --git a/lib/libfrr.c b/lib/libfrr.c
index e890057269..33237df5fc 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -1036,7 +1036,7 @@ void frr_config_fork(void)
zlog_tls_buffer_init();
}
-static void frr_vty_serv(void)
+void frr_vty_serv_start(void)
{
/* allow explicit override of vty_path in the future
* (not currently set anywhere) */
@@ -1058,7 +1058,15 @@ static void frr_vty_serv(void)
di->vty_path = vtypath_default;
}
- vty_serv_sock(di->vty_addr, di->vty_port, di->vty_path);
+ vty_serv_start(di->vty_addr, di->vty_port, di->vty_path);
+}
+
+void frr_vty_serv_stop(void)
+{
+ vty_serv_stop();
+
+ if (di->vty_path)
+ unlink(di->vty_path);
}
static void frr_check_detach(void)
@@ -1155,7 +1163,8 @@ void frr_run(struct event_loop *master)
{
char instanceinfo[64] = "";
- frr_vty_serv();
+ if (!(di->flags & FRR_MANUAL_VTY_START))
+ frr_vty_serv_start();
if (di->instance)
snprintf(instanceinfo, sizeof(instanceinfo), "instance %u ",
diff --git a/lib/libfrr.h b/lib/libfrr.h
index c05bc01e4f..b260a54dfe 100644
--- a/lib/libfrr.h
+++ b/lib/libfrr.h
@@ -39,6 +39,11 @@ extern "C" {
* Does nothing if -d isn't used.
*/
#define FRR_DETACH_LATER (1 << 6)
+/* If FRR_MANUAL_VTY_START is used, frr_run() will not automatically start
+ * listening on for vty connection (either TCP or Unix socket based). The daemon
+ * is responsible for calling frr_vty_serv() itself.
+ */
+#define FRR_MANUAL_VTY_START (1 << 7)
PREDECL_DLIST(log_args);
struct log_arg {
@@ -150,6 +155,8 @@ extern void frr_config_fork(void);
extern void frr_run(struct event_loop *master);
extern void frr_detach(void);
+extern void frr_vty_serv_start(void);
+extern void frr_vty_serv_stop(void);
extern bool frr_zclient_addr(struct sockaddr_storage *sa, socklen_t *sa_len,
const char *path);
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
index f74cf6ba09..5c875204f7 100644
--- a/lib/mgmt_be_client.c
+++ b/lib/mgmt_be_client.c
@@ -7,6 +7,7 @@
#include <zebra.h>
#include "debug.h"
+#include "compiler.h"
#include "libfrr.h"
#include "mgmtd/mgmt.h"
#include "mgmt_be_client.h"
@@ -20,12 +21,15 @@
#include "lib/mgmt_be_client_clippy.c"
#define MGMTD_BE_CLIENT_DBG(fmt, ...) \
- DEBUGD(&mgmt_dbg_be_client, "%s:" fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_dbg_be_client, "BE-CLIENT: %s:" fmt, __func__, \
+ ##__VA_ARGS__)
#define MGMTD_BE_CLIENT_ERR(fmt, ...) \
- zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+ zlog_err("BE-CLIENT: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_DBG_BE_CLIENT_CHECK() \
DEBUG_MODE_CHECK(&mgmt_dbg_be_client, DEBUG_MODE_ALL)
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT, "backend client");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT_NAME, "backend client name");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH, "backend transaction batch data");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_TXN, "backend transaction data");
@@ -68,8 +72,6 @@ struct mgmt_be_batch_ctx {
#define MGMTD_BE_TXN_FLAGS_CFG_APPLIED (1U << 1)
DECLARE_LIST(mgmt_be_batches, struct mgmt_be_batch_ctx, list_linkage);
-struct mgmt_be_client_ctx;
-
PREDECL_LIST(mgmt_be_txns);
struct mgmt_be_txn_ctx {
/* Txn-Id as assigned by MGMTD */
@@ -77,7 +79,7 @@ struct mgmt_be_txn_ctx {
uint32_t flags;
struct mgmt_be_client_txn_ctx client_data;
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client;
/* List of batches belonging to this transaction */
struct mgmt_be_batches_head cfg_batches;
@@ -98,17 +100,10 @@ DECLARE_LIST(mgmt_be_txns, struct mgmt_be_txn_ctx, list_linkage);
#define FOREACH_BE_APPLY_BATCH_IN_LIST(txn, batch) \
frr_each_safe (mgmt_be_batches, &(txn)->apply_cfgs, (batch))
-struct mgmt_be_client_ctx {
- int conn_fd;
- struct event_loop *tm;
- struct event *conn_retry_tmr;
- struct event *conn_read_ev;
- struct event *conn_write_ev;
- struct event *conn_writes_on;
- struct event *msg_proc_ev;
- uint32_t flags;
+struct mgmt_be_client {
+ struct msg_client client;
- struct mgmt_msg_state mstate;
+ char *name;
struct nb_config *candidate_config;
struct nb_config *running_config;
@@ -121,20 +116,16 @@ struct mgmt_be_client_ctx {
unsigned long avg_apply_nb_cfg_tm;
struct mgmt_be_txns_head txn_head;
- struct mgmt_be_client_params client_params;
-};
-#define MGMTD_BE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
+ struct mgmt_be_client_cbs cbs;
+ uintptr_t user_data;
+};
#define FOREACH_BE_TXN_IN_LIST(client_ctx, txn) \
frr_each_safe (mgmt_be_txns, &(client_ctx)->txn_head, (txn))
struct debug mgmt_dbg_be_client = {0, "Management backend client operations"};
-static struct mgmt_be_client_ctx mgmt_be_client_ctx = {
- .conn_fd = -1,
-};
-
const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
#ifdef HAVE_STATICD
[MGMTD_BE_CLIENT_ID_STATICD] = "staticd",
@@ -142,35 +133,13 @@ const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
[MGMTD_BE_CLIENT_ID_MAX] = "Unknown/Invalid",
};
-/* Forward declarations */
-static void
-mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,
- enum mgmt_be_event event);
-static void
-mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
- unsigned long intvl_secs);
-static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
- Mgmtd__BeMessage *be_msg);
-
-static void
-mgmt_be_server_disconnect(struct mgmt_be_client_ctx *client_ctx,
- bool reconnect)
+static int mgmt_be_client_send_msg(struct mgmt_be_client *client_ctx,
+ Mgmtd__BeMessage *be_msg)
{
- /* Notify client through registered callback (if any) */
- if (client_ctx->client_params.client_connect_notify)
- (void)(*client_ctx->client_params.client_connect_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data, false);
-
- if (client_ctx->conn_fd != -1) {
- close(client_ctx->conn_fd);
- client_ctx->conn_fd = -1;
- }
-
- if (reconnect)
- mgmt_be_client_schedule_conn_retry(
- client_ctx,
- client_ctx->client_params.conn_retry_intvl_sec);
+ return msg_conn_send_msg(
+ &client_ctx->client.conn, MGMT_MSG_VERSION_PROTOBUF, be_msg,
+ mgmtd__be_message__get_packed_size(be_msg),
+ (size_t(*)(void *, void *))mgmtd__be_message__pack, false);
}
static struct mgmt_be_batch_ctx *
@@ -201,8 +170,9 @@ mgmt_be_batch_create(struct mgmt_be_txn_ctx *txn, uint64_t batch_id)
batch->batch_id = batch_id;
mgmt_be_batches_add_tail(&txn->cfg_batches, batch);
- MGMTD_BE_CLIENT_DBG("Added new batch 0x%llx to transaction",
- (unsigned long long)batch_id);
+ MGMTD_BE_CLIENT_DBG("Added new batch-id: %" PRIu64
+ " to transaction",
+ batch_id);
}
return batch;
@@ -247,8 +217,7 @@ static void mgmt_be_cleanup_all_batches(struct mgmt_be_txn_ctx *txn)
}
static struct mgmt_be_txn_ctx *
-mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id)
+mgmt_be_find_txn_by_id(struct mgmt_be_client *client_ctx, uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn = NULL;
@@ -261,8 +230,7 @@ mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx,
}
static struct mgmt_be_txn_ctx *
-mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id)
+mgmt_be_txn_create(struct mgmt_be_client *client_ctx, uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn = NULL;
@@ -273,20 +241,19 @@ mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
assert(txn);
txn->txn_id = txn_id;
- txn->client_ctx = client_ctx;
+ txn->client = client_ctx;
mgmt_be_batches_init(&txn->cfg_batches);
mgmt_be_batches_init(&txn->apply_cfgs);
mgmt_be_txns_add_tail(&client_ctx->txn_head, txn);
- MGMTD_BE_CLIENT_DBG("Added new transaction 0x%llx",
- (unsigned long long)txn_id);
+ MGMTD_BE_CLIENT_DBG("Added new txn-id: %" PRIu64, txn_id);
}
return txn;
}
-static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
- struct mgmt_be_txn_ctx **txn)
+static void mgmt_be_txn_delete(struct mgmt_be_client *client_ctx,
+ struct mgmt_be_txn_ctx **txn)
{
char err_msg[] = "MGMT Transaction Delete";
@@ -306,12 +273,10 @@ static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
* CFGDATA_CREATE_REQs. But first notify the client
* about the transaction delete.
*/
- if (client_ctx->client_params.txn_notify)
- (void)(*client_ctx->client_params
- .txn_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- &(*txn)->client_data, true);
+ if (client_ctx->cbs.txn_notify)
+ (void)(*client_ctx->cbs.txn_notify)(client_ctx,
+ client_ctx->user_data,
+ &(*txn)->client_data, true);
mgmt_be_cleanup_all_batches(*txn);
if ((*txn)->nb_txn)
@@ -322,8 +287,7 @@ static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
*txn = NULL;
}
-static void
-mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx)
+static void mgmt_be_cleanup_all_txns(struct mgmt_be_client *client_ctx)
{
struct mgmt_be_txn_ctx *txn = NULL;
@@ -332,9 +296,8 @@ mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx)
}
}
-static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, bool create,
- bool success)
+static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, bool create, bool success)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeTxnReply txn_reply;
@@ -348,15 +311,13 @@ static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx,
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY;
be_msg.txn_reply = &txn_reply;
- MGMTD_BE_CLIENT_DBG(
- "Sending TXN_REPLY message to MGMTD for txn 0x%llx",
- (unsigned long long)txn_id);
+ MGMTD_BE_CLIENT_DBG("Sending TXN_REPLY txn-id %" PRIu64, txn_id);
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
-static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, bool create)
+static int mgmt_be_process_txn_req(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, bool create)
{
struct mgmt_be_txn_ctx *txn;
@@ -368,21 +329,17 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
* Should not happen under any circumstances.
*/
MGMTD_BE_CLIENT_ERR(
- "Transaction 0x%llx already exists!!!",
- (unsigned long long)txn_id);
+ "txn-id: %" PRIu64 " already exists", txn_id);
mgmt_be_send_txn_reply(client_ctx, txn_id, create,
false);
}
- MGMTD_BE_CLIENT_DBG("Created new transaction 0x%llx",
- (unsigned long long)txn_id);
+ MGMTD_BE_CLIENT_DBG("Created new txn-id %" PRIu64, txn_id);
txn = mgmt_be_txn_create(client_ctx, txn_id);
- if (client_ctx->client_params.txn_notify)
- (void)(*client_ctx->client_params
- .txn_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
+ if (client_ctx->cbs.txn_notify)
+ (void)(*client_ctx->cbs.txn_notify)(
+ client_ctx, client_ctx->user_data,
&txn->client_data, false);
} else {
if (!txn) {
@@ -390,12 +347,11 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
* Transaction with same txn-id does not exists.
* Return sucess anyways.
*/
- MGMTD_BE_CLIENT_DBG(
- "Transaction to delete 0x%llx does NOT exists!!!",
- (unsigned long long)txn_id);
+ MGMTD_BE_CLIENT_DBG("txn-id: %" PRIu64
+ " for delete does NOT exists",
+ txn_id);
} else {
- MGMTD_BE_CLIENT_DBG("Delete transaction 0x%llx",
- (unsigned long long)txn_id);
+ MGMTD_BE_CLIENT_DBG("Delete txn-id: %" PRIu64, txn_id);
mgmt_be_txn_delete(client_ctx, &txn);
}
}
@@ -405,10 +361,10 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int
-mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, uint64_t batch_id,
- bool success, const char *error_if_any)
+static int mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, uint64_t batch_id,
+ bool success,
+ const char *error_if_any)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataCreateReply cfgdata_reply;
@@ -424,9 +380,9 @@ mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client_ctx *client_ctx,
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY;
be_msg.cfg_data_reply = &cfgdata_reply;
- MGMTD_BE_CLIENT_DBG(
- "Sending CFGDATA_CREATE_REPLY message to MGMTD for txn 0x%llx batch 0x%llx",
- (unsigned long long)txn_id, (unsigned long long)batch_id);
+ MGMTD_BE_CLIENT_DBG("Sending CFGDATA_CREATE_REPLY txn-id: %" PRIu64
+ " batch-id: %" PRIu64,
+ txn_id, batch_id);
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
@@ -435,11 +391,11 @@ static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn)
{
char errmsg[BUFSIZ] = {0};
- assert(txn && txn->client_ctx);
+ assert(txn && txn->client);
if (txn->nb_txn) {
MGMTD_BE_CLIENT_ERR(
- "Aborting configurations after prep for Txn 0x%llx",
- (unsigned long long)txn->txn_id);
+ "Aborting configs after prep for txn-id: %" PRIu64,
+ txn->txn_id);
nb_candidate_commit_abort(txn->nb_txn, errmsg, sizeof(errmsg));
txn->nb_txn = 0;
}
@@ -451,15 +407,15 @@ static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn)
* does that work?
*/
MGMTD_BE_CLIENT_DBG(
- "Reset candidate configurations after abort of Txn 0x%llx",
- (unsigned long long)txn->txn_id);
- nb_config_replace(txn->client_ctx->candidate_config,
- txn->client_ctx->running_config, true);
+ "Reset candidate configurations after abort of txn-id: %" PRIu64,
+ txn->txn_id);
+ nb_config_replace(txn->client->candidate_config,
+ txn->client->running_config, true);
}
static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
{
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client_ctx;
struct mgmt_be_txn_req *txn_req = NULL;
struct nb_context nb_ctx = {0};
struct timeval edit_nb_cfg_start;
@@ -474,15 +430,15 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
size_t num_processed;
int err;
- assert(txn && txn->client_ctx);
- client_ctx = txn->client_ctx;
+ assert(txn && txn->client);
+ client_ctx = txn->client;
num_processed = 0;
FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
txn_req = &batch->txn_req;
error = false;
nb_ctx.client = NB_CLIENT_CLI;
- nb_ctx.user = (void *)client_ctx->client_params.user_data;
+ nb_ctx.user = (void *)client_ctx->user_data;
if (!txn->nb_txn) {
/*
@@ -502,10 +458,10 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
if (error) {
err_buf[sizeof(err_buf) - 1] = 0;
MGMTD_BE_CLIENT_ERR(
- "Failed to update configs for Txn %llx Batch %llx to Candidate! Err: '%s'",
- (unsigned long long)txn->txn_id,
- (unsigned long long)batch->batch_id,
- err_buf);
+ "Failed to update configs for txn-id: %" PRIu64
+ " batch-id: %" PRIu64
+ " to candidate, err: '%s'",
+ txn->txn_id, batch->batch_id, err_buf);
return -1;
}
gettimeofday(&edit_nb_cfg_end, NULL);
@@ -529,7 +485,7 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
* Now prepare all the batches we have applied in one go.
*/
nb_ctx.client = NB_CLIENT_CLI;
- nb_ctx.user = (void *)client_ctx->client_params.user_data;
+ nb_ctx.user = (void *)client_ctx->user_data;
gettimeofday(&prep_nb_cfg_start, NULL);
err = nb_candidate_commit_prepare(nb_ctx, client_ctx->candidate_config,
@@ -544,21 +500,20 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
err_buf[sizeof(err_buf) - 1] = 0;
if (err == NB_ERR_VALIDATION)
MGMTD_BE_CLIENT_ERR(
- "Failed to validate configs for Txn %llx %u Batches! Err: '%s'",
- (unsigned long long)txn->txn_id,
- (uint32_t)num_processed, err_buf);
+ "Failed to validate configs txn-id: %" PRIu64
+ " %zu batches, err: '%s'",
+ txn->txn_id, num_processed, err_buf);
else
MGMTD_BE_CLIENT_ERR(
- "Failed to prepare configs for Txn %llx, %u Batches! Err: '%s'",
- (unsigned long long)txn->txn_id,
- (uint32_t)num_processed, err_buf);
+ "Failed to prepare configs for txn-id: %" PRIu64
+ " %zu batches, err: '%s'",
+ txn->txn_id, num_processed, err_buf);
error = true;
SET_FLAG(txn->flags, MGMTD_BE_TXN_FLAGS_CFGPREP_FAILED);
} else
- MGMTD_BE_CLIENT_DBG(
- "Prepared configs for Txn %llx, %u Batches! successfully!",
- (unsigned long long)txn->txn_id,
- (uint32_t)num_processed);
+ MGMTD_BE_CLIENT_DBG("Prepared configs for txn-id: %" PRIu64
+ " %zu batches",
+ txn->txn_id, num_processed);
gettimeofday(&prep_nb_cfg_end, NULL);
prep_nb_cfg_tm = timeval_elapsed(prep_nb_cfg_end, prep_nb_cfg_start);
@@ -594,12 +549,11 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
/*
* Process all CFG_DATA_REQs received so far and prepare them all in one go.
*/
-static int
-mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
- struct mgmt_be_txn_ctx *txn,
- uint64_t batch_id,
- Mgmtd__YangCfgDataReq * cfg_req[],
- int num_req)
+static int mgmt_be_update_setcfg_in_batch(struct mgmt_be_client *client_ctx,
+ struct mgmt_be_txn_ctx *txn,
+ uint64_t batch_id,
+ Mgmtd__YangCfgDataReq *cfg_req[],
+ int num_req)
{
struct mgmt_be_batch_ctx *batch = NULL;
struct mgmt_be_txn_req *txn_req = NULL;
@@ -614,10 +568,9 @@ mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
txn_req = &batch->txn_req;
txn_req->event = MGMTD_BE_TXN_PROC_SETCFG;
- MGMTD_BE_CLIENT_DBG(
- "Created Set-Config request for batch 0x%llx, txn id 0x%llx, cfg-items:%d",
- (unsigned long long)batch_id, (unsigned long long)txn->txn_id,
- num_req);
+ MGMTD_BE_CLIENT_DBG("Created SETCFG request for batch-id: %" PRIu64
+ " txn-id: %" PRIu64 " cfg-items:%d",
+ batch_id, txn->txn_id, num_req);
txn_req->req.set_cfg.num_cfg_changes = num_req;
for (index = 0; index < num_req; index++) {
@@ -650,19 +603,18 @@ mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int
-mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, uint64_t batch_id,
- Mgmtd__YangCfgDataReq * cfg_req[], int num_req,
- bool end_of_data)
+static int mgmt_be_process_cfgdata_req(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq *cfg_req[],
+ int num_req, bool end_of_data)
{
struct mgmt_be_txn_ctx *txn;
txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
if (!txn) {
- MGMTD_BE_CLIENT_ERR(
- "Invalid txn-id 0x%llx provided from MGMTD server",
- (unsigned long long)txn_id);
+ MGMTD_BE_CLIENT_ERR("Invalid txn-id: %" PRIu64
+ " from MGMTD server",
+ txn_id);
mgmt_be_send_cfgdata_create_reply(
client_ctx, txn_id, batch_id, false,
"Transaction context not created yet");
@@ -679,10 +631,10 @@ mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, uint64_t batch_ids[],
- size_t num_batch_ids, bool success,
- const char *error_if_any)
+static int mgmt_be_send_apply_reply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, uint64_t batch_ids[],
+ size_t num_batch_ids, bool success,
+ const char *error_if_any)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataApplyReply apply_reply;
@@ -701,19 +653,18 @@ static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx,
be_msg.cfg_apply_reply = &apply_reply;
MGMTD_BE_CLIENT_DBG(
- "Sending CFG_APPLY_REPLY message to MGMTD for txn 0x%llx, %d batches [0x%llx - 0x%llx]",
- (unsigned long long)txn_id, (int)num_batch_ids,
- success && num_batch_ids ?
- (unsigned long long)batch_ids[0] : 0,
- success && num_batch_ids ?
- (unsigned long long)batch_ids[num_batch_ids - 1] : 0);
+ "Sending CFG_APPLY_REPLY txn-id %" PRIu64
+ " %zu batch ids %" PRIu64 " - %" PRIu64,
+ txn_id, num_batch_ids,
+ success && num_batch_ids ? batch_ids[0] : 0,
+ success && num_batch_ids ? batch_ids[num_batch_ids - 1] : 0);
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
{
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client_ctx;
struct timeval apply_nb_cfg_start;
struct timeval apply_nb_cfg_end;
unsigned long apply_nb_cfg_tm;
@@ -722,8 +673,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
size_t num_processed;
static uint64_t batch_ids[MGMTD_BE_MAX_BATCH_IDS_IN_REQ];
- assert(txn && txn->client_ctx);
- client_ctx = txn->client_ctx;
+ assert(txn && txn->client);
+ client_ctx = txn->client;
assert(txn->nb_txn);
num_processed = 0;
@@ -775,9 +726,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
return 0;
}
-static int
-mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id)
+static int mgmt_be_process_cfg_apply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn;
@@ -794,9 +744,8 @@ mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int
-mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
- Mgmtd__BeMessage *be_msg)
+static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
+ Mgmtd__BeMessage *be_msg)
{
/*
* protobuf-c adds a max size enum with an internal, and changing by
@@ -804,15 +753,24 @@ mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
*/
switch ((int)be_msg->message_case) {
case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
- MGMTD_BE_CLIENT_DBG("Subscribe Reply Msg from mgmt, status %u",
- be_msg->subscr_reply->success);
+ MGMTD_BE_CLIENT_DBG("Got SUBSCR_REPLY success %u",
+ be_msg->subscr_reply->success);
break;
case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+ MGMTD_BE_CLIENT_DBG("Got TXN_REQ %s txn-id: %" PRIu64,
+ be_msg->txn_req->create ? "Create"
+ : "Delete",
+ be_msg->txn_req->txn_id);
mgmt_be_process_txn_req(client_ctx,
be_msg->txn_req->txn_id,
be_msg->txn_req->create);
break;
case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+ MGMTD_BE_CLIENT_DBG("Got CFG_DATA_REQ txn-id: %" PRIu64
+ " batch-id: %" PRIu64 " end-of-data %u",
+ be_msg->cfg_data_req->txn_id,
+ be_msg->cfg_data_req->batch_id,
+ be_msg->cfg_data_req->end_of_data);
mgmt_be_process_cfgdata_req(
client_ctx, be_msg->cfg_data_req->txn_id,
be_msg->cfg_data_req->batch_id,
@@ -821,6 +779,8 @@ mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
be_msg->cfg_data_req->end_of_data);
break;
case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+		MGMTD_BE_CLIENT_DBG("Got CFG_APPLY_REQ txn-id: %" PRIu64,
+				    be_msg->cfg_apply_req->txn_id);
mgmt_be_process_cfg_apply(
client_ctx, (uint64_t)be_msg->cfg_apply_req->txn_id);
break;
@@ -828,6 +788,8 @@ mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+ MGMTD_BE_CLIENT_ERR("Got unhandled message type %u",
+ be_msg->message_case);
/*
* TODO: Add handling code in future.
*/
@@ -857,12 +819,16 @@ mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static void mgmt_be_client_process_msg(void *user_ctx, uint8_t *data,
- size_t len)
+static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
{
- struct mgmt_be_client_ctx *client_ctx = user_ctx;
+ struct mgmt_be_client *client_ctx;
+ struct msg_client *client;
Mgmtd__BeMessage *be_msg;
+ client = container_of(conn, struct msg_client, conn);
+ client_ctx = container_of(client, struct mgmt_be_client, client);
+
be_msg = mgmtd__be_message__unpack(NULL, len, data);
if (!be_msg) {
MGMTD_BE_CLIENT_DBG("Failed to decode %zu bytes from server",
@@ -876,111 +842,17 @@ static void mgmt_be_client_process_msg(void *user_ctx, uint8_t *data,
mgmtd__be_message__free_unpacked(be_msg, NULL);
}
-static void mgmt_be_client_proc_msgbufs(struct event *thread)
-{
- struct mgmt_be_client_ctx *client_ctx = EVENT_ARG(thread);
-
- if (mgmt_msg_procbufs(&client_ctx->mstate, mgmt_be_client_process_msg,
- client_ctx, MGMTD_DBG_BE_CLIENT_CHECK()))
- mgmt_be_client_register_event(client_ctx, MGMTD_BE_PROC_MSG);
-}
-
-static void mgmt_be_client_read(struct event *thread)
-{
- struct mgmt_be_client_ctx *client_ctx = EVENT_ARG(thread);
- enum mgmt_msg_rsched rv;
-
- rv = mgmt_msg_read(&client_ctx->mstate, client_ctx->conn_fd,
- MGMTD_DBG_BE_CLIENT_CHECK());
- if (rv == MSR_DISCONNECT) {
- mgmt_be_server_disconnect(client_ctx, true);
- return;
- }
- if (rv == MSR_SCHED_BOTH)
- mgmt_be_client_register_event(client_ctx, MGMTD_BE_PROC_MSG);
- mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_READ);
-}
-
-static inline void
-mgmt_be_client_sched_msg_write(struct mgmt_be_client_ctx *client_ctx)
-{
- if (!CHECK_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF))
- mgmt_be_client_register_event(client_ctx,
- MGMTD_BE_CONN_WRITE);
-}
-
-static inline void
-mgmt_be_client_writes_on(struct mgmt_be_client_ctx *client_ctx)
-{
- MGMTD_BE_CLIENT_DBG("Resume writing msgs");
- UNSET_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF);
- mgmt_be_client_sched_msg_write(client_ctx);
-}
-
-static inline void
-mgmt_be_client_writes_off(struct mgmt_be_client_ctx *client_ctx)
-{
- SET_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF);
- MGMTD_BE_CLIENT_DBG("Paused writing msgs");
-}
-
-static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
- Mgmtd__BeMessage *be_msg)
-{
- if (client_ctx->conn_fd == -1) {
- MGMTD_BE_CLIENT_DBG("can't send message on closed connection");
- return -1;
- }
-
- int rv = mgmt_msg_send_msg(
- &client_ctx->mstate, be_msg,
- mgmtd__be_message__get_packed_size(be_msg),
- (size_t(*)(void *, void *))mgmtd__be_message__pack,
- MGMTD_DBG_BE_CLIENT_CHECK());
- mgmt_be_client_sched_msg_write(client_ctx);
- return rv;
-}
-
-static void mgmt_be_client_write(struct event *thread)
-{
- struct mgmt_be_client_ctx *client_ctx = EVENT_ARG(thread);
- enum mgmt_msg_wsched rv;
-
- rv = mgmt_msg_write(&client_ctx->mstate, client_ctx->conn_fd,
- MGMTD_DBG_BE_CLIENT_CHECK());
- if (rv == MSW_SCHED_STREAM)
- mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_WRITE);
- else if (rv == MSW_DISCONNECT)
- mgmt_be_server_disconnect(client_ctx, true);
- else if (rv == MSW_SCHED_WRITES_OFF) {
- mgmt_be_client_writes_off(client_ctx);
- mgmt_be_client_register_event(client_ctx,
- MGMTD_BE_CONN_WRITES_ON);
- } else
- assert(rv == MSW_SCHED_NONE);
-}
-
-static void mgmt_be_client_resume_writes(struct event *thread)
-{
- struct mgmt_be_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_be_client_ctx *)EVENT_ARG(thread);
- assert(client_ctx && client_ctx->conn_fd != -1);
-
- mgmt_be_client_writes_on(client_ctx);
-}
-
-static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx,
- bool subscr_xpaths, uint16_t num_reg_xpaths,
- char **reg_xpaths)
+int mgmt_be_send_subscr_req(struct mgmt_be_client *client_ctx,
+ bool subscr_xpaths, int num_xpaths,
+ char **reg_xpaths)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeSubscribeReq subscr_req;
mgmtd__be_subscribe_req__init(&subscr_req);
- subscr_req.client_name = client_ctx->client_params.name;
- subscr_req.n_xpath_reg = num_reg_xpaths;
- if (num_reg_xpaths)
+ subscr_req.client_name = client_ctx->name;
+ subscr_req.n_xpath_reg = num_xpaths;
+ if (num_xpaths)
subscr_req.xpath_reg = reg_xpaths;
else
subscr_req.xpath_reg = NULL;
@@ -990,90 +862,50 @@ static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx,
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ;
be_msg.subscr_req = &subscr_req;
+	MGMTD_BE_CLIENT_DBG(
+		"Sending SUBSCR_REQ name: %s subscr_xpaths: %u num_xpaths: %zu",
+		subscr_req.client_name, subscr_req.subscribe_xpaths,
+		subscr_req.n_xpath_reg);
+
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
-static void mgmt_be_server_connect(struct mgmt_be_client_ctx *client_ctx)
+static int _notify_conenct_disconnect(struct msg_client *msg_client,
+ bool connected)
{
- const char *dbgtag = MGMTD_DBG_BE_CLIENT_CHECK() ? "BE-client" : NULL;
-
- assert(client_ctx->conn_fd == -1);
- client_ctx->conn_fd = mgmt_msg_connect(
- MGMTD_BE_SERVER_PATH, MGMTD_SOCKET_BE_SEND_BUF_SIZE,
- MGMTD_SOCKET_BE_RECV_BUF_SIZE, dbgtag);
+ struct mgmt_be_client *client =
+ container_of(msg_client, struct mgmt_be_client, client);
+ int ret;
- /* Send SUBSCRIBE_REQ message */
- if (client_ctx->conn_fd == -1 ||
- mgmt_be_send_subscr_req(client_ctx, false, 0, NULL) != 0) {
- mgmt_be_server_disconnect(client_ctx, true);
- return;
+ if (connected) {
+ assert(msg_client->conn.fd != -1);
+ ret = mgmt_be_send_subscr_req(client, false, 0, NULL);
+ if (ret)
+ return ret;
}
- /* Start reading from the socket */
- mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_READ);
-
- /* Notify client through registered callback (if any) */
- if (client_ctx->client_params.client_connect_notify)
- (void)(*client_ctx->client_params.client_connect_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data, true);
+ /* Notify BE client through registered callback (if any) */
+ if (client->cbs.client_connect_notify)
+ (void)(*client->cbs.client_connect_notify)(
+ client, client->user_data, connected);
+ return 0;
}
-static void mgmt_be_client_conn_timeout(struct event *thread)
+static int mgmt_be_client_notify_conenct(struct msg_client *client)
{
- mgmt_be_server_connect(EVENT_ARG(thread));
+ return _notify_conenct_disconnect(client, true);
}
-static void
-mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,
- enum mgmt_be_event event)
+static int mgmt_be_client_notify_disconenct(struct msg_conn *conn)
{
- struct timeval tv = {0};
+ struct msg_client *client = container_of(conn, struct msg_client, conn);
- switch (event) {
- case MGMTD_BE_CONN_READ:
- event_add_read(client_ctx->tm, mgmt_be_client_read,
- client_ctx, client_ctx->conn_fd,
- &client_ctx->conn_read_ev);
- break;
- case MGMTD_BE_CONN_WRITE:
- event_add_write(client_ctx->tm, mgmt_be_client_write,
- client_ctx, client_ctx->conn_fd,
- &client_ctx->conn_write_ev);
- break;
- case MGMTD_BE_PROC_MSG:
- tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
- event_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
- client_ctx, &tv, &client_ctx->msg_proc_ev);
- break;
- case MGMTD_BE_CONN_WRITES_ON:
- event_add_timer_msec(client_ctx->tm,
- mgmt_be_client_resume_writes, client_ctx,
- MGMTD_BE_MSG_WRITE_DELAY_MSEC,
- &client_ctx->conn_writes_on);
- break;
- case MGMTD_BE_SERVER:
- case MGMTD_BE_CONN_INIT:
- case MGMTD_BE_SCHED_CFG_PREPARE:
- case MGMTD_BE_RESCHED_CFG_PREPARE:
- case MGMTD_BE_SCHED_CFG_APPLY:
- case MGMTD_BE_RESCHED_CFG_APPLY:
- assert(!"mgmt_be_client_post_event() called incorrectly");
- break;
- }
+ return _notify_conenct_disconnect(client, false);
}
-static void
-mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
- unsigned long intvl_secs)
-{
- MGMTD_BE_CLIENT_DBG(
- "Scheduling MGMTD Backend server connection retry after %lu seconds",
- intvl_secs);
- event_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
- (void *)client_ctx, intvl_secs,
- &client_ctx->conn_retry_tmr);
-}
+/*
+ * Debug Flags
+ */
DEFPY(debug_mgmt_client_be, debug_mgmt_client_be_cmd,
"[no] debug mgmt client backend",
@@ -1117,39 +949,33 @@ static struct cmd_node mgmt_dbg_node = {
.config_write = mgmt_debug_be_client_config_write,
};
-/*
- * Initialize library and try connecting with MGMTD.
- */
-uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
- struct event_loop *master_thread)
+struct mgmt_be_client *mgmt_be_client_create(const char *client_name,
+ struct mgmt_be_client_cbs *cbs,
+ uintptr_t user_data,
+ struct event_loop *event_loop)
{
- assert(master_thread && params && strlen(params->name)
- && !mgmt_be_client_ctx.tm);
-
- mgmt_be_client_ctx.tm = master_thread;
-
- if (!running_config)
- assert(!"MGMTD Be Client lib_init() after frr_init() only!");
- mgmt_be_client_ctx.running_config = running_config;
- mgmt_be_client_ctx.candidate_config = nb_config_new(NULL);
+ struct mgmt_be_client *client =
+ XCALLOC(MTYPE_MGMTD_BE_CLIENT, sizeof(*client));
- memcpy(&mgmt_be_client_ctx.client_params, params,
- sizeof(mgmt_be_client_ctx.client_params));
- if (!mgmt_be_client_ctx.client_params.conn_retry_intvl_sec)
- mgmt_be_client_ctx.client_params.conn_retry_intvl_sec =
- MGMTD_BE_DEFAULT_CONN_RETRY_INTVL_SEC;
+ /* Only call after frr_init() */
+ assert(running_config);
- mgmt_be_txns_init(&mgmt_be_client_ctx.txn_head);
- mgmt_msg_init(&mgmt_be_client_ctx.mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
- MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
- "BE-client");
+ client->name = XSTRDUP(MTYPE_MGMTD_BE_CLIENT_NAME, client_name);
+ client->running_config = running_config;
+ client->candidate_config = nb_config_new(NULL);
+ if (cbs)
+ client->cbs = *cbs;
+ mgmt_be_txns_init(&client->txn_head);
+ msg_client_init(&client->client, event_loop, MGMTD_BE_SERVER_PATH,
+ mgmt_be_client_notify_conenct,
+ mgmt_be_client_notify_disconenct,
+ mgmt_be_client_process_msg, MGMTD_BE_MAX_NUM_MSG_PROC,
+ MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN, false,
+ "BE-client", MGMTD_DBG_BE_CLIENT_CHECK());
- /* Start trying to connect to MGMTD backend server immediately */
- mgmt_be_client_schedule_conn_retry(&mgmt_be_client_ctx, 1);
+ MGMTD_BE_CLIENT_DBG("Initialized client '%s'", client_name);
- MGMTD_BE_CLIENT_DBG("Initialized client '%s'", params->name);
-
- return (uintptr_t)&mgmt_be_client_ctx;
+ return client;
}
@@ -1161,88 +987,16 @@ void mgmt_be_client_lib_vty_init(void)
install_element(CONFIG_NODE, &debug_mgmt_client_be_cmd);
}
-
-/*
- * Subscribe with MGMTD for one or more YANG subtree(s).
- */
-enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
- char *reg_yang_xpaths[],
- int num_reg_xpaths)
+void mgmt_be_client_destroy(struct mgmt_be_client *client)
{
- struct mgmt_be_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_be_send_subscr_req(client_ctx, true, num_reg_xpaths,
- reg_yang_xpaths)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Unsubscribe with MGMTD for one or more YANG subtree(s).
- */
-enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
- char *reg_yang_xpaths[],
- int num_reg_xpaths)
-{
- struct mgmt_be_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
-
- if (mgmt_be_send_subscr_req(client_ctx, false, num_reg_xpaths,
- reg_yang_xpaths)
- < 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send one or more YANG notifications to MGMTD daemon.
- */
-enum mgmt_result mgmt_be_send_yang_notify(uintptr_t lib_hndl,
- Mgmtd__YangData * data_elems[],
- int num_elems)
-{
- struct mgmt_be_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Destroy library and cleanup everything.
- */
-void mgmt_be_client_lib_destroy(uintptr_t lib_hndl)
-{
- struct mgmt_be_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- assert(client_ctx);
-
MGMTD_BE_CLIENT_DBG("Destroying MGMTD Backend Client '%s'",
- client_ctx->client_params.name);
-
- mgmt_be_server_disconnect(client_ctx, false);
+ client->name);
- mgmt_msg_destroy(&client_ctx->mstate);
+ msg_client_cleanup(&client->client);
+ mgmt_be_cleanup_all_txns(client);
+ mgmt_be_txns_fini(&client->txn_head);
+ nb_config_free(client->candidate_config);
- EVENT_OFF(client_ctx->conn_retry_tmr);
- EVENT_OFF(client_ctx->conn_read_ev);
- EVENT_OFF(client_ctx->conn_write_ev);
- EVENT_OFF(client_ctx->conn_writes_on);
- EVENT_OFF(client_ctx->msg_proc_ev);
- mgmt_be_cleanup_all_txns(client_ctx);
- mgmt_be_txns_fini(&client_ctx->txn_head);
+ XFREE(MTYPE_MGMTD_BE_CLIENT_NAME, client->name);
+ XFREE(MTYPE_MGMTD_BE_CLIENT, client);
}
diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h
index d4f2d86fdf..4d8a1f51a1 100644
--- a/lib/mgmt_be_client.h
+++ b/lib/mgmt_be_client.h
@@ -82,67 +82,26 @@ enum mgmt_be_client_id {
#define MGMTD_BE_MAX_CLIENTS_PER_XPATH_REG 32
+struct mgmt_be_client;
+
struct mgmt_be_client_txn_ctx {
uintptr_t *user_ctx;
};
-/*
- * All the client-specific information this library needs to
- * initialize itself, setup connection with MGMTD BackEnd interface
- * and carry on all required procedures appropriately.
+/**
+ * Backend client callbacks.
*
- * BackEnd clients need to initialise a instance of this structure
- * with appropriate data and pass it while calling the API
- * to initialize the library (See mgmt_be_client_lib_init for
- * more details).
+ * Callbacks:
+ * client_connect_notify: called when connection is made/lost to mgmtd.
+ * txn_notify: called when a txn has been created
*/
-struct mgmt_be_client_params {
- char name[MGMTD_CLIENT_NAME_MAX_LEN];
- uintptr_t user_data;
- unsigned long conn_retry_intvl_sec;
-
- void (*client_connect_notify)(uintptr_t lib_hndl,
- uintptr_t usr_data,
- bool connected);
-
- void (*client_subscribe_notify)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct nb_yang_xpath **xpath,
- enum mgmt_result subscribe_result[], int num_paths);
-
- void (*txn_notify)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx, bool destroyed);
-
- enum mgmt_result (*data_validate)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, struct nb_yang_value *data,
- bool delete, char *error_if_any);
-
- enum mgmt_result (*data_apply)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, struct nb_yang_value *data,
- bool delete);
-
- enum mgmt_result (*get_data_elem)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, struct nb_yang_xpath_elem *elem);
-
- enum mgmt_result (*get_data)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, bool keys_only,
- struct nb_yang_xpath_elem **elems, int *num_elems,
- int *next_key);
-
- enum mgmt_result (*get_next_data)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, bool keys_only,
- struct nb_yang_xpath_elem **elems, int *num_elems);
+struct mgmt_be_client_cbs {
+ void (*client_connect_notify)(struct mgmt_be_client *client,
+ uintptr_t usr_data, bool connected);
+
+ void (*txn_notify)(struct mgmt_be_client *client, uintptr_t usr_data,
+ struct mgmt_be_client_txn_ctx *txn_ctx,
+ bool destroyed);
};
/***************************************************************
@@ -176,20 +135,20 @@ mgmt_be_client_name2id(const char *name)
* API prototypes
***************************************************************/
-/*
- * Initialize library and try connecting with MGMTD.
- *
- * params
- * Backend client parameters.
+/**
+ * Create backend client and connect to MGMTD.
*
- * master_thread
- * Thread master.
+ * Args:
+ * client_name: the name of the client
+ * cbs: callbacks for various events.
+ * event_loop: the main event loop.
*
* Returns:
- * Backend client lib handler (nothing but address of mgmt_be_client_ctx)
+ * Backend client object.
*/
-extern uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
- struct event_loop *master_thread);
+extern struct mgmt_be_client *
+mgmt_be_client_create(const char *name, struct mgmt_be_client_cbs *cbs,
+ uintptr_t user_data, struct event_loop *event_loop);
/*
* Initialize library vty (adds debug support).
@@ -206,13 +165,13 @@ extern void mgmt_be_client_lib_vty_init(void);
extern void mgmt_debug_be_client_show_debug(struct vty *vty);
/*
- * Subscribe with MGMTD for one or more YANG subtree(s).
+ * [Un]-subscribe with MGMTD for one or more YANG subtree(s).
*
- * lib_hndl
- * Client library handler.
+ * client
+ * The client object.
*
* reg_yang_xpaths
- * Yang xpath(s) that needs to be subscribed to.
+ * Yang xpath(s) that needs to be [un]-subscribed from/to
*
* num_xpaths
* Number of xpaths
@@ -220,52 +179,14 @@ extern void mgmt_debug_be_client_show_debug(struct vty *vty);
* Returns:
* MGMTD_SUCCESS on success, MGMTD_* otherwise.
*/
-extern enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
- char **reg_yang_xpaths,
- int num_xpaths);
-
-/*
- * Send one or more YANG notifications to MGMTD daemon.
- *
- * lib_hndl
- * Client library handler.
- *
- * data_elems
- * Yang data elements from data tree.
- *
- * num_elems
- * Number of data elements.
- *
- * Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
- */
-extern enum mgmt_result
-mgmt_be_send_yang_notify(uintptr_t lib_hndl, Mgmtd__YangData **data_elems,
- int num_elems);
-
-/*
- * Un-subscribe with MGMTD for one or more YANG subtree(s).
- *
- * lib_hndl
- * Client library handler.
- *
- * reg_yang_xpaths
- * Yang xpath(s) that needs to be un-subscribed from.
- *
- * num_reg_xpaths
- * Number of subscribed xpaths
- *
- * Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
- */
-enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
- char **reg_yang_xpaths,
- int num_reg_xpaths);
+extern int mgmt_be_send_subscr_req(struct mgmt_be_client *client,
+ bool subscr_xpaths, int num_xpaths,
+ char **reg_xpaths);
/*
- * Destroy library and cleanup everything.
+ * Destroy backend client and cleanup everything.
*/
-extern void mgmt_be_client_lib_destroy(uintptr_t lib_hndl);
+extern void mgmt_be_client_destroy(struct mgmt_be_client *client);
#ifdef __cplusplus
}
diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c
index b8266bfa82..35a6d7d909 100644
--- a/lib/mgmt_fe_client.c
+++ b/lib/mgmt_fe_client.c
@@ -6,6 +6,7 @@
*/
#include <zebra.h>
+#include "compiler.h"
#include "debug.h"
#include "memory.h"
#include "libfrr.h"
@@ -18,21 +19,12 @@
#include "lib/mgmt_fe_client_clippy.c"
-#define MGMTD_FE_CLIENT_DBG(fmt, ...) \
- DEBUGD(&mgmt_dbg_fe_client, "%s:" fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_FE_CLIENT_ERR(fmt, ...) \
- zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_DBG_FE_CLIENT_CHECK() \
- DEBUG_MODE_CHECK(&mgmt_dbg_fe_client, DEBUG_MODE_ALL)
-
-struct mgmt_fe_client_ctx;
-
PREDECL_LIST(mgmt_sessions);
struct mgmt_fe_client_session {
- uint64_t client_id;
- uint64_t session_id;
- struct mgmt_fe_client_ctx *client_ctx;
+ uint64_t client_id; /* FE client identifies itself with this ID */
+ uint64_t session_id; /* FE adapter identified session with this ID */
+ struct mgmt_fe_client *client;
uintptr_t user_ctx;
struct mgmt_sessions_item list_linkage;
@@ -40,173 +32,81 @@ struct mgmt_fe_client_session {
DECLARE_LIST(mgmt_sessions, struct mgmt_fe_client_session, list_linkage);
-DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "MGMTD Frontend session");
-
-struct mgmt_fe_client_ctx {
- int conn_fd;
- struct event_loop *tm;
- struct event *conn_retry_tmr;
- struct event *conn_read_ev;
- struct event *conn_write_ev;
- struct event *conn_writes_on;
- struct event *msg_proc_ev;
- uint32_t flags;
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_CLIENT, "frontend client");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_CLIENT_NAME, "frontend client name");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "frontend session");
- struct mgmt_msg_state mstate;
-
- struct mgmt_fe_client_params client_params;
-
- struct mgmt_sessions_head client_sessions;
+struct mgmt_fe_client {
+ struct msg_client client;
+ char *name;
+ struct mgmt_fe_client_cbs cbs;
+ uintptr_t user_data;
+ struct mgmt_sessions_head sessions;
};
-#define MGMTD_FE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
-
-#define FOREACH_SESSION_IN_LIST(client_ctx, session) \
- frr_each_safe (mgmt_sessions, &(client_ctx)->client_sessions, (session))
+#define FOREACH_SESSION_IN_LIST(client, session) \
+ frr_each_safe (mgmt_sessions, &(client)->sessions, (session))
struct debug mgmt_dbg_fe_client = {0, "Management frontend client operations"};
-static struct mgmt_fe_client_ctx mgmt_fe_client_ctx = {
- .conn_fd = -1,
-};
-
-/* Forward declarations */
-static void
-mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
- enum mgmt_fe_event event);
-static void mgmt_fe_client_schedule_conn_retry(
- struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs);
static struct mgmt_fe_client_session *
-mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t client_id)
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client *client,
+ uint64_t client_id)
{
struct mgmt_fe_client_session *session;
- FOREACH_SESSION_IN_LIST (client_ctx, session) {
+ FOREACH_SESSION_IN_LIST (client, session) {
if (session->client_id == client_id) {
- MGMTD_FE_CLIENT_DBG(
- "Found session %p for client-id %llu.", session,
- (unsigned long long)client_id);
+ MGMTD_FE_CLIENT_DBG("Found session-id %" PRIu64
+ " using client-id %" PRIu64,
+ session->session_id, client_id);
return session;
}
}
-
+ MGMTD_FE_CLIENT_DBG("Session not found using client-id %" PRIu64,
+ client_id);
return NULL;
}
static struct mgmt_fe_client_session *
-mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id)
+mgmt_fe_find_session_by_session_id(struct mgmt_fe_client *client,
+ uint64_t session_id)
{
struct mgmt_fe_client_session *session;
- FOREACH_SESSION_IN_LIST (client_ctx, session) {
+ FOREACH_SESSION_IN_LIST (client, session) {
if (session->session_id == session_id) {
MGMTD_FE_CLIENT_DBG(
- "Found session %p for session-id %llu.",
- session, (unsigned long long)session_id);
+ "Found session of client-id %" PRIu64
+ " using session-id %" PRIu64,
+ session->client_id, session_id);
return session;
}
}
-
+ MGMTD_FE_CLIENT_DBG("Session not found using session-id %" PRIu64,
+ session_id);
return NULL;
}
-static void
-mgmt_fe_server_disconnect(struct mgmt_fe_client_ctx *client_ctx,
- bool reconnect)
-{
- if (client_ctx->conn_fd != -1) {
- close(client_ctx->conn_fd);
- client_ctx->conn_fd = -1;
- }
-
- if (reconnect)
- mgmt_fe_client_schedule_conn_retry(
- client_ctx,
- client_ctx->client_params.conn_retry_intvl_sec);
-}
-
-static inline void
-mgmt_fe_client_sched_msg_write(struct mgmt_fe_client_ctx *client_ctx)
-{
- if (!CHECK_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF))
- mgmt_fe_client_register_event(client_ctx,
- MGMTD_FE_CONN_WRITE);
-}
-
-static inline void
-mgmt_fe_client_writes_on(struct mgmt_fe_client_ctx *client_ctx)
-{
- MGMTD_FE_CLIENT_DBG("Resume writing msgs");
- UNSET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
- mgmt_fe_client_sched_msg_write(client_ctx);
-}
-
-static inline void
-mgmt_fe_client_writes_off(struct mgmt_fe_client_ctx *client_ctx)
+static int mgmt_fe_client_send_msg(struct mgmt_fe_client *client,
+ Mgmtd__FeMessage *fe_msg,
+ bool short_circuit_ok)
{
- SET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
- MGMTD_FE_CLIENT_DBG("Paused writing msgs");
-}
-
-static int mgmt_fe_client_send_msg(struct mgmt_fe_client_ctx *client_ctx,
- Mgmtd__FeMessage *fe_msg)
-{
- /* users current expect this to fail here */
- if (client_ctx->conn_fd == -1) {
- MGMTD_FE_CLIENT_DBG("can't send message on closed connection");
- return -1;
- }
-
- int rv = mgmt_msg_send_msg(
- &client_ctx->mstate, fe_msg,
+ return msg_conn_send_msg(
+ &client->client.conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
mgmtd__fe_message__get_packed_size(fe_msg),
(size_t(*)(void *, void *))mgmtd__fe_message__pack,
- MGMTD_DBG_FE_CLIENT_CHECK());
- mgmt_fe_client_sched_msg_write(client_ctx);
- return rv;
-}
-
-static void mgmt_fe_client_write(struct event *thread)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- enum mgmt_msg_wsched rv;
-
- client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
- rv = mgmt_msg_write(&client_ctx->mstate, client_ctx->conn_fd,
- MGMTD_DBG_FE_CLIENT_CHECK());
- if (rv == MSW_SCHED_STREAM)
- mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_WRITE);
- else if (rv == MSW_DISCONNECT)
- mgmt_fe_server_disconnect(client_ctx, true);
- else if (rv == MSW_SCHED_WRITES_OFF) {
- mgmt_fe_client_writes_off(client_ctx);
- mgmt_fe_client_register_event(client_ctx,
- MGMTD_FE_CONN_WRITES_ON);
- } else
- assert(rv == MSW_SCHED_NONE);
-}
-
-static void mgmt_fe_client_resume_writes(struct event *thread)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
- assert(client_ctx && client_ctx->conn_fd != -1);
-
- mgmt_fe_client_writes_on(client_ctx);
+ short_circuit_ok);
}
-static int
-mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
+static int mgmt_fe_send_register_req(struct mgmt_fe_client *client)
{
Mgmtd__FeMessage fe_msg;
Mgmtd__FeRegisterReq rgstr_req;
mgmtd__fe_register_req__init(&rgstr_req);
- rgstr_req.client_name = client_ctx->client_params.name;
+ rgstr_req.client_name = client->name;
mgmtd__fe_message__init(&fe_msg);
fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ;
@@ -215,25 +115,27 @@ mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
MGMTD_FE_CLIENT_DBG(
"Sending REGISTER_REQ message to MGMTD Frontend server");
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, true);
}
-static int
-mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session,
- bool create)
+static int mgmt_fe_send_session_req(struct mgmt_fe_client *client,
+ struct mgmt_fe_client_session *session,
+ bool create)
{
Mgmtd__FeMessage fe_msg;
Mgmtd__FeSessionReq sess_req;
+ bool scok;
mgmtd__fe_session_req__init(&sess_req);
sess_req.create = create;
if (create) {
sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID;
sess_req.client_conn_id = session->client_id;
+ scok = true;
} else {
sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_SESSION_ID;
sess_req.session_id = session->session_id;
+ scok = false;
}
mgmtd__fe_message__init(&fe_msg);
@@ -241,24 +143,22 @@ mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.session_req = &sess_req;
MGMTD_FE_CLIENT_DBG(
- "Sending SESSION_REQ message for %s session %llu to MGMTD Frontend server",
- create ? "creating" : "destroying",
- (unsigned long long)session->client_id);
+ "Sending SESSION_REQ %s message for client-id %" PRIu64,
+ create ? "create" : "destroy", session->client_id);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, scok);
}
-static int
-mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session, bool lock,
- uint64_t req_id, Mgmtd__DatastoreId ds_id)
+int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client, uint64_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ bool lock)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
Mgmtd__FeLockDsReq lockds_req;
mgmtd__fe_lock_ds_req__init(&lockds_req);
- lockds_req.session_id = session->session_id;
+ lockds_req.session_id = session_id;
lockds_req.req_id = req_id;
lockds_req.ds_id = ds_id;
lockds_req.lock = lock;
@@ -268,16 +168,13 @@ mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.lockds_req = &lockds_req;
MGMTD_FE_CLIENT_DBG(
- "Sending %sLOCK_REQ message for Ds:%d session %llu to MGMTD Frontend server",
- lock ? "" : "UN", ds_id,
- (unsigned long long)session->client_id);
+ "Sending %sLOCK_REQ message for Ds:%d session-id %" PRIu64,
+ lock ? "" : "UN", ds_id, session_id);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int
-mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session,
+int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
uint64_t req_id, Mgmtd__DatastoreId ds_id,
Mgmtd__YangCfgDataReq **data_req, int num_data_reqs,
bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
@@ -287,7 +184,7 @@ mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
Mgmtd__FeSetConfigReq setcfg_req;
mgmtd__fe_set_config_req__init(&setcfg_req);
- setcfg_req.session_id = session->session_id;
+ setcfg_req.session_id = session_id;
setcfg_req.ds_id = ds_id;
setcfg_req.req_id = req_id;
setcfg_req.data = data_req;
@@ -300,25 +197,25 @@ mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.setcfg_req = &setcfg_req;
MGMTD_FE_CLIENT_DBG(
- "Sending SET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
- ds_id, (unsigned long long)session->client_id, num_data_reqs);
+ "Sending SET_CONFIG_REQ message for Ds:%d session-id %" PRIu64
+ " (#xpaths:%d)",
+ ds_id, session_id, num_data_reqs);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session,
- uint64_t req_id,
- Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dest_ds_id,
- bool validate_only, bool abort)
+int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dest_ds_id,
+ bool validate_only, bool abort)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
Mgmtd__FeCommitConfigReq commitcfg_req;
mgmtd__fe_commit_config_req__init(&commitcfg_req);
- commitcfg_req.session_id = session->session_id;
+ commitcfg_req.session_id = session_id;
commitcfg_req.src_ds_id = src_ds_id;
commitcfg_req.dst_ds_id = dest_ds_id;
commitcfg_req.req_id = req_id;
@@ -330,17 +227,15 @@ static int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.commcfg_req = &commitcfg_req;
MGMTD_FE_CLIENT_DBG(
- "Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session %llu to MGMTD Frontend server",
- src_ds_id, dest_ds_id, (unsigned long long)session->client_id);
+ "Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session-id %" PRIu64,
+ src_ds_id, dest_ds_id, session_id);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int
-mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session,
+int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq * data_req[],
+ Mgmtd__YangGetDataReq *data_req[],
int num_data_reqs)
{
(void)req_id;
@@ -348,7 +243,7 @@ mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
Mgmtd__FeGetConfigReq getcfg_req;
mgmtd__fe_get_config_req__init(&getcfg_req);
- getcfg_req.session_id = session->session_id;
+ getcfg_req.session_id = session_id;
getcfg_req.ds_id = ds_id;
getcfg_req.req_id = req_id;
getcfg_req.data = data_req;
@@ -359,17 +254,16 @@ mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.getcfg_req = &getcfg_req;
MGMTD_FE_CLIENT_DBG(
- "Sending GET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
- ds_id, (unsigned long long)session->client_id, num_data_reqs);
+ "Sending GET_CONFIG_REQ message for Ds:%d session-id %" PRIu64
+ " (#xpaths:%d)",
+ ds_id, session_id, num_data_reqs);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int
-mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session,
+int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client, uint64_t session_id,
uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq * data_req[],
+ Mgmtd__YangGetDataReq *data_req[],
int num_data_reqs)
{
(void)req_id;
@@ -377,7 +271,7 @@ mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
Mgmtd__FeGetDataReq getdata_req;
mgmtd__fe_get_data_req__init(&getdata_req);
- getdata_req.session_id = session->session_id;
+ getdata_req.session_id = session_id;
getdata_req.ds_id = ds_id;
getdata_req.req_id = req_id;
getdata_req.data = data_req;
@@ -388,24 +282,25 @@ mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.getdata_req = &getdata_req;
MGMTD_FE_CLIENT_DBG(
- "Sending GET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
- ds_id, (unsigned long long)session->client_id, num_data_reqs);
+ "Sending GET_CONFIG_REQ message for Ds:%d session-id %" PRIu64
+ " (#xpaths:%d)",
+ ds_id, session_id, num_data_reqs);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_regnotify_req(
- struct mgmt_fe_client_ctx *client_ctx,
- struct mgmt_fe_client_session *session, uint64_t req_id,
- Mgmtd__DatastoreId ds_id, bool register_req,
- Mgmtd__YangDataXPath * data_req[], int num_data_reqs)
+int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool register_req,
+ Mgmtd__YangDataXPath *data_req[],
+ int num_data_reqs)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
Mgmtd__FeRegisterNotifyReq regntfy_req;
mgmtd__fe_register_notify_req__init(&regntfy_req);
- regntfy_req.session_id = session->session_id;
+ regntfy_req.session_id = session_id;
regntfy_req.ds_id = ds_id;
regntfy_req.register_req = register_req;
regntfy_req.data_xpath = data_req;
@@ -415,12 +310,11 @@ static int mgmt_fe_send_regnotify_req(
fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ;
fe_msg.regnotify_req = &regntfy_req;
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int
-mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
- Mgmtd__FeMessage *fe_msg)
+static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client,
+ Mgmtd__FeMessage *fe_msg)
{
struct mgmt_fe_client_session *session = NULL;
@@ -430,119 +324,93 @@ mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
*/
switch ((int)fe_msg->message_case) {
case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
- if (fe_msg->session_reply->create
- && fe_msg->session_reply->has_client_conn_id) {
+ if (fe_msg->session_reply->create &&
+ fe_msg->session_reply->has_client_conn_id) {
MGMTD_FE_CLIENT_DBG(
- "Got Session Create Reply Msg for client-id %llu with session-id: %llu.",
- (unsigned long long)
- fe_msg->session_reply->client_conn_id,
- (unsigned long long)
- fe_msg->session_reply->session_id);
+ "Got SESSION_REPLY (create) for client-id %" PRIu64
+ " with session-id: %" PRIu64,
+ fe_msg->session_reply->client_conn_id,
+ fe_msg->session_reply->session_id);
session = mgmt_fe_find_session_by_client_id(
- client_ctx,
- fe_msg->session_reply->client_conn_id);
+ client, fe_msg->session_reply->client_conn_id);
if (session && fe_msg->session_reply->success) {
MGMTD_FE_CLIENT_DBG(
- "Session Create for client-id %llu successful.",
- (unsigned long long)
- fe_msg->session_reply
- ->client_conn_id);
+ "Session Created for client-id %" PRIu64,
+ fe_msg->session_reply->client_conn_id);
session->session_id =
fe_msg->session_reply->session_id;
} else {
MGMTD_FE_CLIENT_ERR(
- "Session Create for client-id %llu failed.",
- (unsigned long long)
- fe_msg->session_reply
- ->client_conn_id);
+ "Session Create failed for client-id %" PRIu64,
+ fe_msg->session_reply->client_conn_id);
}
} else if (!fe_msg->session_reply->create) {
MGMTD_FE_CLIENT_DBG(
- "Got Session Destroy Reply Msg for session-id %llu",
- (unsigned long long)
- fe_msg->session_reply->session_id);
+ "Got SESSION_REPLY (destroy) for session-id %" PRIu64,
+ fe_msg->session_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->session_req->session_id);
+ client, fe_msg->session_req->session_id);
}
- if (session && session->client_ctx
- && session->client_ctx->client_params
- .client_session_notify)
- (*session->client_ctx->client_params
- .client_session_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ /* The session state may be deleted by the callback */
+ if (session && session->client &&
+ session->client->cbs.client_session_notify)
+ (*session->client->cbs.client_session_notify)(
+ client, client->user_data, session->client_id,
fe_msg->session_reply->create,
fe_msg->session_reply->success,
- (uintptr_t)session, session->user_ctx);
+ fe_msg->session_reply->session_id,
+ session->user_ctx);
break;
case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
- MGMTD_FE_CLIENT_DBG(
- "Got LockDs Reply Msg for session-id %llu",
- (unsigned long long)
- fe_msg->lockds_reply->session_id);
+ MGMTD_FE_CLIENT_DBG("Got LOCKDS_REPLY for session-id %" PRIu64,
+ fe_msg->lockds_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->lockds_reply->session_id);
-
- if (session && session->client_ctx
- && session->client_ctx->client_params
- .lock_ds_notify)
- (*session->client_ctx->client_params
- .lock_ds_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id, (uintptr_t)session,
- session->user_ctx,
- fe_msg->lockds_reply->req_id,
+ client, fe_msg->lockds_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.lock_ds_notify)
+ (*session->client->cbs.lock_ds_notify)(
+ client, client->user_data, session->client_id,
+ fe_msg->lockds_reply->session_id,
+ session->user_ctx, fe_msg->lockds_reply->req_id,
fe_msg->lockds_reply->lock,
fe_msg->lockds_reply->success,
fe_msg->lockds_reply->ds_id,
fe_msg->lockds_reply->error_if_any);
break;
case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
- MGMTD_FE_CLIENT_DBG(
- "Got Set Config Reply Msg for session-id %llu",
- (unsigned long long)
- fe_msg->setcfg_reply->session_id);
+ MGMTD_FE_CLIENT_DBG("Got SETCFG_REPLY for session-id %" PRIu64,
+ fe_msg->setcfg_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->setcfg_reply->session_id);
-
- if (session && session->client_ctx
- && session->client_ctx->client_params
- .set_config_notify)
- (*session->client_ctx->client_params
- .set_config_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id, (uintptr_t)session,
- session->user_ctx,
- fe_msg->setcfg_reply->req_id,
+ client, fe_msg->setcfg_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.set_config_notify)
+ (*session->client->cbs.set_config_notify)(
+ client, client->user_data, session->client_id,
+ fe_msg->setcfg_reply->session_id,
+ session->user_ctx, fe_msg->setcfg_reply->req_id,
fe_msg->setcfg_reply->success,
fe_msg->setcfg_reply->ds_id,
fe_msg->setcfg_reply->error_if_any);
break;
case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
- MGMTD_FE_CLIENT_DBG(
- "Got Commit Config Reply Msg for session-id %llu",
- (unsigned long long)
- fe_msg->commcfg_reply->session_id);
+ MGMTD_FE_CLIENT_DBG("Got COMMCFG_REPLY for session-id %" PRIu64,
+ fe_msg->commcfg_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->commcfg_reply->session_id);
-
- if (session && session->client_ctx
- && session->client_ctx->client_params
- .commit_config_notify)
- (*session->client_ctx->client_params
- .commit_config_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id, (uintptr_t)session,
+ client, fe_msg->commcfg_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.commit_config_notify)
+ (*session->client->cbs.commit_config_notify)(
+ client, client->user_data, session->client_id,
+ fe_msg->commcfg_reply->session_id,
session->user_ctx,
fe_msg->commcfg_reply->req_id,
fe_msg->commcfg_reply->success,
@@ -552,24 +420,18 @@ mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->commcfg_reply->error_if_any);
break;
case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
- MGMTD_FE_CLIENT_DBG(
- "Got Get Config Reply Msg for session-id %llu",
- (unsigned long long)
- fe_msg->getcfg_reply->session_id);
+ MGMTD_FE_CLIENT_DBG("Got GETCFG_REPLY for session-id %" PRIu64,
+ fe_msg->getcfg_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->getcfg_reply->session_id);
-
- if (session && session->client_ctx
- && session->client_ctx->client_params
- .get_data_notify)
- (*session->client_ctx->client_params
- .get_data_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id, (uintptr_t)session,
- session->user_ctx,
- fe_msg->getcfg_reply->req_id,
+ client, fe_msg->getcfg_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.get_data_notify)
+ (*session->client->cbs.get_data_notify)(
+ client, client->user_data, session->client_id,
+ fe_msg->getcfg_reply->session_id,
+ session->user_ctx, fe_msg->getcfg_reply->req_id,
fe_msg->getcfg_reply->success,
fe_msg->getcfg_reply->ds_id,
fe_msg->getcfg_reply->data
@@ -579,28 +441,22 @@ mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
? fe_msg->getcfg_reply->data->n_data
: 0,
fe_msg->getcfg_reply->data
- ? fe_msg->getcfg_reply->data
- ->next_indx
+ ? fe_msg->getcfg_reply->data->next_indx
: 0,
fe_msg->getcfg_reply->error_if_any);
break;
case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
- MGMTD_FE_CLIENT_DBG(
- "Got Get Data Reply Msg for session-id %llu",
- (unsigned long long)
- fe_msg->getdata_reply->session_id);
+ MGMTD_FE_CLIENT_DBG("Got GETDATA_REPLY for session-id %" PRIu64,
+ fe_msg->getdata_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->getdata_reply->session_id);
-
- if (session && session->client_ctx
- && session->client_ctx->client_params
- .get_data_notify)
- (*session->client_ctx->client_params
- .get_data_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id, (uintptr_t)session,
+ client, fe_msg->getdata_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.get_data_notify)
+ (*session->client->cbs.get_data_notify)(
+ client, client->user_data, session->client_id,
+ fe_msg->getdata_reply->session_id,
session->user_ctx,
fe_msg->getdata_reply->req_id,
fe_msg->getdata_reply->success,
@@ -609,12 +465,10 @@ mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
? fe_msg->getdata_reply->data->data
: NULL,
fe_msg->getdata_reply->data
- ? fe_msg->getdata_reply->data
- ->n_data
+ ? fe_msg->getdata_reply->data->n_data
: 0,
fe_msg->getdata_reply->data
- ? fe_msg->getdata_reply->data
- ->next_indx
+ ? fe_msg->getdata_reply->data->next_indx
: 0,
fe_msg->getdata_reply->error_if_any);
break;
@@ -649,12 +503,16 @@ mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
return 0;
}
-static void mgmt_fe_client_process_msg(void *user_ctx, uint8_t *data,
- size_t len)
+static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
{
- struct mgmt_fe_client_ctx *client_ctx = user_ctx;
+ struct mgmt_fe_client *client;
+ struct msg_client *msg_client;
Mgmtd__FeMessage *fe_msg;
+ msg_client = container_of(conn, struct msg_client, conn);
+ client = container_of(msg_client, struct mgmt_fe_client, client);
+
fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
if (!fe_msg) {
MGMTD_FE_CLIENT_DBG("Failed to decode %zu bytes from server.",
@@ -664,115 +522,65 @@ static void mgmt_fe_client_process_msg(void *user_ctx, uint8_t *data,
MGMTD_FE_CLIENT_DBG(
"Decoded %zu bytes of message(msg: %u/%u) from server", len,
fe_msg->message_case, fe_msg->message_case);
- (void)mgmt_fe_client_handle_msg(client_ctx, fe_msg);
+ (void)mgmt_fe_client_handle_msg(client, fe_msg);
mgmtd__fe_message__free_unpacked(fe_msg, NULL);
}
-static void mgmt_fe_client_proc_msgbufs(struct event *thread)
+static int _notify_connect_disconnect(struct msg_client *msg_client,
+ bool connected)
{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
- if (mgmt_msg_procbufs(&client_ctx->mstate, mgmt_fe_client_process_msg,
- client_ctx, MGMTD_DBG_FE_CLIENT_CHECK()))
- mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
-}
+ struct mgmt_fe_client *client =
+ container_of(msg_client, struct mgmt_fe_client, client);
+ struct mgmt_fe_client_session *session;
+ int ret;
-static void mgmt_fe_client_read(struct event *thread)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- enum mgmt_msg_rsched rv;
+ /* Send REGISTER_REQ message */
+ if (connected) {
+ if ((ret = mgmt_fe_send_register_req(client)) != 0)
+ return ret;
+ }
- client_ctx = (struct mgmt_fe_client_ctx *)EVENT_ARG(thread);
+ /* Walk list of sessions for this FE client deleting them */
+ if (!connected && mgmt_sessions_count(&client->sessions)) {
+ MGMTD_FE_CLIENT_DBG("Cleaning up existing sessions");
- rv = mgmt_msg_read(&client_ctx->mstate, client_ctx->conn_fd,
- MGMTD_DBG_FE_CLIENT_CHECK());
- if (rv == MSR_DISCONNECT) {
- mgmt_fe_server_disconnect(client_ctx, true);
- return;
- }
- if (rv == MSR_SCHED_BOTH)
- mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
- mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
-}
+ FOREACH_SESSION_IN_LIST (client, session) {
+ assert(session->client);
-static void mgmt_fe_server_connect(struct mgmt_fe_client_ctx *client_ctx)
-{
- const char *dbgtag = MGMTD_DBG_FE_CLIENT_CHECK() ? "FE-client" : NULL;
+ /* unlink from list first this avoids double free */
+ mgmt_sessions_del(&client->sessions, session);
- assert(client_ctx->conn_fd == -1);
- client_ctx->conn_fd = mgmt_msg_connect(
- MGMTD_FE_SERVER_PATH, MGMTD_SOCKET_FE_SEND_BUF_SIZE,
- MGMTD_SOCKET_FE_RECV_BUF_SIZE, dbgtag);
+ /* notify FE client the session is being deleted */
+ if (session->client->cbs.client_session_notify) {
+ (*session->client->cbs.client_session_notify)(
+ client, client->user_data,
+ session->client_id, false, true,
+ session->session_id, session->user_ctx);
+ }
- /* Send REGISTER_REQ message */
- if (client_ctx->conn_fd == -1 ||
- mgmt_fe_send_register_req(client_ctx) != 0) {
- mgmt_fe_server_disconnect(client_ctx, true);
- return;
+ XFREE(MTYPE_MGMTD_FE_SESSION, session);
+ }
}
- /* Start reading from the socket */
- mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
-
- /* Notify client through registered callback (if any) */
- if (client_ctx->client_params.client_connect_notify)
- (void)(*client_ctx->client_params.client_connect_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data, true);
+ /* Notify FE client through registered callback (if any). */
+ if (client->cbs.client_connect_notify)
+ (void)(*client->cbs.client_connect_notify)(
+ client, client->user_data, connected);
+ return 0;
}
-
-static void mgmt_fe_client_conn_timeout(struct event *thread)
+static int mgmt_fe_client_notify_connect(struct msg_client *client)
{
- mgmt_fe_server_connect(EVENT_ARG(thread));
+ return _notify_connect_disconnect(client, true);
}
-static void
-mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
- enum mgmt_fe_event event)
+static int mgmt_fe_client_notify_disconnect(struct msg_conn *conn)
{
- struct timeval tv = {0};
+ struct msg_client *client = container_of(conn, struct msg_client, conn);
- switch (event) {
- case MGMTD_FE_CONN_READ:
- event_add_read(client_ctx->tm, mgmt_fe_client_read,
- client_ctx, client_ctx->conn_fd,
- &client_ctx->conn_read_ev);
- break;
- case MGMTD_FE_CONN_WRITE:
- event_add_write(client_ctx->tm, mgmt_fe_client_write,
- client_ctx, client_ctx->conn_fd,
- &client_ctx->conn_write_ev);
- break;
- case MGMTD_FE_PROC_MSG:
- tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
- event_add_timer_tv(client_ctx->tm,
- mgmt_fe_client_proc_msgbufs, client_ctx,
- &tv, &client_ctx->msg_proc_ev);
- break;
- case MGMTD_FE_CONN_WRITES_ON:
- event_add_timer_msec(
- client_ctx->tm, mgmt_fe_client_resume_writes,
- client_ctx, MGMTD_FE_MSG_WRITE_DELAY_MSEC,
- &client_ctx->conn_writes_on);
- break;
- case MGMTD_FE_SERVER:
- assert(!"mgmt_fe_client_ctx_post_event called incorrectly");
- break;
- }
+ return _notify_connect_disconnect(client, false);
}
-static void mgmt_fe_client_schedule_conn_retry(
- struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs)
-{
- MGMTD_FE_CLIENT_DBG(
- "Scheduling MGMTD Frontend server connection retry after %lu seconds",
- intvl_secs);
- event_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
- (void *)client_ctx, intvl_secs,
- &client_ctx->conn_retry_tmr);
-}
DEFPY(debug_mgmt_client_fe, debug_mgmt_client_fe_cmd,
"[no] debug mgmt client frontend",
@@ -819,31 +627,31 @@ static struct cmd_node mgmt_dbg_node = {
/*
* Initialize library and try connecting with MGMTD.
*/
-uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
- struct event_loop *master_thread)
+struct mgmt_fe_client *mgmt_fe_client_create(const char *client_name,
+ struct mgmt_fe_client_cbs *cbs,
+ uintptr_t user_data,
+ struct event_loop *event_loop)
{
- assert(master_thread && params && strlen(params->name)
- && !mgmt_fe_client_ctx.tm);
-
- mgmt_fe_client_ctx.tm = master_thread;
- memcpy(&mgmt_fe_client_ctx.client_params, params,
- sizeof(mgmt_fe_client_ctx.client_params));
- if (!mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec)
- mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec =
- MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC;
+ struct mgmt_fe_client *client =
+ XCALLOC(MTYPE_MGMTD_FE_CLIENT, sizeof(*client));
- mgmt_msg_init(&mgmt_fe_client_ctx.mstate, MGMTD_FE_MAX_NUM_MSG_PROC,
- MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
- "FE-client");
+ client->name = XSTRDUP(MTYPE_MGMTD_FE_CLIENT_NAME, client_name);
+ client->user_data = user_data;
+ if (cbs)
+ client->cbs = *cbs;
- mgmt_sessions_init(&mgmt_fe_client_ctx.client_sessions);
+ mgmt_sessions_init(&client->sessions);
- /* Start trying to connect to MGMTD frontend server immediately */
- mgmt_fe_client_schedule_conn_retry(&mgmt_fe_client_ctx, 1);
+ msg_client_init(&client->client, event_loop, MGMTD_FE_SERVER_PATH,
+ mgmt_fe_client_notify_connect,
+ mgmt_fe_client_notify_disconnect,
+ mgmt_fe_client_process_msg, MGMTD_FE_MAX_NUM_MSG_PROC,
+ MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN, true,
+ "FE-client", MGMTD_DBG_FE_CLIENT_CHECK());
- MGMTD_FE_CLIENT_DBG("Initialized client '%s'", params->name);
+ MGMTD_FE_CLIENT_DBG("Initialized client '%s'", client_name);
- return (uintptr_t)&mgmt_fe_client_ctx;
+ return client;
}
void mgmt_fe_client_lib_vty_init(void)
@@ -854,33 +662,34 @@ void mgmt_fe_client_lib_vty_init(void)
install_element(CONFIG_NODE, &debug_mgmt_client_fe_cmd);
}
+uint mgmt_fe_client_session_count(struct mgmt_fe_client *client)
+{
+ return mgmt_sessions_count(&client->sessions);
+}
+
/*
* Create a new Session for a Frontend Client connection.
*/
-enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
- uint64_t client_id,
- uintptr_t user_ctx)
+enum mgmt_result mgmt_fe_create_client_session(struct mgmt_fe_client *client,
+ uint64_t client_id,
+ uintptr_t user_ctx)
{
- struct mgmt_fe_client_ctx *client_ctx;
struct mgmt_fe_client_session *session;
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
- sizeof(struct mgmt_fe_client_session));
+ sizeof(struct mgmt_fe_client_session));
assert(session);
session->user_ctx = user_ctx;
session->client_id = client_id;
- session->client_ctx = client_ctx;
+ session->client = client;
session->session_id = 0;
- if (mgmt_fe_send_session_req(client_ctx, session, true) != 0) {
+ mgmt_sessions_add_tail(&client->sessions, session);
+
+ if (mgmt_fe_send_session_req(client, session, true) != 0) {
XFREE(MTYPE_MGMTD_FE_SESSION, session);
return MGMTD_INTERNAL_ERROR;
}
- mgmt_sessions_add_tail(&client_ctx->client_sessions, session);
return MGMTD_SUCCESS;
}
@@ -888,233 +697,42 @@ enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
/*
* Delete an existing Session for a Frontend Client connection.
*/
-enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+enum mgmt_result mgmt_fe_destroy_client_session(struct mgmt_fe_client *client,
uint64_t client_id)
{
- struct mgmt_fe_client_ctx *client_ctx;
struct mgmt_fe_client_session *session;
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = mgmt_fe_find_session_by_client_id(client_ctx, client_id);
- if (!session || session->client_ctx != client_ctx)
+ session = mgmt_fe_find_session_by_client_id(client, client_id);
+ if (!session || session->client != client)
return MGMTD_INVALID_PARAM;
if (session->session_id &&
- mgmt_fe_send_session_req(client_ctx, session, false) != 0)
+ mgmt_fe_send_session_req(client, session, false) != 0)
MGMTD_FE_CLIENT_ERR(
- "Failed to send session destroy request for the session-id %lu",
- (unsigned long)session->session_id);
+ "Failed to send session destroy request for the session-id %" PRIu64,
+ session->session_id);
- mgmt_sessions_del(&client_ctx->client_sessions, session);
+ mgmt_sessions_del(&client->sessions, session);
XFREE(MTYPE_MGMTD_FE_SESSION, session);
return MGMTD_SUCCESS;
}
-static void mgmt_fe_destroy_client_sessions(uintptr_t lib_hndl)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return;
-
- FOREACH_SESSION_IN_LIST (client_ctx, session)
- mgmt_fe_destroy_client_session(lib_hndl, session->client_id);
-}
-
-/*
- * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
- */
-enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- bool lock_ds)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = (struct mgmt_fe_client_session *)session_id;
- if (!session || session->client_ctx != client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_lockds_req(client_ctx, session, lock_ds, req_id,
- ds_id)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
- */
-enum mgmt_result
-mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangCfgDataReq **config_req, int num_reqs,
- bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = (struct mgmt_fe_client_session *)session_id;
- if (!session || session->client_ctx != client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_setcfg_req(client_ctx, session, req_id, ds_id,
- config_req, num_reqs, implicit_commit,
- dst_ds_id)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
- */
-enum mgmt_result mgmt_fe_commit_config_data(uintptr_t lib_hndl,
- uintptr_t session_id,
- uint64_t req_id,
- Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dst_ds_id,
- bool validate_only, bool abort)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = (struct mgmt_fe_client_session *)session_id;
- if (!session || session->client_ctx != client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_commitcfg_req(client_ctx, session, req_id, src_ds_id,
- dst_ds_id, validate_only, abort)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
- */
-enum mgmt_result
-mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq * data_req[], int num_reqs)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = (struct mgmt_fe_client_session *)session_id;
- if (!session || session->client_ctx != client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_getcfg_req(client_ctx, session, req_id, ds_id,
- data_req, num_reqs)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send GET_DATA_REQ to MGMTD for one or more config data item(s).
- */
-enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq * data_req[],
- int num_reqs)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = (struct mgmt_fe_client_session *)session_id;
- if (!session || session->client_ctx != client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_getdata_req(client_ctx, session, req_id, ds_id,
- data_req, num_reqs)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
- */
-enum mgmt_result
-mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- bool register_req,
- Mgmtd__YangDataXPath * data_req[],
- int num_reqs)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = (struct mgmt_fe_client_session *)session_id;
- if (!session || session->client_ctx != client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_regnotify_req(client_ctx, session, req_id, ds_id,
- register_req, data_req, num_reqs)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
/*
* Destroy library and cleanup everything.
*/
-void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl)
+void mgmt_fe_client_destroy(struct mgmt_fe_client *client)
{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- assert(client_ctx);
+ struct mgmt_fe_client_session *session;
MGMTD_FE_CLIENT_DBG("Destroying MGMTD Frontend Client '%s'",
- client_ctx->client_params.name);
+ client->name);
- mgmt_fe_server_disconnect(client_ctx, false);
+ FOREACH_SESSION_IN_LIST (client, session)
+ mgmt_fe_destroy_client_session(client, session->client_id);
- mgmt_fe_destroy_client_sessions(lib_hndl);
+ msg_client_cleanup(&client->client);
- EVENT_OFF(client_ctx->conn_retry_tmr);
- EVENT_OFF(client_ctx->conn_read_ev);
- EVENT_OFF(client_ctx->conn_write_ev);
- EVENT_OFF(client_ctx->conn_writes_on);
- EVENT_OFF(client_ctx->msg_proc_ev);
- mgmt_msg_destroy(&client_ctx->mstate);
+ XFREE(MTYPE_MGMTD_FE_CLIENT_NAME, client->name);
+ XFREE(MTYPE_MGMTD_FE_CLIENT, client);
}
diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h
index 94867787d9..edf861746c 100644
--- a/lib/mgmt_fe_client.h
+++ b/lib/mgmt_fe_client.h
@@ -56,6 +56,9 @@ extern "C" {
#define MGMTD_DS_OPERATIONAL MGMTD__DATASTORE_ID__OPERATIONAL_DS
#define MGMTD_DS_MAX_ID MGMTD_DS_OPERATIONAL + 1
+struct mgmt_fe_client;
+
+
/*
* All the client specific information this library needs to
* initialize itself, setup connection with MGMTD FrontEnd interface
@@ -66,54 +69,65 @@ extern "C" {
* to initialize the library (See mgmt_fe_client_lib_init for
* more details).
*/
-struct mgmt_fe_client_params {
- char name[MGMTD_CLIENT_NAME_MAX_LEN];
- uintptr_t user_data;
- unsigned long conn_retry_intvl_sec;
-
- void (*client_connect_notify)(uintptr_t lib_hndl,
- uintptr_t user_data,
- bool connected);
-
- void (*client_session_notify)(uintptr_t lib_hndl,
- uintptr_t user_data,
- uint64_t client_id,
+struct mgmt_fe_client_cbs {
+ void (*client_connect_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, bool connected);
+
+ void (*client_session_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
bool create, bool success,
uintptr_t session_id,
- uintptr_t user_session_ctx);
+ uintptr_t user_session_client);
- void (*lock_ds_notify)(uintptr_t lib_hndl, uintptr_t user_data,
- uint64_t client_id, uintptr_t session_id,
- uintptr_t user_session_ctx, uint64_t req_id,
+ void (*lock_ds_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client, uint64_t req_id,
bool lock_ds, bool success,
Mgmtd__DatastoreId ds_id, char *errmsg_if_any);
- void (*set_config_notify)(uintptr_t lib_hndl, uintptr_t user_data,
- uint64_t client_id, uintptr_t session_id,
- uintptr_t user_session_ctx, uint64_t req_id,
- bool success, Mgmtd__DatastoreId ds_id,
+ void (*set_config_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client,
+ uint64_t req_id, bool success,
+ Mgmtd__DatastoreId ds_id,
char *errmsg_if_any);
- void (*commit_config_notify)(
- uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
- uintptr_t session_id, uintptr_t user_session_ctx,
- uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dst_ds_id, bool validate_only,
- char *errmsg_if_any);
-
- enum mgmt_result (*get_data_notify)(
- uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
- uintptr_t session_id, uintptr_t user_session_ctx,
- uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangData **yang_data, size_t num_data, int next_key,
- char *errmsg_if_any);
-
- enum mgmt_result (*data_notify)(
- uint64_t client_id, uint64_t session_id, uintptr_t user_data,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangData **yang_data, size_t num_data);
+ void (*commit_config_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client,
+ uint64_t req_id, bool success,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id,
+ bool validate_only, char *errmsg_if_any);
+
+ int (*get_data_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client, uint64_t req_id,
+ bool success, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData **yang_data, size_t num_data,
+ int next_key, char *errmsg_if_any);
+
+ int (*data_notify)(uint64_t client_id, uint64_t session_id,
+ uintptr_t user_data, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData **yang_data, size_t num_data);
};
+extern struct debug mgmt_dbg_fe_client;
+
+#define MGMTD_FE_CLIENT_DBG(fmt, ...) \
+ DEBUGD(&mgmt_dbg_fe_client, "FE-CLIENT: %s:" fmt, __func__, \
+ ##__VA_ARGS__)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...) \
+ zlog_err("FE-CLIENT: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_DBG_FE_CLIENT_CHECK() \
+ DEBUG_MODE_CHECK(&mgmt_dbg_fe_client, DEBUG_MODE_ALL)
+
+
/***************************************************************
* API prototypes
***************************************************************/
@@ -128,17 +142,18 @@ struct mgmt_fe_client_params {
* Thread master.
*
* Returns:
- * Frontend client lib handler (nothing but address of mgmt_fe_client_ctx)
+ * Frontend client lib handler (nothing but address of mgmt_fe_client)
*/
-extern uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
- struct event_loop *master_thread);
+extern struct mgmt_fe_client *
+mgmt_fe_client_create(const char *client_name, struct mgmt_fe_client_cbs *cbs,
+ uintptr_t user_data, struct event_loop *event_loop);
/*
* Initialize library vty (adds debug support).
*
- * This call should be added to your component when enabling other vty code to
- * enable mgmtd client debugs. When adding, one needs to also add a their
- * component in `xref2vtysh.py` as well.
+ * This call should be added to your component when enabling other vty
+ * code to enable mgmtd client debugs. When adding, one needs to also
+ * add their component in `xref2vtysh.py` as well.
*/
extern void mgmt_fe_client_lib_vty_init(void);
@@ -156,15 +171,15 @@ extern void mgmt_debug_fe_client_show_debug(struct vty *vty);
* client_id
* Unique identifier of client.
*
- * user_ctx
+ * user_client
* Client context.
*
* Returns:
* MGMTD_SUCCESS on success, MGMTD_* otherwise.
*/
-extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
- uint64_t client_id,
- uintptr_t user_ctx);
+extern enum mgmt_result
+mgmt_fe_create_client_session(struct mgmt_fe_client *client, uint64_t client_id,
+ uintptr_t user_client);
/*
* Delete an existing Session for a Frontend Client connection.
@@ -176,10 +191,11 @@ extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
* Unique identifier of client.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
- uint64_t client_id);
+extern enum mgmt_result
+mgmt_fe_destroy_client_session(struct mgmt_fe_client *client,
+ uint64_t client_id);
/*
* Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
@@ -200,11 +216,11 @@ extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
* TRUE for lock request, FALSE for unlock request.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id, bool lock_ds);
+extern int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool lock_ds);
/*
* Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
@@ -234,13 +250,15 @@ mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
* Destination Datastore ID where data needs to be set.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangCfgDataReq **config_req, int num_req,
- bool implicit_commit, Mgmtd__DatastoreId dst_ds_id);
+
+extern int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **config_req,
+ int num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id);
/*
* Send SET_COMMMIT_REQ to MGMTD for one or more config data(s).
@@ -267,13 +285,13 @@ mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
* TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_commit_config_data(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dst_ds_id, bool validate_only,
- bool abort);
+extern int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id,
+ bool validate_only, bool abort);
/*
* Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
@@ -297,12 +315,13 @@ mgmt_fe_commit_config_data(uintptr_t lib_hndl, uintptr_t session_id,
* Number of get config requests.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req, int num_reqs);
+extern int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req,
+ int num_reqs);
/*
* Send GET_DATA_REQ to MGMTD for one or more data item(s).
@@ -310,10 +329,11 @@ mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
* Similar to get config request but supports getting data
* from operational ds aka backend clients directly.
*/
-extern enum mgmt_result
-mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id, Mgmtd__YangGetDataReq **data_req,
- int num_reqs);
+extern int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req,
+ int num_reqs);
/*
* Send NOTIFY_REGISTER_REQ to MGMTD daemon.
@@ -340,18 +360,24 @@ mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
* Number of data requests.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- bool register_req,
- Mgmtd__YangDataXPath **data_req, int num_reqs);
+extern int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ bool register_req,
+ Mgmtd__YangDataXPath **data_req,
+ int num_reqs);
/*
* Destroy library and cleanup everything.
*/
-extern void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl);
+extern void mgmt_fe_client_destroy(struct mgmt_fe_client *client);
+
+/*
+ * Get count of open sessions.
+ */
+extern uint mgmt_fe_client_session_count(struct mgmt_fe_client *client);
#ifdef __cplusplus
}
diff --git a/lib/mgmt_msg.c b/lib/mgmt_msg.c
index 3f55f82024..0d9802a2b3 100644
--- a/lib/mgmt_msg.c
+++ b/lib/mgmt_msg.c
@@ -7,6 +7,7 @@
* Copyright (c) 2023, LabN Consulting, L.L.C.
*/
#include <zebra.h>
+#include "debug.h"
#include "network.h"
#include "sockopt.h"
#include "stream.h"
@@ -22,7 +23,9 @@
} while (0)
#define MGMT_MSG_ERR(ms, fmt, ...) \
- zlog_err("%s: %s: " fmt, ms->idtag, __func__, ##__VA_ARGS__)
+ zlog_err("%s: %s: " fmt, (ms)->idtag, __func__, ##__VA_ARGS__)
+
+DEFINE_MTYPE(LIB, MSG_CONN, "msg connection state");
/**
* Read data from a socket into streams containing 1 or more full msgs headed by
@@ -81,7 +84,7 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
left = stream_get_endp(ms->ins);
while (left > (long)sizeof(struct mgmt_msg_hdr)) {
mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
- if (mhdr->marker != MGMT_MSG_MARKER) {
+ if (!MGMT_MSG_IS_MARKER(mhdr->marker)) {
MGMT_MSG_DBG(dbgtag, "recv corrupt buffer, disconnect");
return MSR_DISCONNECT;
}
@@ -127,8 +130,8 @@ enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
* true if more to process (so reschedule) else false
*/
bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
- void (*handle_msg)(void *user, uint8_t *msg,
- size_t msglen),
+ void (*handle_msg)(uint8_t version, uint8_t *msg,
+ size_t msglen, void *user),
void *user, bool debug)
{
const char *dbgtag = debug ? ms->idtag : NULL;
@@ -153,11 +156,13 @@ bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
left -= mhdr->len, data += mhdr->len) {
mhdr = (struct mgmt_msg_hdr *)data;
- assert(mhdr->marker == MGMT_MSG_MARKER);
+ assert(MGMT_MSG_IS_MARKER(mhdr->marker));
assert(left >= mhdr->len);
- handle_msg(user, (uint8_t *)(mhdr + 1),
- mhdr->len - sizeof(struct mgmt_msg_hdr));
+ handle_msg(MGMT_MSG_MARKER_VERSION(mhdr->marker),
+ (uint8_t *)(mhdr + 1),
+ mhdr->len - sizeof(struct mgmt_msg_hdr),
+ user);
ms->nrxm++;
nproc++;
}
@@ -251,7 +256,7 @@ enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
dbgtag,
"reached %zu buffer writes, pausing with %zu streams left",
ms->max_write_buf, ms->outq.count);
- return MSW_SCHED_WRITES_OFF;
+ return MSW_SCHED_STREAM;
}
MGMT_MSG_DBG(dbgtag, "flushed all streams from output q");
return MSW_SCHED_NONE;
@@ -264,15 +269,19 @@ enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
*
* Args:
* ms: mgmt_msg_state for this process.
- * fd: socket/file to read data from.
+ * version: version of this message, will be given to receiving side.
+ * msg: the message to be sent.
+ * len: the length of the message.
+ * packf: a function to pack the message.
* debug: true to enable debug logging.
*
* Returns:
* 0 on success, otherwise -1 on failure. The only failure mode is if a
* the message exceeds the maximum message size configured on init.
*/
-int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
- mgmt_msg_packf packf, bool debug)
+int mgmt_msg_send_msg(struct mgmt_msg_state *ms, uint8_t version, void *msg,
+ size_t len, size_t (*packf)(void *msg, void *buf),
+ bool debug)
{
const char *dbgtag = debug ? ms->idtag : NULL;
struct mgmt_msg_hdr *mhdr;
@@ -308,12 +317,17 @@ int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
/* We have a stream with space, pack the message into it. */
mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(s) + s->endp);
- mhdr->marker = MGMT_MSG_MARKER;
+ mhdr->marker = MGMT_MSG_MARKER(version);
mhdr->len = mlen;
stream_forward_endp(s, sizeof(*mhdr));
endp = stream_get_endp(s);
dstbuf = STREAM_DATA(s) + endp;
- n = packf(msg, dstbuf);
+ if (packf)
+ n = packf(msg, dstbuf);
+ else {
+ memcpy(dstbuf, msg, len);
+ n = len;
+ }
stream_set_endp(s, endp + n);
ms->ntxm++;
@@ -392,6 +406,7 @@ size_t mgmt_msg_reset_writes(struct mgmt_msg_state *ms)
return nproc;
}
+
void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
size_t max_write_buf, size_t max_msg_sz, const char *idtag)
{
@@ -412,3 +427,494 @@ void mgmt_msg_destroy(struct mgmt_msg_state *ms)
stream_free(ms->ins);
free(ms->idtag);
}
+
+/*
+ * Connections
+ */
+
+#define MSG_CONN_DEFAULT_CONN_RETRY_MSEC 250
+#define MSG_CONN_SEND_BUF_SIZE (1u << 16)
+#define MSG_CONN_RECV_BUF_SIZE (1u << 16)
+
+static void msg_client_sched_connect(struct msg_client *client,
+ unsigned long msec);
+
+static void msg_conn_sched_proc_msgs(struct msg_conn *conn);
+static void msg_conn_sched_read(struct msg_conn *conn);
+static void msg_conn_sched_write(struct msg_conn *conn);
+
+static void msg_conn_write(struct event *thread)
+{
+ struct msg_conn *conn = EVENT_ARG(thread);
+ enum mgmt_msg_wsched rv;
+
+ rv = mgmt_msg_write(&conn->mstate, conn->fd, conn->debug);
+ if (rv == MSW_SCHED_STREAM)
+ msg_conn_sched_write(conn);
+ else if (rv == MSW_DISCONNECT)
+ msg_conn_disconnect(conn, conn->is_client);
+ else
+ assert(rv == MSW_SCHED_NONE);
+}
+
+static void msg_conn_read(struct event *thread)
+{
+ struct msg_conn *conn = EVENT_ARG(thread);
+ enum mgmt_msg_rsched rv;
+
+ rv = mgmt_msg_read(&conn->mstate, conn->fd, conn->debug);
+ if (rv == MSR_DISCONNECT) {
+ msg_conn_disconnect(conn, conn->is_client);
+ return;
+ }
+ if (rv == MSR_SCHED_BOTH)
+ msg_conn_sched_proc_msgs(conn);
+ msg_conn_sched_read(conn);
+}
+
+/* collapse this into mgmt_msg_procbufs */
+static void msg_conn_proc_msgs(struct event *thread)
+{
+ struct msg_conn *conn = EVENT_ARG(thread);
+
+ if (mgmt_msg_procbufs(&conn->mstate,
+ (void (*)(uint8_t, uint8_t *, size_t,
+ void *))conn->handle_msg,
+ conn, conn->debug))
+ /* there's more, schedule handling more */
+ msg_conn_sched_proc_msgs(conn);
+}
+
+static void msg_conn_sched_read(struct msg_conn *conn)
+{
+ event_add_read(conn->loop, msg_conn_read, conn, conn->fd,
+ &conn->read_ev);
+}
+
+static void msg_conn_sched_write(struct msg_conn *conn)
+{
+ event_add_write(conn->loop, msg_conn_write, conn, conn->fd,
+ &conn->write_ev);
+}
+
+static void msg_conn_sched_proc_msgs(struct msg_conn *conn)
+{
+ event_add_event(conn->loop, msg_conn_proc_msgs, conn, 0,
+ &conn->proc_msg_ev);
+}
+
+
+void msg_conn_disconnect(struct msg_conn *conn, bool reconnect)
+{
+
+ /* disconnect short-circuit if present */
+ if (conn->remote_conn) {
+ conn->remote_conn->remote_conn = NULL;
+ conn->remote_conn = NULL;
+ }
+
+ if (conn->fd != -1) {
+ close(conn->fd);
+ conn->fd = -1;
+
+ /* Notify client through registered callback (if any) */
+ if (conn->notify_disconnect)
+ (void)(*conn->notify_disconnect)(conn);
+ }
+
+ if (reconnect) {
+ assert(conn->is_client);
+ msg_client_sched_connect(
+ container_of(conn, struct msg_client, conn),
+ MSG_CONN_DEFAULT_CONN_RETRY_MSEC);
+ }
+}
+
+int msg_conn_send_msg(struct msg_conn *conn, uint8_t version, void *msg,
+ size_t mlen, size_t (*packf)(void *, void *),
+ bool short_circuit_ok)
+{
+ const char *dbgtag = conn->debug ? conn->mstate.idtag : NULL;
+
+ if (conn->fd == -1) {
+ MGMT_MSG_ERR(&conn->mstate,
+ "can't send message on closed connection");
+ return -1;
+ }
+
+ /* immediately handle the message if short-circuit is present */
+ if (conn->remote_conn && short_circuit_ok) {
+ uint8_t *buf = msg;
+ size_t n = mlen;
+
+ if (packf) {
+ buf = XMALLOC(MTYPE_TMP, mlen);
+ n = packf(msg, buf);
+ }
+
+ MGMT_MSG_DBG(dbgtag, "SC send: depth %u msg: %p",
+ ++conn->short_circuit_depth, msg);
+
+ conn->remote_conn->handle_msg(version, buf, n,
+ conn->remote_conn);
+
+ MGMT_MSG_DBG(dbgtag, "SC return from depth: %u msg: %p",
+ conn->short_circuit_depth--, msg);
+
+ if (packf)
+ XFREE(MTYPE_TMP, buf);
+ return 0;
+ }
+
+ int rv = mgmt_msg_send_msg(&conn->mstate, version, msg, mlen, packf,
+ conn->debug);
+
+ msg_conn_sched_write(conn);
+
+ return rv;
+}
+
+void msg_conn_cleanup(struct msg_conn *conn)
+{
+ struct mgmt_msg_state *ms = &conn->mstate;
+
+ /* disconnect short-circuit if present */
+ if (conn->remote_conn) {
+ conn->remote_conn->remote_conn = NULL;
+ conn->remote_conn = NULL;
+ }
+
+ if (conn->fd != -1) {
+ close(conn->fd);
+ conn->fd = -1;
+ }
+
+ EVENT_OFF(conn->read_ev);
+ EVENT_OFF(conn->write_ev);
+ EVENT_OFF(conn->proc_msg_ev);
+
+ mgmt_msg_destroy(ms);
+}
+
+/*
+ * Client Connections
+ */
+
+DECLARE_LIST(msg_server_list, struct msg_server, link);
+
+static struct msg_server_list_head msg_servers;
+
+static void msg_client_connect(struct msg_client *conn);
+
+static void msg_client_connect_timer(struct event *thread)
+{
+ msg_client_connect(EVENT_ARG(thread));
+}
+
+static void msg_client_sched_connect(struct msg_client *client,
+ unsigned long msec)
+{
+ struct msg_conn *conn = &client->conn;
+ const char *dbgtag = conn->debug ? conn->mstate.idtag : NULL;
+
+ MGMT_MSG_DBG(dbgtag, "connection retry in %lu msec", msec);
+ if (msec)
+ event_add_timer_msec(conn->loop, msg_client_connect_timer,
+ client, msec, &client->conn_retry_tmr);
+ else
+ event_add_event(conn->loop, msg_client_connect_timer, client, 0,
+ &client->conn_retry_tmr);
+}
+
+static bool msg_client_connect_short_circuit(struct msg_client *client)
+{
+ struct msg_conn *server_conn;
+ struct msg_server *server;
+ const char *dbgtag =
+ client->conn.debug ? client->conn.mstate.idtag : NULL;
+ union sockunion su = {0};
+ int sockets[2];
+
+ frr_each (msg_server_list, &msg_servers, server)
+ if (!strcmp(server->sopath, client->sopath))
+ break;
+ if (!server) {
+ MGMT_MSG_DBG(dbgtag,
+ "no short-circuit connection available for %s",
+ client->sopath);
+
+ return false;
+ }
+
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, sockets)) {
+ MGMT_MSG_ERR(
+ &client->conn.mstate,
+ "socketpair failed trying to short-circuit connection on %s: %s",
+ client->sopath, safe_strerror(errno));
+ return false;
+ }
+
+ /* client side */
+ client->conn.fd = sockets[0];
+ set_nonblocking(sockets[0]);
+ setsockopt_so_sendbuf(sockets[0], client->conn.mstate.max_write_buf);
+ setsockopt_so_recvbuf(sockets[0], client->conn.mstate.max_read_buf);
+ client->conn.is_short_circuit = true;
+
+ /* server side */
+ memset(&su, 0, sizeof(union sockunion));
+ server_conn = server->create(sockets[1], &su);
+ server_conn->is_short_circuit = true;
+
+ client->conn.remote_conn = server_conn;
+ server_conn->remote_conn = &client->conn;
+
+ MGMT_MSG_DBG(
+ dbgtag,
+ "short-circuit connection on %s server %s:%d to client %s:%d",
+ client->sopath, server_conn->mstate.idtag, server_conn->fd,
+ client->conn.mstate.idtag, client->conn.fd);
+
+ MGMT_MSG_DBG(
+ server_conn->debug ? server_conn->mstate.idtag : NULL,
+ "short-circuit connection on %s client %s:%d to server %s:%d",
+ client->sopath, client->conn.mstate.idtag, client->conn.fd,
+ server_conn->mstate.idtag, server_conn->fd);
+
+ return true;
+}
+
+
+/* Connect and start reading from the socket */
+static void msg_client_connect(struct msg_client *client)
+{
+ struct msg_conn *conn = &client->conn;
+ const char *dbgtag = conn->debug ? conn->mstate.idtag : NULL;
+
+ if (!client->short_circuit_ok ||
+ !msg_client_connect_short_circuit(client))
+ conn->fd =
+ mgmt_msg_connect(client->sopath, MSG_CONN_SEND_BUF_SIZE,
+ MSG_CONN_RECV_BUF_SIZE, dbgtag);
+
+ if (conn->fd == -1)
+ /* retry the connection */
+ msg_client_sched_connect(client,
+ MSG_CONN_DEFAULT_CONN_RETRY_MSEC);
+ else if (client->notify_connect && client->notify_connect(client))
+ /* client connect notify failed */
+ msg_conn_disconnect(conn, true);
+ else
+ /* start reading */
+ msg_conn_sched_read(conn);
+}
+
+void msg_client_init(struct msg_client *client, struct event_loop *tm,
+ const char *sopath,
+ int (*notify_connect)(struct msg_client *client),
+ int (*notify_disconnect)(struct msg_conn *client),
+ void (*handle_msg)(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *client),
+ size_t max_read_buf, size_t max_write_buf,
+ size_t max_msg_sz, bool short_circuit_ok,
+ const char *idtag, bool debug)
+{
+ struct msg_conn *conn = &client->conn;
+ memset(client, 0, sizeof(*client));
+
+ conn->loop = tm;
+ conn->fd = -1;
+ conn->handle_msg = handle_msg;
+ conn->notify_disconnect = notify_disconnect;
+ conn->is_client = true;
+ conn->debug = debug;
+ client->short_circuit_ok = short_circuit_ok;
+ client->sopath = strdup(sopath);
+ client->notify_connect = notify_connect;
+
+ mgmt_msg_init(&conn->mstate, max_read_buf, max_write_buf, max_msg_sz,
+ idtag);
+
+ /* XXX maybe just have client kick this off */
+ /* Start trying to connect to server */
+ msg_client_sched_connect(client, 0);
+}
+
+void msg_client_cleanup(struct msg_client *client)
+{
+ assert(client->conn.is_client);
+
+ EVENT_OFF(client->conn_retry_tmr);
+ free(client->sopath);
+
+ msg_conn_cleanup(&client->conn);
+}
+
+
+/*
+ * Server-side connections
+ */
+
+static void msg_server_accept(struct event *event)
+{
+ struct msg_server *server = EVENT_ARG(event);
+ int fd;
+ union sockunion su;
+
+ if (server->fd < 0)
+ return;
+
+	/* Continue listening on the server socket for further connections. */
+ event_add_read(server->loop, msg_server_accept, server, server->fd,
+ &server->listen_ev);
+
+ memset(&su, 0, sizeof(union sockunion));
+
+ /* We can handle IPv4 or IPv6 socket. */
+ fd = sockunion_accept(server->fd, &su);
+ if (fd < 0) {
+ zlog_err("Failed to accept %s client connection: %s",
+ server->idtag, safe_strerror(errno));
+ return;
+ }
+ set_nonblocking(fd);
+ set_cloexec(fd);
+
+ DEBUGD(server->debug, "Accepted new %s connection", server->idtag);
+
+ server->create(fd, &su);
+}
+
+int msg_server_init(struct msg_server *server, const char *sopath,
+ struct event_loop *loop,
+ struct msg_conn *(*create)(int fd, union sockunion *su),
+ const char *idtag, struct debug *debug)
+{
+ int ret;
+ int sock;
+ struct sockaddr_un addr;
+ mode_t old_mask;
+
+ memset(server, 0, sizeof(*server));
+ server->fd = -1;
+
+ sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+ if (sock < 0) {
+ zlog_err("Failed to create %s server socket: %s", server->idtag,
+ safe_strerror(errno));
+ goto fail;
+ }
+
+ addr.sun_family = AF_UNIX,
+ strlcpy(addr.sun_path, sopath, sizeof(addr.sun_path));
+ unlink(addr.sun_path);
+ old_mask = umask(0077);
+ ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+ if (ret < 0) {
+ zlog_err("Failed to bind %s server socket to '%s': %s",
+ server->idtag, addr.sun_path, safe_strerror(errno));
+ umask(old_mask);
+ goto fail;
+ }
+ umask(old_mask);
+
+ ret = listen(sock, MGMTD_MAX_CONN);
+ if (ret < 0) {
+ zlog_err("Failed to listen on %s server socket: %s",
+ server->idtag, safe_strerror(errno));
+ goto fail;
+ }
+
+ server->fd = sock;
+ server->loop = loop;
+ server->sopath = strdup(sopath);
+ server->idtag = strdup(idtag);
+ server->create = create;
+ server->debug = debug;
+
+ msg_server_list_add_head(&msg_servers, server);
+
+ event_add_read(server->loop, msg_server_accept, server, server->fd,
+ &server->listen_ev);
+
+
+ DEBUGD(debug, "Started %s server, listening on %s", idtag, sopath);
+ return 0;
+
+fail:
+ if (sock >= 0)
+ close(sock);
+ server->fd = -1;
+ return -1;
+}
+
+void msg_server_cleanup(struct msg_server *server)
+{
+ DEBUGD(server->debug, "Closing %s server", server->idtag);
+
+ if (server->listen_ev)
+ EVENT_OFF(server->listen_ev);
+
+ msg_server_list_del(&msg_servers, server);
+
+ if (server->fd >= 0)
+ close(server->fd);
+ free((char *)server->sopath);
+ free((char *)server->idtag);
+
+ memset(server, 0, sizeof(*server));
+ server->fd = -1;
+}
+
+/*
+ * Initialize and start reading from the accepted socket
+ *
+ * notify_disconnect - only called on disconnect, i.e., connected == false
+ */
+void msg_conn_accept_init(struct msg_conn *conn, struct event_loop *tm, int fd,
+ int (*notify_disconnect)(struct msg_conn *conn),
+ void (*handle_msg)(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn),
+ size_t max_read, size_t max_write, size_t max_size,
+ const char *idtag)
+{
+ conn->loop = tm;
+ conn->fd = fd;
+ conn->notify_disconnect = notify_disconnect;
+ conn->handle_msg = handle_msg;
+ conn->is_client = false;
+
+ mgmt_msg_init(&conn->mstate, max_read, max_write, max_size, idtag);
+
+ /* start reading */
+ msg_conn_sched_read(conn);
+
+ /* Make socket non-blocking. */
+ set_nonblocking(conn->fd);
+ setsockopt_so_sendbuf(conn->fd, MSG_CONN_SEND_BUF_SIZE);
+ setsockopt_so_recvbuf(conn->fd, MSG_CONN_RECV_BUF_SIZE);
+}
+
+struct msg_conn *
+msg_server_conn_create(struct event_loop *tm, int fd,
+ int (*notify_disconnect)(struct msg_conn *conn),
+ void (*handle_msg)(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn),
+ size_t max_read, size_t max_write, size_t max_size,
+ void *user, const char *idtag)
+{
+ struct msg_conn *conn = XMALLOC(MTYPE_MSG_CONN, sizeof(*conn));
+ memset(conn, 0, sizeof(*conn));
+ msg_conn_accept_init(conn, tm, fd, notify_disconnect, handle_msg,
+ max_read, max_write, max_size, idtag);
+ conn->user = user;
+ return conn;
+}
+
+void msg_server_conn_delete(struct msg_conn *conn)
+{
+ if (!conn)
+ return;
+ msg_conn_cleanup(conn);
+ XFREE(MTYPE_MSG_CONN, conn);
+}
diff --git a/lib/mgmt_msg.h b/lib/mgmt_msg.h
index e2dd2d476a..9fdcb9ecd3 100644
--- a/lib/mgmt_msg.h
+++ b/lib/mgmt_msg.h
@@ -7,10 +7,23 @@
#ifndef _MGMT_MSG_H
#define _MGMT_MSG_H
+#include "memory.h"
#include "stream.h"
#include "frrevent.h"
-#define MGMT_MSG_MARKER (0x4D724B21u) /* ASCII - "MrK!"*/
+DECLARE_MTYPE(MSG_CONN);
+
+/*
+ * Messages on the stream start with a marker that encodes a version octet.
+ */
+#define MGMT_MSG_MARKER_PFX (0x23232300u) /* ASCII - "###\ooo"*/
+#define MGMT_MSG_IS_MARKER(x) (((x)&0xFFFFFF00u) == MGMT_MSG_MARKER_PFX)
+#define MGMT_MSG_MARKER(version) (MGMT_MSG_MARKER_PFX | (version))
+#define MGMT_MSG_MARKER_VERSION(x) (0xFF & (x))
+
+#define MGMT_MSG_VERSION_PROTOBUF 0
+#define MGMT_MSG_VERSION_NATIVE 1
+
struct mgmt_msg_state {
struct stream *ins;
@@ -41,33 +54,148 @@ enum mgmt_msg_rsched {
enum mgmt_msg_wsched {
MSW_SCHED_NONE, /* no scheduling required */
MSW_SCHED_STREAM, /* schedule writing */
- MSW_SCHED_WRITES_OFF, /* toggle writes off */
MSW_DISCONNECT, /* disconnect and start reconnecting */
};
-static inline uint8_t *msg_payload(struct mgmt_msg_hdr *mhdr)
-{
- return (uint8_t *)(mhdr + 1);
-}
+struct msg_conn;
-typedef size_t (*mgmt_msg_packf)(void *msg, void *data);
extern int mgmt_msg_connect(const char *path, size_t sendbuf, size_t recvbuf,
const char *dbgtag);
-extern void mgmt_msg_destroy(struct mgmt_msg_state *ms);
-extern void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
- size_t max_write_buf, size_t max_msg_sz,
- const char *idtag);
extern bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
- void (*handle_msg)(void *user, uint8_t *msg,
- size_t msglen),
+ void (*handle_msg)(uint8_t version, uint8_t *msg,
+ size_t msglen, void *user),
void *user, bool debug);
extern enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
bool debug);
extern size_t mgmt_msg_reset_writes(struct mgmt_msg_state *ms);
-extern int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
+extern int mgmt_msg_send_msg(struct mgmt_msg_state *ms, uint8_t version,
+ void *msg, size_t len,
size_t (*packf)(void *msg, void *buf), bool debug);
extern enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
bool debug);
+extern void mgmt_msg_destroy(struct mgmt_msg_state *state);
+
+extern void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
+ size_t max_write_buf, size_t max_msg_sz,
+ const char *idtag);
+
+/*
+ * Connections
+ */
+
+struct msg_conn {
+ int fd;
+ struct mgmt_msg_state mstate;
+ struct event_loop *loop;
+ struct event *read_ev;
+ struct event *write_ev;
+ struct event *proc_msg_ev;
+ struct msg_conn *remote_conn;
+ int (*notify_disconnect)(struct msg_conn *conn);
+ void (*handle_msg)(uint8_t version, uint8_t *data, size_t len,
+ struct msg_conn *conn);
+ void *user;
+ uint short_circuit_depth;
+ bool is_client;
+ bool is_short_circuit;
+ bool debug;
+};
+
+
+/*
+ * `notify_disconnect` is not called when `msg_conn_cleanup` is called for a
+ * msg_conn which is currently connected. The socket is closed but there is no
+ * notification.
+ */
+extern void msg_conn_cleanup(struct msg_conn *conn);
+extern void msg_conn_disconnect(struct msg_conn *conn, bool reconnect);
+extern int msg_conn_send_msg(struct msg_conn *client, uint8_t version,
+ void *msg, size_t mlen,
+ size_t (*packf)(void *, void *),
+ bool short_circuit_ok);
+
+/*
+ * Client-side Connections
+ */
+
+struct msg_client {
+ struct msg_conn conn;
+ struct event *conn_retry_tmr;
+ char *sopath;
+ int (*notify_connect)(struct msg_client *client);
+ bool short_circuit_ok;
+};
+
+/*
+ * `notify_disconnect` is not called when `msg_client_cleanup` is called for a
+ * msg_client which is currently connected. The socket is closed but there is no
+ * notification.
+ */
+extern void msg_client_cleanup(struct msg_client *client);
+
+/*
+ * `notify_disconnect` is not called when the user `msg_client_cleanup` is
+ * called for a client which is currently connected. The socket is closed
+ * but there is no notification.
+ */
+extern void
+msg_client_init(struct msg_client *client, struct event_loop *tm,
+ const char *sopath,
+ int (*notify_connect)(struct msg_client *client),
+ int (*notify_disconnect)(struct msg_conn *client),
+ void (*handle_msg)(uint8_t version, uint8_t *data, size_t len,
+ struct msg_conn *client),
+ size_t max_read_buf, size_t max_write_buf, size_t max_msg_sz,
+ bool short_circuit_ok, const char *idtag, bool debug);
+
+/*
+ * Server-side Connections
+ */
+#define MGMTD_MAX_CONN 32
+
+PREDECL_LIST(msg_server_list);
+
+struct msg_server {
+ int fd;
+ struct msg_server_list_item link;
+ struct event_loop *loop;
+ struct event *listen_ev;
+ const char *sopath;
+ const char *idtag;
+ struct msg_conn *(*create)(int fd, union sockunion *su);
+ struct debug *debug;
+};
+
+extern int msg_server_init(struct msg_server *server, const char *sopath,
+ struct event_loop *loop,
+ struct msg_conn *(*create)(int fd,
+ union sockunion *su),
+ const char *idtag, struct debug *debug);
+extern void msg_server_cleanup(struct msg_server *server);
+
+/*
+ * `notify_disconnect` is not called when the user `msg_conn_cleanup` is
+ * called for a client which is currently connected. The socket is closed
+ * but there is no notification.
+ */
+struct msg_conn *
+msg_server_conn_create(struct event_loop *tm, int fd,
+ int (*notify_disconnect)(struct msg_conn *conn),
+ void (*handle_msg)(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn),
+ size_t max_read, size_t max_write, size_t max_size,
+ void *user, const char *idtag);
+
+extern void msg_server_conn_delete(struct msg_conn *conn);
+
+extern void
+msg_conn_accept_init(struct msg_conn *conn, struct event_loop *tm, int fd,
+ int (*notify_disconnect)(struct msg_conn *conn),
+ void (*handle_msg)(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn),
+ size_t max_read, size_t max_write, size_t max_size,
+ const char *idtag);
+
#endif /* _MGMT_MSG_H */
diff --git a/lib/northbound.c b/lib/northbound.c
index 775f6ff92f..ef2344ee11 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -792,18 +792,19 @@ static void nb_update_candidate_changes(struct nb_config *candidate,
LYD_TREE_DFS_BEGIN (root, dnode) {
op = nb_lyd_diff_get_op(dnode);
switch (op) {
- case 'c':
+ case 'c': /* create */
nb_config_diff_created(dnode, seq, cfg_chgs);
LYD_TREE_DFS_continue = 1;
break;
- case 'd':
+ case 'd': /* delete */
nb_config_diff_deleted(dnode, seq, cfg_chgs);
LYD_TREE_DFS_continue = 1;
break;
- case 'r':
+ case 'r': /* replace */
nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq,
dnode);
break;
+ case 'n': /* none */
default:
break;
}
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index c5582fc21c..e9c89d2029 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -195,9 +195,12 @@ int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
va_end(ap);
}
- if (vty_mgmt_fe_enabled()) {
+ if (vty_mgmt_should_process_cli_apply_changes(vty)) {
VTY_CHECK_XPATH;
+ if (vty->type == VTY_FILE)
+ return CMD_SUCCESS;
+
implicit_commit = vty_needs_implicit_commit(vty);
ret = vty_mgmt_send_config_data(vty);
if (ret >= 0 && !implicit_commit)
@@ -224,7 +227,7 @@ int nb_cli_apply_changes_clear_pending(struct vty *vty,
va_end(ap);
}
- if (vty_mgmt_fe_enabled()) {
+ if (vty_mgmt_should_process_cli_apply_changes(vty)) {
VTY_CHECK_XPATH;
implicit_commit = vty_needs_implicit_commit(vty);
diff --git a/lib/vty.c b/lib/vty.c
index d6a0dba206..fedbdbb813 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -21,6 +21,7 @@
#endif /* HAVE_LIBPCRE2_POSIX */
#include <stdio.h>
+#include "debug.h"
#include "linklist.h"
#include "frrevent.h"
#include "buffer.h"
@@ -67,19 +68,12 @@ enum vty_event {
struct nb_config *vty_mgmt_candidate_config;
-static uintptr_t mgmt_lib_hndl;
+static struct mgmt_fe_client *mgmt_fe_client;
static bool mgmt_fe_connected;
static bool mgmt_candidate_ds_wr_locked;
static uint64_t mgmt_client_id_next;
static uint64_t mgmt_last_req_id = UINT64_MAX;
-static bool vty_debug;
-#define VTY_DBG(fmt, ...) \
- do { \
- if (vty_debug) \
- zlog_debug(fmt, ##__VA_ARGS__); \
- } while (0)
-
PREDECL_DLIST(vtyservs);
struct vty_serv {
@@ -128,6 +122,13 @@ static char integrate_default[] = SYSCONFDIR INTEGRATE_DEFAULT_CONFIG;
bool vty_log_commands;
static bool vty_log_commands_perm;
+char const *const mgmt_daemons[] = {
+#ifdef HAVE_STATICD
+ "staticd",
+#endif
+};
+uint mgmt_daemons_count = array_size(mgmt_daemons);
+
void vty_mgmt_resume_response(struct vty *vty, bool success)
{
uint8_t header[4] = {0, 0, 0, 0};
@@ -143,19 +144,26 @@ void vty_mgmt_resume_response(struct vty *vty, bool success)
ret = CMD_WARNING_CONFIG_FAILED;
vty->mgmt_req_pending = false;
- header[3] = ret;
- buffer_put(vty->obuf, header, 4);
- if (!vty->t_write && (vtysh_flush(vty) < 0))
- /* Try to flush results; exit if a write
- * error occurs.
- */
- return;
+ MGMTD_FE_CLIENT_DBG("resuming: %s:", success ? "succeeded" : "failed");
+
+ if (vty->type != VTY_FILE) {
+ header[3] = ret;
+ buffer_put(vty->obuf, header, 4);
+ if (!vty->t_write && (vtysh_flush(vty) < 0)) {
+ zlog_err("failed to vtysh_flush");
+ /* Try to flush results; exit if a write error occurs */
+ return;
+ }
+ }
if (vty->status == VTY_CLOSE)
vty_close(vty);
- else
+ else if (vty->type != VTY_FILE)
vty_event(VTYSH_READ, vty);
+ else
+ /* should we assert here? */
+ zlog_err("mgmtd: unexpected resume while reading config file");
}
void vty_frame(struct vty *vty, const char *format, ...)
@@ -1632,10 +1640,12 @@ struct vty *vty_new(void)
new->max = VTY_BUFSIZ;
new->pass_fd = -1;
- if (mgmt_lib_hndl) {
+ if (mgmt_fe_client) {
+ if (!mgmt_client_id_next)
+ mgmt_client_id_next++;
new->mgmt_client_id = mgmt_client_id_next++;
if (mgmt_fe_create_client_session(
- mgmt_lib_hndl, new->mgmt_client_id,
+ mgmt_fe_client, new->mgmt_client_id,
(uintptr_t) new) != MGMTD_SUCCESS)
zlog_err(
"Failed to open a MGMTD Frontend session for VTY session %p!!",
@@ -2173,6 +2183,84 @@ void vty_pass_fd(struct vty *vty, int fd)
vty->pass_fd = fd;
}
+bool mgmt_vty_read_configs(void)
+{
+ char path[PATH_MAX];
+ struct vty *vty;
+ FILE *confp;
+ uint line_num = 0;
+ uint count = 0;
+ uint index;
+
+ vty = vty_new();
+ vty->wfd = STDERR_FILENO;
+ vty->type = VTY_FILE;
+ vty->node = CONFIG_NODE;
+ vty->config = true;
+ vty->pending_allowed = true;
+ vty->candidate_config = vty_shared_candidate_config;
+ vty->mgmt_locked_candidate_ds = true;
+ mgmt_candidate_ds_wr_locked = true;
+
+
+ for (index = 0; index < array_size(mgmt_daemons); index++) {
+ snprintf(path, sizeof(path), "%s/%s.conf", frr_sysconfdir,
+ mgmt_daemons[index]);
+
+ confp = vty_open_config(path, config_default);
+ if (!confp)
+ continue;
+
+ zlog_info("mgmtd: reading config file: %s", path);
+
+ /* Execute configuration file */
+ line_num = 0;
+ (void)config_from_file(vty, confp, &line_num);
+ count++;
+
+ fclose(confp);
+ }
+
+ snprintf(path, sizeof(path), "%s/mgmtd.conf", frr_sysconfdir);
+ confp = vty_open_config(path, config_default);
+ if (!confp) {
+ char *orig;
+
+ snprintf(path, sizeof(path), "%s/zebra.conf", frr_sysconfdir);
+ orig = XSTRDUP(MTYPE_TMP, host_config_get());
+
+ zlog_info("mgmtd: trying backup config file: %s", path);
+ confp = vty_open_config(path, config_default);
+
+ host_config_set(path);
+ XFREE(MTYPE_TMP, orig);
+ }
+
+ if (confp) {
+ zlog_info("mgmtd: reading config file: %s", path);
+
+ line_num = 0;
+ (void)config_from_file(vty, confp, &line_num);
+ count++;
+
+ fclose(confp);
+ }
+
+ vty->pending_allowed = false;
+
+ vty->mgmt_locked_candidate_ds = false;
+ mgmt_candidate_ds_wr_locked = false;
+
+ if (!count)
+ vty_close(vty);
+ else
+ vty_read_file_finish(vty, NULL);
+
+ zlog_info("mgmtd: finished reading config files");
+
+ return true;
+}
+
static void vtysh_read(struct event *thread)
{
int ret;
@@ -2292,7 +2380,7 @@ static void vtysh_write(struct event *thread)
#endif /* VTYSH */
/* Determine address family to bind. */
-void vty_serv_sock(const char *addr, unsigned short port, const char *path)
+void vty_serv_start(const char *addr, unsigned short port, const char *path)
{
/* If port is set to 0, do not listen on TCP/IP at all! */
if (port)
@@ -2303,6 +2391,20 @@ void vty_serv_sock(const char *addr, unsigned short port, const char *path)
#endif /* VTYSH */
}
+void vty_serv_stop(void)
+{
+ struct vty_serv *vtyserv;
+
+ while ((vtyserv = vtyservs_pop(vty_servs))) {
+ EVENT_OFF(vtyserv->t_accept);
+ close(vtyserv->sock);
+ XFREE(MTYPE_VTY_SERV, vtyserv);
+ }
+
+ vtyservs_fini(vty_servs);
+ vtyservs_init(vty_servs);
+}
+
static void vty_error_delete(void *arg)
{
struct vty_error *ve = arg;
@@ -2319,8 +2421,11 @@ void vty_close(struct vty *vty)
int i;
bool was_stdio = false;
- if (mgmt_lib_hndl) {
- mgmt_fe_destroy_client_session(mgmt_lib_hndl,
+ vty->status = VTY_CLOSE;
+
+ if (mgmt_fe_client && vty->mgmt_session_id) {
+ MGMTD_FE_CLIENT_DBG("closing vty session");
+ mgmt_fe_destroy_client_session(mgmt_fe_client,
vty->mgmt_client_id);
vty->mgmt_session_id = 0;
}
@@ -2355,7 +2460,7 @@ void vty_close(struct vty *vty)
if (vty->fd != -1) {
if (vty->type == VTY_SHELL_SERV)
vtys_del(vtysh_sessions, vty);
- else
+ else if (vty->type == VTY_TERM)
vtys_del(vty_sessions, vty);
}
@@ -2374,6 +2479,7 @@ void vty_close(struct vty *vty)
if (vty->fd == STDIN_FILENO)
was_stdio = true;
+ XFREE(MTYPE_TMP, vty->pending_cmds_buf);
XFREE(MTYPE_VTY, vty->buf);
if (vty->error) {
@@ -2409,10 +2515,7 @@ static void vty_timeout(struct event *thread)
/* Read up configuration file from file_name. */
void vty_read_file(struct nb_config *config, FILE *confp)
{
- int ret;
struct vty *vty;
- struct vty_error *ve;
- struct listnode *node;
unsigned int line_num = 0;
vty = vty_new();
@@ -2435,16 +2538,30 @@ void vty_read_file(struct nb_config *config, FILE *confp)
}
/* Execute configuration file */
- ret = config_from_file(vty, confp, &line_num);
+ (void)config_from_file(vty, confp, &line_num);
+
+ vty_read_file_finish(vty, config);
+}
+
+void vty_read_file_finish(struct vty *vty, struct nb_config *config)
+{
+ struct vty_error *ve;
+ struct listnode *node;
/* Flush any previous errors before printing messages below */
buffer_flush_all(vty->obuf, vty->wfd);
- if (!((ret == CMD_SUCCESS) || (ret == CMD_ERR_NOTHING_TODO))) {
+ for (ALL_LIST_ELEMENTS_RO(vty->error, node, ve)) {
const char *message = NULL;
char *nl;
- switch (ret) {
+ switch (ve->cmd_ret) {
+ case CMD_SUCCESS:
+ message = "Command succeeded";
+ break;
+ case CMD_ERR_NOTHING_TODO:
+ message = "Nothing to do";
+ break;
case CMD_ERR_AMBIGUOUS:
message = "Ambiguous command";
break;
@@ -2469,13 +2586,11 @@ void vty_read_file(struct nb_config *config, FILE *confp)
break;
}
- for (ALL_LIST_ELEMENTS_RO(vty->error, node, ve)) {
- nl = strchr(ve->error_buf, '\n');
- if (nl)
- *nl = '\0';
- flog_err(EC_LIB_VTY, "%s on config line %u: %s",
- message, ve->line_num, ve->error_buf);
- }
+ nl = strchr(ve->error_buf, '\n');
+ if (nl)
+ *nl = '\0';
+ flog_err(EC_LIB_VTY, "%s on config line %u: %s", message,
+ ve->line_num, ve->error_buf);
}
/*
@@ -2485,6 +2600,7 @@ void vty_read_file(struct nb_config *config, FILE *confp)
if (config == NULL) {
struct nb_context context = {};
char errmsg[BUFSIZ] = {0};
+ int ret;
context.client = NB_CLIENT_CLI;
context.user = vty;
@@ -2555,15 +2671,12 @@ static FILE *vty_use_backup_config(const char *fullpath)
return ret;
}
-/* Read up configuration file from file_name. */
-bool vty_read_config(struct nb_config *config, const char *config_file,
- char *config_default_dir)
+FILE *vty_open_config(const char *config_file, char *config_default_dir)
{
char cwd[MAXPATHLEN];
FILE *confp = NULL;
const char *fullpath;
char *tmp = NULL;
- bool read_success = false;
/* If -f flag specified. */
if (config_file != NULL) {
@@ -2626,10 +2739,8 @@ bool vty_read_config(struct nb_config *config, const char *config_file,
if (strstr(config_default_dir, "vtysh") == NULL) {
ret = stat(integrate_default, &conf_stat);
- if (ret >= 0) {
- read_success = true;
+ if (ret >= 0)
goto tmp_free_and_out;
- }
}
#endif /* VTYSH */
confp = fopen(config_default_dir, "r");
@@ -2655,42 +2766,29 @@ bool vty_read_config(struct nb_config *config, const char *config_file,
fullpath = config_default_dir;
}
- vty_read_file(config, confp);
- read_success = true;
-
- fclose(confp);
-
host_config_set(fullpath);
tmp_free_and_out:
XFREE(MTYPE_TMP, tmp);
- return read_success;
+ return confp;
}
-static void update_xpath(struct vty *vty, const char *oldpath,
- const char *newpath)
+
+bool vty_read_config(struct nb_config *config, const char *config_file,
+ char *config_default_dir)
{
- int i;
+ FILE *confp;
- for (i = 0; i < vty->xpath_index; i++) {
- if (!frrstr_startswith(vty->xpath[i], oldpath))
- break;
+ confp = vty_open_config(config_file, config_default_dir);
+ if (!confp)
+ return false;
- char *tmp = frrstr_replace(vty->xpath[i], oldpath, newpath);
- strlcpy(vty->xpath[i], tmp, sizeof(vty->xpath[0]));
- XFREE(MTYPE_TMP, tmp);
- }
-}
+ vty_read_file(config, confp);
-void vty_update_xpath(const char *oldpath, const char *newpath)
-{
- struct vty *vty;
+ fclose(confp);
- frr_each (vtys, vtysh_sessions, vty)
- update_xpath(vty, oldpath, newpath);
- frr_each (vtys, vty_sessions, vty)
- update_xpath(vty, oldpath, newpath);
+ return true;
}
int vty_config_enter(struct vty *vty, bool private_config, bool exclusive)
@@ -2768,8 +2866,12 @@ int vty_config_node_exit(struct vty *vty)
{
vty->xpath_index = 0;
- if (vty_mgmt_fe_enabled() && mgmt_candidate_ds_wr_locked &&
- vty->mgmt_locked_candidate_ds) {
+	/*
+	 * If we are not reading a config file, we are the mgmtd FE client,
+	 * and we hold the candidate DS write lock, then unlock.
+	 */
+ if (vty->type != VTY_FILE && vty_mgmt_fe_enabled() &&
+ mgmt_candidate_ds_wr_locked && vty->mgmt_locked_candidate_ds) {
if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE, false) !=
0) {
vty_out(vty, "Not able to unlock candidate DS\n");
@@ -2804,6 +2906,16 @@ int vty_config_node_exit(struct vty *vty)
}
vty->config = false;
+
+	/*
+	 * If this is a config file and we are dropping out of the config
+	 * node, end parsing.
+	 */
+ if (vty->type == VTY_FILE && vty->status != VTY_CLOSE) {
+ vty_out(vty, "exit from config node while reading config file");
+ vty->status = VTY_CLOSE;
+ }
+
return 1;
}
@@ -3277,25 +3389,43 @@ void vty_init_vtysh(void)
/* currently nothing to do, but likely to have future use */
}
-static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
- bool connected)
-{
- VTY_DBG("%sGot %sconnected %s MGMTD Frontend Server",
- !connected ? "ERROR: " : "", !connected ? "dis: " : "",
- !connected ? "from" : "to");
- mgmt_fe_connected = connected;
+/*
+ * These functions allow for CLI handling to be placed inside daemons; however,
+ * currently they are only used by mgmtd, with mgmtd having each daemon's CLI
+ * functionality linked into it. This design choice was taken for efficiency.
+ */
+
+static void vty_mgmt_server_connected(struct mgmt_fe_client *client,
+ uintptr_t usr_data, bool connected)
+{
+ MGMTD_FE_CLIENT_DBG("Got %sconnected %s MGMTD Frontend Server",
+ !connected ? "dis: " : "",
+ !connected ? "from" : "to");
/*
- * TODO: Setup or teardown front-end sessions for existing
- * VTY connections.
+	 * We should not have any sessions in either the connecting or
+	 * disconnecting case. The fe client library will delete all sessions
+	 * on disconnect before calling us.
*/
+ assert(mgmt_fe_client_session_count(client) == 0);
+
+ mgmt_fe_connected = connected;
+
+ /* Start or stop listening for vty connections */
+ if (connected)
+ frr_vty_serv_start();
+ else
+ frr_vty_serv_stop();
}
-static void vty_mgmt_session_created(uintptr_t lib_hndl, uintptr_t usr_data,
- uint64_t client_id, bool create,
- bool success, uintptr_t session_id,
- uintptr_t session_ctx)
+/*
+ * A session has successfully been created for a vty.
+ */
+static void vty_mgmt_session_notify(struct mgmt_fe_client *client,
+ uintptr_t usr_data, uint64_t client_id,
+ bool create, bool success,
+ uintptr_t session_id, uintptr_t session_ctx)
{
struct vty *vty;
@@ -3307,14 +3437,21 @@ static void vty_mgmt_session_created(uintptr_t lib_hndl, uintptr_t usr_data,
return;
}
- VTY_DBG("%s session for client %" PRIu64 " successfully",
- create ? "Created" : "Destroyed", client_id);
- if (create)
+ MGMTD_FE_CLIENT_DBG("%s session for client %" PRIu64 " successfully",
+ create ? "Created" : "Destroyed", client_id);
+
+ if (create) {
+ assert(session_id != 0);
vty->mgmt_session_id = session_id;
+ } else {
+ vty->mgmt_session_id = 0;
+ vty_close(vty);
+ }
}
-static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
- uint64_t client_id, uintptr_t session_id,
+static void vty_mgmt_ds_lock_notified(struct mgmt_fe_client *client,
+ uintptr_t usr_data, uint64_t client_id,
+ uintptr_t session_id,
uintptr_t session_ctx, uint64_t req_id,
bool lock_ds, bool success,
Mgmtd__DatastoreId ds_id,
@@ -3330,15 +3467,15 @@ static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
vty_out(vty, "ERROR: %socking for DS %u failed, Err: '%s'\n",
lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
} else {
- VTY_DBG("%socked DS %u successfully", lock_ds ? "L" : "Unl",
- ds_id);
+ MGMTD_FE_CLIENT_DBG("%socked DS %u successfully",
+ lock_ds ? "L" : "Unl", ds_id);
}
vty_mgmt_resume_response(vty, success);
}
static void vty_mgmt_set_config_result_notified(
- uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+ struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id,
uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any)
{
@@ -3353,16 +3490,16 @@ static void vty_mgmt_set_config_result_notified(
vty_out(vty, "ERROR: SET_CONFIG request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
} else {
- VTY_DBG("SET_CONFIG request for client 0x%" PRIx64
- " req-id %" PRIu64 " was successfull",
- client_id, req_id);
+ MGMTD_FE_CLIENT_DBG("SET_CONFIG request for client 0x%" PRIx64
+ " req-id %" PRIu64 " was successfull",
+ client_id, req_id);
}
vty_mgmt_resume_response(vty, success);
}
static void vty_mgmt_commit_config_result_notified(
- uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+ struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id,
uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
bool success, Mgmtd__DatastoreId src_ds_id,
Mgmtd__DatastoreId dst_ds_id, bool validate_only, char *errmsg_if_any)
@@ -3378,7 +3515,8 @@ static void vty_mgmt_commit_config_result_notified(
vty_out(vty, "ERROR: COMMIT_CONFIG request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
} else {
- VTY_DBG("COMMIT_CONFIG request for client 0x%" PRIx64
+ MGMTD_FE_CLIENT_DBG(
+ "COMMIT_CONFIG request for client 0x%" PRIx64
" req-id %" PRIu64 " was successfull",
client_id, req_id);
if (errmsg_if_any)
@@ -3388,8 +3526,8 @@ static void vty_mgmt_commit_config_result_notified(
vty_mgmt_resume_response(vty, success);
}
-static enum mgmt_result vty_mgmt_get_data_result_notified(
- uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+static int vty_mgmt_get_data_result_notified(
+ struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id,
uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
bool success, Mgmtd__DatastoreId ds_id, Mgmtd__YangData **yang_data,
size_t num_data, int next_key, char *errmsg_if_any)
@@ -3406,12 +3544,12 @@ static enum mgmt_result vty_mgmt_get_data_result_notified(
vty_out(vty, "ERROR: GET_DATA request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
vty_mgmt_resume_response(vty, success);
- return MGMTD_INTERNAL_ERROR;
+ return -1;
}
- VTY_DBG("GET_DATA request succeeded, client 0x%" PRIx64
- " req-id %" PRIu64,
- client_id, req_id);
+ MGMTD_FE_CLIENT_DBG("GET_DATA request succeeded, client 0x%" PRIx64
+ " req-id %" PRIu64,
+ client_id, req_id);
if (req_id != mgmt_last_req_id) {
mgmt_last_req_id = req_id;
@@ -3427,12 +3565,12 @@ static enum mgmt_result vty_mgmt_get_data_result_notified(
vty_mgmt_resume_response(vty, success);
}
- return MGMTD_SUCCESS;
+ return 0;
}
-static struct mgmt_fe_client_params client_params = {
+static struct mgmt_fe_client_cbs mgmt_cbs = {
.client_connect_notify = vty_mgmt_server_connected,
- .client_session_notify = vty_mgmt_session_created,
+ .client_session_notify = vty_mgmt_session_notify,
.lock_ds_notify = vty_mgmt_ds_lock_notified,
.set_config_notify = vty_mgmt_set_config_result_notified,
.commit_config_notify = vty_mgmt_commit_config_result_notified,
@@ -3441,33 +3579,34 @@ static struct mgmt_fe_client_params client_params = {
void vty_init_mgmt_fe(void)
{
- if (!vty_master) {
- zlog_err("Always call vty_mgmt_init_fe() after vty_init()!!");
- return;
- }
+ char name[40];
- assert(!mgmt_lib_hndl);
- snprintf(client_params.name, sizeof(client_params.name), "%s-%lld",
- frr_get_progname(), (long long)getpid());
- mgmt_lib_hndl = mgmt_fe_client_lib_init(&client_params, vty_master);
- assert(mgmt_lib_hndl);
+ assert(vty_master);
+ assert(!mgmt_fe_client);
+ snprintf(name, sizeof(name), "vty-%s-%ld", frr_get_progname(),
+ (long)getpid());
+ mgmt_fe_client = mgmt_fe_client_create(name, &mgmt_cbs, 0, vty_master);
+ assert(mgmt_fe_client);
}
bool vty_mgmt_fe_enabled(void)
{
- return mgmt_lib_hndl && mgmt_fe_connected ? true : false;
+ return mgmt_fe_client && mgmt_fe_connected;
+}
+
+bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty)
+{
+ return vty->type != VTY_FILE && vty_mgmt_fe_enabled();
}
int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
bool lock)
{
- enum mgmt_result ret;
-
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
+ if (mgmt_fe_client && vty->mgmt_session_id) {
vty->mgmt_req_id++;
- ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id,
- vty->mgmt_req_id, ds_id, lock);
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_lockds_req(mgmt_fe_client,
+ vty->mgmt_session_id,
+ vty->mgmt_req_id, ds_id, lock)) {
zlog_err("Failed sending %sLOCK-DS-REQ req-id %" PRIu64,
lock ? "" : "UN", vty->mgmt_req_id);
vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!\n",
@@ -3491,7 +3630,31 @@ int vty_mgmt_send_config_data(struct vty *vty)
int cnt;
bool implicit_commit = false;
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
+ if (vty->type == VTY_FILE) {
+ /*
+ * if this is a config file read we will not send any of the
+ * changes until we are done reading the file and have modified
+ * the local candidate DS.
+ */
+ assert(vty->mgmt_locked_candidate_ds);
+ /* no-one else should be sending data right now */
+ assert(!vty->mgmt_num_pending_setcfg);
+ return 0;
+ }
+
+
+ if (mgmt_fe_client && vty->mgmt_client_id && !vty->mgmt_session_id) {
+ /*
+ * We are connected to mgmtd but we do not yet have an
+		 * established session. This means we need to send any changes
+ * made during this "down-time" to all backend clients when this
+ * FE client finishes coming up.
+ */
+ MGMTD_FE_CLIENT_DBG("skipping as no session exists");
+ return 0;
+ }
+
+ if (mgmt_fe_client && vty->mgmt_session_id) {
cnt = 0;
for (indx = 0; indx < vty->num_cfg_changes; indx++) {
mgmt_yang_data_init(&cfg_data[cnt]);
@@ -3540,8 +3703,8 @@ int vty_mgmt_send_config_data(struct vty *vty)
vty->mgmt_req_id++;
implicit_commit = vty_needs_implicit_commit(vty);
- if (cnt && mgmt_fe_set_config_data(
- mgmt_lib_hndl, vty->mgmt_session_id,
+ if (cnt && mgmt_fe_send_setcfg_req(
+ mgmt_fe_client, vty->mgmt_session_id,
vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq,
cnt, implicit_commit,
MGMTD_DS_RUNNING) != MGMTD_SUCCESS) {
@@ -3559,15 +3722,12 @@ int vty_mgmt_send_config_data(struct vty *vty)
int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
{
- enum mgmt_result ret;
-
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
+ if (mgmt_fe_client && vty->mgmt_session_id) {
vty->mgmt_req_id++;
- ret = mgmt_fe_commit_config_data(
- mgmt_lib_hndl, vty->mgmt_session_id, vty->mgmt_req_id,
- MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only,
- abort);
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_commitcfg_req(
+ mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, MGMTD_DS_CANDIDATE,
+ MGMTD_DS_RUNNING, validate_only, abort)) {
zlog_err("Failed sending COMMIT-REQ req-id %" PRIu64,
vty->mgmt_req_id);
vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!\n");
@@ -3584,7 +3744,6 @@ int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
const char **xpath_list, int num_req)
{
- enum mgmt_result ret;
Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
@@ -3601,11 +3760,9 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
get_req[i].data = &yang_data[i];
getreq[i] = &get_req[i];
}
- ret = mgmt_fe_get_config_data(mgmt_lib_hndl, vty->mgmt_session_id,
- vty->mgmt_req_id, datastore, getreq,
- num_req);
-
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_getcfg_req(mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, datastore, getreq,
+ num_req)) {
zlog_err(
"Failed to send GET-CONFIG to MGMTD for req-id %" PRIu64
".",
@@ -3622,7 +3779,6 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
const char **xpath_list, int num_req)
{
- enum mgmt_result ret;
Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
@@ -3639,10 +3795,9 @@ int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
get_req[i].data = &yang_data[i];
getreq[i] = &get_req[i];
}
- ret = mgmt_fe_get_data(mgmt_lib_hndl, vty->mgmt_session_id,
- vty->mgmt_req_id, datastore, getreq, num_req);
-
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_getdata_req(mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, datastore, getreq,
+ num_req)) {
zlog_err("Failed to send GET-DATA to MGMTD for req-id %" PRIu64
".",
vty->mgmt_req_id);
@@ -3700,11 +3855,10 @@ void vty_init(struct event_loop *master_thread, bool do_command_logging)
void vty_terminate(void)
{
struct vty *vty;
- struct vty_serv *vtyserv;
- if (mgmt_lib_hndl) {
- mgmt_fe_client_lib_destroy(mgmt_lib_hndl);
- mgmt_lib_hndl = 0;
+ if (mgmt_fe_client) {
+ mgmt_fe_client_destroy(mgmt_fe_client);
+ mgmt_fe_client = 0;
}
memset(vty_cwd, 0x00, sizeof(vty_cwd));
@@ -3726,12 +3880,5 @@ void vty_terminate(void)
vtys_fini(vtysh_sessions);
vtys_init(vtysh_sessions);
- while ((vtyserv = vtyservs_pop(vty_servs))) {
- EVENT_OFF(vtyserv->t_accept);
- close(vtyserv->sock);
- XFREE(MTYPE_VTY_SERV, vtyserv);
- }
-
- vtyservs_fini(vty_servs);
- vtyservs_init(vty_servs);
+ vty_serv_stop();
}
diff --git a/lib/vty.h b/lib/vty.h
index 560748d91d..28f27d0d47 100644
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -43,6 +43,7 @@ struct json_object;
struct vty_error {
char error_buf[VTY_BUFSIZ];
uint32_t line_num;
+ int cmd_ret;
};
struct vty_cfg_change {
@@ -71,7 +72,11 @@ struct vty {
bool is_paged;
/* Is this vty connect to file or not */
- enum { VTY_TERM, VTY_FILE, VTY_SHELL, VTY_SHELL_SERV } type;
+ enum { VTY_TERM, /* telnet conn or stdin/stdout UI */
+ VTY_FILE, /* reading and writing config files */
+ VTY_SHELL, /* vtysh client side UI */
+ VTY_SHELL_SERV, /* server-side vtysh connection */
+ } type;
/* Node status of this vty */
int node;
@@ -218,9 +223,12 @@ struct vty {
size_t frame_pos;
char frame[1024];
- uintptr_t mgmt_session_id;
- uint64_t mgmt_client_id;
+ uint64_t mgmt_session_id; /* FE adapter identifies session w/ this */
+ uint64_t mgmt_client_id; /* FE vty client identifies w/ this ID */
uint64_t mgmt_req_id;
+ /* set when we have sent mgmtd a *REQ command in response to some vty
+ * CLI command and we are waiting on the reply so we can respond to the
+ * vty user. */
bool mgmt_req_pending;
bool mgmt_locked_candidate_ds;
};
@@ -298,10 +306,10 @@ static inline void vty_push_context(struct vty *vty, int node, uint64_t id)
#define VTY_CHECK_XPATH \
do { \
- if (vty->type != VTY_FILE && !vty->private_config \
- && vty->xpath_index > 0 \
- && !yang_dnode_exists(vty->candidate_config->dnode, \
- VTY_CURR_XPATH)) { \
+ if (vty->type != VTY_FILE && !vty->private_config && \
+ vty->xpath_index > 0 && \
+ !yang_dnode_exists(vty->candidate_config->dnode, \
+ VTY_CURR_XPATH)) { \
vty_out(vty, \
"Current configuration object was deleted " \
"by another process.\n\n"); \
@@ -337,6 +345,9 @@ struct vty_arg {
extern struct nb_config *vty_mgmt_candidate_config;
extern bool vty_log_commands;
+extern char const *const mgmt_daemons[];
+extern uint mgmt_daemons_count;
+
/* Prototypes. */
extern void vty_init(struct event_loop *m, bool do_command_logging);
extern void vty_init_vtysh(void);
@@ -368,11 +379,14 @@ extern void vty_json_empty(struct vty *vty);
*/
extern void vty_pass_fd(struct vty *vty, int fd);
+extern FILE *vty_open_config(const char *config_file, char *config_default_dir);
extern bool vty_read_config(struct nb_config *config, const char *config_file,
char *config_default_dir);
extern void vty_read_file(struct nb_config *config, FILE *confp);
+extern void vty_read_file_finish(struct vty *vty, struct nb_config *config);
extern void vty_time_print(struct vty *, int);
-extern void vty_serv_sock(const char *, unsigned short, const char *);
+extern void vty_serv_start(const char *, unsigned short, const char *);
+extern void vty_serv_stop(void);
extern void vty_close(struct vty *);
extern char *vty_get_cwd(void);
extern void vty_update_xpath(const char *oldpath, const char *newpath);
@@ -391,6 +405,9 @@ extern void vty_stdio_close(void);
extern void vty_init_mgmt_fe(void);
extern bool vty_mgmt_fe_enabled(void);
+extern bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty);
+
+extern bool mgmt_vty_read_configs(void);
extern int vty_mgmt_send_config_data(struct vty *vty);
extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
bool abort);
diff --git a/mgmtd/mgmt.c b/mgmtd/mgmt.c
index 7a65a19f55..77c4473e49 100644
--- a/mgmtd/mgmt.c
+++ b/mgmtd/mgmt.c
@@ -9,11 +9,9 @@
#include <zebra.h>
#include "debug.h"
#include "mgmtd/mgmt.h"
-#include "mgmtd/mgmt_be_server.h"
#include "mgmtd/mgmt_be_adapter.h"
-#include "mgmtd/mgmt_fe_server.h"
-#include "mgmtd/mgmt_fe_adapter.h"
#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_fe_adapter.h"
#include "mgmtd/mgmt_history.h"
#include "mgmtd/mgmt_memory.h"
@@ -42,12 +40,6 @@ void mgmt_master_init(struct event_loop *master, const int buffer_size)
void mgmt_init(void)
{
- /*
- * Allocates some vital data structures used by peer commands in
- * vty_init
- */
- vty_init_mgmt_fe();
-
/* Initialize datastores */
mgmt_ds_init(mm);
@@ -57,27 +49,27 @@ void mgmt_init(void)
/* Initialize MGMTD Transaction module */
mgmt_txn_init(mm, mm->master);
- /* Initialize the MGMTD Backend Adapter Module */
- mgmt_be_adapter_init(mm->master);
-
/* Initialize the MGMTD Frontend Adapter Module */
- mgmt_fe_adapter_init(mm->master, mm);
-
- /* Start the MGMTD Backend Server for clients to connect */
- mgmt_be_server_init(mm->master);
+ mgmt_fe_adapter_init(mm->master);
- /* Start the MGMTD Frontend Server for clients to connect */
- mgmt_fe_server_init(mm->master);
+ /* Initialize the CLI frontend client */
+ vty_init_mgmt_fe();
/* MGMTD VTY commands installation. */
mgmt_vty_init();
+
+ /*
+ * Initialize the MGMTD Backend Adapter Module
+ *
+ * We do this after the FE stuff so that we always read our config file
+ * prior to any BE connection.
+ */
+ mgmt_be_adapter_init(mm->master);
}
void mgmt_terminate(void)
{
- mgmt_fe_server_destroy();
mgmt_fe_adapter_destroy();
- mgmt_be_server_destroy();
mgmt_be_adapter_destroy();
mgmt_txn_destroy();
mgmt_history_destroy();
diff --git a/mgmtd/mgmt.h b/mgmtd/mgmt.h
index 603296bb38..f52d478bc2 100644
--- a/mgmtd/mgmt.h
+++ b/mgmtd/mgmt.h
@@ -68,8 +68,6 @@ struct mgmt_master {
};
extern struct mgmt_master *mm;
-extern char const *const mgmt_daemons[];
-extern uint mgmt_daemons_count;
/* Inline functions */
static inline unsigned long timeval_elapsed(struct timeval a, struct timeval b)
@@ -102,7 +100,8 @@ extern void mgmt_reset(void);
extern time_t mgmt_clock(void);
extern int mgmt_config_write(struct vty *vty);
-
+extern struct vty *mgmt_vty_read_config(const char *config_file,
+ char *config_default_dir);
extern void mgmt_master_init(struct event_loop *master, const int buffer_size);
extern void mgmt_init(void);
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
index 9d210b8716..e4a62951d2 100644
--- a/mgmtd/mgmt_be_adapter.c
+++ b/mgmtd/mgmt_be_adapter.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2021 Vmware, Inc.
* Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
*/
#include <zebra.h>
@@ -19,9 +20,9 @@
#include "mgmtd/mgmt_be_adapter.h"
#define MGMTD_BE_ADAPTER_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_be, "%s:" fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_BE_ADAPTER_ERR(fmt, ...) \
- zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_debug_be, "BE-ADAPTER: %s:" fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_BE_ADAPTER_ERR(fmt, ...) \
+ zlog_err("BE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
#define FOREACH_ADAPTER_IN_LIST(adapter) \
frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter))
@@ -39,14 +40,24 @@
* Please see xpath_map_reg[] in lib/mgmt_be_client.c
* for the actual map
*/
-struct mgmt_be_xpath_map_reg {
- const char *xpath_regexp; /* Longest matching regular expression */
- enum mgmt_be_client_id *be_clients; /* clients to notify */
+struct mgmt_be_xpath_map_init {
+ const char *xpath_regexp;
+ uint subscr_info[MGMTD_BE_CLIENT_ID_MAX];
};
-struct mgmt_be_xpath_regexp_map {
- const char *xpath_regexp;
- struct mgmt_be_client_subscr_info be_subscrs;
+struct mgmt_be_xpath_map {
+ char *xpath_regexp;
+ uint subscr_info[MGMTD_BE_CLIENT_ID_MAX];
+};
+
+struct mgmt_be_client_xpath {
+ const char *xpath;
+ uint subscribed;
+};
+
+struct mgmt_be_client_xpath_map {
+ struct mgmt_be_client_xpath *xpaths;
+ uint nxpaths;
};
struct mgmt_be_get_adapter_config_params {
@@ -65,38 +76,84 @@ struct mgmt_be_get_adapter_config_params {
* handle real-time mapping of YANG xpaths to one or
* more interested backend client adapters.
*/
-static const struct mgmt_be_xpath_map_reg xpath_static_map_reg[] = {
- {.xpath_regexp = "/frr-vrf:lib/*",
- .be_clients =
- (enum mgmt_be_client_id[]){
+static const struct mgmt_be_xpath_map_init mgmt_xpath_map_init[] = {
+ {
+ .xpath_regexp = "/frr-vrf:lib/*",
+ .subscr_info =
+ {
#if HAVE_STATICD
- MGMTD_BE_CLIENT_ID_STATICD,
+ [MGMTD_BE_CLIENT_ID_STATICD] =
+ MGMT_SUBSCR_VALIDATE_CFG |
+ MGMT_SUBSCR_NOTIFY_CFG,
#endif
- MGMTD_BE_CLIENT_ID_MAX}},
- {.xpath_regexp = "/frr-interface:lib/*",
- .be_clients =
- (enum mgmt_be_client_id[]){
+ },
+ },
+ {
+ .xpath_regexp = "/frr-interface:lib/*",
+ .subscr_info =
+ {
#if HAVE_STATICD
- MGMTD_BE_CLIENT_ID_STATICD,
+ [MGMTD_BE_CLIENT_ID_STATICD] =
+ MGMT_SUBSCR_VALIDATE_CFG |
+ MGMT_SUBSCR_NOTIFY_CFG,
#endif
- MGMTD_BE_CLIENT_ID_MAX}},
- {.xpath_regexp =
- "/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/*",
+ },
+ },
+
+ {
+ .xpath_regexp =
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/*",
+ .subscr_info =
+ {
+#if HAVE_STATICD
+ [MGMTD_BE_CLIENT_ID_STATICD] =
+ MGMT_SUBSCR_VALIDATE_CFG |
+ MGMT_SUBSCR_NOTIFY_CFG,
+#endif
+ },
+ },
+};
+
- .be_clients =
- (enum mgmt_be_client_id[]){
+/*
+ * Each client gets their own map, but also union all the strings into the
+ * above map as well.
+ */
#if HAVE_STATICD
- MGMTD_BE_CLIENT_ID_STATICD,
+static struct mgmt_be_client_xpath staticd_xpaths[] = {
+ {
+ .xpath = "/frr-vrf:lib/*",
+ .subscribed = MGMT_SUBSCR_VALIDATE_CFG | MGMT_SUBSCR_NOTIFY_CFG,
+ },
+ {
+ .xpath = "/frr-interface:lib/*",
+ .subscribed = MGMT_SUBSCR_VALIDATE_CFG | MGMT_SUBSCR_NOTIFY_CFG,
+ },
+ {
+ .xpath =
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/*",
+ .subscribed = MGMT_SUBSCR_VALIDATE_CFG | MGMT_SUBSCR_NOTIFY_CFG,
+ },
+};
+#endif
+
+static struct mgmt_be_client_xpath_map
+ mgmt_client_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+#ifdef HAVE_STATICD
+ [MGMTD_BE_CLIENT_ID_STATICD] = {staticd_xpaths,
+ array_size(staticd_xpaths)},
#endif
- MGMTD_BE_CLIENT_ID_MAX}},
};
#define MGMTD_BE_MAX_NUM_XPATH_MAP 256
-static struct mgmt_be_xpath_regexp_map
- mgmt_xpath_map[MGMTD_BE_MAX_NUM_XPATH_MAP];
-static int mgmt_num_xpath_maps;
-static struct event_loop *mgmt_be_adapter_tm;
+/* We would like to have a better ADT than one with O(n)
+ comparisons */
+static struct mgmt_be_xpath_map *mgmt_xpath_map;
+static uint mgmt_num_xpath_maps;
+
+static struct event_loop *mgmt_loop;
+static struct msg_server mgmt_be_server = {.fd = -1};
static struct mgmt_be_adapters_head mgmt_be_adapters;
@@ -105,8 +162,10 @@ static struct mgmt_be_client_adapter
/* Forward declarations */
static void
-mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
- enum mgmt_be_event event);
+mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter);
+
+static uint mgmt_be_get_subscr_for_xpath_and_client(
+ const char *xpath, enum mgmt_be_client_id client_id, uint subscr_mask);
static struct mgmt_be_client_adapter *
mgmt_be_find_adapter_by_fd(int conn_fd)
@@ -114,7 +173,7 @@ mgmt_be_find_adapter_by_fd(int conn_fd)
struct mgmt_be_client_adapter *adapter;
FOREACH_ADAPTER_IN_LIST (adapter) {
- if (adapter->conn_fd == conn_fd)
+ if (adapter->conn->fd == conn_fd)
return adapter;
}
@@ -134,54 +193,36 @@ mgmt_be_find_adapter_by_name(const char *name)
return NULL;
}
-static void
-mgmt_be_cleanup_adapters(void)
-{
- struct mgmt_be_client_adapter *adapter;
-
- FOREACH_ADAPTER_IN_LIST (adapter)
- mgmt_be_adapter_unlock(&adapter);
-}
-
static void mgmt_be_xpath_map_init(void)
{
- int indx, num_xpath_maps;
- uint16_t indx1;
- enum mgmt_be_client_id id;
+ uint i;
MGMTD_BE_ADAPTER_DBG("Init XPath Maps");
- num_xpath_maps = (int)array_size(xpath_static_map_reg);
- for (indx = 0; indx < num_xpath_maps; indx++) {
+ mgmt_num_xpath_maps = array_size(mgmt_xpath_map_init);
+ mgmt_xpath_map =
+ calloc(1, sizeof(*mgmt_xpath_map) * mgmt_num_xpath_maps);
+ for (i = 0; i < mgmt_num_xpath_maps; i++) {
MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'",
- xpath_static_map_reg[indx].xpath_regexp);
- mgmt_xpath_map[indx].xpath_regexp =
- xpath_static_map_reg[indx].xpath_regexp;
- for (indx1 = 0;; indx1++) {
- id = xpath_static_map_reg[indx].be_clients[indx1];
- if (id == MGMTD_BE_CLIENT_ID_MAX)
- break;
- MGMTD_BE_ADAPTER_DBG(" -- Client: %s Id: %u",
- mgmt_be_client_id2name(id),
- id);
- if (id < MGMTD_BE_CLIENT_ID_MAX) {
- mgmt_xpath_map[indx]
- .be_subscrs.xpath_subscr[id]
- .validate_config = 1;
- mgmt_xpath_map[indx]
- .be_subscrs.xpath_subscr[id]
- .notify_config = 1;
- mgmt_xpath_map[indx]
- .be_subscrs.xpath_subscr[id]
- .own_oper_data = 1;
- }
- }
+ mgmt_xpath_map_init[i].xpath_regexp);
+ mgmt_xpath_map[i].xpath_regexp = XSTRDUP(
+ MTYPE_MGMTD_XPATH, mgmt_xpath_map_init[i].xpath_regexp);
+ memcpy(mgmt_xpath_map[i].subscr_info,
+ mgmt_xpath_map_init[i].subscr_info,
+ sizeof(mgmt_xpath_map_init[i].subscr_info));
}
-
- mgmt_num_xpath_maps = indx;
MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", mgmt_num_xpath_maps);
}
+static void mgmt_be_xpath_map_cleanup(void)
+{
+ uint i;
+
+ for (i = 0; i < mgmt_num_xpath_maps; i++)
+ XFREE(MTYPE_MGMTD_XPATH, mgmt_xpath_map[i].xpath_regexp);
+ free(mgmt_xpath_map);
+}
+
static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
const char *xpath)
{
@@ -309,28 +350,35 @@ static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
return match_len;
}
-static void mgmt_be_adapter_disconnect(struct mgmt_be_client_adapter *adapter)
+static void mgmt_be_adapter_delete(struct mgmt_be_client_adapter *adapter)
{
- if (adapter->conn_fd >= 0) {
- close(adapter->conn_fd);
- adapter->conn_fd = -1;
- }
+ MGMTD_BE_ADAPTER_DBG("deleting client adapter '%s'", adapter->name);
/*
- * Notify about client disconnect for appropriate cleanup
+ * Notify about disconnect for appropriate cleanup
*/
mgmt_txn_notify_be_adapter_conn(adapter, false);
-
if (adapter->id < MGMTD_BE_CLIENT_ID_MAX) {
mgmt_be_adapters_by_id[adapter->id] = NULL;
adapter->id = MGMTD_BE_CLIENT_ID_MAX;
}
- mgmt_be_adapters_del(&mgmt_be_adapters, adapter);
-
+ assert(adapter->refcount == 1);
mgmt_be_adapter_unlock(&adapter);
}
+static int mgmt_be_adapter_notify_disconnect(struct msg_conn *conn)
+{
+ struct mgmt_be_client_adapter *adapter = conn->user;
+
+ MGMTD_BE_ADAPTER_DBG("notify disconnect for client adapter '%s'",
+ adapter->name);
+
+ mgmt_be_adapter_delete(adapter);
+
+ return 0;
+}
+
static void
mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
{
@@ -344,12 +392,43 @@ mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
*/
MGMTD_BE_ADAPTER_DBG(
"Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
- adapter->name, adapter->conn_fd, old->conn_fd);
- mgmt_be_adapter_disconnect(old);
+ adapter->name, adapter->conn->fd,
+ old->conn->fd);
+ /* this will/should delete old */
+ msg_conn_disconnect(old->conn, false);
}
}
}
+
+static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
+ Mgmtd__BeMessage *be_msg)
+{
+ return msg_conn_send_msg(
+ adapter->conn, MGMT_MSG_VERSION_PROTOBUF, be_msg,
+ mgmtd__be_message__get_packed_size(be_msg),
+ (size_t(*)(void *, void *))mgmtd__be_message__pack, false);
+}
+
+static int mgmt_be_send_subscr_reply(struct mgmt_be_client_adapter *adapter,
+				     bool success)
+{
+	Mgmtd__BeMessage be_msg;
+	Mgmtd__BeSubscribeReply reply;
+
+	mgmtd__be_subscribe_reply__init(&reply);
+	reply.success = success;
+
+	mgmtd__be_message__init(&be_msg);
+	be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY;
+	be_msg.subscr_reply = &reply;
+
+	MGMTD_BE_ADAPTER_DBG("Sending SUBSCR_REPLY client: %s success: %u",
+			     adapter->name, success);
+
+	return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
static int
mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
Mgmtd__BeMessage *be_msg)
@@ -361,13 +440,13 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
switch ((int)be_msg->message_case) {
case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
MGMTD_BE_ADAPTER_DBG(
- "Got Subscribe Req Msg from '%s' to %sregister %u xpaths",
+ "Got SUBSCR_REQ from '%s' to %sregister %zu xpaths",
be_msg->subscr_req->client_name,
- !be_msg->subscr_req->subscribe_xpaths
- && be_msg->subscr_req->n_xpath_reg
+ !be_msg->subscr_req->subscribe_xpaths &&
+ be_msg->subscr_req->n_xpath_reg
? "de"
: "",
- (uint32_t)be_msg->subscr_req->n_xpath_reg);
+ be_msg->subscr_req->n_xpath_reg);
if (strlen(be_msg->subscr_req->client_name)) {
strlcpy(adapter->name, be_msg->subscr_req->client_name,
@@ -377,18 +456,30 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
MGMTD_BE_ADAPTER_ERR(
"Unable to resolve adapter '%s' to a valid ID. Disconnecting!",
adapter->name);
- mgmt_be_adapter_disconnect(adapter);
+ /* this will/should delete old */
+ msg_conn_disconnect(adapter->conn, false);
+ zlog_err("XXX different from original code");
+ break;
}
mgmt_be_adapters_by_id[adapter->id] = adapter;
mgmt_be_adapter_cleanup_old_conn(adapter);
+
+ /* schedule INIT sequence now that it is registered */
+ mgmt_be_adapter_sched_init_event(adapter);
}
+
+ if (be_msg->subscr_req->n_xpath_reg)
+ /* we aren't handling dynamic xpaths yet */
+ mgmt_be_send_subscr_reply(adapter, false);
+ else
+ mgmt_be_send_subscr_reply(adapter, true);
break;
case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
MGMTD_BE_ADAPTER_DBG(
- "Got %s TXN_REPLY Msg for Txn-Id 0x%llx from '%s' with '%s'",
+ "Got %s TXN_REPLY from '%s' txn-id %" PRIx64
+ " with '%s'",
be_msg->txn_reply->create ? "Create" : "Delete",
- (unsigned long long)be_msg->txn_reply->txn_id,
- adapter->name,
+ adapter->name, be_msg->txn_reply->txn_id,
be_msg->txn_reply->success ? "success" : "failure");
/*
* Forward the TXN_REPLY to txn module.
@@ -400,10 +491,10 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
break;
case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
MGMTD_BE_ADAPTER_DBG(
- "Got CFGDATA_REPLY Msg from '%s' for Txn-Id 0x%llx Batch-Id 0x%llx with Err:'%s'",
- adapter->name,
- (unsigned long long)be_msg->cfg_data_reply->txn_id,
- (unsigned long long)be_msg->cfg_data_reply->batch_id,
+ "Got CFGDATA_REPLY from '%s' txn-id %" PRIx64
+ " batch-id %" PRIu64 " err:'%s'",
+ adapter->name, be_msg->cfg_data_reply->txn_id,
+ be_msg->cfg_data_reply->batch_id,
be_msg->cfg_data_reply->error_if_any
? be_msg->cfg_data_reply->error_if_any
: "None");
@@ -418,19 +509,15 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
break;
case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
MGMTD_BE_ADAPTER_DBG(
- "Got %s CFG_APPLY_REPLY Msg from '%s' for Txn-Id 0x%llx for %d batches (Id 0x%llx-0x%llx), Err:'%s'",
+ "Got %s CFG_APPLY_REPLY from '%s' txn-id %" PRIx64
+ " for %zu batches id %" PRIu64 "-%" PRIu64 " err:'%s'",
be_msg->cfg_apply_reply->success ? "successful"
- : "failed",
- adapter->name,
- (unsigned long long)
- be_msg->cfg_apply_reply->txn_id,
- (int)be_msg->cfg_apply_reply->n_batch_ids,
- (unsigned long long)
- be_msg->cfg_apply_reply->batch_ids[0],
- (unsigned long long)be_msg->cfg_apply_reply
- ->batch_ids[be_msg->cfg_apply_reply
- ->n_batch_ids
- - 1],
+ : "failed",
+ adapter->name, be_msg->cfg_apply_reply->txn_id,
+ be_msg->cfg_apply_reply->n_batch_ids,
+ be_msg->cfg_apply_reply->batch_ids[0],
+ be_msg->cfg_apply_reply->batch_ids
+ [be_msg->cfg_apply_reply->n_batch_ids - 1],
be_msg->cfg_apply_reply->error_if_any
? be_msg->cfg_apply_reply->error_if_any
: "None");
@@ -477,47 +564,8 @@ mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
return 0;
}
-static inline void
-mgmt_be_adapter_sched_msg_write(struct mgmt_be_client_adapter *adapter)
-{
- if (!CHECK_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF))
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE);
-}
-
-static inline void
-mgmt_be_adapter_writes_on(struct mgmt_be_client_adapter *adapter)
-{
- MGMTD_BE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
- UNSET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF);
- mgmt_be_adapter_sched_msg_write(adapter);
-}
-
-static inline void
-mgmt_be_adapter_writes_off(struct mgmt_be_client_adapter *adapter)
-{
- SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF);
- MGMTD_BE_ADAPTER_DBG("Pause writing msgs for '%s'", adapter->name);
-}
-
-static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
- Mgmtd__BeMessage *be_msg)
-{
- if (adapter->conn_fd == -1) {
- MGMTD_BE_ADAPTER_DBG("can't send message on closed connection");
- return -1;
- }
-
- int rv = mgmt_msg_send_msg(
- &adapter->mstate, be_msg,
- mgmtd__be_message__get_packed_size(be_msg),
- (size_t(*)(void *, void *))mgmtd__be_message__pack,
- MGMT_DEBUG_BE_CHECK());
- mgmt_be_adapter_sched_msg_write(adapter);
- return rv;
-}
-
-static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, bool create)
+int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeTxnReq txn_req;
@@ -530,18 +578,16 @@ static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ;
be_msg.txn_req = &txn_req;
- MGMTD_BE_ADAPTER_DBG(
- "Sending TXN_REQ message to Backend client '%s' for Txn-Id %llx",
- adapter->name, (unsigned long long)txn_id);
+ MGMTD_BE_ADAPTER_DBG("Sending TXN_REQ to '%s' txn-id: %" PRIu64,
+ adapter->name, txn_id);
return mgmt_be_adapter_send_msg(adapter, &be_msg);
}
-static int
-mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, uint64_t batch_id,
- Mgmtd__YangCfgDataReq **cfgdata_reqs,
- size_t num_reqs, bool end_of_data)
+int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataCreateReq cfgdata_req;
@@ -558,15 +604,15 @@ mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter,
be_msg.cfg_data_req = &cfgdata_req;
MGMTD_BE_ADAPTER_DBG(
- "Sending CFGDATA_CREATE_REQ message to Backend client '%s' for Txn-Id %llx, Batch-Id: %llx",
- adapter->name, (unsigned long long)txn_id,
- (unsigned long long)batch_id);
+ "Sending CFGDATA_CREATE_REQ to '%s' txn-id: %" PRIu64
+ " batch-id: %" PRIu64,
+ adapter->name, txn_id, batch_id);
return mgmt_be_adapter_send_msg(adapter, &be_msg);
}
-static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
+int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataApplyReq apply_req;
@@ -578,20 +624,18 @@ static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ;
be_msg.cfg_apply_req = &apply_req;
- MGMTD_BE_ADAPTER_DBG(
- "Sending CFG_APPLY_REQ message to Backend client '%s' for Txn-Id 0x%llx",
- adapter->name, (unsigned long long)txn_id);
+ MGMTD_BE_ADAPTER_DBG("Sending CFG_APPLY_REQ to '%s' txn-id: %" PRIu64,
+ adapter->name, txn_id);
return mgmt_be_adapter_send_msg(adapter, &be_msg);
}
-static void mgmt_be_adapter_process_msg(void *user_ctx, uint8_t *data,
- size_t len)
+static void mgmt_be_adapter_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
{
- struct mgmt_be_client_adapter *adapter = user_ctx;
- Mgmtd__BeMessage *be_msg;
+ struct mgmt_be_client_adapter *adapter = conn->user;
+ Mgmtd__BeMessage *be_msg = mgmtd__be_message__unpack(NULL, len, data);
- be_msg = mgmtd__be_message__unpack(NULL, len, data);
if (!be_msg) {
MGMTD_BE_ADAPTER_DBG(
"Failed to decode %zu bytes for adapter: %s", len,
@@ -604,95 +648,29 @@ static void mgmt_be_adapter_process_msg(void *user_ctx, uint8_t *data,
mgmtd__be_message__free_unpacked(be_msg, NULL);
}
-static void mgmt_be_adapter_proc_msgbufs(struct event *thread)
-{
- struct mgmt_be_client_adapter *adapter = EVENT_ARG(thread);
-
- if (mgmt_msg_procbufs(&adapter->mstate, mgmt_be_adapter_process_msg,
- adapter, MGMT_DEBUG_BE_CHECK()))
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG);
-}
-
-static void mgmt_be_adapter_read(struct event *thread)
-{
- struct mgmt_be_client_adapter *adapter;
- enum mgmt_msg_rsched rv;
-
- adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
-
- rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd,
- MGMT_DEBUG_BE_CHECK());
- if (rv == MSR_DISCONNECT) {
- mgmt_be_adapter_disconnect(adapter);
- return;
- }
- if (rv == MSR_SCHED_BOTH)
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG);
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
-}
-
-static void mgmt_be_adapter_write(struct event *thread)
-{
- struct mgmt_be_client_adapter *adapter = EVENT_ARG(thread);
- enum mgmt_msg_wsched rv;
-
- rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd,
- MGMT_DEBUG_BE_CHECK());
- if (rv == MSW_SCHED_STREAM)
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE);
- else if (rv == MSW_DISCONNECT)
- mgmt_be_adapter_disconnect(adapter);
- else if (rv == MSW_SCHED_WRITES_OFF) {
- mgmt_be_adapter_writes_off(adapter);
- mgmt_be_adapter_register_event(adapter,
- MGMTD_BE_CONN_WRITES_ON);
- } else
- assert(rv == MSW_SCHED_NONE);
-}
-
-static void mgmt_be_adapter_resume_writes(struct event *thread)
-{
- struct mgmt_be_client_adapter *adapter;
-
- adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
- assert(adapter && adapter->conn_fd >= 0);
-
- mgmt_be_adapter_writes_on(adapter);
-}
-
static void mgmt_be_iter_and_get_cfg(struct mgmt_ds_ctx *ds_ctx,
- char *xpath, struct lyd_node *node,
- struct nb_node *nb_node, void *ctx)
+ const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx)
{
- struct mgmt_be_client_subscr_info subscr_info;
- struct mgmt_be_get_adapter_config_params *parms;
- struct mgmt_be_client_adapter *adapter;
- struct nb_config_cbs *root;
- uint32_t *seq;
-
- if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info) != 0) {
- MGMTD_BE_ADAPTER_ERR(
- "ERROR: Failed to get subscriber for '%s'", xpath);
- return;
- }
-
- parms = (struct mgmt_be_get_adapter_config_params *)ctx;
+ struct mgmt_be_get_adapter_config_params *parms = ctx;
+ struct mgmt_be_client_adapter *adapter = parms->adapter;
+ uint subscr;
- adapter = parms->adapter;
- if (!subscr_info.xpath_subscr[adapter->id].subscribed)
- return;
-
- root = parms->cfg_chgs;
- seq = &parms->seq;
- nb_config_diff_created(node, seq, root);
+ subscr = mgmt_be_get_subscr_for_xpath_and_client(
+ xpath, adapter->id, MGMT_SUBSCR_NOTIFY_CFG);
+ if (subscr)
+ nb_config_diff_created(node, &parms->seq, parms->cfg_chgs);
}
+/*
+ * Initialize a BE client over a new connection
+ */
static void mgmt_be_adapter_conn_init(struct event *thread)
{
struct mgmt_be_client_adapter *adapter;
adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
- assert(adapter && adapter->conn_fd >= 0);
+ assert(adapter && adapter->conn->fd >= 0);
/*
* Check first if the current session can run a CONFIG
@@ -700,7 +678,8 @@ static void mgmt_be_adapter_conn_init(struct event *thread)
* from another session is already in progress.
*/
if (mgmt_config_txn_in_progress() != MGMTD_SESSION_ID_NONE) {
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);
+ zlog_err("XXX txn in progress, retry init");
+ mgmt_be_adapter_sched_init_event(adapter);
return;
}
@@ -712,61 +691,21 @@ static void mgmt_be_adapter_conn_init(struct event *thread)
* That should also take care of destroying the adapter.
*/
if (mgmt_txn_notify_be_adapter_conn(adapter, true) != 0) {
- mgmt_be_adapter_disconnect(adapter);
+ zlog_err("XXX notify be adapter conn fail");
+ msg_conn_disconnect(adapter->conn, false);
adapter = NULL;
}
}
+/*
+ * Schedule the initialization of the BE client connection.
+ */
static void
-mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
- enum mgmt_be_event event)
+mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter)
{
- struct timeval tv = {0};
-
- switch (event) {
- case MGMTD_BE_CONN_INIT:
- event_add_timer_msec(mgmt_be_adapter_tm,
- mgmt_be_adapter_conn_init, adapter,
- MGMTD_BE_CONN_INIT_DELAY_MSEC,
- &adapter->conn_init_ev);
- break;
- case MGMTD_BE_CONN_READ:
- event_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
- adapter, adapter->conn_fd, &adapter->conn_read_ev);
- break;
- case MGMTD_BE_CONN_WRITE:
- if (adapter->conn_write_ev)
- MGMTD_BE_ADAPTER_DBG(
- "write ready notify already set for client %s",
- adapter->name);
- else
- MGMTD_BE_ADAPTER_DBG(
- "scheduling write ready notify for client %s",
- adapter->name);
- event_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
- adapter, adapter->conn_fd, &adapter->conn_write_ev);
- assert(adapter->conn_write_ev);
- break;
- case MGMTD_BE_PROC_MSG:
- tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
- event_add_timer_tv(mgmt_be_adapter_tm,
- mgmt_be_adapter_proc_msgbufs, adapter, &tv,
- &adapter->proc_msg_ev);
- break;
- case MGMTD_BE_CONN_WRITES_ON:
- event_add_timer_msec(mgmt_be_adapter_tm,
- mgmt_be_adapter_resume_writes, adapter,
- MGMTD_BE_MSG_WRITE_DELAY_MSEC,
- &adapter->conn_writes_on);
- break;
- case MGMTD_BE_SERVER:
- case MGMTD_BE_SCHED_CFG_PREPARE:
- case MGMTD_BE_RESCHED_CFG_PREPARE:
- case MGMTD_BE_SCHED_CFG_APPLY:
- case MGMTD_BE_RESCHED_CFG_APPLY:
- assert(!"mgmt_be_adapter_post_event() called incorrectly");
- break;
- }
+ event_add_timer_msec(mgmt_loop, mgmt_be_adapter_conn_init, adapter,
+ MGMTD_BE_CONN_INIT_DELAY_MSEC,
+ &adapter->conn_init_ev);
}
void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter)
@@ -776,82 +715,81 @@ void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter)
extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter)
{
- assert(*adapter && (*adapter)->refcount);
-
- (*adapter)->refcount--;
- if (!(*adapter)->refcount) {
- mgmt_be_adapters_del(&mgmt_be_adapters, *adapter);
- EVENT_OFF((*adapter)->conn_init_ev);
- EVENT_OFF((*adapter)->conn_read_ev);
- EVENT_OFF((*adapter)->conn_write_ev);
- EVENT_OFF((*adapter)->conn_writes_on);
- EVENT_OFF((*adapter)->proc_msg_ev);
- mgmt_msg_destroy(&(*adapter)->mstate);
- XFREE(MTYPE_MGMTD_BE_ADPATER, *adapter);
+ struct mgmt_be_client_adapter *a = *adapter;
+ assert(a && a->refcount);
+
+ if (!--a->refcount) {
+ mgmt_be_adapters_del(&mgmt_be_adapters, a);
+ EVENT_OFF(a->conn_init_ev);
+ msg_server_conn_delete(a->conn);
+ XFREE(MTYPE_MGMTD_BE_ADPATER, a);
}
*adapter = NULL;
}
-int mgmt_be_adapter_init(struct event_loop *tm)
+/*
+ * Initialize the BE adapter module
+ */
+void mgmt_be_adapter_init(struct event_loop *tm)
{
- if (!mgmt_be_adapter_tm) {
- mgmt_be_adapter_tm = tm;
- memset(mgmt_xpath_map, 0, sizeof(mgmt_xpath_map));
- mgmt_num_xpath_maps = 0;
- memset(mgmt_be_adapters_by_id, 0,
- sizeof(mgmt_be_adapters_by_id));
- mgmt_be_adapters_init(&mgmt_be_adapters);
- mgmt_be_xpath_map_init();
- }
+ assert(!mgmt_loop);
+ mgmt_loop = tm;
- return 0;
+ mgmt_be_adapters_init(&mgmt_be_adapters);
+ mgmt_be_xpath_map_init();
+
+ if (msg_server_init(&mgmt_be_server, MGMTD_BE_SERVER_PATH, tm,
+ mgmt_be_create_adapter, "backend",
+ &mgmt_debug_be)) {
+ zlog_err("cannot initialize backend server");
+ exit(1);
+ }
}
+/*
+ * Destroy the BE adapter module
+ */
void mgmt_be_adapter_destroy(void)
{
- mgmt_be_cleanup_adapters();
+ struct mgmt_be_client_adapter *adapter;
+
+ msg_server_cleanup(&mgmt_be_server);
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ mgmt_be_adapter_delete(adapter);
+ }
+ mgmt_be_xpath_map_cleanup();
}
-struct mgmt_be_client_adapter *
-mgmt_be_create_adapter(int conn_fd, union sockunion *from)
+/*
+ * The server accepted a new connection
+ */
+struct msg_conn *mgmt_be_create_adapter(int conn_fd, union sockunion *from)
{
struct mgmt_be_client_adapter *adapter = NULL;
- adapter = mgmt_be_find_adapter_by_fd(conn_fd);
- if (!adapter) {
- adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
- sizeof(struct mgmt_be_client_adapter));
- assert(adapter);
+ assert(!mgmt_be_find_adapter_by_fd(conn_fd));
- adapter->conn_fd = conn_fd;
- adapter->id = MGMTD_BE_CLIENT_ID_MAX;
- memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
- snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
- adapter->conn_fd);
- mgmt_msg_init(&adapter->mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
- MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
- "BE-adapter");
- mgmt_be_adapter_lock(adapter);
-
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
- mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
-
- RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
+ adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
+ sizeof(struct mgmt_be_client_adapter));
+ adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+ snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+ conn_fd);
- MGMTD_BE_ADAPTER_DBG("Added new MGMTD Backend adapter '%s'",
- adapter->name);
- }
+ mgmt_be_adapter_lock(adapter);
+ mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
+ RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
- /* Make client socket non-blocking. */
- set_nonblocking(adapter->conn_fd);
- setsockopt_so_sendbuf(adapter->conn_fd, MGMTD_SOCKET_BE_SEND_BUF_SIZE);
- setsockopt_so_recvbuf(adapter->conn_fd, MGMTD_SOCKET_BE_RECV_BUF_SIZE);
+ adapter->conn = msg_server_conn_create(
+ mgmt_loop, conn_fd, mgmt_be_adapter_notify_disconnect,
+ mgmt_be_adapter_process_msg, MGMTD_BE_MAX_NUM_MSG_PROC,
+ MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN, adapter,
+ "BE-adapter");
- /* Trigger resync of config with the new adapter */
- mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);
+ MGMTD_BE_ADAPTER_DBG("Added new MGMTD Backend adapter '%s'",
+ adapter->name);
- return adapter;
+ return adapter->conn;
}
struct mgmt_be_client_adapter *
@@ -871,115 +809,98 @@ int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
struct mgmt_ds_ctx *ds_ctx,
struct nb_config_cbs **cfg_chgs)
{
- char base_xpath[] = "/";
struct mgmt_be_get_adapter_config_params parms;
assert(cfg_chgs);
+ /*
+ * TODO: we should consider making this an assertable condition and
+ * guaranteeing it be true when this function is called. B/c what is
+ * going to happen if there are some changes being sent, and we don't
+ * gather a new snapshot, what new changes that came after the previous
+ * snapshot will then be lost?
+ */
if (RB_EMPTY(nb_config_cbs, &adapter->cfg_chgs)) {
parms.adapter = adapter;
parms.cfg_chgs = &adapter->cfg_chgs;
parms.seq = 0;
- mgmt_ds_iter_data(ds_ctx, base_xpath,
- mgmt_be_iter_and_get_cfg, (void *)&parms,
- false);
+ mgmt_ds_iter_data(ds_ctx, "", mgmt_be_iter_and_get_cfg,
+ (void *)&parms);
}
*cfg_chgs = &adapter->cfg_chgs;
return 0;
}
-int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
-{
- return mgmt_be_send_txn_req(adapter, txn_id, true);
-}
-
-int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
-{
- return mgmt_be_send_txn_req(adapter, txn_id, false);
-}
-
-int mgmt_be_send_cfg_data_create_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, uint64_t batch_id,
- struct mgmt_be_cfgreq *cfg_req,
- bool end_of_data)
-{
- return mgmt_be_send_cfgdata_create_req(
- adapter, txn_id, batch_id, cfg_req->cfgdata_reqs,
- cfg_req->num_reqs, end_of_data);
-}
-
-extern int
-mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id)
-{
- return mgmt_be_send_cfgapply_req(adapter, txn_id);
-}
-
-/*
- * This function maps a YANG dtata Xpath to one or more
- * Backend Clients that should be contacted for various purposes.
- */
-int mgmt_be_get_subscr_info_for_xpath(
+void mgmt_be_get_subscr_info_for_xpath(
 	const char *xpath, struct mgmt_be_client_subscr_info *subscr_info)
 {
-	int indx, match, max_match = 0, num_reg;
 	enum mgmt_be_client_id id;
-	struct mgmt_be_client_subscr_info
-		*reg_maps[array_size(mgmt_xpath_map)] = {0};
-	bool root_xp = false;
-
-	if (!subscr_info)
-		return -1;
+	uint i;
 
-	num_reg = 0;
 	memset(subscr_info, 0, sizeof(*subscr_info));
 
-	if (strlen(xpath) <= 2 && xpath[0] == '/'
-	    && (!xpath[1] || xpath[1] == '*')) {
-		root_xp = true;
-	}
-
-	MGMTD_BE_ADAPTER_DBG("XPATH: %s", xpath);
-	for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
-		/*
-		 * For Xpaths: '/' and '/ *' all xpath maps should match
-		 * the given xpath.
-		 */
-		if (!root_xp) {
-			match = mgmt_be_eval_regexp_match(
-				mgmt_xpath_map[indx].xpath_regexp, xpath);
-
-			if (!match || match < max_match)
-				continue;
-
-			if (match > max_match) {
-				num_reg = 0;
-				max_match = match;
-			}
+	MGMTD_BE_ADAPTER_DBG("XPATH: '%s'", xpath);
+	for (i = 0; i < mgmt_num_xpath_maps; i++) {
+		if (!mgmt_be_eval_regexp_match(mgmt_xpath_map[i].xpath_regexp,
+					       xpath))
+			continue;
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			subscr_info->xpath_subscr[id] |=
+				mgmt_xpath_map[i].subscr_info[id];
 		}
-
-		reg_maps[num_reg] = &mgmt_xpath_map[indx].be_subscrs;
-		num_reg++;
 	}
-	for (indx = 0; indx < num_reg; indx++) {
+	if (DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL)) {
 		FOREACH_MGMTD_BE_CLIENT_ID (id) {
-			if (reg_maps[indx]->xpath_subscr[id].subscribed) {
-				MGMTD_BE_ADAPTER_DBG(
-					"Cient: %s",
-					mgmt_be_client_id2name(id));
-				memcpy(&subscr_info->xpath_subscr[id],
-				       &reg_maps[indx]->xpath_subscr[id],
-				       sizeof(subscr_info->xpath_subscr[id]));
-			}
+			if (!subscr_info->xpath_subscr[id])
+				continue;
+			MGMTD_BE_ADAPTER_DBG("Client: %s: subscribed: 0x%x",
+					     mgmt_be_client_id2name(id),
+					     subscr_info->xpath_subscr[id]);
 		}
 	}
+}
- return 0;
+/**
+ * Return the subscription info bits for a given `xpath` for a given
+ * `client_id`.
+ *
+ * Args:
+ * xpath - the xpath to check for subscription information.
+ * client_id - the BE client being checked for.
+ * subscr_mask - The subscr bits the caller is interested in seeing
+ * if set.
+ *
+ * Returns:
+ * The subscription info bits.
+ */
+static uint mgmt_be_get_subscr_for_xpath_and_client(
+ const char *xpath, enum mgmt_be_client_id client_id, uint subscr_mask)
+{
+ struct mgmt_be_client_xpath_map *map;
+ uint subscr = 0;
+ uint i;
+
+ assert(client_id < MGMTD_BE_CLIENT_ID_MAX);
+
+ MGMTD_BE_ADAPTER_DBG("Checking client: %s for xpath: '%s'",
+ mgmt_be_client_id2name(client_id), xpath);
+
+ map = &mgmt_client_xpaths[client_id];
+ for (i = 0; i < map->nxpaths; i++) {
+ if (!mgmt_be_eval_regexp_match(map->xpaths[i].xpath, xpath))
+ continue;
+ MGMTD_BE_ADAPTER_DBG("xpath: %s: matched: %s",
+ map->xpaths[i].xpath, xpath);
+ subscr |= map->xpaths[i].subscribed;
+ if ((subscr & subscr_mask) == subscr_mask)
+ break;
+ }
+ MGMTD_BE_ADAPTER_DBG("client: %s: subscribed: 0x%x",
+ mgmt_be_client_id2name(client_id), subscr);
+ return subscr;
}
void mgmt_be_adapter_status_write(struct vty *vty)
@@ -990,17 +911,17 @@ void mgmt_be_adapter_status_write(struct vty *vty)
FOREACH_ADAPTER_IN_LIST (adapter) {
vty_out(vty, " Client: \t\t\t%s\n", adapter->name);
- vty_out(vty, " Conn-FD: \t\t\t%d\n", adapter->conn_fd);
+ vty_out(vty, " Conn-FD: \t\t\t%d\n", adapter->conn->fd);
vty_out(vty, " Client-Id: \t\t\t%d\n", adapter->id);
vty_out(vty, " Ref-Count: \t\t\t%u\n", adapter->refcount);
vty_out(vty, " Msg-Recvd: \t\t\t%" PRIu64 "\n",
- adapter->mstate.nrxm);
+ adapter->conn->mstate.nrxm);
vty_out(vty, " Bytes-Recvd: \t\t%" PRIu64 "\n",
- adapter->mstate.nrxb);
+ adapter->conn->mstate.nrxb);
vty_out(vty, " Msg-Sent: \t\t\t%" PRIu64 "\n",
- adapter->mstate.ntxm);
+ adapter->conn->mstate.ntxm);
vty_out(vty, " Bytes-Sent: \t\t%" PRIu64 "\n",
- adapter->mstate.ntxb);
+ adapter->conn->mstate.ntxb);
}
vty_out(vty, " Total: %d\n",
(int)mgmt_be_adapters_count(&mgmt_be_adapters));
@@ -1008,9 +929,10 @@ void mgmt_be_adapter_status_write(struct vty *vty)
void mgmt_be_xpath_register_write(struct vty *vty)
{
- int indx;
+ uint indx;
enum mgmt_be_client_id id;
struct mgmt_be_client_adapter *adapter;
+ uint info;
vty_out(vty, "MGMTD Backend XPath Registry\n");
@@ -1018,36 +940,18 @@ void mgmt_be_xpath_register_write(struct vty *vty)
vty_out(vty, " - XPATH: '%s'\n",
mgmt_xpath_map[indx].xpath_regexp);
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- if (mgmt_xpath_map[indx]
- .be_subscrs.xpath_subscr[id]
- .subscribed) {
- vty_out(vty,
- " -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
- mgmt_be_client_id2name(id),
- mgmt_xpath_map[indx]
- .be_subscrs
- .xpath_subscr[id]
- .validate_config
- ? "T"
- : "F",
- mgmt_xpath_map[indx]
- .be_subscrs
- .xpath_subscr[id]
- .notify_config
- ? "T"
- : "F",
- mgmt_xpath_map[indx]
- .be_subscrs
- .xpath_subscr[id]
- .own_oper_data
- ? "T"
- : "F");
- adapter = mgmt_be_get_adapter_by_id(id);
- if (adapter) {
- vty_out(vty, " -- Adapter: %p\n",
- adapter);
- }
- }
+ info = mgmt_xpath_map[indx].subscr_info[id];
+ if (!info)
+ continue;
+ vty_out(vty,
+ " -- Client: '%s'\tValidate:%d, Notify:%d, Own:%d\n",
+ mgmt_be_client_id2name(id),
+ (info & MGMT_SUBSCR_VALIDATE_CFG) != 0,
+ (info & MGMT_SUBSCR_NOTIFY_CFG) != 0,
+ (info & MGMT_SUBSCR_OPER_OWN) != 0);
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter)
+ vty_out(vty, " -- Adapter: %p\n", adapter);
}
}
@@ -1059,28 +963,23 @@ void mgmt_be_xpath_subscr_info_write(struct vty *vty, const char *xpath)
struct mgmt_be_client_subscr_info subscr;
enum mgmt_be_client_id id;
struct mgmt_be_client_adapter *adapter;
+ uint info;
- if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr) != 0) {
- vty_out(vty, "ERROR: Failed to get subscriber for '%s'\n",
- xpath);
- return;
- }
+ mgmt_be_get_subscr_info_for_xpath(xpath, &subscr);
vty_out(vty, "XPath: '%s'\n", xpath);
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- if (subscr.xpath_subscr[id].subscribed) {
- vty_out(vty,
- " -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
- mgmt_be_client_id2name(id),
- subscr.xpath_subscr[id].validate_config ? "T"
- : "F",
- subscr.xpath_subscr[id].notify_config ? "T"
- : "F",
- subscr.xpath_subscr[id].own_oper_data ? "T"
- : "F");
- adapter = mgmt_be_get_adapter_by_id(id);
- if (adapter)
- vty_out(vty, " -- Adapter: %p\n", adapter);
- }
+ info = subscr.xpath_subscr[id];
+ if (!info)
+ continue;
+ vty_out(vty,
+ " -- Client: '%s'\tValidate:%d, Notify:%d, Own:%d\n",
+ mgmt_be_client_id2name(id),
+ (info & MGMT_SUBSCR_VALIDATE_CFG) != 0,
+ (info & MGMT_SUBSCR_NOTIFY_CFG) != 0,
+ (info & MGMT_SUBSCR_OPER_OWN) != 0);
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter)
+ vty_out(vty, " -- Adapter: %p\n", adapter);
}
}
diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h
index 88d54a7842..e1676e63af 100644
--- a/mgmtd/mgmt_be_adapter.h
+++ b/mgmtd/mgmt_be_adapter.h
@@ -4,6 +4,7 @@
*
* Copyright (C) 2021 Vmware, Inc.
* Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
*/
#ifndef _FRR_MGMTD_BE_ADAPTER_H_
@@ -41,22 +42,16 @@ PREDECL_LIST(mgmt_be_adapters);
PREDECL_LIST(mgmt_txn_badapters);
struct mgmt_be_client_adapter {
- enum mgmt_be_client_id id;
- int conn_fd;
- union sockunion conn_su;
+ struct msg_conn *conn;
+
struct event *conn_init_ev;
- struct event *conn_read_ev;
- struct event *conn_write_ev;
- struct event *conn_writes_on;
- struct event *proc_msg_ev;
+
+ enum mgmt_be_client_id id;
uint32_t flags;
char name[MGMTD_CLIENT_NAME_MAX_LEN];
uint8_t num_xpath_reg;
char xpath_reg[MGMTD_MAX_NUM_XPATH_REG][MGMTD_MAX_XPATH_LEN];
- /* IO streams for read and write */
- struct mgmt_msg_state mstate;
-
int refcount;
/*
@@ -68,31 +63,30 @@ struct mgmt_be_client_adapter {
struct nb_config_cbs cfg_chgs;
struct mgmt_be_adapters_item list_linkage;
- struct mgmt_txn_badapters_item txn_list_linkage;
};
-#define MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
-#define MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED (1U << 1)
+#define MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED (1U << 0)
DECLARE_LIST(mgmt_be_adapters, struct mgmt_be_client_adapter, list_linkage);
-DECLARE_LIST(mgmt_txn_badapters, struct mgmt_be_client_adapter,
- txn_list_linkage);
-
-union mgmt_be_xpath_subscr_info {
- uint8_t subscribed;
- struct {
- uint8_t validate_config : 1;
- uint8_t notify_config : 1;
- uint8_t own_oper_data : 1;
- };
-};
+
+/*
+ * MGMT_SUBSCR_xxx - flags for subscription types for xpaths registrations
+ *
+ * MGMT_SUBSCR_VALIDATE_CFG :: the client should be asked to validate config
+ * MGMT_SUBSCR_NOTIFY_CFG :: the client should be notified of config changes
+ * MGMT_SUBSCR_OPER_OWN :: the client owns the given operational state
+ */
+#define MGMT_SUBSCR_VALIDATE_CFG 0x1
+#define MGMT_SUBSCR_NOTIFY_CFG 0x2
+#define MGMT_SUBSCR_OPER_OWN 0x4
+#define MGMT_SUBSCR_ALL 0x7
struct mgmt_be_client_subscr_info {
- union mgmt_be_xpath_subscr_info xpath_subscr[MGMTD_BE_CLIENT_ID_MAX];
+ uint xpath_subscr[MGMTD_BE_CLIENT_ID_MAX];
};
/* Initialise backend adapter module. */
-extern int mgmt_be_adapter_init(struct event_loop *tm);
+extern void mgmt_be_adapter_init(struct event_loop *tm);
/* Destroy the backend adapter module. */
extern void mgmt_be_adapter_destroy(void);
@@ -104,8 +98,8 @@ extern void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter);
extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter);
/* Create backend adapter. */
-extern struct mgmt_be_client_adapter *
-mgmt_be_create_adapter(int conn_fd, union sockunion *su);
+extern struct msg_conn *mgmt_be_create_adapter(int conn_fd,
+ union sockunion *su);
/* Fetch backend adapter given an adapter name. */
extern struct mgmt_be_client_adapter *
@@ -121,13 +115,9 @@ mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
struct mgmt_ds_ctx *ds_ctx,
struct nb_config_cbs **cfg_chgs);
-/* Create a transaction. */
-extern int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id);
-
-/* Destroy a transaction. */
-extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id);
+/* Create/destroy a transaction. */
+extern int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create);
/*
* Send config data create request to backend client.
@@ -141,8 +131,11 @@ extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
* batch_id
* Request batch ID.
*
- * cfg_req
- * Config data request.
+ * cfgdata_reqs
+ * An array of pointers to Mgmtd__YangCfgDataReq.
+ *
+ * num_reqs
+ * Length of the cfgdata_reqs array.
*
* end_of_data
* TRUE if the data from last batch, FALSE otherwise.
@@ -150,37 +143,15 @@ extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
* Returns:
* 0 on success, -1 on failure.
*/
-extern int mgmt_be_send_cfg_data_create_req(
- struct mgmt_be_client_adapter *adapter, uint64_t txn_id,
- uint64_t batch_id, struct mgmt_be_cfgreq *cfg_req, bool end_of_data);
-
-/*
- * Send config validate request to backend client.
- *
- * adaptr
- * Backend adapter information.
- *
- * txn_id
- * Unique transaction identifier.
- *
- * batch_ids
- * List of request batch IDs.
- *
- * num_batch_ids
- * Number of batch ids.
- *
- * Returns:
- * 0 on success, -1 on failure.
- */
-extern int
-mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id, uint64_t batch_ids[],
- size_t num_batch_ids);
+extern int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data);
/*
* Send config apply request to backend client.
*
- * adaptr
+ * adapter
* Backend adapter information.
*
* txn_id
@@ -189,9 +160,8 @@ mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter,
* Returns:
* 0 on success, -1 on failure.
*/
-extern int
-mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
- uint64_t txn_id);
+extern int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
/*
* Dump backend adapter status to vty.
@@ -203,11 +173,17 @@ extern void mgmt_be_adapter_status_write(struct vty *vty);
*/
extern void mgmt_be_xpath_register_write(struct vty *vty);
-/*
- * Maps a YANG dtata Xpath to one or more
- * backend clients that should be contacted for various purposes.
+/**
+ * Lookup the clients which are subscribed to a given `xpath`
+ * and the way they are subscribed.
+ *
+ * Args:
+ * xpath - the xpath to check for subscription information.
+ * subscr_info - An array of uint indexed by client id
+ * each eleemnt holds the subscription info
+ * for that client.
*/
-extern int mgmt_be_get_subscr_info_for_xpath(
+extern void mgmt_be_get_subscr_info_for_xpath(
const char *xpath, struct mgmt_be_client_subscr_info *subscr_info);
/*
diff --git a/mgmtd/mgmt_be_server.c b/mgmtd/mgmt_be_server.c
deleted file mode 100644
index 029e032feb..0000000000
--- a/mgmtd/mgmt_be_server.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * MGMTD Backend Server
- *
- * Copyright (C) 2021 Vmware, Inc.
- * Pushpasis Sarkar <spushpasis@vmware.com>
- */
-
-#include <zebra.h>
-#include "network.h"
-#include "libfrr.h"
-#include "mgmtd/mgmt.h"
-#include "mgmtd/mgmt_be_server.h"
-#include "mgmtd/mgmt_be_adapter.h"
-
-#define MGMTD_BE_SRVR_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_be, "%s:" fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_BE_SRVR_ERR(fmt, ...) \
- zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
-
-static int mgmt_be_listen_fd = -1;
-static struct event_loop *mgmt_be_listen_tm;
-static struct event *mgmt_be_listen_ev;
-static void mgmt_be_server_register_event(enum mgmt_be_event event);
-
-static void mgmt_be_conn_accept(struct event *thread)
-{
- int client_conn_fd;
- union sockunion su;
-
- if (mgmt_be_listen_fd < 0)
- return;
-
- /* We continue hearing server listen socket. */
- mgmt_be_server_register_event(MGMTD_BE_SERVER);
-
- memset(&su, 0, sizeof(union sockunion));
-
- /* We can handle IPv4 or IPv6 socket. */
- client_conn_fd = sockunion_accept(mgmt_be_listen_fd, &su);
- if (client_conn_fd < 0) {
- MGMTD_BE_SRVR_ERR(
- "Failed to accept MGMTD Backend client connection : %s",
- safe_strerror(errno));
- return;
- }
- set_nonblocking(client_conn_fd);
- set_cloexec(client_conn_fd);
-
- MGMTD_BE_SRVR_DBG("Got a new MGMTD Backend connection");
-
- mgmt_be_create_adapter(client_conn_fd, &su);
-}
-
-static void mgmt_be_server_register_event(enum mgmt_be_event event)
-{
- if (event == MGMTD_BE_SERVER) {
- event_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
- NULL, mgmt_be_listen_fd,
- &mgmt_be_listen_ev);
- assert(mgmt_be_listen_ev);
- } else {
- assert(!"mgmt_be_server_post_event() called incorrectly");
- }
-}
-
-static void mgmt_be_server_start(const char *hostname)
-{
- int ret;
- int sock;
- struct sockaddr_un addr;
- mode_t old_mask;
-
- /* Set umask */
- old_mask = umask(0077);
-
- sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
- if (sock < 0) {
- MGMTD_BE_SRVR_ERR("Failed to create server socket: %s",
- safe_strerror(errno));
- goto mgmt_be_server_start_failed;
- }
-
- addr.sun_family = AF_UNIX,
- strlcpy(addr.sun_path, MGMTD_BE_SERVER_PATH, sizeof(addr.sun_path));
- unlink(addr.sun_path);
- ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
- if (ret < 0) {
- MGMTD_BE_SRVR_ERR(
- "Failed to bind server socket to '%s'. Err: %s",
- addr.sun_path, safe_strerror(errno));
- goto mgmt_be_server_start_failed;
- }
-
- ret = listen(sock, MGMTD_BE_MAX_CONN);
- if (ret < 0) {
- MGMTD_BE_SRVR_ERR("Failed to listen on server socket: %s",
- safe_strerror(errno));
- goto mgmt_be_server_start_failed;
- }
-
- /* Restore umask */
- umask(old_mask);
-
- mgmt_be_listen_fd = sock;
- mgmt_be_server_register_event(MGMTD_BE_SERVER);
-
- MGMTD_BE_SRVR_DBG("Started MGMTD Backend Server!");
- return;
-
-mgmt_be_server_start_failed:
- if (sock > 0)
- close(sock);
-
- mgmt_be_listen_fd = -1;
- exit(-1);
-}
-
-int mgmt_be_server_init(struct event_loop *master)
-{
- if (mgmt_be_listen_tm) {
- MGMTD_BE_SRVR_DBG("MGMTD Backend Server already running!");
- return 0;
- }
-
- mgmt_be_listen_tm = master;
-
- mgmt_be_server_start("localhost");
-
- return 0;
-}
-
-void mgmt_be_server_destroy(void)
-{
- if (mgmt_be_listen_tm) {
- MGMTD_BE_SRVR_DBG("Closing MGMTD Backend Server!");
-
- if (mgmt_be_listen_ev) {
- EVENT_OFF(mgmt_be_listen_ev);
- mgmt_be_listen_ev = NULL;
- }
-
- if (mgmt_be_listen_fd >= 0) {
- close(mgmt_be_listen_fd);
- mgmt_be_listen_fd = -1;
- }
-
- mgmt_be_listen_tm = NULL;
- }
-}
diff --git a/mgmtd/mgmt_be_server.h b/mgmtd/mgmt_be_server.h
deleted file mode 100644
index 63731a0ef5..0000000000
--- a/mgmtd/mgmt_be_server.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * MGMTD Backend Server
- *
- * Copyright (C) 2021 Vmware, Inc.
- * Pushpasis Sarkar
- */
-
-#ifndef _FRR_MGMTD_BE_SERVER_H_
-#define _FRR_MGMTD_BE_SERVER_H_
-
-#define MGMTD_BE_MAX_CONN 32
-
-/* Initialise backend server */
-extern int mgmt_be_server_init(struct event_loop *master);
-
-/* Destroy backend server */
-extern void mgmt_be_server_destroy(void);
-
-#endif /* _FRR_MGMTD_BE_SERVER_H_ */
diff --git a/mgmtd/mgmt_defines.h b/mgmtd/mgmt_defines.h
index ee2f376f48..40fa67075d 100644
--- a/mgmtd/mgmt_defines.h
+++ b/mgmtd/mgmt_defines.h
@@ -36,7 +36,6 @@ enum mgmt_fe_event {
MGMTD_FE_SERVER = 1,
MGMTD_FE_CONN_READ,
MGMTD_FE_CONN_WRITE,
- MGMTD_FE_CONN_WRITES_ON,
MGMTD_FE_PROC_MSG
};
@@ -45,7 +44,6 @@ enum mgmt_be_event {
MGMTD_BE_CONN_INIT,
MGMTD_BE_CONN_READ,
MGMTD_BE_CONN_WRITE,
- MGMTD_BE_CONN_WRITES_ON,
MGMTD_BE_PROC_MSG,
MGMTD_BE_SCHED_CFG_PREPARE,
MGMTD_BE_RESCHED_CFG_PREPARE,
diff --git a/mgmtd/mgmt_ds.c b/mgmtd/mgmt_ds.c
index b8cb5b2b15..3fd47862b2 100644
--- a/mgmtd/mgmt_ds.c
+++ b/mgmtd/mgmt_ds.c
@@ -312,36 +312,28 @@ struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx)
}
static int mgmt_walk_ds_nodes(
- struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+ struct mgmt_ds_ctx *ds_ctx, const char *base_xpath,
struct lyd_node *base_dnode,
- void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx, char *xpath,
- struct lyd_node *node,
+ void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, struct lyd_node *node,
struct nb_node *nb_node, void *ctx),
- void *ctx, char *xpaths[], int *num_nodes, bool childs_as_well,
- bool alloc_xp_copy)
+ void *ctx)
{
- uint32_t indx;
- char *xpath, *xpath_buf, *iter_xp;
- int ret, num_left = 0, num_found = 0;
+ /* this is 1k per recursion... */
+ char xpath[MGMTD_MAX_XPATH_LEN];
struct lyd_node *dnode;
struct nb_node *nbnode;
- bool alloc_xp = false;
-
- if (xpaths)
- assert(num_nodes);
-
- if (num_nodes && !*num_nodes)
- return 0;
+ int ret = 0;
- if (num_nodes) {
- num_left = *num_nodes;
- MGMTD_DS_DBG(" -- START: num_left:%d", num_left);
- *num_nodes = 0;
- }
+ assert(mgmt_ds_node_iter_fn);
- MGMTD_DS_DBG(" -- START: Base: %s", base_xpath);
+ MGMTD_DS_DBG(" -- START: base xpath: '%s'", base_xpath);
if (!base_dnode)
+ /*
+ * This function only returns the first node of a possible set
+ * of matches, issuing a warning if there is more than one match
+ */
base_dnode = yang_dnode_get(
ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
: ds_ctx->root.dnode_root,
@@ -349,111 +341,42 @@ static int mgmt_walk_ds_nodes(
if (!base_dnode)
return -1;
- if (mgmt_ds_node_iter_fn) {
- /*
- * In case the caller is interested in getting a copy
- * of the xpath for themselves (by setting
- * 'alloc_xp_copy' to 'true') we make a copy for the
- * caller and pass it. Else we pass the original xpath
- * buffer.
- *
- * NOTE: In such case caller will have to take care of
- * the copy later.
- */
- iter_xp = alloc_xp_copy ? strdup(base_xpath) : base_xpath;
+ MGMTD_DS_DBG(" search base schema: '%s'",
+ lysc_path(base_dnode->schema, LYSC_PATH_LOG, xpath,
+ sizeof(xpath)));
- nbnode = (struct nb_node *)base_dnode->schema->priv;
- (*mgmt_ds_node_iter_fn)(ds_ctx, iter_xp, base_dnode, nbnode,
- ctx);
- }
-
- if (num_nodes) {
- (*num_nodes)++;
- num_left--;
- }
+ nbnode = (struct nb_node *)base_dnode->schema->priv;
+ (*mgmt_ds_node_iter_fn)(ds_ctx, base_xpath, base_dnode, nbnode, ctx);
/*
- * If the base_xpath points to a leaf node, or we don't need to
- * visit any children we can skip the tree walk.
+ * If the base_xpath points to a leaf node we can skip the tree walk.
*/
- if (!childs_as_well || base_dnode->schema->nodetype & LYD_NODE_TERM)
+ if (base_dnode->schema->nodetype & LYD_NODE_TERM)
return 0;
- indx = 0;
+ /*
+ * at this point the xpath matched this container node (or some parent
+ * and we're wildcard descending now) so by walking it's children we
+ * continue to change the meaning of an xpath regex to rather be a
+ * prefix matching path
+ */
+
LY_LIST_FOR (lyd_child(base_dnode), dnode) {
assert(dnode->schema && dnode->schema->priv);
- xpath = NULL;
- if (xpaths) {
- if (!xpaths[*num_nodes]) {
- alloc_xp = true;
- xpaths[*num_nodes] =
- (char *)calloc(1, MGMTD_MAX_XPATH_LEN);
- }
- xpath = lyd_path(dnode, LYD_PATH_STD,
- xpaths[*num_nodes],
- MGMTD_MAX_XPATH_LEN);
- } else {
- alloc_xp = true;
- xpath_buf = (char *)calloc(1, MGMTD_MAX_XPATH_LEN);
- (void) lyd_path(dnode, LYD_PATH_STD, xpath_buf,
- MGMTD_MAX_XPATH_LEN);
- xpath = xpath_buf;
- }
-
- assert(xpath);
- MGMTD_DS_DBG(" -- XPATH: %s", xpath);
+ (void)lyd_path(dnode, LYD_PATH_STD, xpath, sizeof(xpath));
- if (num_nodes)
- num_found = num_left;
+ MGMTD_DS_DBG(" -- Child xpath: %s", xpath);
ret = mgmt_walk_ds_nodes(ds_ctx, xpath, dnode,
- mgmt_ds_node_iter_fn, ctx,
- xpaths ? &xpaths[*num_nodes] : NULL,
- num_nodes ? &num_found : NULL,
- childs_as_well, alloc_xp_copy);
-
- if (num_nodes) {
- num_left -= num_found;
- (*num_nodes) += num_found;
- }
-
- if (alloc_xp)
- free(xpath);
-
+ mgmt_ds_node_iter_fn, ctx);
if (ret != 0)
break;
-
- indx++;
- }
-
-
- if (num_nodes) {
- MGMTD_DS_DBG(" -- END: *num_nodes:%d, num_left:%d", *num_nodes,
- num_left);
}
- return 0;
-}
-
-int mgmt_ds_lookup_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath,
- char *dxpaths[], int *num_nodes,
- bool get_childs_as_well, bool alloc_xp_copy)
-{
- char base_xpath[MGMTD_MAX_XPATH_LEN];
+ MGMTD_DS_DBG(" -- END: base xpath: '%s'", base_xpath);
- if (!ds_ctx || !num_nodes)
- return -1;
-
- if (xpath[0] == '.' && xpath[1] == '/')
- xpath += 2;
-
- strlcpy(base_xpath, xpath, sizeof(base_xpath));
- mgmt_remove_trailing_separator(base_xpath, '/');
-
- return (mgmt_walk_ds_nodes(ds_ctx, base_xpath, NULL, NULL, NULL,
- dxpaths, num_nodes, get_childs_as_well,
- alloc_xp_copy));
+ return ret;
}
struct lyd_node *mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
@@ -534,13 +457,13 @@ int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *dst,
return 0;
}
-int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, const char *base_xpath,
void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx,
- char *xpath,
+ const char *xpath,
struct lyd_node *node,
struct nb_node *nb_node,
void *ctx),
- void *ctx, bool alloc_xp_copy)
+ void *ctx)
{
int ret = 0;
char xpath[MGMTD_MAX_XPATH_LEN];
@@ -550,14 +473,19 @@ int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
if (!ds_ctx)
return -1;
- mgmt_remove_trailing_separator(base_xpath, '/');
-
strlcpy(xpath, base_xpath, sizeof(xpath));
+ mgmt_remove_trailing_separator(xpath, '/');
+
+ /*
+ * mgmt_ds_iter_data is the only user of mgmt_walk_ds_nodes other than
+ * mgmt_walk_ds_nodes itself, so we can modify the API if we would like.
+ * Oper-state should be kept in mind though for the prefix walk
+ */
MGMTD_DS_DBG(" -- START DS walk for DSid: %d", ds_ctx->ds_id);
/* If the base_xpath is empty then crawl the sibblings */
- if (xpath[0] == '\0') {
+ if (xpath[0] == 0) {
base_dnode = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
: ds_ctx->root.dnode_root;
@@ -569,14 +497,12 @@ int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
base_dnode = base_dnode->prev;
LY_LIST_FOR (base_dnode, node) {
- ret = mgmt_walk_ds_nodes(
- ds_ctx, xpath, node, mgmt_ds_node_iter_fn,
- ctx, NULL, NULL, true, alloc_xp_copy);
+ ret = mgmt_walk_ds_nodes(ds_ctx, xpath, node,
+ mgmt_ds_node_iter_fn, ctx);
}
} else
ret = mgmt_walk_ds_nodes(ds_ctx, xpath, base_dnode,
- mgmt_ds_node_iter_fn, ctx, NULL, NULL,
- true, alloc_xp_copy);
+ mgmt_ds_node_iter_fn, ctx);
return ret;
}
diff --git a/mgmtd/mgmt_ds.h b/mgmtd/mgmt_ds.h
index 2a32eb641a..e5c88742dd 100644
--- a/mgmtd/mgmt_ds.h
+++ b/mgmtd/mgmt_ds.h
@@ -218,39 +218,6 @@ extern int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
extern struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx);
/*
- * Lookup YANG data nodes.
- *
- * ds_ctx
- * Datastore context.
- *
- * xpath
- * YANG base xpath.
- *
- * dxpaths
- * Out param - array of YANG data xpaths.
- *
- * num_nodes
- * In-out param - number of YANG data xpaths.
- * Note - Caller should init this to the size of the array
- * provided in dxpaths.
- * On return this will have the actual number of xpaths
- * being returned.
- *
- * get_childs_as_well
- * TRUE if child nodes needs to be fetched as well, FALSE otherwise.
- *
- * alloc_xp_copy
- * TRUE if the caller is interested in getting a copy of the xpath.
- *
- * Returns:
- * 0 on success, -1 on failure.
- */
-extern int mgmt_ds_lookup_data_nodes(struct mgmt_ds_ctx *ds_ctx,
- const char *xpath, char *dxpaths[],
- int *num_nodes, bool get_childs_as_well,
- bool alloc_xp_copy);
-
-/*
* Find YANG data node given a datastore handle YANG xpath.
*/
extern struct lyd_node *
@@ -281,18 +248,15 @@ extern int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx,
* be passed to the iterator function provided in
* 'iter_fn'.
*
- * alloc_xp_copy
- * TRUE if the caller is interested in getting a copy of the xpath.
- *
* Returns:
* 0 on success, -1 on failure.
*/
extern int mgmt_ds_iter_data(
- struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
- void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx, char *xpath,
- struct lyd_node *node,
+ struct mgmt_ds_ctx *ds_ctx, const char *base_xpath,
+ void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, struct lyd_node *node,
struct nb_node *nb_node, void *ctx),
- void *ctx, bool alloc_xp_copy);
+ void *ctx);
/*
* Load config to datastore from a file.
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
index 262741b665..7509d24a6a 100644
--- a/mgmtd/mgmt_fe_adapter.c
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2021 Vmware, Inc.
* Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
*/
#include <zebra.h>
@@ -20,9 +21,9 @@
#include "mgmtd/mgmt_fe_adapter.h"
#define MGMTD_FE_ADAPTER_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_fe, "%s:" fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
- zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+ DEBUGD(&mgmt_debug_fe, "FE-ADAPTER: %s:" fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
+ zlog_err("FE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
#define FOREACH_ADAPTER_IN_LIST(adapter) \
frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter))
@@ -52,8 +53,8 @@ DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage);
#define FOREACH_SESSION_IN_LIST(adapter, session) \
frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session))
-static struct event_loop *mgmt_fe_adapter_tm;
-static struct mgmt_master *mgmt_fe_adapter_mm;
+static struct event_loop *mgmt_loop;
+static struct msg_server mgmt_fe_server = {.fd = -1};
static struct mgmt_fe_adapters_head mgmt_fe_adapters;
@@ -62,11 +63,6 @@ static uint64_t mgmt_fe_next_session_id;
/* Forward declarations */
static void
-mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
- enum mgmt_fe_event event);
-static void
-mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter);
-static void
mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
enum mgmt_session_event event);
@@ -78,15 +74,18 @@ mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
if (!session->ds_write_locked[ds_id]) {
if (mgmt_ds_write_lock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to lock the DS %u for Sessn: %p from %s!",
- ds_id, session, session->adapter->name);
+ "Failed to lock the DS %u for session-id: %" PRIu64
+ " from %s!",
+ ds_id, session->session_id,
+ session->adapter->name);
return -1;
}
session->ds_write_locked[ds_id] = true;
MGMTD_FE_ADAPTER_DBG(
- "Write-Locked the DS %u for Sessn: %p from %s!", ds_id,
- session, session->adapter->name);
+ "Write-Locked the DS %u for session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id, session->adapter->name);
}
return 0;
@@ -100,15 +99,18 @@ mgmt_fe_session_read_lock_ds(Mgmtd__DatastoreId ds_id,
if (!session->ds_read_locked[ds_id]) {
if (mgmt_ds_read_lock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to lock the DS %u for Sessn: %p from %s!",
- ds_id, session, session->adapter->name);
+ "Failed to lock the DS %u for session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id,
+ session->adapter->name);
return -1;
}
session->ds_read_locked[ds_id] = true;
MGMTD_FE_ADAPTER_DBG(
- "Read-Locked the DS %u for Sessn: %p from %s!", ds_id,
- session, session->adapter->name);
+ "Read-Locked the DS %u for session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id, session->adapter->name);
}
return 0;
@@ -124,27 +126,33 @@ static int mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
session->ds_locked_implict[ds_id] = false;
if (mgmt_ds_unlock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
- ds_id, session, session->adapter->name);
+ "Failed to unlock the DS %u taken earlier by session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id,
+ session->adapter->name);
return -1;
}
MGMTD_FE_ADAPTER_DBG(
- "Unlocked DS %u write-locked earlier by Sessn: %p from %s",
- ds_id, session, session->adapter->name);
+ "Unlocked DS %u write-locked earlier by session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id, session->adapter->name);
} else if (unlock_read && session->ds_read_locked[ds_id]) {
session->ds_read_locked[ds_id] = false;
session->ds_locked_implict[ds_id] = false;
if (mgmt_ds_unlock(ds_ctx) != 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
- ds_id, session, session->adapter->name);
+ "Failed to unlock the DS %u taken earlier by session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id,
+ session->adapter->name);
return -1;
}
MGMTD_FE_ADAPTER_DBG(
- "Unlocked DS %u read-locked earlier by Sessn: %p from %s",
- ds_id, session, session->adapter->name);
+ "Unlocked DS %u read-locked earlier by session-id: %" PRIu64
+ " from %s",
+ ds_id, session->session_id, session->adapter->name);
}
return 0;
@@ -163,7 +171,7 @@ mgmt_fe_session_cfg_txn_cleanup(struct mgmt_fe_session_ctx *session)
mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
- ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
if (ds_ctx) {
if (session->ds_locked_implict[ds_id])
mgmt_fe_session_unlock_ds(
@@ -185,7 +193,7 @@ mgmt_fe_session_show_txn_cleanup(struct mgmt_fe_session_ctx *session)
struct mgmt_ds_ctx *ds_ctx;
for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
- ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
if (ds_ctx) {
mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session,
false, true);
@@ -237,15 +245,14 @@ static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **session)
if ((*session)->adapter) {
mgmt_fe_session_cfg_txn_cleanup((*session));
mgmt_fe_session_show_txn_cleanup((*session));
- mgmt_fe_session_unlock_ds(MGMTD_DS_CANDIDATE,
- mgmt_fe_adapter_mm->candidate_ds,
+ mgmt_fe_session_unlock_ds(MGMTD_DS_CANDIDATE, mm->candidate_ds,
*session, true, true);
- mgmt_fe_session_unlock_ds(MGMTD_DS_RUNNING,
- mgmt_fe_adapter_mm->running_ds,
+ mgmt_fe_session_unlock_ds(MGMTD_DS_RUNNING, mm->running_ds,
*session, true, true);
mgmt_fe_sessions_del(&(*session)->adapter->fe_sessions,
*session);
+ assert((*session)->adapter->refcount > 1);
mgmt_fe_adapter_unlock(&(*session)->adapter);
}
@@ -261,10 +268,15 @@ mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_adapter *adapter,
struct mgmt_fe_session_ctx *session;
FOREACH_SESSION_IN_LIST (adapter, session) {
- if (session->client_id == client_id)
+ if (session->client_id == client_id) {
+ MGMTD_FE_ADAPTER_DBG("Found session-id %" PRIu64
+ " using client-id %" PRIu64,
+ session->session_id, client_id);
return session;
+ }
}
-
+ MGMTD_FE_ADAPTER_DBG("Session not found using client-id %" PRIu64,
+ client_id);
return NULL;
}
@@ -284,24 +296,6 @@ static bool mgmt_fe_session_hash_cmp(const void *d1, const void *d2)
return (session1->session_id == session2->session_id);
}
-static void mgmt_fe_session_hash_free(void *data)
-{
- struct mgmt_fe_session_ctx *session = data;
-
- mgmt_fe_cleanup_session(&session);
-}
-
-static void mgmt_fe_session_hash_destroy(void)
-{
- if (mgmt_fe_sessions == NULL)
- return;
-
- hash_clean(mgmt_fe_sessions,
- mgmt_fe_session_hash_free);
- hash_free(mgmt_fe_sessions);
- mgmt_fe_sessions = NULL;
-}
-
static inline struct mgmt_fe_session_ctx *
mgmt_session_id2ctx(uint64_t session_id)
{
@@ -344,54 +338,15 @@ mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
return session;
}
-static void
-mgmt_fe_cleanup_sessions(struct mgmt_fe_client_adapter *adapter)
-{
- struct mgmt_fe_session_ctx *session;
-
- FOREACH_SESSION_IN_LIST (adapter, session)
- mgmt_fe_cleanup_session(&session);
-}
-
-static inline void
-mgmt_fe_adapter_sched_msg_write(struct mgmt_fe_client_adapter *adapter)
-{
- if (!CHECK_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF))
- mgmt_fe_adapter_register_event(adapter,
- MGMTD_FE_CONN_WRITE);
-}
-
-static inline void
-mgmt_fe_adapter_writes_on(struct mgmt_fe_client_adapter *adapter)
-{
- MGMTD_FE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
- UNSET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
- mgmt_fe_adapter_sched_msg_write(adapter);
-}
-
-static inline void
-mgmt_fe_adapter_writes_off(struct mgmt_fe_client_adapter *adapter)
-{
- SET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
- MGMTD_FE_ADAPTER_DBG("Paused writing msgs for '%s'", adapter->name);
-}
-
-static int
-mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
- Mgmtd__FeMessage *fe_msg)
+static int mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+ Mgmtd__FeMessage *fe_msg,
+ bool short_circuit_ok)
{
- if (adapter->conn_fd == -1) {
- MGMTD_FE_ADAPTER_DBG("can't send message on closed connection");
- return -1;
- }
-
- int rv = mgmt_msg_send_msg(
- &adapter->mstate, fe_msg,
+ return msg_conn_send_msg(
+ adapter->conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
mgmtd__fe_message__get_packed_size(fe_msg),
(size_t(*)(void *, void *))mgmtd__fe_message__pack,
- MGMT_DEBUG_FE_CHECK());
- mgmt_fe_adapter_sched_msg_write(adapter);
- return rv;
+ short_circuit_ok);
}
static int
@@ -419,7 +374,7 @@ mgmt_fe_send_session_reply(struct mgmt_fe_client_adapter *adapter,
"Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
adapter->name);
- return mgmt_fe_adapter_send_msg(adapter, &fe_msg);
+ return mgmt_fe_adapter_send_msg(adapter, &fe_msg, true);
}
static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
@@ -449,7 +404,7 @@ static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
"Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s'",
session->adapter->name);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+ return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
@@ -480,7 +435,7 @@ static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
fe_msg.setcfg_reply = &setcfg_reply;
MGMTD_FE_ADAPTER_DBG(
- "Sending SET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+ "Sending SETCFG_REPLY message to MGMTD Frontend client '%s'",
session->adapter->name);
if (implicit_commit) {
@@ -495,7 +450,7 @@ static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+ return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
static int mgmt_fe_send_commitcfg_reply(
@@ -541,7 +496,7 @@ static int mgmt_fe_send_commitcfg_reply(
if (mm->perf_stats_en)
gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+ return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
static int mgmt_fe_send_getcfg_reply(struct mgmt_fe_session_ctx *session,
@@ -579,7 +534,7 @@ static int mgmt_fe_send_getcfg_reply(struct mgmt_fe_session_ctx *session,
mgmt_fe_session_register_event(
session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+ return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
static int mgmt_fe_send_getdata_reply(struct mgmt_fe_session_ctx *session,
@@ -617,7 +572,7 @@ static int mgmt_fe_send_getdata_reply(struct mgmt_fe_session_ctx *session,
mgmt_fe_session_register_event(
session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
- return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+ return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg, false);
}
static void mgmt_fe_session_cfg_txn_clnup(struct event *thread)
@@ -647,14 +602,12 @@ mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
switch (event) {
case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
- event_add_timer_tv(mgmt_fe_adapter_tm,
- mgmt_fe_session_cfg_txn_clnup, session,
- &tv, &session->proc_cfg_txn_clnp);
+ event_add_timer_tv(mgmt_loop, mgmt_fe_session_cfg_txn_clnup,
+ session, &tv, &session->proc_cfg_txn_clnp);
break;
case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
- event_add_timer_tv(mgmt_fe_adapter_tm,
- mgmt_fe_session_show_txn_clnup, session,
- &tv, &session->proc_show_txn_clnp);
+ event_add_timer_tv(mgmt_loop, mgmt_fe_session_show_txn_clnup,
+ session, &tv, &session->proc_show_txn_clnp);
break;
}
}
@@ -665,7 +618,7 @@ mgmt_fe_find_adapter_by_fd(int conn_fd)
struct mgmt_fe_client_adapter *adapter;
FOREACH_ADAPTER_IN_LIST (adapter) {
- if (adapter->conn_fd == conn_fd)
+ if (adapter->conn->fd == conn_fd)
return adapter;
}
@@ -685,21 +638,36 @@ mgmt_fe_find_adapter_by_name(const char *name)
return NULL;
}
-static void mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter)
+static void mgmt_fe_adapter_delete(struct mgmt_fe_client_adapter *adapter)
{
- if (adapter->conn_fd >= 0) {
- close(adapter->conn_fd);
- adapter->conn_fd = -1;
- }
+ struct mgmt_fe_session_ctx *session;
+ MGMTD_FE_ADAPTER_DBG("deleting client adapter '%s'", adapter->name);
/* TODO: notify about client disconnect for appropriate cleanup */
- mgmt_fe_cleanup_sessions(adapter);
+ FOREACH_SESSION_IN_LIST (adapter, session)
+ mgmt_fe_cleanup_session(&session);
mgmt_fe_sessions_fini(&adapter->fe_sessions);
- mgmt_fe_adapters_del(&mgmt_fe_adapters, adapter);
+ assert(adapter->refcount == 1);
mgmt_fe_adapter_unlock(&adapter);
}
+static int mgmt_fe_adapter_notify_disconnect(struct msg_conn *conn)
+{
+ struct mgmt_fe_client_adapter *adapter = conn->user;
+
+ MGMTD_FE_ADAPTER_DBG("notify disconnect for client adapter '%s'",
+ adapter->name);
+
+ mgmt_fe_adapter_delete(adapter);
+
+ return 0;
+}
+
+/*
+ * XXX chopps: get rid of this, we should have deleted sessions when there was a
+ * disconnect
+ */
static void
mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
{
@@ -713,23 +681,13 @@ mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
*/
MGMTD_FE_ADAPTER_DBG(
"Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
- adapter->name, adapter->conn_fd, old->conn_fd);
- mgmt_fe_adapter_disconnect(old);
+ adapter->name, adapter->conn->fd,
+ old->conn->fd);
+ msg_conn_disconnect(old->conn, false);
}
}
}
-static void
-mgmt_fe_cleanup_adapters(void)
-{
- struct mgmt_fe_client_adapter *adapter;
-
- FOREACH_ADAPTER_IN_LIST (adapter) {
- mgmt_fe_cleanup_sessions(adapter);
- mgmt_fe_adapter_unlock(&adapter);
- }
-}
-
static int
mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
Mgmtd__FeLockDsReq *lockds_req)
@@ -737,7 +695,7 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
struct mgmt_ds_ctx *ds_ctx;
/*
- * Next check first if the SET_CONFIG_REQ is for Candidate DS
+ * Next check first if the SETCFG_REQ is for Candidate DS
* or not. Report failure if its not. MGMTD currently only
* supports editing the Candidate DS.
*/
@@ -749,8 +707,7 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
return -1;
}
- ds_ctx =
- mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, lockds_req->ds_id);
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, lockds_req->ds_id);
if (!ds_ctx) {
mgmt_fe_send_lockds_reply(
session, lockds_req->ds_id, lockds_req->req_id,
@@ -789,13 +746,20 @@ mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
true, NULL)
!= 0) {
MGMTD_FE_ADAPTER_DBG(
- "Failed to send LOCK_DS_REPLY for DS %u Sessn: %p from %s",
- lockds_req->ds_id, session, session->adapter->name);
+ "Failed to send LOCK_DS_REPLY for DS %u session-id: %" PRIu64
+ " from %s",
+ lockds_req->ds_id, session->session_id,
+ session->adapter->name);
}
return 0;
}
+/*
+ * TODO: this function has too many conditionals relating to complex error
+ * conditions. It needs to be simplified and these complex error conditions
+ * probably need to just disconnect the client with a suitably loud log message.
+ */
static int
mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
Mgmtd__FeSetConfigReq *setcfg_req)
@@ -807,7 +771,7 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
gettimeofday(&session->adapter->setcfg_stats.last_start, NULL);
/*
- * Next check first if the SET_CONFIG_REQ is for Candidate DS
+ * Next check first if the SETCFG_REQ is for Candidate DS
* or not. Report failure if its not. MGMTD currently only
* supports editing the Candidate DS.
*/
@@ -822,8 +786,7 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
/*
* Get the DS handle.
*/
- ds_ctx =
- mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, setcfg_req->ds_id);
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, setcfg_req->ds_id);
if (!ds_ctx) {
mgmt_fe_send_setcfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id, false,
@@ -838,8 +801,8 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
* from another session is already in progress.
*/
cfg_session_id = mgmt_config_txn_in_progress();
- if (cfg_session_id != MGMTD_SESSION_ID_NONE
- && cfg_session_id != session->session_id) {
+ if (cfg_session_id != MGMTD_SESSION_ID_NONE) {
+ assert(cfg_session_id != session->session_id);
mgmt_fe_send_setcfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id,
false,
@@ -853,6 +816,10 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
* Try taking write-lock on the requested DS (if not already).
*/
if (!session->ds_write_locked[setcfg_req->ds_id]) {
+ MGMTD_FE_ADAPTER_ERR(
+ "SETCFG_REQ on session-id: %" PRIu64
+ " without obtaining lock",
+ session->session_id);
if (mgmt_fe_session_write_lock_ds(setcfg_req->ds_id,
ds_ctx, session)
!= 0) {
@@ -881,13 +848,14 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
goto mgmt_fe_sess_handle_setcfg_req_failed;
}
- MGMTD_FE_ADAPTER_DBG(
- "Created new Config Txn 0x%llx for session %p",
- (unsigned long long)session->cfg_txn_id, session);
+ MGMTD_FE_ADAPTER_DBG("Created new Config txn-id: %" PRIu64
+ " for session-id %" PRIu64,
+ session->cfg_txn_id, session->session_id);
} else {
- MGMTD_FE_ADAPTER_DBG(
- "Config Txn 0x%llx for session %p already created",
- (unsigned long long)session->cfg_txn_id, session);
+ MGMTD_FE_ADAPTER_ERR("Config txn-id: %" PRIu64
+ " for session-id: %" PRIu64
+ " already created",
+ session->cfg_txn_id, session->session_id);
if (setcfg_req->implicit_commit) {
/*
@@ -905,8 +873,8 @@ mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
dst_ds_ctx = 0;
if (setcfg_req->implicit_commit) {
- dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
- setcfg_req->commit_ds_id);
+ dst_ds_ctx =
+ mgmt_ds_get_ctx_by_id(mm, setcfg_req->commit_ds_id);
if (!dst_ds_ctx) {
mgmt_fe_send_setcfg_reply(
session, setcfg_req->ds_id, setcfg_req->req_id,
@@ -957,8 +925,7 @@ mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
/*
* Get the DS handle.
*/
- ds_ctx =
- mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, getcfg_req->ds_id);
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, getcfg_req->ds_id);
if (!ds_ctx) {
mgmt_fe_send_getcfg_reply(session, getcfg_req->ds_id,
getcfg_req->req_id, false, NULL,
@@ -967,7 +934,7 @@ mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
}
/*
- * Next check first if the SET_CONFIG_REQ is for Candidate DS
+ * Next check first if the GETCFG_REQ is for Candidate DS
* or not. Report failure if its not. MGMTD currently only
* supports editing the Candidate DS.
*/
@@ -1015,13 +982,14 @@ mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
goto mgmt_fe_sess_handle_getcfg_req_failed;
}
- MGMTD_FE_ADAPTER_DBG(
- "Created new Show Txn 0x%llx for session %p",
- (unsigned long long)session->txn_id, session);
+ MGMTD_FE_ADAPTER_DBG("Created new show txn-id: %" PRIu64
+ " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
} else {
- MGMTD_FE_ADAPTER_DBG(
- "Show Txn 0x%llx for session %p already created",
- (unsigned long long)session->txn_id, session);
+ MGMTD_FE_ADAPTER_DBG("Show txn-id: %" PRIu64
+ " for session-id: %" PRIu64
+ " already created",
+ session->txn_id, session->session_id);
}
/*
@@ -1062,8 +1030,7 @@ mgmt_fe_session_handle_getdata_req_msg(struct mgmt_fe_session_ctx *session,
/*
* Get the DS handle.
*/
- ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
- getdata_req->ds_id);
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, getdata_req->ds_id);
if (!ds_ctx) {
mgmt_fe_send_getdata_reply(session, getdata_req->ds_id,
getdata_req->req_id, false, NULL,
@@ -1106,13 +1073,13 @@ mgmt_fe_session_handle_getdata_req_msg(struct mgmt_fe_session_ctx *session,
goto mgmt_fe_sess_handle_getdata_req_failed;
}
- MGMTD_FE_ADAPTER_DBG(
- "Created new Show Txn 0x%llx for session %p",
- (unsigned long long)session->txn_id, session);
+ MGMTD_FE_ADAPTER_DBG("Created new Show Txn %" PRIu64
+ " for session %" PRIu64,
+ session->txn_id, session->session_id);
} else {
- MGMTD_FE_ADAPTER_DBG(
- "Show Txn 0x%llx for session %p already created",
- (unsigned long long)session->txn_id, session);
+ MGMTD_FE_ADAPTER_DBG("Show txn-id: %" PRIu64
+ " for session %" PRIu64 " already created",
+ session->txn_id, session->session_id);
}
/*
@@ -1157,8 +1124,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
/*
* Get the source DS handle.
*/
- src_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
- commcfg_req->src_ds_id);
+ src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, commcfg_req->src_ds_id);
if (!src_ds_ctx) {
mgmt_fe_send_commitcfg_reply(
session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
@@ -1171,8 +1137,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
/*
* Get the destination DS handle.
*/
- dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
- commcfg_req->dst_ds_id);
+ dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, commcfg_req->dst_ds_id);
if (!dst_ds_ctx) {
mgmt_fe_send_commitcfg_reply(
session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
@@ -1183,7 +1148,7 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
}
/*
- * Next check first if the SET_CONFIG_REQ is for Candidate DS
+ * Next check first if the COMMCFG_REQ is for Candidate DS
* or not. Report failure if its not. MGMTD currently only
* supports editing the Candidate DS.
*/
@@ -1211,8 +1176,8 @@ static int mgmt_fe_session_handle_commit_config_req_msg(
"Failed to create a Configuration session!");
return 0;
}
- MGMTD_FE_ADAPTER_DBG("Created txn %" PRIu64
- " for session %" PRIu64
+ MGMTD_FE_ADAPTER_DBG("Created txn-id: %" PRIu64
+ " for session-id %" PRIu64
" for COMMIT-CFG-REQ",
session->cfg_txn_id, session->session_id);
}
@@ -1269,8 +1234,8 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
*/
switch ((int)fe_msg->message_case) {
case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
- MGMTD_FE_ADAPTER_DBG("Got Register Req Msg from '%s'",
- fe_msg->register_req->client_name);
+ MGMTD_FE_ADAPTER_DBG("Got REGISTER_REQ from '%s'",
+ fe_msg->register_req->client_name);
if (strlen(fe_msg->register_req->client_name)) {
strlcpy(adapter->name,
@@ -1284,9 +1249,9 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
&& fe_msg->session_req->id_case
== MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID) {
MGMTD_FE_ADAPTER_DBG(
- "Got Session Create Req Msg for client-id %llu from '%s'",
- (unsigned long long)
- fe_msg->session_req->client_conn_id,
+ "Got SESSION_REQ (create) for client-id %" PRIu64
+ " from '%s'",
+ fe_msg->session_req->client_conn_id,
adapter->name);
session = mgmt_fe_create_session(
@@ -1298,10 +1263,9 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
&& fe_msg->session_req->id_case
== MGMTD__FE_SESSION_REQ__ID_SESSION_ID) {
MGMTD_FE_ADAPTER_DBG(
- "Got Session Destroy Req Msg for session-id %llu from '%s'",
- (unsigned long long)
- fe_msg->session_req->session_id,
- adapter->name);
+ "Got SESSION_REQ (destroy) for session-id %" PRIu64
+ "from '%s'",
+ fe_msg->session_req->session_id, adapter->name);
session = mgmt_session_id2ctx(
fe_msg->session_req->session_id);
@@ -1314,11 +1278,11 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->lockds_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got %sockDS Req Msg for DS:%d for session-id %llx from '%s'",
- fe_msg->lockds_req->lock ? "L" : "Unl",
+ "Got %sLOCKDS_REQ for DS:%d for session-id %" PRIu64
+ " from '%s'",
+ fe_msg->lockds_req->lock ? "" : "UN",
fe_msg->lockds_req->ds_id,
- (unsigned long long)fe_msg->lockds_req->session_id,
- adapter->name);
+ fe_msg->lockds_req->session_id, adapter->name);
mgmt_fe_session_handle_lockds_req_msg(
session, fe_msg->lockds_req);
break;
@@ -1327,12 +1291,12 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
fe_msg->setcfg_req->session_id);
session->adapter->setcfg_stats.set_cfg_count++;
MGMTD_FE_ADAPTER_DBG(
- "Got Set Config Req Msg (%d Xpaths, Implicit:%c) on DS:%d for session-id %llu from '%s'",
+ "Got SETCFG_REQ (%d Xpaths, Implicit:%c) on DS:%d for session-id %" PRIu64
+ " from '%s'",
(int)fe_msg->setcfg_req->n_data,
- fe_msg->setcfg_req->implicit_commit ? 'T':'F',
+ fe_msg->setcfg_req->implicit_commit ? 'T' : 'F',
fe_msg->setcfg_req->ds_id,
- (unsigned long long)fe_msg->setcfg_req->session_id,
- adapter->name);
+ fe_msg->setcfg_req->session_id, adapter->name);
mgmt_fe_session_handle_setcfg_req_msg(
session, fe_msg->setcfg_req);
@@ -1341,12 +1305,12 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->commcfg_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got Commit Config Req Msg for src-DS:%d dst-DS:%d (Abort:%c) on session-id %llu from '%s'",
+ "Got COMMCFG_REQ for src-DS:%d dst-DS:%d (Abort:%c) on session-id %" PRIu64
+ " from '%s'",
fe_msg->commcfg_req->src_ds_id,
fe_msg->commcfg_req->dst_ds_id,
- fe_msg->commcfg_req->abort ? 'T':'F',
- (unsigned long long)fe_msg->commcfg_req->session_id,
- adapter->name);
+ fe_msg->commcfg_req->abort ? 'T' : 'F',
+ fe_msg->commcfg_req->session_id, adapter->name);
mgmt_fe_session_handle_commit_config_req_msg(
session, fe_msg->commcfg_req);
break;
@@ -1354,11 +1318,11 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->getcfg_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got Get-Config Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+ "Got GETCFG_REQ for DS:%d (xpaths: %d) on session-id %" PRIu64
+ " from '%s'",
fe_msg->getcfg_req->ds_id,
(int)fe_msg->getcfg_req->n_data,
- (unsigned long long)fe_msg->getcfg_req->session_id,
- adapter->name);
+ fe_msg->getcfg_req->session_id, adapter->name);
mgmt_fe_session_handle_getcfg_req_msg(
session, fe_msg->getcfg_req);
break;
@@ -1366,16 +1330,19 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
session = mgmt_session_id2ctx(
fe_msg->getdata_req->session_id);
MGMTD_FE_ADAPTER_DBG(
- "Got Get-Data Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+ "Got GETDATA_REQ for DS:%d (xpaths: %d) on session-id %" PRIu64
+ " from '%s'",
fe_msg->getdata_req->ds_id,
(int)fe_msg->getdata_req->n_data,
- (unsigned long long)fe_msg->getdata_req->session_id,
- adapter->name);
+ fe_msg->getdata_req->session_id, adapter->name);
mgmt_fe_session_handle_getdata_req_msg(
session, fe_msg->getdata_req);
break;
case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+ MGMTD_FE_ADAPTER_ERR(
+ "Got unhandled message of type %u from '%s'",
+ fe_msg->message_case, adapter->name);
/*
* TODO: Add handling code in future.
*/
@@ -1404,13 +1371,12 @@ mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
return 0;
}
-static void mgmt_fe_adapter_process_msg(void *user_ctx, uint8_t *data,
- size_t len)
+static void mgmt_fe_adapter_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
{
- struct mgmt_fe_client_adapter *adapter = user_ctx;
- Mgmtd__FeMessage *fe_msg;
+ struct mgmt_fe_client_adapter *adapter = conn->user;
+ Mgmtd__FeMessage *fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
- fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
if (!fe_msg) {
MGMTD_FE_ADAPTER_DBG(
"Failed to decode %zu bytes for adapter: %s", len,
@@ -1424,142 +1390,79 @@ static void mgmt_fe_adapter_process_msg(void *user_ctx, uint8_t *data,
mgmtd__fe_message__free_unpacked(fe_msg, NULL);
}
-static void mgmt_fe_adapter_proc_msgbufs(struct event *thread)
+void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
{
- struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
-
- if (mgmt_msg_procbufs(&adapter->mstate, mgmt_fe_adapter_process_msg,
- adapter, MGMT_DEBUG_FE_CHECK()))
- mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+ adapter->refcount++;
}
-static void mgmt_fe_adapter_read(struct event *thread)
+extern void mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
{
- struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
- enum mgmt_msg_rsched rv;
+ struct mgmt_fe_client_adapter *a = *adapter;
+ assert(a && a->refcount);
- rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd,
- MGMT_DEBUG_FE_CHECK());
- if (rv == MSR_DISCONNECT) {
- mgmt_fe_adapter_disconnect(adapter);
- return;
+ if (!--a->refcount) {
+ mgmt_fe_adapters_del(&mgmt_fe_adapters, a);
+ msg_server_conn_delete(a->conn);
+ XFREE(MTYPE_MGMTD_FE_ADPATER, a);
}
- if (rv == MSR_SCHED_BOTH)
- mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
- mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+ *adapter = NULL;
}
-static void mgmt_fe_adapter_write(struct event *thread)
+/*
+ * Initialize the FE adapter module
+ */
+void mgmt_fe_adapter_init(struct event_loop *tm)
{
- struct mgmt_fe_client_adapter *adapter = EVENT_ARG(thread);
- enum mgmt_msg_wsched rv;
-
- rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd,
- MGMT_DEBUG_FE_CHECK());
- if (rv == MSW_SCHED_STREAM)
- mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_WRITE);
- else if (rv == MSW_DISCONNECT)
- mgmt_fe_adapter_disconnect(adapter);
- else if (rv == MSW_SCHED_WRITES_OFF) {
- mgmt_fe_adapter_writes_off(adapter);
- mgmt_fe_adapter_register_event(adapter,
- MGMTD_FE_CONN_WRITES_ON);
- } else
- assert(rv == MSW_SCHED_NONE);
-}
+ assert(!mgmt_loop);
+ mgmt_loop = tm;
-static void mgmt_fe_adapter_resume_writes(struct event *thread)
-{
- struct mgmt_fe_client_adapter *adapter;
+ mgmt_fe_adapters_init(&mgmt_fe_adapters);
- adapter = (struct mgmt_fe_client_adapter *)EVENT_ARG(thread);
- assert(adapter && adapter->conn_fd != -1);
+ assert(!mgmt_fe_sessions);
+ mgmt_fe_sessions =
+ hash_create(mgmt_fe_session_hash_key, mgmt_fe_session_hash_cmp,
+ "MGMT Frontend Sessions");
- mgmt_fe_adapter_writes_on(adapter);
-}
-
-static void
-mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
- enum mgmt_fe_event event)
-{
- struct timeval tv = {0};
-
- switch (event) {
- case MGMTD_FE_CONN_READ:
- event_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
- adapter, adapter->conn_fd, &adapter->conn_read_ev);
- break;
- case MGMTD_FE_CONN_WRITE:
- event_add_write(mgmt_fe_adapter_tm,
- mgmt_fe_adapter_write, adapter,
- adapter->conn_fd, &adapter->conn_write_ev);
- break;
- case MGMTD_FE_PROC_MSG:
- tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
- event_add_timer_tv(mgmt_fe_adapter_tm,
- mgmt_fe_adapter_proc_msgbufs, adapter,
- &tv, &adapter->proc_msg_ev);
- break;
- case MGMTD_FE_CONN_WRITES_ON:
- event_add_timer_msec(mgmt_fe_adapter_tm,
- mgmt_fe_adapter_resume_writes, adapter,
- MGMTD_FE_MSG_WRITE_DELAY_MSEC,
- &adapter->conn_writes_on);
- break;
- case MGMTD_FE_SERVER:
- assert(!"mgmt_fe_adapter_post_event() called incorrectly");
- break;
+ if (msg_server_init(&mgmt_fe_server, MGMTD_FE_SERVER_PATH, tm,
+ mgmt_fe_create_adapter, "frontend",
+ &mgmt_debug_fe)) {
+ zlog_err("cannot initialize frontend server");
+ exit(1);
}
}
-void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
+static void mgmt_fe_abort_if_session(void *data)
{
- adapter->refcount++;
-}
-
-extern void
-mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
-{
- assert(*adapter && (*adapter)->refcount);
-
- (*adapter)->refcount--;
- if (!(*adapter)->refcount) {
- mgmt_fe_adapters_del(&mgmt_fe_adapters, *adapter);
- EVENT_OFF((*adapter)->conn_read_ev);
- EVENT_OFF((*adapter)->conn_write_ev);
- EVENT_OFF((*adapter)->proc_msg_ev);
- EVENT_OFF((*adapter)->conn_writes_on);
- mgmt_msg_destroy(&(*adapter)->mstate);
- XFREE(MTYPE_MGMTD_FE_ADPATER, *adapter);
- }
+ struct mgmt_fe_session_ctx *session = data;
- *adapter = NULL;
+ MGMTD_FE_ADAPTER_ERR("found orphaned session id %" PRIu64
+ " client id %" PRIu64 " adapter %s",
+ session->session_id, session->client_id,
+ session->adapter ? session->adapter->name
+ : "NULL");
+ abort();
}
-int mgmt_fe_adapter_init(struct event_loop *tm, struct mgmt_master *mm)
+/*
+ * Destroy the FE adapter module
+ */
+void mgmt_fe_adapter_destroy(void)
{
- if (!mgmt_fe_adapter_tm) {
- mgmt_fe_adapter_tm = tm;
- mgmt_fe_adapter_mm = mm;
- mgmt_fe_adapters_init(&mgmt_fe_adapters);
-
- assert(!mgmt_fe_sessions);
- mgmt_fe_sessions = hash_create(mgmt_fe_session_hash_key,
- mgmt_fe_session_hash_cmp,
- "MGMT Frontend Sessions");
- }
+ struct mgmt_fe_client_adapter *adapter;
- return 0;
-}
+ msg_server_cleanup(&mgmt_fe_server);
-void mgmt_fe_adapter_destroy(void)
-{
- mgmt_fe_cleanup_adapters();
- mgmt_fe_session_hash_destroy();
+ /* Deleting the adapters will delete all the sessions */
+ FOREACH_ADAPTER_IN_LIST (adapter)
+ mgmt_fe_adapter_delete(adapter);
+
+ hash_clean_and_free(&mgmt_fe_sessions, mgmt_fe_abort_if_session);
}
-struct mgmt_fe_client_adapter *
-mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
+/*
+ * The server accepted a new connection
+ */
+struct msg_conn *mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
{
struct mgmt_fe_client_adapter *adapter = NULL;
@@ -1567,35 +1470,25 @@ mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
if (!adapter) {
adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER,
sizeof(struct mgmt_fe_client_adapter));
- assert(adapter);
-
- adapter->conn_fd = conn_fd;
- memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
- adapter->conn_fd);
- mgmt_fe_sessions_init(&adapter->fe_sessions);
+ conn_fd);
- mgmt_msg_init(&adapter->mstate, MGMTD_FE_MAX_NUM_MSG_PROC,
- MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
- "FE-adapter");
+ mgmt_fe_sessions_init(&adapter->fe_sessions);
mgmt_fe_adapter_lock(adapter);
-
- mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
mgmt_fe_adapters_add_tail(&mgmt_fe_adapters, adapter);
+ adapter->conn = msg_server_conn_create(
+ mgmt_loop, conn_fd, mgmt_fe_adapter_notify_disconnect,
+ mgmt_fe_adapter_process_msg, MGMTD_FE_MAX_NUM_MSG_PROC,
+ MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
+ adapter, "FE-adapter");
+
adapter->setcfg_stats.min_tm = ULONG_MAX;
adapter->cmt_stats.min_tm = ULONG_MAX;
MGMTD_FE_ADAPTER_DBG("Added new MGMTD Frontend adapter '%s'",
adapter->name);
}
-
- /* Make client socket non-blocking. */
- set_nonblocking(adapter->conn_fd);
- setsockopt_so_sendbuf(adapter->conn_fd,
- MGMTD_SOCKET_FE_SEND_BUF_SIZE);
- setsockopt_so_recvbuf(adapter->conn_fd,
- MGMTD_SOCKET_FE_RECV_BUF_SIZE);
- return adapter;
+ return adapter->conn;
}
struct mgmt_fe_client_adapter *mgmt_fe_get_adapter(const char *name)
@@ -1615,9 +1508,9 @@ int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
if (!session || session->cfg_txn_id != txn_id) {
if (session)
MGMTD_FE_ADAPTER_ERR(
- "Txn_id doesnot match, session txn is 0x%llx, current txn 0x%llx",
- (unsigned long long)session->cfg_txn_id,
- (unsigned long long)txn_id);
+ "txn-id doesn't match, session txn-id is %" PRIu64
+ " current txnid: %" PRIu64,
+ session->cfg_txn_id, txn_id);
return -1;
}
@@ -1831,7 +1724,7 @@ void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
FOREACH_ADAPTER_IN_LIST (adapter) {
vty_out(vty, " Client: \t\t\t\t%s\n", adapter->name);
- vty_out(vty, " Conn-FD: \t\t\t\t%d\n", adapter->conn_fd);
+ vty_out(vty, " Conn-FD: \t\t\t\t%d\n", adapter->conn->fd);
if (detail) {
mgmt_fe_adapter_setcfg_stats_write(vty, adapter);
mgmt_fe_adapter_cmt_stats_write(vty, adapter);
@@ -1839,10 +1732,10 @@ void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
vty_out(vty, " Sessions\n");
FOREACH_SESSION_IN_LIST (adapter, session) {
vty_out(vty, " Session: \t\t\t\t%p\n", session);
- vty_out(vty, " Client-Id: \t\t\t%llu\n",
- (unsigned long long)session->client_id);
- vty_out(vty, " Session-Id: \t\t\t%llx\n",
- (unsigned long long)session->session_id);
+ vty_out(vty, " Client-Id: \t\t\t%" PRIu64 "\n",
+ session->client_id);
+ vty_out(vty, " Session-Id: \t\t\t%" PRIu64 "\n",
+ session->session_id);
vty_out(vty, " DS-Locks:\n");
FOREACH_MGMTD_DS_ID (ds_id) {
if (session->ds_write_locked[ds_id]
@@ -1865,13 +1758,13 @@ void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
vty_out(vty, " Total-Sessions: \t\t\t%d\n",
(int)mgmt_fe_sessions_count(&adapter->fe_sessions));
vty_out(vty, " Msg-Recvd: \t\t\t\t%" PRIu64 "\n",
- adapter->mstate.nrxm);
+ adapter->conn->mstate.nrxm);
vty_out(vty, " Bytes-Recvd: \t\t\t%" PRIu64 "\n",
- adapter->mstate.nrxb);
+ adapter->conn->mstate.nrxb);
vty_out(vty, " Msg-Sent: \t\t\t\t%" PRIu64 "\n",
- adapter->mstate.ntxm);
+ adapter->conn->mstate.ntxm);
vty_out(vty, " Bytes-Sent: \t\t\t%" PRIu64 "\n",
- adapter->mstate.ntxb);
+ adapter->conn->mstate.ntxb);
}
vty_out(vty, " Total: %d\n",
(int)mgmt_fe_adapters_count(&mgmt_fe_adapters));
diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h
index 6f72837031..fef205f36a 100644
--- a/mgmtd/mgmt_fe_adapter.h
+++ b/mgmtd/mgmt_fe_adapter.h
@@ -4,6 +4,7 @@
*
* Copyright (C) 2021 Vmware, Inc.
* Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
*/
#ifndef _FRR_MGMTD_FE_ADAPTER_H_
@@ -54,22 +55,12 @@ PREDECL_LIST(mgmt_fe_sessions);
PREDECL_LIST(mgmt_fe_adapters);
struct mgmt_fe_client_adapter {
- int conn_fd;
- union sockunion conn_su;
- struct event *conn_read_ev;
- struct event *conn_write_ev;
- struct event *conn_writes_on;
- struct event *proc_msg_ev;
- uint32_t flags;
-
+ struct msg_conn *conn;
char name[MGMTD_CLIENT_NAME_MAX_LEN];
/* List of sessions created and being maintained for this client. */
struct mgmt_fe_sessions_head fe_sessions;
- /* IO streams for read and write */
- struct mgmt_msg_state mstate;
-
int refcount;
struct mgmt_commit_stats cmt_stats;
struct mgmt_setcfg_stats setcfg_stats;
@@ -77,12 +68,10 @@ struct mgmt_fe_client_adapter {
struct mgmt_fe_adapters_item list_linkage;
};
-#define MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
-
DECLARE_LIST(mgmt_fe_adapters, struct mgmt_fe_client_adapter, list_linkage);
/* Initialise frontend adapter module */
-extern int mgmt_fe_adapter_init(struct event_loop *tm, struct mgmt_master *cm);
+extern void mgmt_fe_adapter_init(struct event_loop *tm);
/* Destroy frontend adapter module */
extern void mgmt_fe_adapter_destroy(void);
@@ -95,8 +84,8 @@ extern void
mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
/* Create frontend adapter */
-extern struct mgmt_fe_client_adapter *
-mgmt_fe_create_adapter(int conn_fd, union sockunion *su);
+extern struct msg_conn *mgmt_fe_create_adapter(int conn_fd,
+ union sockunion *su);
/* Fetch frontend adapter given a name */
extern struct mgmt_fe_client_adapter *
diff --git a/mgmtd/mgmt_fe_server.c b/mgmtd/mgmt_fe_server.c
deleted file mode 100644
index e737e00352..0000000000
--- a/mgmtd/mgmt_fe_server.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * MGMTD Frontend Server
- *
- * Copyright (C) 2021 Vmware, Inc.
- * Pushpasis Sarkar <spushpasis@vmware.com>
- */
-
-#include <zebra.h>
-#include "network.h"
-#include "libfrr.h"
-#include "mgmtd/mgmt.h"
-#include "mgmtd/mgmt_fe_server.h"
-#include "mgmtd/mgmt_fe_adapter.h"
-
-#define MGMTD_FE_SRVR_DBG(fmt, ...) \
- DEBUGD(&mgmt_debug_fe, "%s:" fmt, __func__, ##__VA_ARGS__)
-#define MGMTD_FE_SRVR_ERR(fmt, ...) \
- zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
-
-static int mgmt_fe_listen_fd = -1;
-static struct event_loop *mgmt_fe_listen_tm;
-static struct event *mgmt_fe_listen_ev;
-static void mgmt_fe_server_register_event(enum mgmt_fe_event event);
-
-static void mgmt_fe_conn_accept(struct event *thread)
-{
- int client_conn_fd;
- union sockunion su;
-
- if (mgmt_fe_listen_fd < 0)
- return;
-
- /* We continue hearing server listen socket. */
- mgmt_fe_server_register_event(MGMTD_FE_SERVER);
-
- memset(&su, 0, sizeof(union sockunion));
-
- /* We can handle IPv4 or IPv6 socket. */
- client_conn_fd = sockunion_accept(mgmt_fe_listen_fd, &su);
- if (client_conn_fd < 0) {
- MGMTD_FE_SRVR_ERR(
- "Failed to accept MGMTD Frontend client connection : %s",
- safe_strerror(errno));
- return;
- }
- set_nonblocking(client_conn_fd);
- set_cloexec(client_conn_fd);
-
- MGMTD_FE_SRVR_DBG("Got a new MGMTD Frontend connection");
-
- mgmt_fe_create_adapter(client_conn_fd, &su);
-}
-
-static void mgmt_fe_server_register_event(enum mgmt_fe_event event)
-{
- if (event == MGMTD_FE_SERVER) {
- event_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
- NULL, mgmt_fe_listen_fd,
- &mgmt_fe_listen_ev);
- assert(mgmt_fe_listen_ev);
- } else {
- assert(!"mgmt_fe_server_post_event() called incorrectly");
- }
-}
-
-static void mgmt_fe_server_start(const char *hostname)
-{
- int ret;
- int sock;
- struct sockaddr_un addr;
- mode_t old_mask;
-
- /* Set umask */
- old_mask = umask(0077);
-
- sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
- if (sock < 0) {
- MGMTD_FE_SRVR_ERR("Failed to create server socket: %s",
- safe_strerror(errno));
- goto mgmt_fe_server_start_failed;
- }
-
- addr.sun_family = AF_UNIX,
- strlcpy(addr.sun_path, MGMTD_FE_SERVER_PATH, sizeof(addr.sun_path));
- unlink(addr.sun_path);
- ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
- if (ret < 0) {
- MGMTD_FE_SRVR_ERR(
- "Failed to bind server socket to '%s'. Err: %s",
- addr.sun_path, safe_strerror(errno));
- goto mgmt_fe_server_start_failed;
- }
-
- ret = listen(sock, MGMTD_FE_MAX_CONN);
- if (ret < 0) {
- MGMTD_FE_SRVR_ERR("Failed to listen on server socket: %s",
- safe_strerror(errno));
- goto mgmt_fe_server_start_failed;
- }
-
- /* Restore umask */
- umask(old_mask);
-
- mgmt_fe_listen_fd = sock;
- mgmt_fe_server_register_event(MGMTD_FE_SERVER);
-
- MGMTD_FE_SRVR_DBG("Started MGMTD Frontend Server!");
- return;
-
-mgmt_fe_server_start_failed:
- if (sock > 0)
- close(sock);
-
- mgmt_fe_listen_fd = -1;
- exit(-1);
-}
-
-int mgmt_fe_server_init(struct event_loop *master)
-{
- if (mgmt_fe_listen_tm) {
- MGMTD_FE_SRVR_DBG("MGMTD Frontend Server already running!");
- return 0;
- }
-
- mgmt_fe_listen_tm = master;
-
- mgmt_fe_server_start("localhost");
-
- return 0;
-}
-
-void mgmt_fe_server_destroy(void)
-{
- if (mgmt_fe_listen_tm) {
- MGMTD_FE_SRVR_DBG("Closing MGMTD Frontend Server!");
-
- if (mgmt_fe_listen_ev) {
- EVENT_OFF(mgmt_fe_listen_ev);
- mgmt_fe_listen_ev = NULL;
- }
-
- if (mgmt_fe_listen_fd >= 0) {
- close(mgmt_fe_listen_fd);
- mgmt_fe_listen_fd = -1;
- }
-
- mgmt_fe_listen_tm = NULL;
- }
-}
diff --git a/mgmtd/mgmt_fe_server.h b/mgmtd/mgmt_fe_server.h
deleted file mode 100644
index 6f676ba4da..0000000000
--- a/mgmtd/mgmt_fe_server.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * MGMTD Frontend Server
- *
- * Copyright (C) 2021 Vmware, Inc.
- * Pushpasis Sarkar <spushpasis@vmware.com>
- */
-
-#ifndef _FRR_MGMTD_FE_SERVER_H_
-#define _FRR_MGMTD_FE_SERVER_H_
-
-#define MGMTD_FE_MAX_CONN 32
-
-/* Initialise frontend server */
-extern int mgmt_fe_server_init(struct event_loop *master);
-
-/* Destroy frontend server */
-extern void mgmt_fe_server_destroy(void);
-
-#endif /* _FRR_MGMTD_FE_SERVER_H_ */
diff --git a/mgmtd/mgmt_history.h b/mgmtd/mgmt_history.h
index d3f7958952..5d9b662694 100644
--- a/mgmtd/mgmt_history.h
+++ b/mgmtd/mgmt_history.h
@@ -74,9 +74,11 @@ mgmt_time_to_string(struct timespec *tv, bool long_fmt, char *buffer, size_t sz)
if (long_fmt) {
n = strftime(buffer, sz, MGMT_LONG_TIME_FMT, &tm);
+ assert(n < sz);
snprintf(&buffer[n], sz - n, ",%09lu", tv->tv_nsec);
} else {
n = strftime(buffer, sz, MGMT_SHORT_TIME_FMT, &tm);
+ assert(n < sz);
snprintf(&buffer[n], sz - n, "%09lu", tv->tv_nsec);
}
diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c
index 08c999260d..39362fa74a 100644
--- a/mgmtd/mgmt_main.c
+++ b/mgmtd/mgmt_main.c
@@ -17,20 +17,12 @@
#include "routing_nb.h"
-char const *const mgmt_daemons[] = {
-#ifdef HAVE_STATICD
- "staticd",
-#endif
-};
-uint mgmt_daemons_count = array_size(mgmt_daemons);
-
/* mgmt options, we use GNU getopt library. */
static const struct option longopts[] = {
{"skip_runas", no_argument, NULL, 'S'},
{"no_zebra", no_argument, NULL, 'Z'},
{"socket_size", required_argument, NULL, 's'},
- {0}
-};
+ {0}};
static void mgmt_exit(int);
static void mgmt_vrf_terminate(void);
@@ -201,8 +193,11 @@ static void mgmt_vrf_terminate(void)
* all individual Backend clients.
*/
static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
- &frr_filter_info, &frr_interface_info, &frr_route_map_info,
- &frr_routing_info, &frr_vrf_info,
+ &frr_filter_info,
+ &frr_interface_info,
+ &frr_route_map_info,
+ &frr_routing_info,
+ &frr_vrf_info,
/*
* YANG module info supported by backend clients get added here.
* NOTE: Always set .ignore_cbs true for to avoid validating
@@ -222,10 +217,14 @@ FRR_DAEMON_INFO(mgmtd, MGMTD, .vty_port = MGMTD_VTY_PORT,
.privs = &mgmt_privs, .yang_modules = mgmt_yang_modules,
.n_yang_modules = array_size(mgmt_yang_modules),
-);
+
+ /* avoid libfrr trying to read our config file for us */
+ .flags = FRR_MANUAL_VTY_START);
#define DEPRECATED_OPTIONS ""
+struct frr_daemon_info *mgmt_daemon_info = &mgmtd_di;
+
/* Main routine of mgmt. Treatment of argument and start mgmt finite
* state machine is handled at here.
*/
@@ -278,6 +277,7 @@ int main(int argc, char **argv)
"%s/zebra.conf", frr_sysconfdir);
mgmtd_di.backup_config_file = backup_config_file;
+ /* this will queue a read configs event */
frr_config_fork();
frr_run(mm->master);
diff --git a/mgmtd/mgmt_memory.c b/mgmtd/mgmt_memory.c
index 920ab9363c..b2a0f0e848 100644
--- a/mgmtd/mgmt_memory.c
+++ b/mgmtd/mgmt_memory.c
@@ -19,14 +19,15 @@
DEFINE_MGROUP(MGMTD, "mgmt");
DEFINE_MTYPE(MGMTD, MGMTD, "instance");
+DEFINE_MTYPE(MGMTD, MGMTD_XPATH, "xpath regex");
DEFINE_MTYPE(MGMTD, MGMTD_BE_ADPATER, "backend adapter");
-DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "Frontend adapter");
-DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "Frontend Client Session");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN, "Trnsction");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN_REQ, "Trnsction Requests");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ, "Trnsction Set-Config Requests");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ, "Trnsction Commit-Config Requests");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ, "Trnsction Get-Data Requests");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY, "Trnsction Get-Data Replies");
-DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "Trnsction Gonfig Batches");
-DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "info for tracking commits");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "frontend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "frontend session");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN, "txn");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_REQ, "txn request");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ, "txn set-config requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ, "txn commit-config requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ, "txn get-data requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY, "txn get-data replies");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "txn config batches");
+DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "commit info");
diff --git a/mgmtd/mgmt_memory.h b/mgmtd/mgmt_memory.h
index 5cfcafc749..06518e3838 100644
--- a/mgmtd/mgmt_memory.h
+++ b/mgmtd/mgmt_memory.h
@@ -13,6 +13,7 @@
DECLARE_MGROUP(MGMTD);
DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_XPATH);
DECLARE_MTYPE(MGMTD_BE_ADPATER);
DECLARE_MTYPE(MGMTD_FE_ADPATER);
DECLARE_MTYPE(MGMTD_FE_SESSION);
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
index ad4a4e31f4..588693b7e3 100644
--- a/mgmtd/mgmt_txn.c
+++ b/mgmtd/mgmt_txn.c
@@ -81,8 +81,7 @@ struct mgmt_txn_be_cfg_batch {
uint64_t batch_id;
enum mgmt_be_client_id be_id;
struct mgmt_be_client_adapter *be_adapter;
- union mgmt_be_xpath_subscr_info
- xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ uint xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
@@ -327,7 +326,8 @@ mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
size_t indx;
struct mgmt_commit_cfg_req *cmtcfg_req;
- MGMTD_TXN_DBG(" Batch: %p, Txn: %p", *cfg_btch, (*cfg_btch)->txn);
+ MGMTD_TXN_DBG(" freeing batch-id: %" PRIu64 " txn-id %" PRIu64,
+ (*cfg_btch)->batch_id, (*cfg_btch)->txn->txn_id);
assert((*cfg_btch)->txn
&& (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
@@ -435,15 +435,15 @@ static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
sizeof(struct mgmt_set_cfg_req));
assert(txn_req->req.set_cfg);
mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
- MGMTD_TXN_DBG(
- "Added a new SETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
- txn_req, txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Added a new SETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 ", session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
break;
case MGMTD_TXN_PROC_COMMITCFG:
txn->commit_cfg_req = txn_req;
- MGMTD_TXN_DBG(
- "Added a new COMMITCFG Req: %p for Txn: %p, Sessn: 0x%llx",
- txn_req, txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Added a new COMMITCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
FOREACH_MGMTD_BE_CLIENT_ID (id) {
mgmt_txn_batches_init(
@@ -463,9 +463,9 @@ static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
sizeof(struct mgmt_get_data_req));
assert(txn_req->req.get_data);
mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
- MGMTD_TXN_DBG(
- "Added a new GETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
- txn_req, txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Added a new GETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
break;
case MGMTD_TXN_PROC_GETDATA:
txn_req->req.get_data =
@@ -473,9 +473,9 @@ static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
sizeof(struct mgmt_get_data_req));
assert(txn_req->req.get_data);
mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
- MGMTD_TXN_DBG(
- "Added a new GETDATA Req: %p for Txn: %p, Sessn: 0x%llx",
- txn_req, txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Added a new GETDATA req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
break;
case MGMTD_TXN_COMMITCFG_TIMEOUT:
case MGMTD_TXN_CLEANUP:
@@ -517,26 +517,27 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
}
}
req_list = &(*txn_req)->txn->set_cfg_reqs;
- MGMTD_TXN_DBG("Deleting SETCFG Req: %p for Txn: %p",
- *txn_req, (*txn_req)->txn);
+ MGMTD_TXN_DBG("Deleting SETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
break;
case MGMTD_TXN_PROC_COMMITCFG:
- MGMTD_TXN_DBG("Deleting COMMITCFG Req: %p for Txn: %p",
- *txn_req, (*txn_req)->txn);
+ MGMTD_TXN_DBG("Deleting COMMITCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
FOREACH_MGMTD_BE_CLIENT_ID (id) {
/*
* Send TXN_DELETE to cleanup state for this
* transaction on backend
*/
- if ((*txn_req)->req.commit_cfg.curr_phase
- >= MGMTD_COMMIT_PHASE_TXN_CREATE
- && (*txn_req)->req.commit_cfg.curr_phase
- < MGMTD_COMMIT_PHASE_TXN_DELETE
- && (*txn_req)
- ->req.commit_cfg.subscr_info
- .xpath_subscr[id]
- .subscribed) {
+ if ((*txn_req)->req.commit_cfg.curr_phase >=
+ MGMTD_COMMIT_PHASE_TXN_CREATE &&
+ (*txn_req)->req.commit_cfg.curr_phase <
+ MGMTD_COMMIT_PHASE_TXN_DELETE &&
+ (*txn_req)
+ ->req.commit_cfg.subscr_info
+ .xpath_subscr[id]) {
adapter = mgmt_be_get_adapter_by_id(id);
if (adapter)
mgmt_txn_send_be_txn_delete(
@@ -561,8 +562,9 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
->req.get_data->xpaths[indx]);
}
req_list = &(*txn_req)->txn->get_cfg_reqs;
- MGMTD_TXN_DBG("Deleting GETCFG Req: %p for Txn: %p",
- *txn_req, (*txn_req)->txn);
+ MGMTD_TXN_DBG("Deleting GETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
if ((*txn_req)->req.get_data->reply)
XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
(*txn_req)->req.get_data->reply);
@@ -577,8 +579,9 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
}
pending_list = &(*txn_req)->txn->pending_get_datas;
req_list = &(*txn_req)->txn->get_data_reqs;
- MGMTD_TXN_DBG("Deleting GETDATA Req: %p for Txn: %p",
- *txn_req, (*txn_req)->txn);
+ MGMTD_TXN_DBG("Deleting GETDATA req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
if ((*txn_req)->req.get_data->reply)
XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
(*txn_req)->req.get_data->reply);
@@ -591,12 +594,16 @@ static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
if ((*txn_req)->pending_be_proc && pending_list) {
mgmt_txn_reqs_del(pending_list, *txn_req);
- MGMTD_TXN_DBG("Removed Req: %p from pending-list (left:%d)",
- *txn_req, (int)mgmt_txn_reqs_count(pending_list));
+ MGMTD_TXN_DBG("Removed req-id: %" PRIu64
+ " from pending-list (left:%zu)",
+ (*txn_req)->req_id,
+ mgmt_txn_reqs_count(pending_list));
} else if (req_list) {
mgmt_txn_reqs_del(req_list, *txn_req);
- MGMTD_TXN_DBG("Removed Req: %p from request-list (left:%d)",
- *txn_req, (int)mgmt_txn_reqs_count(req_list));
+ MGMTD_TXN_DBG("Removed req-id: %" PRIu64
+ " from request-list (left:%zu)",
+ (*txn_req)->req_id,
+ mgmt_txn_reqs_count(req_list));
}
(*txn_req)->pending_be_proc = false;
@@ -622,13 +629,12 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
assert(txn);
cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);
- MGMTD_TXN_DBG(
- "Processing %d SET_CONFIG requests for Txn:%p Session:0x%llx",
- (int)mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn,
- (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Processing %zu SET_CONFIG requests txn-id:%" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn->txn_id,
+ txn->session_id);
FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
- error = false;
assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
ds_ctx = txn_req->req.set_cfg->ds_ctx;
if (!ds_ctx) {
@@ -637,7 +643,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
txn_req->req.set_cfg->ds_id, txn_req->req_id,
MGMTD_INTERNAL_ERROR, "No such datastore!",
txn_req->req.set_cfg->implicit_commit);
- error = true;
goto mgmt_txn_process_set_cfg_done;
}
@@ -649,7 +654,6 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
MGMTD_INTERNAL_ERROR,
"Unable to retrieve DS Config Tree!",
txn_req->req.set_cfg->implicit_commit);
- error = true;
goto mgmt_txn_process_set_cfg_done;
}
@@ -675,9 +679,10 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
txn_req->req.set_cfg->dst_ds_ctx);
if (ret != 0) {
MGMTD_TXN_ERR(
- "Failed to lock the DS %u for txn: %p session 0x%llx, errstr %s!",
- txn_req->req.set_cfg->dst_ds_id, txn,
- (unsigned long long)txn->session_id,
+ "Failed to lock DS %u txn-id: %" PRIu64
+ " session-id: %" PRIu64 " err: %s",
+ txn_req->req.set_cfg->dst_ds_id,
+ txn->txn_id, txn->session_id,
strerror(ret));
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_DS_LOCK_FAILED,
@@ -702,9 +707,9 @@ static void mgmt_txn_process_set_cfg(struct event *thread)
txn_req->req_id, MGMTD_SUCCESS, NULL, false)
!= 0) {
MGMTD_TXN_ERR(
- "Failed to send SET_CONFIG_REPLY for txn %p session 0x%llx",
- txn, (unsigned long long)txn->session_id);
- error = true;
+ "Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
}
mgmt_txn_process_set_cfg_done:
@@ -751,8 +756,9 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
result, error_if_any)
!= 0) {
MGMTD_TXN_ERR(
- "Failed to send COMMIT-CONFIG-REPLY for Txn %p Sessn 0x%llx",
- txn, (unsigned long long)txn->session_id);
+ "Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
}
if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
@@ -763,9 +769,9 @@ static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
error_if_any, true)
!= 0) {
- MGMTD_TXN_ERR(
- "Failed to send SET-CONFIG-REPLY for Txn %p Sessn 0x%llx",
- txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_ERR("Failed to send SET-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
}
if (success) {
@@ -862,8 +868,9 @@ mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
mgmt_txn_batches_del(src_list, cfg_btch);
if (update_commit_phase) {
- MGMTD_TXN_DBG("Move Txn-Id %p Batch-Id %p from '%s' --> '%s'",
- cfg_btch->txn, cfg_btch,
+ MGMTD_TXN_DBG("Move txn-id %" PRIu64 " batch-id: %" PRIu64
+ " from '%s' --> '%s'",
+ cfg_btch->txn->txn_id, cfg_btch->batch_id,
mgmt_commit_phase2str(cfg_btch->comm_phase),
mgmt_txn_commit_phase_str(cfg_btch->txn, false));
cfg_btch->comm_phase = to_phase;
@@ -895,15 +902,15 @@ mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
struct mgmt_txn_batches_head *curr_list, *next_list;
enum mgmt_be_client_id id;
- MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
- mgmt_txn_commit_phase_str(txn, true),
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
+ txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
mgmt_txn_commit_phase_str(txn, false));
/*
* Check if all clients has moved to next phase or not.
*/
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed &&
+ if (cmtcfg_req->subscr_info.xpath_subscr[id] &&
mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
/*
* There's atleast once client who hasn't moved to
@@ -917,9 +924,9 @@ mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
}
}
- MGMTD_TXN_DBG("Move entire Txn-Id %p from '%s' to '%s'", txn,
- mgmt_txn_commit_phase_str(txn, true),
- mgmt_txn_commit_phase_str(txn, false));
+ MGMTD_TXN_DBG("Move entire txn-id: %" PRIu64 " from '%s' to '%s'",
+ txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
/*
* If we are here, it means all the clients has moved to next phase.
@@ -927,9 +934,9 @@ mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
*/
cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
cmtcfg_req->next_phase++;
- MGMTD_TXN_DBG(
- "Move back all config batches for Txn %p from next to current branch",
- txn);
+ MGMTD_TXN_DBG("Move back all config batches for txn-id: %" PRIu64
+ " from next to current branch",
+ txn->txn_id);
FOREACH_MGMTD_BE_CLIENT_ID (id) {
curr_list = &cmtcfg_req->curr_batches[id];
next_list = &cmtcfg_req->next_batches[id];
@@ -954,10 +961,11 @@ mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
- MGMTD_TXN_DBG(
- "Move Txn-Id %p for '%s' Phase(current: '%s' next:'%s')", txn,
- adapter->name, mgmt_txn_commit_phase_str(txn, true),
- mgmt_txn_commit_phase_str(txn, false));
+ MGMTD_TXN_DBG("Move txn-id: %" PRIu64
+ " for '%s' Phase(current: '%s' next:'%s')",
+ txn->txn_id, adapter->name,
+ mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
MGMTD_TXN_DBG(
"Move all config batches for '%s' from current to next list",
@@ -967,9 +975,9 @@ mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
cmtcfg_req->next_phase);
- MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
- mgmt_txn_commit_phase_str(txn, true),
- mgmt_txn_commit_phase_str(txn, false));
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
+ txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
/*
* Check if all clients has moved to next phase or not.
@@ -1010,7 +1018,7 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
(void)mgmt_txn_send_commit_cfg_reply(
txn_req->txn, MGMTD_INTERNAL_ERROR,
"Internal error! Could not get Xpath from Ds node!");
- goto mgmt_txn_create_config_batches_failed;
+ return -1;
}
value = (char *)lyd_get_value(chg->cb.dnode);
@@ -1020,22 +1028,15 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
value ? value : "NIL");
- if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info)
- != 0) {
- snprintf(err_buf, sizeof(err_buf),
- "No backend module found for XPATH: '%s",
- xpath);
- (void)mgmt_txn_send_commit_cfg_reply(
- txn_req->txn, MGMTD_INTERNAL_ERROR, err_buf);
- goto mgmt_txn_create_config_batches_failed;
- }
+ mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info);
xpath_len = strlen(xpath) + 1;
value_len = strlen(value) + 1;
found_validator = false;
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- if (!subscr_info.xpath_subscr[id].validate_config
- && !subscr_info.xpath_subscr[id].notify_config)
+ if (!(subscr_info.xpath_subscr[id] &
+ (MGMT_SUBSCR_VALIDATE_CFG |
+ MGMT_SUBSCR_NOTIFY_CFG)))
continue;
adapter = mgmt_be_get_adapter_by_id(id);
@@ -1076,8 +1077,8 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
&cfg_btch->data[cfg_btch->num_cfg_data]);
cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
&cfg_btch->data[cfg_btch->num_cfg_data];
- cfg_btch->data[cfg_btch->num_cfg_data].xpath = xpath;
- xpath = NULL;
+ cfg_btch->data[cfg_btch->num_cfg_data].xpath =
+ strdup(xpath);
mgmt_yang_data_value_init(
&cfg_btch->value[cfg_btch->num_cfg_data]);
@@ -1089,17 +1090,21 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
.encoded_str_val = value;
value = NULL;
- if (subscr_info.xpath_subscr[id].validate_config)
+ if (subscr_info.xpath_subscr[id] &
+ MGMT_SUBSCR_VALIDATE_CFG)
found_validator = true;
- cmtcfg_req->subscr_info.xpath_subscr[id].subscribed |=
- subscr_info.xpath_subscr[id].subscribed;
- MGMTD_TXN_DBG(
- " -- %s, {V:%d, N:%d}, Batch: %p, Item:%d",
- adapter->name,
- subscr_info.xpath_subscr[id].validate_config,
- subscr_info.xpath_subscr[id].notify_config,
- cfg_btch, (int)cfg_btch->num_cfg_data);
+ cmtcfg_req->subscr_info.xpath_subscr[id] |=
+ subscr_info.xpath_subscr[id];
+ MGMTD_TXN_DBG(" -- %s, {V:%d, N:%d}, batch-id: %" PRIu64
+ " item:%d",
+ adapter->name,
+ (subscr_info.xpath_subscr[id] &
+ MGMT_SUBSCR_VALIDATE_CFG) != 0,
+ (subscr_info.xpath_subscr[id] &
+ MGMT_SUBSCR_NOTIFY_CFG) != 0,
+ cfg_btch->batch_id,
+ (int)cfg_btch->num_cfg_data);
cfg_btch->num_cfg_data++;
num_chgs++;
@@ -1111,6 +1116,8 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
xpath);
MGMTD_TXN_ERR("***** %s", err_buf);
}
+
+ free(xpath);
}
cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
@@ -1118,18 +1125,11 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
(void)mgmt_txn_send_commit_cfg_reply(
txn_req->txn, MGMTD_NO_CFG_CHANGES,
"No changes found to commit!");
- goto mgmt_txn_create_config_batches_failed;
+ return -1;
}
cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
return 0;
-
-mgmt_txn_create_config_batches_failed:
-
- if (xpath)
- free(xpath);
-
- return -1;
}
static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
@@ -1331,10 +1331,9 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed) {
+ if (cmtcfg_req->subscr_info.xpath_subscr[id]) {
adapter = mgmt_be_get_adapter_by_id(id);
- if (mgmt_be_create_txn(adapter, txn->txn_id)
- != 0) {
+ if (mgmt_be_send_txn_req(adapter, txn->txn_id, true)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Could not send TXN_CREATE to backend adapter");
@@ -1358,18 +1357,17 @@ static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
* come back.
*/
- MGMTD_TXN_DBG(
- "Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')", txn,
- (unsigned long long)txn->session_id,
- mgmt_txn_commit_phase_str(txn, true),
- mgmt_txn_commit_phase_str(txn, false));
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
+ " Phase(Current:'%s', Next: '%s')",
+ txn->txn_id, txn->session_id,
+ mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
return 0;
}
-static int
-mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
- struct mgmt_be_client_adapter *adapter)
+static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter)
{
struct mgmt_commit_cfg_req *cmtcfg_req;
struct mgmt_txn_be_cfg_batch *cfg_btch;
@@ -1379,7 +1377,7 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
- assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed);
+ assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id]);
indx = 0;
num_batches =
@@ -1391,16 +1389,17 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
cfg_req.num_reqs = cfg_btch->num_cfg_data;
indx++;
- if (mgmt_be_send_cfg_data_create_req(
- adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
- indx == num_batches ? true : false)
- != 0) {
+ if (mgmt_be_send_cfgdata_req(
+ adapter, txn->txn_id, cfg_btch->batch_id,
+ cfg_req.cfgdata_reqs, cfg_req.num_reqs,
+ indx == num_batches ? true : false)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Internal Error! Could not send config data to backend!");
MGMTD_TXN_ERR(
- "Could not send CFGDATA_CREATE for Txn %p Batch %p to client '%s",
- txn, cfg_btch, adapter->name);
+ "Could not send CFGDATA_CREATE txn-id: %" PRIu64
+ " batch-id: %" PRIu64 " to client '%s",
+ txn->txn_id, cfg_btch->batch_id, adapter->name);
return -1;
}
@@ -1413,7 +1412,7 @@ mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
}
/*
- * This could ne the last Backend Client to send CFGDATA_CREATE_REQ to.
+ * This could be the last Backend Client to send CFGDATA_CREATE_REQ to.
* Try moving the commit to next phase.
*/
mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
@@ -1431,9 +1430,9 @@ mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
- if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed) {
+ if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id]) {
adapter = mgmt_be_get_adapter_by_id(adapter->id);
- (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
+ (void)mgmt_be_send_txn_req(adapter, txn->txn_id, false);
FOREACH_TXN_CFG_BATCH_IN_LIST (
&txn->commit_cfg_req->req.commit_cfg
@@ -1457,9 +1456,8 @@ static void mgmt_txn_cfg_commit_timedout(struct event *thread)
if (!txn->commit_cfg_req)
return;
- MGMTD_TXN_ERR(
- "Backend operations for Config Txn %p has timedout! Aborting commit!!",
- txn);
+ MGMTD_TXN_ERR("Backend timeout txn-id: %" PRIu64 " aborting commit",
+ txn->txn_id);
/*
* Send a COMMIT_CONFIG_REPLY with failure.
@@ -1500,14 +1498,14 @@ static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
}
FOREACH_MGMTD_BE_CLIENT_ID (id) {
- if (cmtcfg_req->subscr_info.xpath_subscr[id].notify_config) {
+ if (cmtcfg_req->subscr_info.xpath_subscr[id] &
+ MGMT_SUBSCR_NOTIFY_CFG) {
adapter = mgmt_be_get_adapter_by_id(id);
if (!adapter)
return -1;
btch_list = &cmtcfg_req->curr_batches[id];
- if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
- != 0) {
+ if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) {
(void)mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Could not send CFG_APPLY_REQ to backend adapter");
@@ -1543,11 +1541,12 @@ static void mgmt_txn_process_commit_cfg(struct event *thread)
txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
assert(txn);
- MGMTD_TXN_DBG(
- "Processing COMMIT_CONFIG for Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')",
- txn, (unsigned long long)txn->session_id,
- mgmt_txn_commit_phase_str(txn, true),
- mgmt_txn_commit_phase_str(txn, false));
+ MGMTD_TXN_DBG("Processing COMMIT_CONFIG for txn-id: %" PRIu64
+ " session-id: %" PRIu64
+ " Phase(Current:'%s', Next: '%s')",
+ txn->txn_id, txn->session_id,
+ mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
assert(txn->commit_cfg_req);
cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
@@ -1575,13 +1574,15 @@ static void mgmt_txn_process_commit_cfg(struct event *thread)
#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
MGMTD_TXN_DBG(
- "Txn:%p Session:0x%llx, trigger sending CFG_VALIDATE_REQ to all backend clients",
- txn, (unsigned long long)txn->session_id);
+ "txn-id: %" PRIu64 " session-id: %" PRIu64
+ " trigger sending CFG_VALIDATE_REQ to all backend clients",
+ txn->txn_id, txn->session_id);
#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
MGMTD_TXN_DBG(
- "Txn:%p Session:0x%llx, trigger sending CFG_APPLY_REQ to all backend clients",
- txn, (unsigned long long)txn->session_id);
+ "txn-id: %" PRIu64 " session-id: %" PRIu64
+ " trigger sending CFG_APPLY_REQ to all backend clients",
+ txn->txn_id, txn->session_id);
#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
break;
case MGMTD_COMMIT_PHASE_APPLY_CFG:
@@ -1613,11 +1614,11 @@ static void mgmt_txn_process_commit_cfg(struct event *thread)
break;
}
- MGMTD_TXN_DBG(
- "Txn:%p Session:0x%llx, Phase updated to (Current:'%s', Next: '%s')",
- txn, (unsigned long long)txn->session_id,
- mgmt_txn_commit_phase_str(txn, true),
- mgmt_txn_commit_phase_str(txn, false));
+ MGMTD_TXN_DBG("txn-id:%" PRIu64 " session-id: %" PRIu64
+ " phase updated to (current:'%s', next: '%s')",
+ txn->txn_id, txn->session_id,
+ mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
}
static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
@@ -1678,9 +1679,8 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
data_reply->next_indx =
(!get_reply->last_batch ? get_req->total_reply : -1);
- MGMTD_TXN_DBG("Sending %d Get-Config/Data replies (next-idx:%lld)",
- (int) data_reply->n_data,
- (long long)data_reply->next_indx);
+ MGMTD_TXN_DBG("Sending %zu Get-Config/Data replies next-index:%" PRId64,
+ data_reply->n_data, data_reply->next_indx);
switch (txn_req->req_event) {
case MGMTD_TXN_PROC_GETCFG:
@@ -1690,10 +1690,10 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
data_reply, NULL)
!= 0) {
MGMTD_TXN_ERR(
- "Failed to send GET-CONFIG-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
- txn_req->txn,
- (unsigned long long)txn_req->txn->session_id,
- (unsigned long long)txn_req->req_id);
+ "Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64 " req-id: %" PRIu64,
+ txn_req->txn->txn_id, txn_req->txn->session_id,
+ txn_req->req_id);
}
break;
case MGMTD_TXN_PROC_GETDATA:
@@ -1703,10 +1703,10 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
data_reply, NULL)
!= 0) {
MGMTD_TXN_ERR(
- "Failed to send GET-DATA-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
- txn_req->txn,
- (unsigned long long)txn_req->txn->session_id,
- (unsigned long long)txn_req->req_id);
+ "Failed to send GET-DATA-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64 " req-id: %" PRIu64,
+ txn_req->txn->txn_id, txn_req->txn->session_id,
+ txn_req->req_id);
}
break;
case MGMTD_TXN_PROC_SETCFG:
@@ -1725,10 +1725,10 @@ static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
}
static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
- char *xpath,
- struct lyd_node *node,
- struct nb_node *nb_node,
- void *ctx)
+ const char *xpath,
+ struct lyd_node *node,
+ struct nb_node *nb_node,
+ void *ctx)
{
struct mgmt_txn_req *txn_req;
struct mgmt_get_data_req *get_req;
@@ -1738,10 +1738,10 @@ static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
txn_req = (struct mgmt_txn_req *)ctx;
if (!txn_req)
- goto mgmtd_ignore_get_cfg_reply_data;
+ return;
if (!(node->schema->nodetype & LYD_NODE_TERM))
- goto mgmtd_ignore_get_cfg_reply_data;
+ return;
assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
|| txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
@@ -1753,7 +1753,7 @@ static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
data_value = &get_reply->reply_value[get_reply->num_reply];
mgmt_yang_data_init(data);
- data->xpath = xpath;
+ data->xpath = strdup(xpath);
mgmt_yang_data_value_init(data_value);
data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
data_value->encoded_str_val = (char *)lyd_get_value(node);
@@ -1766,12 +1766,6 @@ static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
-
- return;
-
-mgmtd_ignore_get_cfg_reply_data:
- if (xpath)
- free(xpath);
}
static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
@@ -1806,10 +1800,14 @@ static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
MGMTD_TXN_DBG("Trying to get all data under '%s'",
get_data->xpaths[indx]);
mgmt_init_get_data_reply(get_reply);
+ /*
+ * mgmt_ds_iter_data works on path prefixes, but the user may
+ * want to also use an xpath regexp we need to add this
+ * functionality.
+ */
if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
mgmt_txn_iter_and_send_get_cfg_reply,
- (void *)txn_req, true)
- == -1) {
+ (void *)txn_req) == -1) {
MGMTD_TXN_DBG("Invalid Xpath '%s",
get_data->xpaths[indx]);
mgmt_fe_send_get_cfg_reply(
@@ -1846,10 +1844,10 @@ static void mgmt_txn_process_get_cfg(struct event *thread)
txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
assert(txn);
- MGMTD_TXN_DBG(
- "Processing %d GET_CONFIG requests for Txn:%p Session:0x%llx",
- (int)mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn,
- (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Processing %zu GET_CONFIG requests txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn->txn_id,
+ txn->session_id);
FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
error = false;
@@ -1867,10 +1865,10 @@ static void mgmt_txn_process_get_cfg(struct event *thread)
if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
MGMTD_TXN_ERR(
- "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
- txn_req->req.get_data->ds_id, txn,
- (unsigned long long)txn->session_id,
- (unsigned long long)txn_req->req_id);
+ "Unable to retrieve config from DS %d txn-id: %" PRIu64
+ " session-id: %" PRIu64 " req-id: %" PRIu64,
+ txn_req->req.get_data->ds_id, txn->txn_id,
+ txn->session_id, txn_req->req_id);
error = true;
}
@@ -1913,10 +1911,10 @@ static void mgmt_txn_process_get_data(struct event *thread)
txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
assert(txn);
- MGMTD_TXN_DBG(
- "Processing %d GET_DATA requests for Txn:%p Session:0x%llx",
- (int)mgmt_txn_reqs_count(&txn->get_data_reqs), txn,
- (unsigned long long)txn->session_id);
+ MGMTD_TXN_DBG("Processing %zu GET_DATA requests txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_reqs_count(&txn->get_data_reqs), txn->txn_id,
+ txn->session_id);
FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
error = false;
@@ -1936,10 +1934,12 @@ static void mgmt_txn_process_get_data(struct event *thread)
if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
!= 0) {
MGMTD_TXN_ERR(
- "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
- txn_req->req.get_data->ds_id, txn,
- (unsigned long long)txn->session_id,
- (unsigned long long)txn_req->req_id);
+ "Unable to retrieve config from DS %d txn-id %" PRIu64
+ " session-id: %" PRIu64
+ " req-id: %" PRIu64,
+ txn_req->req.get_data->ds_id,
+ txn->txn_id, txn->session_id,
+ txn_req->req_id);
error = true;
}
} else {
@@ -2020,7 +2020,6 @@ static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
txn->session_id = session_id;
txn->type = type;
- mgmt_txn_badapters_init(&txn->be_adapters);
mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
mgmt_txn_reqs_init(&txn->set_cfg_reqs);
mgmt_txn_reqs_init(&txn->get_cfg_reqs);
@@ -2033,8 +2032,8 @@ static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
txn->txn_id = mgmt_txn_mm->next_txn_id++;
hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
- MGMTD_TXN_DBG("Added new '%s' MGMTD Transaction '%p'",
- mgmt_txn_type2str(type), txn);
+ MGMTD_TXN_DBG("Added new '%s' txn-id: %" PRIu64,
+ mgmt_txn_type2str(type), txn->txn_id);
if (type == MGMTD_TXN_TYPE_CONFIG)
mgmt_txn_mm->cfg_txn = txn;
@@ -2114,8 +2113,9 @@ static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
int line)
{
txn->refcount++;
- MGMTD_TXN_DBG("%s:%d --> Lock %s Txn %p, Count: %d", file, line,
- mgmt_txn_type2str(txn->type), txn, txn->refcount);
+ MGMTD_TXN_DBG("%s:%d --> Lock %s txn-id: %" PRIu64 " refcnt: %d", file,
+ line, mgmt_txn_type2str(txn->type), txn->txn_id,
+ txn->refcount);
}
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
@@ -2124,9 +2124,9 @@ static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
assert(*txn && (*txn)->refcount);
(*txn)->refcount--;
- MGMTD_TXN_DBG("%s:%d --> Unlock %s Txn %p, Count: %d", file, line,
- mgmt_txn_type2str((*txn)->type), *txn,
- (*txn)->refcount);
+ MGMTD_TXN_DBG("%s:%d --> Unlock %s txn-id: %" PRIu64 " refcnt: %d",
+ file, line, mgmt_txn_type2str((*txn)->type),
+ (*txn)->txn_id, (*txn)->refcount);
if (!(*txn)->refcount) {
if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
if (mgmt_txn_mm->cfg_txn == *txn)
@@ -2138,9 +2138,10 @@ static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
hash_release(mgmt_txn_mm->txn_hash, *txn);
mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
- MGMTD_TXN_DBG("Deleted %s Txn %p for Sessn: 0x%llx",
- mgmt_txn_type2str((*txn)->type), *txn,
- (unsigned long long)(*txn)->session_id);
+ MGMTD_TXN_DBG("Deleted %s txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_type2str((*txn)->type), (*txn)->txn_id,
+ (*txn)->session_id);
XFREE(MTYPE_MGMTD_TXN, *txn);
}
@@ -2252,11 +2253,6 @@ uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
}
-bool mgmt_txn_id_is_valid(uint64_t txn_id)
-{
- return mgmt_txn_id2ctx(txn_id) ? true : false;
-}
-
void mgmt_destroy_txn(uint64_t *txn_id)
{
struct mgmt_txn_ctx *txn;
@@ -2269,17 +2265,6 @@ void mgmt_destroy_txn(uint64_t *txn_id)
*txn_id = MGMTD_TXN_ID_NONE;
}
-enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
-{
- struct mgmt_txn_ctx *txn;
-
- txn = mgmt_txn_id2ctx(txn_id);
- if (!txn)
- return MGMTD_TXN_TYPE_NONE;
-
- return txn->type;
-}
-
int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
Mgmtd__DatastoreId ds_id,
struct mgmt_ds_ctx *ds_ctx,
@@ -2374,9 +2359,9 @@ int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
return -1;
if (txn->commit_cfg_req) {
- MGMTD_TXN_ERR(
- "A commit is already in-progress for Txn %p, session 0x%llx. Cannot start another!",
- txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_ERR("Commit already in-progress txn-id: %" PRIu64
+ " session-id: %" PRIu64 ". Cannot start another",
+ txn->txn_id, txn->session_id);
return -1;
}
@@ -2431,8 +2416,8 @@ int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
return -1;
}
- MGMTD_TXN_DBG("Created initial txn %" PRIu64
- " for BE connection %s",
+ MGMTD_TXN_DBG("Created initial txn-id: %" PRIu64
+ " for BE client '%s'",
txn->txn_id, adapter->name);
/*
* Set the changeset for transaction to commit and trigger the
@@ -2461,15 +2446,16 @@ int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
* has failed.
*/
FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+ /* TODO: update with operational state when that is
+ * completed */
if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
cmtcfg_req = txn->commit_cfg_req
? &txn->commit_cfg_req
->req.commit_cfg
: NULL;
- if (cmtcfg_req
- && cmtcfg_req->subscr_info
- .xpath_subscr[adapter->id]
- .subscribed) {
+ if (cmtcfg_req &&
+ cmtcfg_req->subscr_info
+ .xpath_subscr[adapter->id]) {
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
"Backend daemon disconnected while processing commit!");
@@ -2532,7 +2518,7 @@ int mgmt_txn_notify_be_cfgdata_reply(
{
struct mgmt_txn_ctx *txn;
struct mgmt_txn_be_cfg_batch *cfg_btch;
- struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
txn = mgmt_txn_id2ctx(txn_id);
if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
@@ -2548,8 +2534,9 @@ int mgmt_txn_notify_be_cfgdata_reply(
if (!success) {
MGMTD_TXN_ERR(
- "CFGDATA_CREATE_REQ sent to '%s' failed for Txn %p, Batch %p, Err: %s",
- adapter->name, txn, cfg_btch,
+ "CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
+ " batch-id %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, cfg_btch->batch_id,
error_if_any ? error_if_any : "None");
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
@@ -2559,8 +2546,9 @@ int mgmt_txn_notify_be_cfgdata_reply(
}
MGMTD_TXN_DBG(
- "CFGDATA_CREATE_REQ sent to '%s' was successful for Txn %p, Batch %p, Err: %s",
- adapter->name, txn, cfg_btch,
+ "CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
+ " batch-id %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, cfg_btch->batch_id,
error_if_any ? error_if_any : "None");
mgmt_move_txn_cfg_batch_to_next(
cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
@@ -2591,9 +2579,10 @@ int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
if (!success) {
MGMTD_TXN_ERR(
- "CFGDATA_APPLY_REQ sent to '%s' failed for Txn %p, Batches [0x%llx - 0x%llx], Err: %s",
- adapter->name, txn, (unsigned long long)batch_ids[0],
- (unsigned long long)batch_ids[num_batch_ids - 1],
+ "CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
+ " batch ids %" PRIu64 " - %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, batch_ids[0],
+ batch_ids[num_batch_ids - 1],
error_if_any ? error_if_any : "None");
mgmt_txn_send_commit_cfg_reply(
txn, MGMTD_INTERNAL_ERROR,
@@ -2640,9 +2629,9 @@ int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
return -1;
if (!txn->commit_cfg_req) {
- MGMTD_TXN_ERR(
- "NO commit in-progress for Txn %p, session 0x%llx!",
- txn, (unsigned long long)txn->session_id);
+ MGMTD_TXN_ERR("NO commit in-progress txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
return -1;
}
@@ -2718,11 +2707,10 @@ void mgmt_txn_status_write(struct vty *vty)
vty_out(vty, "MGMTD Transactions\n");
FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
- vty_out(vty, " Txn: \t\t\t%p\n", txn);
- vty_out(vty, " Txn-Id: \t\t\t%llu\n",
- (unsigned long long)txn->txn_id);
- vty_out(vty, " Session-Id: \t\t%llu\n",
- (unsigned long long)txn->session_id);
+ vty_out(vty, " Txn: \t\t\t0x%p\n", txn);
+ vty_out(vty, " Txn-Id: \t\t\t%" PRIu64 "\n", txn->txn_id);
+ vty_out(vty, " Session-Id: \t\t%" PRIu64 "\n",
+ txn->session_id);
vty_out(vty, " Type: \t\t\t%s\n",
mgmt_txn_type2str(txn->type));
vty_out(vty, " Ref-Count: \t\t\t%d\n", txn->refcount);
@@ -2771,7 +2759,7 @@ int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
return -1;
}
- MGMTD_TXN_DBG("Created rollback txn %" PRIu64, txn->txn_id);
+ MGMTD_TXN_DBG("Created rollback txn-id: %" PRIu64, txn->txn_id);
/*
* Set the changeset for transaction to commit and trigger the commit
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
index be781ab954..0718397138 100644
--- a/mgmtd/mgmt_txn.h
+++ b/mgmtd/mgmt_txn.h
@@ -101,16 +101,6 @@ extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type);
extern void mgmt_destroy_txn(uint64_t *txn_id);
/*
- * Check if transaction is valid given an ID.
- */
-extern bool mgmt_txn_id_is_valid(uint64_t txn_id);
-
-/*
- * Returns the type of transaction given an ID.
- */
-extern enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id);
-
-/*
* Send set-config request to be processed later in transaction.
*
* txn_id
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
index 7d133d522f..93c5145d71 100644
--- a/mgmtd/mgmt_vty.c
+++ b/mgmtd/mgmt_vty.c
@@ -10,18 +10,19 @@
#include "command.h"
#include "json.h"
+#include "network.h"
#include "northbound_cli.h"
#include "mgmtd/mgmt.h"
-#include "mgmtd/mgmt_be_server.h"
#include "mgmtd/mgmt_be_adapter.h"
-#include "mgmtd/mgmt_fe_server.h"
#include "mgmtd/mgmt_fe_adapter.h"
#include "mgmtd/mgmt_ds.h"
#include "mgmtd/mgmt_history.h"
#include "mgmtd/mgmt_vty_clippy.c"
+extern struct frr_daemon_info *mgmt_daemon_info;
+
DEFPY(show_mgmt_be_adapter,
show_mgmt_be_adapter_cmd,
"show mgmt backend-adapter all",
@@ -453,31 +454,9 @@ DEFPY(debug_mgmt, debug_mgmt_cmd,
return CMD_SUCCESS;
}
-/*
- * Analog of `frr_config_read_in()`, instead of our config file though we loop
- * over all daemons that have transitioned to mgmtd, loading their configs
- */
-static int mgmt_config_pre_hook(struct event_loop *loop)
+static void mgmt_config_read_in(struct event *event)
{
- FILE *confp;
- char *p;
-
- for (uint i = 0; i < mgmt_daemons_count; i++) {
- p = asprintfrr(MTYPE_TMP, "%s/%s.conf", frr_sysconfdir,
- mgmt_daemons[i]);
- confp = fopen(p, "r");
- if (confp == NULL) {
- if (errno != ENOENT)
- zlog_err("%s: couldn't read config file %s: %s",
- __func__, p, safe_strerror(errno));
- } else {
- zlog_info("mgmtd: reading daemon config from %s", p);
- vty_read_file(vty_shared_candidate_config, confp);
- fclose(confp);
- }
- XFREE(MTYPE_TMP, p);
- }
- return 0;
+ mgmt_vty_read_configs();
}
void mgmt_vty_init(void)
@@ -493,7 +472,8 @@ void mgmt_vty_init(void)
static_vty_init();
#endif
- hook_register(frr_config_pre, mgmt_config_pre_hook);
+ event_add_event(mm->master, mgmt_config_read_in, NULL, 0,
+ &mgmt_daemon_info->read_in);
install_node(&debug_node);
diff --git a/mgmtd/subdir.am b/mgmtd/subdir.am
index a93f8f9441..67b45d5bd9 100644
--- a/mgmtd/subdir.am
+++ b/mgmtd/subdir.am
@@ -25,9 +25,7 @@ noinst_LIBRARIES += mgmtd/libmgmtd.a
mgmtd_libmgmtd_a_SOURCES = \
mgmtd/mgmt.c \
mgmtd/mgmt_ds.c \
- mgmtd/mgmt_be_server.c \
mgmtd/mgmt_be_adapter.c \
- mgmtd/mgmt_fe_server.c \
mgmtd/mgmt_fe_adapter.c \
mgmtd/mgmt_history.c \
mgmtd/mgmt_memory.c \
@@ -42,10 +40,8 @@ mgmtdheader_HEADERS = \
noinst_HEADERS += \
mgmtd/mgmt.h \
- mgmtd/mgmt_be_server.h \
mgmtd/mgmt_be_adapter.h \
mgmtd/mgmt_ds.h \
- mgmtd/mgmt_fe_server.h \
mgmtd/mgmt_fe_adapter.h \
mgmtd/mgmt_history.h \
mgmtd/mgmt_memory.h \
@@ -57,12 +53,16 @@ sbin_PROGRAMS += mgmtd/mgmtd
mgmtd_mgmtd_SOURCES = \
mgmtd/mgmt_main.c \
# end
+nodist_mgmtd_mgmtd_SOURCES = \
+ # nothing
mgmtd_mgmtd_CFLAGS = $(AM_CFLAGS) -I ./
mgmtd_mgmtd_LDADD = mgmtd/libmgmtd.a lib/libfrr.la $(LIBCAP) $(LIBM) $(LIBYANG_LIBS) $(UST_LIBS)
mgmtd_mgmtd_LDADD += mgmtd/libmgmt_be_nb.la
if STATICD
-$(mgmtd_mgmtd_OBJECTS): yang/frr-staticd.yang.c
-CLEANFILES += yang/frr-staticd.yang.c
+nodist_mgmtd_mgmtd_SOURCES += \
+ yang/frr-staticd.yang.c \
+ yang/frr-bfdd.yang.c \
+ # end
nodist_mgmtd_libmgmt_be_nb_la_SOURCES += staticd/static_vty.c
endif
diff --git a/ospf6d/ospf6_gr.c b/ospf6d/ospf6_gr.c
index 3d5d4d259f..ecaaa038ab 100644
--- a/ospf6d/ospf6_gr.c
+++ b/ospf6d/ospf6_gr.c
@@ -239,7 +239,8 @@ static void ospf6_gr_restart_exit(struct ospf6 *ospf6, const char *reason)
/* Enter the Graceful Restart mode. */
void ospf6_gr_restart_enter(struct ospf6 *ospf6,
- enum ospf6_gr_restart_reason reason, int timestamp)
+ enum ospf6_gr_restart_reason reason,
+ time_t timestamp)
{
unsigned long remaining_time;
diff --git a/ospf6d/ospf6_gr.h b/ospf6d/ospf6_gr.h
index e6566a6098..84ef3aeb8a 100644
--- a/ospf6d/ospf6_gr.h
+++ b/ospf6d/ospf6_gr.h
@@ -158,7 +158,7 @@ extern int config_write_ospf6_debug_gr_helper(struct vty *vty);
extern void ospf6_gr_iface_send_grace_lsa(struct event *thread);
extern void ospf6_gr_restart_enter(struct ospf6 *ospf6,
enum ospf6_gr_restart_reason reason,
- int timestamp);
+ time_t timestamp);
extern void ospf6_gr_check_lsdb_consistency(struct ospf6 *ospf,
struct ospf6_area *area);
extern void ospf6_gr_nvm_read(struct ospf6 *ospf);
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index ea059c4be6..0fb3d29e25 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -516,7 +516,6 @@ static int ospf6_interface_state_change(uint8_t next_state,
OSPF6_NETWORK_LSA_EXECUTE(oi);
OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi);
OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(oi->area);
- OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi);
} else if (prev_state == OSPF6_INTERFACE_DR
|| next_state == OSPF6_INTERFACE_DR) {
OSPF6_NETWORK_LSA_SCHEDULE(oi);
diff --git a/ospfd/ospf_gr.c b/ospfd/ospf_gr.c
index 6999b3f623..2a346f2388 100644
--- a/ospfd/ospf_gr.c
+++ b/ospfd/ospf_gr.c
@@ -282,7 +282,7 @@ static void ospf_gr_restart_exit(struct ospf *ospf, const char *reason)
/* Enter the Graceful Restart mode. */
void ospf_gr_restart_enter(struct ospf *ospf,
- enum ospf_gr_restart_reason reason, int timestamp)
+ enum ospf_gr_restart_reason reason, time_t timestamp)
{
unsigned long remaining_time;
diff --git a/ospfd/ospf_gr.h b/ospfd/ospf_gr.h
index 750d77381d..22f9e1ef22 100644
--- a/ospfd/ospf_gr.h
+++ b/ospfd/ospf_gr.h
@@ -169,7 +169,7 @@ extern void ospf_gr_helper_set_supported_planned_only_restart(struct ospf *ospf,
extern void ospf_gr_iface_send_grace_lsa(struct event *thread);
extern void ospf_gr_restart_enter(struct ospf *ospf,
enum ospf_gr_restart_reason reason,
- int timestamp);
+ time_t timestamp);
extern void ospf_gr_check_lsdb_consistency(struct ospf *ospf,
struct ospf_area *area);
extern void ospf_gr_check_adjs(struct ospf *ospf);
diff --git a/ospfd/ospf_route.c b/ospfd/ospf_route.c
index 75868056ad..cdb1eb0095 100644
--- a/ospfd/ospf_route.c
+++ b/ospfd/ospf_route.c
@@ -684,6 +684,8 @@ void ospf_intra_add_stub(struct route_table *rt, struct router_lsa_link *link,
__func__);
}
}
+ if (rn->info)
+ ospf_route_free(rn->info);
rn->info = or ;
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index 75df09ec35..5b905a9536 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -2747,7 +2747,7 @@ void pim_show_interfaces_single(struct pim_instance *pim, struct vty *vty,
}
}
- if (!found_ifname)
+ if (!found_ifname && !json)
vty_out(vty, "%% No such interface\n");
}
@@ -3200,7 +3200,7 @@ void pim_show_neighbors_single(struct pim_instance *pim, struct vty *vty,
}
}
- if (!found_neighbor)
+ if (!found_neighbor && !json)
vty_out(vty, "%% No such interface or neighbor\n");
}
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index b1beb45630..f26fd818b5 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -1762,3 +1762,61 @@ void pim_iface_init(void)
if_zapi_callbacks(pim_ifp_create, pim_ifp_up, pim_ifp_down,
pim_ifp_destroy);
}
+
+static void pim_if_membership_clear(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+ assert(pim_ifp);
+
+ if (pim_ifp->pim_enable && pim_ifp->gm_enable)
+ return;
+
+ pim_ifchannel_membership_clear(ifp);
+}
+
+void pim_pim_interface_delete(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ return;
+
+ pim_ifp->pim_enable = false;
+
+ pim_if_membership_clear(ifp);
+
+ /*
+ * pim_sock_delete() removes all neighbors from
+ * pim_ifp->pim_neighbor_list.
+ */
+ pim_sock_delete(ifp, "pim unconfigured on interface");
+ pim_upstream_nh_if_update(pim_ifp->pim, ifp);
+
+ if (!pim_ifp->gm_enable) {
+ pim_if_addr_del_all(ifp);
+ pim_if_delete(ifp);
+ }
+}
+
+void pim_gm_interface_delete(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ return;
+
+ pim_ifp->gm_enable = false;
+
+ pim_if_membership_clear(ifp);
+
+#if PIM_IPV == 4
+ igmp_sock_delete_all(ifp);
+#else
+ gm_ifp_teardown(ifp);
+#endif
+
+ if (!pim_ifp->pim_enable)
+ pim_if_delete(ifp);
+}
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 973840a753..0312f719d3 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -243,5 +243,7 @@ bool pim_if_is_vrf_device(struct interface *ifp);
int pim_if_ifchannel_count(struct pim_interface *pim_ifp);
void pim_iface_init(void);
+void pim_pim_interface_delete(struct interface *ifp);
+void pim_gm_interface_delete(struct interface *ifp);
#endif /* PIM_IFACE_H */
diff --git a/pimd/pim_igmpv3.c b/pimd/pim_igmpv3.c
index 15078dd1ec..18a9fb7c6c 100644
--- a/pimd/pim_igmpv3.c
+++ b/pimd/pim_igmpv3.c
@@ -319,14 +319,6 @@ void igmp_source_free(struct gm_source *source)
XFREE(MTYPE_PIM_IGMP_GROUP_SOURCE, source);
}
-static void source_channel_oil_detach(struct gm_source *source)
-{
- if (source->source_channel_oil) {
- pim_channel_oil_del(source->source_channel_oil, __func__);
- source->source_channel_oil = NULL;
- }
-}
-
/*
igmp_source_delete: stop forwarding, and delete the source
igmp_source_forward_stop: stop forwarding, but keep the source
@@ -355,6 +347,7 @@ void igmp_source_delete(struct gm_source *source)
source_timer_off(group, source);
igmp_source_forward_stop(source);
+ source->source_channel_oil = NULL;
/* sanity check that forwarding has been disabled */
if (IGMP_SOURCE_TEST_FORWARDING(source->source_flags)) {
@@ -371,8 +364,6 @@ void igmp_source_delete(struct gm_source *source)
/* warning only */
}
- source_channel_oil_detach(source);
-
/*
notice that listnode_delete() can't be moved
into igmp_source_free() because the later is
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 3c4ab1d4cc..30d84710e6 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -45,20 +45,6 @@ MACRO_REQUIRE_SEMICOLON()
#define yang_dnode_get_pimaddr yang_dnode_get_ipv4
#endif /* PIM_IPV != 6 */
-static void pim_if_membership_clear(struct interface *ifp)
-{
- struct pim_interface *pim_ifp;
-
- pim_ifp = ifp->info;
- assert(pim_ifp);
-
- if (pim_ifp->pim_enable && pim_ifp->gm_enable) {
- return;
- }
-
- pim_ifchannel_membership_clear(ifp);
-}
-
/*
* When PIM is disabled on interface, IGMPv3 local membership
* information is not injected into PIM interface state.
@@ -81,15 +67,17 @@ static void pim_if_membership_refresh(struct interface *ifp)
pim_ifp = ifp->info;
assert(pim_ifp);
-#if PIM_IPV == 6
- gm_ifp = pim_ifp->mld;
-#endif
if (!pim_ifp->pim_enable)
return;
if (!pim_ifp->gm_enable)
return;
+#if PIM_IPV == 6
+ gm_ifp = pim_ifp->mld;
+ if (!gm_ifp)
+ return;
+#endif
/*
* First clear off membership from all PIM (S,G) entries on the
* interface
@@ -159,32 +147,6 @@ static int pim_cmd_interface_add(struct interface *ifp)
return 1;
}
-static int pim_cmd_interface_delete(struct interface *ifp)
-{
- struct pim_interface *pim_ifp = ifp->info;
-
- if (!pim_ifp)
- return 1;
-
- pim_ifp->pim_enable = false;
-
- pim_if_membership_clear(ifp);
-
- /*
- * pim_sock_delete() removes all neighbors from
- * pim_ifp->pim_neighbor_list.
- */
- pim_sock_delete(ifp, "pim unconfigured on interface");
- pim_upstream_nh_if_update(pim_ifp->pim, ifp);
-
- if (!pim_ifp->gm_enable) {
- pim_if_addr_del_all(ifp);
- pim_if_delete(ifp);
- }
-
- return 1;
-}
-
static int interface_pim_use_src_cmd_worker(struct interface *ifp,
pim_addr source_addr, char *errmsg, size_t errmsg_len)
{
@@ -276,7 +238,7 @@ static int pim_rp_cmd_worker(struct pim_instance *pim, pim_addr rp_addr,
if (result == PIM_RP_NO_PATH) {
snprintfrr(errmsg, errmsg_len,
"No Path to RP address specified: %pPA", &rp_addr);
- return NB_ERR_INCONSISTENCY;
+ return NB_OK;
}
if (result == PIM_GROUP_OVERLAP) {
@@ -1571,12 +1533,7 @@ int lib_interface_pim_address_family_destroy(struct nb_cb_destroy_args *args)
if (!pim_ifp)
return NB_OK;
- if (!pim_cmd_interface_delete(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "Unable to delete interface information %s",
- ifp->name);
- return NB_ERR_INCONSISTENCY;
- }
+ pim_pim_interface_delete(ifp);
}
return NB_OK;
@@ -1624,11 +1581,7 @@ int lib_interface_pim_address_family_pim_enable_modify(struct nb_cb_modify_args
if (!pim_ifp)
return NB_ERR_INCONSISTENCY;
- if (!pim_cmd_interface_delete(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "Unable to delete interface information");
- return NB_ERR_INCONSISTENCY;
- }
+ pim_pim_interface_delete(ifp);
}
break;
}
@@ -2563,7 +2516,6 @@ int lib_interface_gmp_address_family_create(struct nb_cb_create_args *args)
int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
{
struct interface *ifp;
- struct pim_interface *pim_ifp;
switch (args->event) {
case NB_EV_VALIDATE:
@@ -2572,19 +2524,7 @@ int lib_interface_gmp_address_family_destroy(struct nb_cb_destroy_args *args)
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(args->dnode, NULL, true);
- pim_ifp = ifp->info;
-
- if (!pim_ifp)
- return NB_OK;
-
- pim_ifp->gm_enable = false;
-
- pim_if_membership_clear(ifp);
-
- pim_if_addr_del_all_igmp(ifp);
-
- if (!pim_ifp->pim_enable)
- pim_if_delete(ifp);
+ pim_gm_interface_delete(ifp);
}
return NB_OK;
@@ -2598,7 +2538,6 @@ int lib_interface_gmp_address_family_enable_modify(
{
struct interface *ifp;
bool gm_enable;
- struct pim_interface *pim_ifp;
int mcast_if_count;
const char *ifp_name;
const struct lyd_node *if_dnode;
@@ -2628,25 +2567,8 @@ int lib_interface_gmp_address_family_enable_modify(
if (gm_enable)
return pim_cmd_gm_start(ifp);
- else {
- pim_ifp = ifp->info;
-
- if (!pim_ifp)
- return NB_ERR_INCONSISTENCY;
-
- pim_ifp->gm_enable = false;
-
- pim_if_membership_clear(ifp);
-
-#if PIM_IPV == 4
- pim_if_addr_del_all_igmp(ifp);
-#else
- gm_ifp_teardown(ifp);
-#endif
-
- if (!pim_ifp->pim_enable)
- pim_if_delete(ifp);
- }
+ else
+ pim_gm_interface_delete(ifp);
}
return NB_OK;
}
diff --git a/pimd/pim_tib.c b/pimd/pim_tib.c
index 6ffea868d8..4081786c1e 100644
--- a/pimd/pim_tib.c
+++ b/pimd/pim_tib.c
@@ -163,4 +163,6 @@ void tib_sg_gm_prune(struct pim_instance *pim, pim_sgaddr sg,
per-interface (S,G) state.
*/
pim_ifchannel_local_membership_del(oif, &sg);
+
+ pim_channel_oil_del(*oilp, __func__);
}
diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in
index 4dec84b8fb..656df20cce 100644
--- a/redhat/frr.spec.in
+++ b/redhat/frr.spec.in
@@ -799,9 +799,33 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons
%changelog
-* Tue Feb 07 2023 Martin Winter <mwinter@opensourcerouting.org> - %{version}
-
-* Tue Feb 07 2023 Donatas Abraitis <donatas@opensourcerouting.org> - 8.5
+* Tue Jun 06 2023 Martin Winter <mwinter@opensourcerouting.org> - %{version}
+
+* Tue Jun 06 2023 Jafar Al-Gharaibeh <jafar@atcorp.com> - 9.0
+
+* Fri Mar 10 2023 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.5
+- Major Highlights:
+- Add support for per-VRF SRv6 SID
+- Add BGP labeled-unicast Add-Path functionality
+- Implementation of SNMP BGP4v2-MIB (IPv6 support) for better network management and monitoring
+- Add BGP new command neighbor path-attribute discard
+- Add BGP new command neighbor path-attribute treat-as-withdraw
+- Implement L3 route-target auto/wildcard configuration
+- Implement BGP ACCEPT_OWN Community Attribute (rfc7611)
+- Implement The Accumulated IGP Metric Attribute for BGP (rfc7311)
+- Implement graceful-shutdown command per neighbor
+- Add BGP new command to configure TCP keepalives for a peer bgp tcp-keepalive
+- Traffic control (TC) ZAPI implementation
+- SRv6 uSID (microSID) implementation
+- Start deprecating start-shell, ssh, and telnet commands for security reasons
+- Add VRRPv3 an ability to disable IPv4 pseudo-header checksum
+- BFD integration for static routes
+- Allow protocols to configure BFD sessions with automatic source selection
+- Allow zero-length opaque LSAs for OSPF (rfc5250)
+- Add ISIS new command set-overload-bit on-startup
+- PIMv6 BSM support
+- For a full list of new features and bug fixes, please refer to:
+- https://frrouting.org/release/
* Tue Nov 01 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.4
- New BGP command (neighbor PEER soo) to configure SoO to prevent routing loops and suboptimal routing on dual-homed sites.
diff --git a/ripd/subdir.am b/ripd/subdir.am
index 294a05e575..c793a6d685 100644
--- a/ripd/subdir.am
+++ b/ripd/subdir.am
@@ -48,6 +48,7 @@ noinst_HEADERS += \
ripd_ripd_LDADD = lib/libfrr.la $(LIBCAP)
nodist_ripd_ripd_SOURCES = \
yang/frr-ripd.yang.c \
+ yang/frr-bfdd.yang.c \
# end
ripd_ripd_snmp_la_SOURCES = ripd/rip_snmp.c
diff --git a/staticd/static_main.c b/staticd/static_main.c
index 9809d9751a..f6b7847602 100644
--- a/staticd/static_main.c
+++ b/staticd/static_main.c
@@ -53,7 +53,7 @@ struct option longopts[] = { { 0 } };
/* Master of threads. */
struct event_loop *master;
-uintptr_t mgmt_lib_hndl;
+struct mgmt_be_client *mgmt_be_client;
static struct frr_daemon_info staticd_di;
/* SIGHUP handler. */
@@ -71,7 +71,7 @@ static void sigint(void)
/* Disable BFD events to avoid wasting processing. */
bfd_protocol_integration_set_shutdown(true);
- mgmt_be_client_lib_destroy(mgmt_lib_hndl);
+ mgmt_be_client_destroy(mgmt_be_client);
static_vrf_terminate();
@@ -106,51 +106,6 @@ struct frr_signal_t static_signals[] = {
},
};
-static void static_mgmt_be_client_connect(uintptr_t lib_hndl,
- uintptr_t usr_data, bool connected)
-{
- (void)usr_data;
-
- assert(lib_hndl == mgmt_lib_hndl);
-
- zlog_debug("Got %s %s MGMTD Backend Client Server",
- connected ? "connected" : "disconnected",
- connected ? "to" : "from");
-
- if (connected)
- (void)mgmt_be_subscribe_yang_data(mgmt_lib_hndl, NULL, 0);
-}
-
-#if 0
-static void
-static_mgmt_txn_notify(uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- bool destroyed)
-{
- zlog_debug("Got Txn %s Notify from MGMTD server",
- destroyed ? "DESTROY" : "CREATE");
-
- if (!destroyed) {
- /*
- * TODO: Allocate and install a private scratchpad for this
- * transaction if required
- */
- } else {
- /*
- * TODO: Uninstall and deallocate the private scratchpad for
- * this transaction if installed earlier.
- */
- }
-}
-#endif
-
-static struct mgmt_be_client_params mgmt_params = {
- .name = "staticd",
- .conn_retry_intvl_sec = 3,
- .client_connect_notify = static_mgmt_be_client_connect,
- .txn_notify = NULL, /* static_mgmt_txn_notify */
-};
-
static const struct frr_yang_module_info *const staticd_yang_modules[] = {
&frr_filter_info,
&frr_interface_info,
@@ -207,8 +162,7 @@ int main(int argc, char **argv, char **envp)
static_vty_init();
/* Initialize MGMT backend functionalities */
- mgmt_lib_hndl = mgmt_be_client_lib_init(&mgmt_params, master);
- assert(mgmt_lib_hndl);
+ mgmt_be_client = mgmt_be_client_create("staticd", NULL, 0, master);
hook_register(routing_conf_event,
routing_control_plane_protocols_name_validate);
diff --git a/tests/helpers/c/main.c b/tests/helpers/c/main.c
index cd2b5665e2..8af53a2ea4 100644
--- a/tests/helpers/c/main.c
+++ b/tests/helpers/c/main.c
@@ -152,7 +152,7 @@ int main(int argc, char **argv)
}
/* Create VTY socket */
- vty_serv_sock(vty_addr, vty_port, "/tmp/.heavy.sock");
+ vty_serv_start(vty_addr, vty_port, "/tmp/.heavy.sock");
/* Configuration file read*/
if (!config_file)
diff --git a/tests/topotests/analyze.py b/tests/topotests/analyze.py
index 9c9bfda1ed..690786a07c 100755
--- a/tests/topotests/analyze.py
+++ b/tests/topotests/analyze.py
@@ -7,17 +7,61 @@
# Copyright (c) 2021, LabN Consulting, L.L.C.
#
import argparse
-import glob
+import atexit
import logging
import os
import re
import subprocess
import sys
+import tempfile
from collections import OrderedDict
import xmltodict
+def get_range_list(rangestr):
+ result = []
+ for e in rangestr.split(","):
+ e = e.strip()
+ if not e:
+ continue
+ if e.find("-") == -1:
+ result.append(int(e))
+ else:
+ start, end = e.split("-")
+ result.extend(list(range(int(start), int(end) + 1)))
+ return result
+
+
+def dict_range_(dct, rangestr, dokeys):
+ keys = list(dct.keys())
+ if not rangestr or rangestr == "all":
+ for key in keys:
+ if dokeys:
+ yield key
+ else:
+ yield dct[key]
+ return
+
+ dlen = len(keys)
+ for index in get_range_list(rangestr):
+ if index >= dlen:
+ break
+ key = keys[index]
+ if dokeys:
+ yield key
+ else:
+ yield dct[key]
+
+
+def dict_range_keys(dct, rangestr):
+ return dict_range_(dct, rangestr, True)
+
+
+def dict_range_values(dct, rangestr):
+ return dict_range_(dct, rangestr, False)
+
+
def get_summary(results):
ntest = int(results["@tests"])
nfail = int(results["@failures"])
@@ -87,7 +131,7 @@ def get_filtered(tfilters, results, args):
else:
if not fname:
fname = cname.replace(".", "/") + ".py"
- if args.files_only or "@name" not in testcase:
+ if "@name" not in testcase:
tcname = fname
else:
tcname = fname + "::" + testcase["@name"]
@@ -95,9 +139,14 @@ def get_filtered(tfilters, results, args):
return found_files
-def dump_testcase(testcase):
- expand_keys = ("failure", "error", "skipped")
+def search_testcase(testcase, regexp):
+ for key, val in testcase.items():
+ if regexp.search(str(val)):
+ return True
+ return False
+
+def dump_testcase(testcase):
s = ""
for key, val in testcase.items():
if isinstance(val, str) or isinstance(val, float) or isinstance(val, int):
@@ -114,22 +163,49 @@ def dump_testcase(testcase):
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
+ "-a",
+ "--save-xml",
+ action="store_true",
+ help=(
+ "Move [container:]/tmp/topotests/topotests.xml "
+ "to --results value if --results does not exist yet"
+ ),
+ )
+ parser.add_argument(
"-A",
"--save",
action="store_true",
- help="Save /tmp/topotests{,.xml} in --rundir if --rundir does not yet exist",
+ help=(
+ "Move [container:]/tmp/topotests{,.xml} "
+ "to --results value if --results does not exist yet"
+ ),
)
parser.add_argument(
- "-F",
- "--files-only",
+ "-C",
+ "--container",
+ help="specify docker/podman container of the run",
+ )
+ parser.add_argument(
+ "--use-podman",
action="store_true",
- help="print test file names rather than individual full testcase names",
+ help="Use `podman` instead of `docker` for saving container data",
)
parser.add_argument(
"-S",
"--select",
- default="fe",
- help="select results combination of letters: 'e'rrored 'f'ailed 'p'assed 's'kipped.",
+ help=(
+ "select results combination of letters: "
+ "'e'rrored 'f'ailed 'p'assed 's'kipped. "
+ "Default is 'fe', unless --search or --time which default to 'efps'"
+ ),
+ )
+ parser.add_argument(
+ "-R",
+ "--search",
+ help=(
+ "filter results to those which match a regex. "
+ "All test text is search unless restricted by --errmsg or --errtext"
+ ),
)
parser.add_argument(
"-r",
@@ -143,59 +219,147 @@ def main():
action="store_true",
help="enumerate each item (results scoped)",
)
- parser.add_argument("-T", "--test", help="print testcase at enumeration")
+ parser.add_argument(
+ "-T", "--test", help="select testcase at given ordinal from the enumerated list"
+ )
parser.add_argument(
"--errmsg", action="store_true", help="print testcase error message"
)
parser.add_argument(
"--errtext", action="store_true", help="print testcase error text"
)
+ parser.add_argument(
+ "--full", action="store_true", help="print all logging for selected testcases"
+ )
parser.add_argument("--time", action="store_true", help="print testcase run times")
parser.add_argument("-s", "--summary", action="store_true", help="print summary")
parser.add_argument("-v", "--verbose", action="store_true", help="be verbose")
args = parser.parse_args()
- if args.save and args.results and not os.path.exists(args.results):
- if not os.path.exists("/tmp/topotests"):
- logging.critical('No "/tmp/topotests" directory to save')
+ if args.save and args.save_xml:
+ logging.critical("Only one of --save or --save-xml allowed")
+ sys.exit(1)
+
+ scount = bool(args.save) + bool(args.save_xml)
+
+ #
+ # Saving/Archiving results
+ #
+
+ docker_bin = "podman" if args.use_podman else "docker"
+ contid = ""
+ if args.container:
+ # check for container existence
+ contid = args.container
+ try:
+ # p =
+ subprocess.run(
+ f"{docker_bin} inspect {contid}",
+ check=True,
+ shell=True,
+ errors="ignore",
+ capture_output=True,
+ )
+ except subprocess.CalledProcessError:
+ print(f"{docker_bin} container '{contid}' does not exist")
sys.exit(1)
- subprocess.run(["mv", "/tmp/topotests", args.results])
+ # If you need container info someday...
+ # cont_info = json.loads(p.stdout)
+
+ cppath = "/tmp/topotests"
+ if args.save_xml or scount == 0:
+ cppath += "/topotests.xml"
+ if contid:
+ cppath = contid + ":" + cppath
+
+ tresfile = None
+
+ if scount and args.results and not os.path.exists(args.results):
+ if not contid:
+ if not os.path.exists(cppath):
+ print(f"'{cppath}' doesn't exist to save")
+ sys.exit(1)
+ if args.save_xml:
+ subprocess.run(["cp", cppath, args.results])
+ else:
+ subprocess.run(["mv", cppath, args.results])
+ else:
+ try:
+ subprocess.run(
+ f"{docker_bin} cp {cppath} {args.results}",
+ check=True,
+ shell=True,
+ errors="ignore",
+ capture_output=True,
+ )
+ except subprocess.CalledProcessError as error:
+            print(f"Can't {docker_bin} cp '{cppath}': {error}")
+ sys.exit(1)
+
if "SUDO_USER" in os.environ:
subprocess.run(["chown", "-R", os.environ["SUDO_USER"], args.results])
- # # Old location for results
- # if os.path.exists("/tmp/topotests.xml", args.results):
- # subprocess.run(["mv", "/tmp/topotests.xml", args.results])
+ elif not args.results:
+        # User doesn't want to save results, just use them in place
+ if not contid:
+ if not os.path.exists(cppath):
+ print(f"'{cppath}' doesn't exist")
+ sys.exit(1)
+ args.results = cppath
+ else:
+ tresfile, tresname = tempfile.mkstemp(
+ suffix=".xml", prefix="topotests-", text=True
+ )
+ atexit.register(lambda: os.unlink(tresname))
+ os.close(tresfile)
+ try:
+ subprocess.run(
+ f"{docker_bin} cp {cppath} {tresname}",
+ check=True,
+ shell=True,
+ errors="ignore",
+ capture_output=True,
+ )
+ except subprocess.CalledProcessError as error:
+                print(f"Can't {docker_bin} cp '{cppath}': {error}")
+ sys.exit(1)
+ args.results = tresname
- assert (
- args.test is None or not args.files_only
- ), "Can't have both --files and --test"
+ #
+ # Result option validation
+ #
+
+ count = 0
+ if args.errmsg:
+ count += 1
+ if args.errtext:
+ count += 1
+ if args.full:
+ count += 1
+ if count > 1:
+ logging.critical("Only one of --full, --errmsg or --errtext allowed")
+ sys.exit(1)
+
+ if args.time and count:
+ logging.critical("Can't use --full, --errmsg or --errtext with --time")
+ sys.exit(1)
+
+ if args.enumerate and (count or args.time or args.test):
+ logging.critical(
+ "Can't use --enumerate with --errmsg, --errtext, --full, --test or --time"
+ )
+ sys.exit(1)
results = {}
ttfiles = []
- if args.rundir:
- basedir = os.path.realpath(args.rundir)
- os.chdir(basedir)
-
- newfiles = glob.glob("tt-group-*/topotests.xml")
- if newfiles:
- ttfiles.extend(newfiles)
- if os.path.exists("topotests.xml"):
- ttfiles.append("topotests.xml")
- else:
- if args.results:
- if os.path.exists(os.path.join(args.results, "topotests.xml")):
- args.results = os.path.join(args.results, "topotests.xml")
- if not os.path.exists(args.results):
- logging.critical("%s doesn't exist", args.results)
- sys.exit(1)
- ttfiles = [args.results]
- elif os.path.exists("/tmp/topotests/topotests.xml"):
- ttfiles.append("/tmp/topotests/topotests.xml")
- if not ttfiles:
- if os.path.exists("/tmp/topotests.xml"):
- ttfiles.append("/tmp/topotests.xml")
+ if os.path.exists(os.path.join(args.results, "topotests.xml")):
+ args.results = os.path.join(args.results, "topotests.xml")
+ if not os.path.exists(args.results):
+ logging.critical("%s doesn't exist", args.results)
+ sys.exit(1)
+
+ ttfiles = [args.results]
for f in ttfiles:
m = re.match(r"tt-group-(\d+)/topotests.xml", f)
@@ -203,6 +367,14 @@ def main():
with open(f) as xml_file:
results[group] = xmltodict.parse(xml_file.read())["testsuites"]["testsuite"]
+ search_re = re.compile(args.search) if args.search else None
+
+ if args.select is None:
+ if search_re or args.time:
+ args.select = "efsp"
+ else:
+ args.select = "fe"
+
filters = []
if "e" in args.select:
filters.append("error")
@@ -214,43 +386,44 @@ def main():
filters.append(None)
found_files = get_filtered(filters, results, args)
- if found_files:
- if args.test is not None:
- if args.test == "all":
- keys = found_files.keys()
- else:
- keys = [list(found_files.keys())[int(args.test)]]
- for key in keys:
- testcase = found_files[key]
- if args.errtext:
- if "error" in testcase:
- errmsg = testcase["error"]["#text"]
- elif "failure" in testcase:
- errmsg = testcase["failure"]["#text"]
- else:
- errmsg = "none found"
- s = "{}: {}".format(key, errmsg)
- elif args.time:
- text = testcase["@time"]
- s = "{}: {}".format(text, key)
- elif args.errmsg:
- if "error" in testcase:
- errmsg = testcase["error"]["@message"]
- elif "failure" in testcase:
- errmsg = testcase["failure"]["@message"]
- else:
- errmsg = "none found"
- s = "{}: {}".format(key, errmsg)
+
+ if search_re:
+ found_files = {
+ k: v for k, v in found_files.items() if search_testcase(v, search_re)
+ }
+
+ if args.enumerate:
+ # print the selected test names with ordinal
+ print("\n".join(["{} {}".format(i, x) for i, x in enumerate(found_files)]))
+ elif args.test is None and count == 0 and not args.time:
+ # print the selected test names
+ print("\n".join([str(x) for x in found_files]))
+ else:
+ rangestr = args.test if args.test else "all"
+ for key in dict_range_keys(found_files, rangestr):
+ testcase = found_files[key]
+ if args.time:
+ text = testcase["@time"]
+ s = "{}: {}".format(text, key)
+ elif args.errtext:
+ if "error" in testcase:
+ errmsg = testcase["error"]["#text"]
+ elif "failure" in testcase:
+ errmsg = testcase["failure"]["#text"]
else:
- s = dump_testcase(testcase)
- print(s)
- elif filters:
- if args.enumerate:
- print(
- "\n".join(["{} {}".format(i, x) for i, x in enumerate(found_files)])
- )
+ errmsg = "none found"
+ s = "{}: {}".format(key, errmsg)
+ elif args.errmsg:
+ if "error" in testcase:
+ errmsg = testcase["error"]["@message"]
+ elif "failure" in testcase:
+ errmsg = testcase["failure"]["@message"]
+ else:
+ errmsg = "none found"
+ s = "{}: {}".format(key, errmsg)
else:
- print("\n".join(found_files))
+ s = dump_testcase(testcase)
+ print(s)
if args.summary:
print_summary(results, args)
diff --git a/tests/topotests/babel_topo1/r1/babeld.conf b/tests/topotests/babel_topo1/r1/babeld.conf
index 372d2edff1..4058362cc3 100644
--- a/tests/topotests/babel_topo1/r1/babeld.conf
+++ b/tests/topotests/babel_topo1/r1/babeld.conf
@@ -1,4 +1,3 @@
-log file eigrpd.log
interface r1-eth0
babel hello-interval 1000
diff --git a/tests/topotests/babel_topo1/r2/babeld.conf b/tests/topotests/babel_topo1/r2/babeld.conf
index 8a36dda5f8..bae4e59e0b 100644
--- a/tests/topotests/babel_topo1/r2/babeld.conf
+++ b/tests/topotests/babel_topo1/r2/babeld.conf
@@ -1,4 +1,3 @@
-log file eigrpd.log
!
interface r2-eth0
babel hello-interval 1000
diff --git a/tests/topotests/babel_topo1/r3/babeld.conf b/tests/topotests/babel_topo1/r3/babeld.conf
index 1e9dc261f5..bfda3622dd 100644
--- a/tests/topotests/babel_topo1/r3/babeld.conf
+++ b/tests/topotests/babel_topo1/r3/babeld.conf
@@ -1,4 +1,3 @@
-log file eigrpd.log
!
interface r3-eth0
babel hello-interval 1000
diff --git a/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json b/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json
new file mode 100644
index 0000000000..4156c6d0f7
--- /dev/null
+++ b/tests/topotests/bgp_always_compare_med/bgp_always_compare_med_topo1.json
@@ -0,0 +1,152 @@
+{
+ "address_types": ["ipv4", "ipv6"],
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 30,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start":{"ipv4":"192.168.0.0", "v4mask":24, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {}}},
+ "r3": {"dest_link": {"r1": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r1": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }}},
+ "r3": {"dest_link": {"r1": {
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }}}
+ }
+ }
+ }
+ }
+ },
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ },
+ "static_routes":[
+ {
+ "network":"192.168.20.1/32",
+ "next_hop":"Null0"
+ },
+ {
+ "network":"192:168:20::1/128",
+ "next_hop":"Null0"
+ }]
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2": {}}},
+ "r4": {"dest_link": {"r2": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2": {}}},
+ "r4": {"dest_link": {"r2": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r3": {}}},
+ "r4": {"dest_link": {"r3": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r3": {}}},
+ "r4": {"dest_link": {"r3": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r4": {}}},
+ "r3": {"dest_link": {"r4": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r4": {}}},
+ "r3": {"dest_link": {"r4": {}}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py b/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py
new file mode 100644
index 0000000000..fb72f4331d
--- /dev/null
+++ b/tests/topotests/bgp_always_compare_med/test_bgp_always_compare_med_topo1.py
@@ -0,0 +1,1118 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2023 by VMware, Inc. ("VMware")
+#
+#
+################################################################################
+# Following tests are performed to validate BGP always compare MED functionality
+################################################################################
+"""
+1. Verify the BGP always compare MED functionality in between eBGP Peers
+2. Verify the BGP always compare MED functionality in between eBGP Peers with by changing different AD values
+3. Verify the BGP always compare MED functionality in between eBGP Peers by changing MED values in middle routers
+4. Verify that BGP Always compare MED functionality by restarting BGP, Zebra and FRR services and clear BGP and
+ shutdown BGP neighbor
+5. Verify BGP always compare MED functionality by performing shut/noshut on the interfaces in between BGP neighbors
+"""
+
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ create_static_routes,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ step,
+ check_address_types,
+ check_router_status,
+ create_static_routes,
+ create_prefix_lists,
+ create_route_maps,
+ kill_router_daemons,
+ shutdown_bringup_interface,
+ stop_router,
+ start_router,
+ delete_route_maps,
+)
+
+from lib.topolog import logger
+from lib.bgp import verify_bgp_convergence, verify_bgp_rib, create_router_bgp, clear_bgp
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Reading the data from JSON File for topology creation
+topo = None
+
+# Global variables
+ADDR_TYPES = check_address_types()
+NETWORK1_1 = {"ipv4": "192.168.20.1/32", "ipv6": "192:168:20::1/128"}
+NETWORK1_2 = {"ipv4": "192.168.30.1/32", "ipv6": "192:168:30::1/128"}
+NETWORK1_3 = {"ipv4": "192.168.40.1/32", "ipv6": "192:168:40::1/128"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_always_compare_med_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+##########################################################################################################
+#
+# Local API
+#
+##########################################################################################################
+
+
+def initial_configuration(tgen, tc_name):
+ """
+ API to do initial set of configuration
+ """
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+
+ step("Configure static routes in R4")
+ for addr_type in ADDR_TYPES:
+ input_static_r4 = {
+ "r4": {
+ "static_routes": [
+ {
+ "network": NETWORK1_1[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ }
+ ]
+ }
+ }
+
+ logger.info("Configure static routes")
+ result = create_static_routes(tgen, input_static_r4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure redistribute static in R4")
+ input_static_redist_r4 = {
+ "r4": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_static_redist_r4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for addr_type in ADDR_TYPES:
+ # Create prefix list
+ input_dict_23 = {
+ "r2": {
+ "prefix_lists": {
+ addr_type: {
+ "pf_ls_r2_{}".format(addr_type): [
+ {"network": NETWORK1_1[addr_type], "action": "permit"}
+ ]
+ }
+ }
+ },
+ "r3": {
+ "prefix_lists": {
+                addr_type: {
+ "pf_ls_r3_{}".format(addr_type): [
+ {"network": NETWORK1_1[addr_type], "action": "permit"}
+ ]
+ }
+ }
+ },
+ }
+ result = create_prefix_lists(tgen, input_dict_23)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ input_dict_23 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_MED_R2": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r2_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 300},
+ }
+ ]
+ }
+ },
+ "r3": {
+ "route_maps": {
+ "RMAP_MED_R3": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r3_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 200},
+ }
+ ]
+ }
+ },
+ }
+ result = create_route_maps(tgen, input_dict_23)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ input_dict_r2_r3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {
+ "route_maps": [
+ {
+ "name": "RMAP_MED_R2",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "RMAP_MED_R3",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ result = create_router_bgp(tgen, topo, input_dict_r2_r3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+
+##########################################################################################################
+#
+# Testcases
+#
+##########################################################################################################
+
+
+def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_p0(request):
+ """
+ Verify the BGP always compare MED functionality in between eBGP Peers
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'multi-path as-path relax' command at R1.")
+ configure_bgp = {
+ "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'multi-path as-path relax' command, "
+ "its also chooses lowest MED to reach destination."
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {
+ "r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": False}}
+ }
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that 'bgp always-compare-med' command is removed")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Remove 'multi-path as-path relax' command at R1")
+ configure_bgp = {
+ "r1": {
+ "bgp": {
+ "local_as": "100",
+ "bestpath": {"aspath": "multipath-relax", "delete": True},
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify route selection after removing 'multi-path as-path relax' command")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_by_changing_AD_values_p0(
+ request,
+):
+ """
+ Verify the BGP always compare MED functionality in between eBGP Peers with by changing different AD values.
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure AD value=100 at R2 and AD value=200 at R3 towards R1")
+ input_dict_1 = {
+ "r2": {
+ "bgp": {
+ "local_as": 200,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 100, "ibgp": 100, "local": 100}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 100, "ibgp": 100, "local": 100}
+ }
+ },
+ },
+ }
+ },
+ "r3": {
+ "bgp": {
+ "local_as": 300,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "distance": {"ebgp": 200, "ibgp": 200, "local": 200}
+ }
+ },
+ },
+ }
+ },
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that inspite of AD values, always lowest MED value is getting "
+ "selected at destination router R1"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_bw_eBGP_peers_by_changing_MED_values_p1(
+ request,
+):
+ """
+ Verify the BGP always compare MED functionality in between eBGP Peers by changing MED values in middle routers
+ """
+
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ initial_configuration(tgen, tc_name)
+
+ step(
+ "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+ )
+ step(
+ "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+ "commands and verify that best path chosen by lowest MED value"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'multi-path as-path relax' command at R1.")
+ configure_bgp = {
+ "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'multi-path as-path relax' command, "
+ "its also chooses lowest MED to reach destination."
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+ nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure 'bgp always-compare-med' command at R1.")
+ input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+ result = create_router_bgp(tgen, topo, input_dict_r1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+ )
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change the MED value 150 in R2 router.")
+ input_dict = {"r2": {"route_maps": ["RMAP_MED_R2"]}}
+ result = delete_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "route_maps": {
+ "RMAP_MED_R2": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r2_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 150},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that after changing MED, its chooses lowest MED value path")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Change the MED value 100 in R3 router.")
+ input_dict = {"r3": {"route_maps": ["RMAP_MED_R3"]}}
+ result = delete_route_maps(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "RMAP_MED_R3": [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_ls_r3_{}".format(addr_type)
+ }
+ },
+ "set": {"med": 100},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify that after changing MED, its chooses lowest MED value path")
+ for addr_type in ADDR_TYPES:
+ input_static_r1 = {
+ "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+ }
+ nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+ result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_by_restarting_daemons_clear_bgp_shut_neighbors_p1(
+    request,
+):
+    """
+    Verify that BGP always-compare-MED keeps selecting the lowest-MED path
+    across restarts of bgpd, zebra and the whole FRR service, and across
+    `clear bgp` and neighbor shutdown/no-shutdown on R1.
+    """
+
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    # Start every run from the clean, known base configuration.
+    reset_config_on_routers(tgen)
+    initial_configuration(tgen, tc_name)
+
+    step(
+        "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+    )
+    step(
+        "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+        "commands and verify that best path chosen by lowest MED value"
+    )
+
+    result = verify_bgp_convergence(tgen, topo)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    # Baseline: the static prefixes must be present in R1's BGP table and FIB.
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Configure 'multi-path as-path relax' command at R1.")
+    configure_bgp = {
+        "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+    }
+    result = create_router_bgp(tgen, topo, configure_bgp)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that after applying 'multi-path as-path relax' command, "
+        "its also chooses lowest MED to reach destination."
+    )
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        # With multipath-relax both eBGP next hops (via R2 and via R3) are
+        # expected to be installed for the prefix.
+        nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+        nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Configure 'bgp always-compare-med' command at R1.")
+    input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+    result = create_router_bgp(tgen, topo, input_dict_r1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+    )
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        # R3 advertises the lower MED, so it must now be the single best path.
+        nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Restart the BGPd/Zebra/FRR service on R1")
+    for daemon in ["bgpd", "zebra", "frr"]:
+        if daemon == "frr":
+            # "frr" means the whole router: full stop followed by start.
+            stop_router(tgen, "r1")
+            start_router(tgen, "r1")
+        else:
+            # NOTE(review): the individual daemon is killed but never brought
+            # back with start_router_daemons() before the checks below --
+            # confirm whether kill_router_daemons() restarts it, or whether a
+            # start call is missing here.
+            kill_router_daemons(tgen, "r1", daemon)
+
+    step(
+        "Verify after restarting dameons and frr services, its chooses lowest MED value path"
+    )
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Clear bgp on R1")
+    clear_bgp(tgen, None, "r1")
+
+    step("Verify after clearing BGP, its chooses lowest MED value path")
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Perform BGP neighborship shut/no shut")
+    # First pass shuts the R1<->R3 session, second pass re-enables it.
+    for action, keyword in zip([True, False], ["shut", "noshut"]):
+        for addr_type in ADDR_TYPES:
+            input_dict = {
+                "r1": {
+                    "bgp": {
+                        "address_family": {
+                            addr_type: {
+                                "unicast": {
+                                    "neighbor": {
+                                        "r3": {
+                                            "dest_link": {"r1": {"shutdown": action}}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            result = create_router_bgp(tgen, topo, input_dict)
+            assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                tc_name, result
+            )
+
+        step("Verify after {} BGP, its chooses lowest MED value path".format(keyword))
+        if action:
+            # R3 session is down: the only remaining path is via R2.
+            for addr_type in ADDR_TYPES:
+                input_static_r1 = {
+                    "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+                }
+                nh = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+
+                result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+                assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                    tc_name, result
+                )
+
+                result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+                assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                    tc_name, result
+                )
+        else:
+            # R3 session restored: lowest-MED path via R3 wins again.
+            for addr_type in ADDR_TYPES:
+                input_static_r1 = {
+                    "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+                }
+                nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+                result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+                assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                    tc_name, result
+                )
+
+                result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+                assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                    tc_name, result
+                )
+
+    write_test_footer(tc_name)
+
+
+def test_verify_bgp_always_compare_med_functionality_by_shut_noshut_interfaces_bw_bgp_neighbors_p1(
+    request,
+):
+    """
+    Verify BGP always-compare-MED functionality by performing shut/noshut
+    on the interfaces between the BGP neighbors (R2-R4 and R3-R4 links).
+    """
+
+    # test case name
+    tc_name = request.node.name
+    write_test_header(tc_name)
+
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    # Start every run from the clean, known base configuration.
+    reset_config_on_routers(tgen)
+    initial_configuration(tgen, tc_name)
+
+    step(
+        "Configure IPv4 and IPv6, eBGP neighbors between R1,R2 and R3 routers as per base config"
+    )
+    step(
+        "Verify that IPv4 and IPv6 eBGP neighbors are configured in between routers by following "
+        "commands and verify that best path chosen by lowest MED value"
+    )
+
+    result = verify_bgp_convergence(tgen, topo)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    # Baseline: the static prefixes must be present in R1's BGP table and FIB.
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Configure 'multi-path as-path relax' command at R1.")
+    configure_bgp = {
+        "r1": {"bgp": {"local_as": "100", "bestpath": {"aspath": "multipath-relax"}}}
+    }
+    result = create_router_bgp(tgen, topo, configure_bgp)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that after applying 'multi-path as-path relax' command, "
+        "its also chooses lowest MED to reach destination."
+    )
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        # With multipath-relax both eBGP next hops (via R2 and via R3) are
+        # expected to be installed for the prefix.
+        nh1 = topo["routers"]["r2"]["links"]["r1"][addr_type].split("/")[0]
+        nh2 = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=[nh1, nh2])
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    step("Configure 'bgp always-compare-med' command at R1.")
+    input_dict_r1 = {"r1": {"bgp": {"local_as": "100", "bgp_always_compare_med": True}}}
+    result = create_router_bgp(tgen, topo, input_dict_r1)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that after applying 'bgp always-compare-med', its chooses lowest MED value path"
+    )
+    for addr_type in ADDR_TYPES:
+        input_static_r1 = {
+            "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+        }
+        # R3 advertises the lower MED, so it must now be the single best path.
+        nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+        result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+        result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result
+        )
+
+    # shutdown_bringup_interface() takes ifaceaction=False to shut the
+    # interface and True to bring it back up, hence False pairs with "Shut".
+    for action, keyword in zip([False, True], ["Shut", "No Shut"]):
+        step(
+            "{} the interface on the link between R3 & R4 and R2 & R4 routers".format(
+                keyword
+            )
+        )
+        intf2_4 = topo["routers"]["r2"]["links"]["r4"]["interface"]
+        intf3_4 = topo["routers"]["r3"]["links"]["r4"]["interface"]
+        for dut, intf in zip(["r2", "r3"], [intf2_4, intf3_4]):
+            shutdown_bringup_interface(tgen, dut, intf, action)
+
+        for addr_type in ADDR_TYPES:
+            input_static_r1 = {
+                "r1": {"static_routes": [{"network": NETWORK1_1[addr_type]}]}
+            }
+            nh = topo["routers"]["r3"]["links"]["r1"][addr_type].split("/")[0]
+
+            if action:
+                # Links are back up: prefixes must be re-learned with the
+                # lowest-MED next hop via R3.
+                result = verify_bgp_rib(tgen, addr_type, "r1", input_static_r1)
+                assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                    tc_name, result
+                )
+
+                result = verify_rib(tgen, addr_type, "r1", input_static_r1, next_hop=nh)
+                assert result is True, "Testcase {} : Failed \n Error: {}".format(
+                    tc_name, result
+                )
+
+            else:
+                # Links are down: the prefixes must be withdrawn from both the
+                # BGP table and the FIB (expected=False inverts the check).
+                result = verify_bgp_rib(
+                    tgen, addr_type, "r1", input_static_r1, expected=False
+                )
+                assert (
+                    result is not True
+                ), "Testcase {} :Failed \n Routes are still present in BGP table\n Error {}".format(
+                    tc_name, result
+                )
+
+                result = verify_rib(
+                    tgen, addr_type, "r1", input_static_r1, next_hop=nh, expected=False
+                )
+                assert (
+                    result is not True
+                ), "Testcase {} :Failed \n Routes are still present in FIB \n Error {}".format(
+                    tc_name, result
+                )
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    # Allow running this test file directly; "-s" disables output capture.
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
index 8058823baf..75e66566b7 100644
--- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
@@ -26,6 +26,7 @@ from time import sleep
from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
from lib.topolog import logger
+from lib import topotest
from lib.bgp import (
verify_bgp_convergence,
@@ -1559,8 +1560,14 @@ def test_verify_default_originate_with_2way_ecmp_p2(request):
step("Ping R1 configure IPv4 and IPv6 loopback address from R2")
pingaddr = topo["routers"]["r1"]["links"]["lo"]["ipv4"].split("/")[0]
router = tgen.gears["r2"]
- output = router.run("ping -c 4 -w 4 {}".format(pingaddr))
- assert " 0% packet loss" in output, "Ping R1->R2 FAILED"
+
+ def ping_router():
+ output = router.run("ping -c 4 -w 4 {}".format(pingaddr))
+ logger.info(output)
+ if " 0% packet loss" not in output:
+ return False
+
+ _, res = topotest.run_and_expect(ping_router, None, count=10, wait=1)
logger.info("Ping from R1 to R2 ... success")
step("Shuting up the active route")
diff --git a/tests/topotests/bgp_default_originate_timer/__init__.py b/tests/topotests/bgp_default_originate_timer/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/__init__.py
diff --git a/tests/topotests/bgp_default_originate_timer/r1/bgpd.conf b/tests/topotests/bgp_default_originate_timer/r1/bgpd.conf
new file mode 100644
index 0000000000..f2a1c9005a
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r1/bgpd.conf
@@ -0,0 +1,18 @@
+router bgp 65001
+ no bgp ebgp-requires-policy
+ bgp default-originate timer 3600
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+ address-family ipv4
+ neighbor 192.168.1.2 default-originate route-map default
+ exit-address-family
+!
+bgp community-list standard r3 seq 5 permit 65003:1
+!
+route-map default permit 10
+ match community r3
+exit
diff --git a/tests/topotests/bgp_default_originate_timer/r1/zebra.conf b/tests/topotests/bgp_default_originate_timer/r1/zebra.conf
new file mode 100644
index 0000000000..3692361fb3
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r1/zebra.conf
@@ -0,0 +1,7 @@
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+!
+interface r1-eth1
+ ip address 192.168.2.1/24
+!
diff --git a/tests/topotests/bgp_default_originate_timer/r2/bgpd.conf b/tests/topotests/bgp_default_originate_timer/r2/bgpd.conf
new file mode 100644
index 0000000000..7ca65a94a1
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r2/bgpd.conf
@@ -0,0 +1,6 @@
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+!
diff --git a/tests/topotests/bgp_default_originate_timer/r2/zebra.conf b/tests/topotests/bgp_default_originate_timer/r2/zebra.conf
new file mode 100644
index 0000000000..0c95656663
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r2/zebra.conf
@@ -0,0 +1,4 @@
+!
+interface r2-eth0
+ ip address 192.168.1.2/24
+!
diff --git a/tests/topotests/bgp_default_originate_timer/r3/bgpd.conf b/tests/topotests/bgp_default_originate_timer/r3/bgpd.conf
new file mode 100644
index 0000000000..0a37913d73
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r3/bgpd.conf
@@ -0,0 +1,12 @@
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+ address-family ipv4 unicast
+ redistribute connected route-map r1
+ exit-address-family
+!
+route-map r1 permit 10
+ set community 65003:1
+exit
diff --git a/tests/topotests/bgp_default_originate_timer/r3/zebra.conf b/tests/topotests/bgp_default_originate_timer/r3/zebra.conf
new file mode 100644
index 0000000000..20801f937e
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/r3/zebra.conf
@@ -0,0 +1,7 @@
+!
+interface lo
+ ip address 10.10.10.10/32
+!
+interface r3-eth0
+ ip address 192.168.2.2/24
+!
diff --git a/tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py b/tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py
new file mode 100644
index 0000000000..b2ba936fb1
--- /dev/null
+++ b/tests/topotests/bgp_default_originate_timer/test_bgp_default_originate_timer.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Check if the `bgp default-originate timer` command takes effect:
+1. Set bgp default-originate timer 3600
+2. No default route is advertised because the timer is running for 3600 seconds
+3. We reduce it to 10 seconds
+4. Default route is advertised
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    """Build a 3-router topology: r1--s1--r2 and r1--s2--r3."""
+    for routern in range(1, 4):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+
+    switch = tgen.add_switch("s2")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r3"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+    """Tear the topology down after all tests in this module have run."""
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_default_originate_timer():
+    """
+    With `bgp default-originate timer 3600` the default route must NOT be
+    advertised; after lowering the timer to 10s and triggering an UPDATE
+    from r3, r2 must receive 0.0.0.0/0 from r1.
+    """
+    tgen = get_topogen()
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    r1 = tgen.gears["r1"]
+    r2 = tgen.gears["r2"]
+    r3 = tgen.gears["r3"]
+
+    def _bgp_default_received_from_r1():
+        # Returns None when r2 sees 0.0.0.0/0 from r1; otherwise returns the
+        # json_cmp mismatch description.
+        output = json.loads(r2.vtysh_cmd("show bgp ipv4 unicast 0.0.0.0/0 json"))
+        expected = {
+            "paths": [
+                {
+                    "nexthops": [
+                        {
+                            "hostname": "r1",
+                            "ip": "192.168.1.1",
+                        }
+                    ],
+                }
+            ],
+        }
+        return topotest.json_cmp(output, expected)
+
+    test_func = functools.partial(_bgp_default_received_from_r1)
+    # NOTE(review): `not None` evaluates to True, a value json_cmp never
+    # returns, so this polls the full 30 iterations before returning -- it
+    # appears to be an intentional "verify absence for 30s" idiom; confirm.
+    _, result = topotest.run_and_expect(test_func, not None, count=30, wait=1)
+    assert result is not None, "Seen default route received from r1, but should not"
+
+    step("Set BGP default-originate timer to 10 seconds")
+    r1.vtysh_cmd(
+        """
+    configure terminal
+     router bgp
+      bgp default-originate timer 10
+    """
+    )
+
+    step("Trigger BGP UPDATE from r3")
+    # Changing the route-map forces r3 to re-advertise, which restarts r1's
+    # default-originate evaluation under the shorter timer.
+    r3.vtysh_cmd(
+        """
+    configure terminal
+     route-map r1 permit 10
+      set metric 1
+    """
+    )
+
+    test_func = functools.partial(_bgp_default_received_from_r1)
+    _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+    assert result is None, "Did not see default route received from r1, but should"
+
+
+if __name__ == "__main__":
+    # Allow running this test file directly; "-s" disables output capture.
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py b/tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py
index 593a8d6417..593a8d6417 100644
--- a/tests/topotests/bgp_gr_functionality_topo3/bgp_gr_functionality_topo3.py
+++ b/tests/topotests/bgp_gr_functionality_topo3/test_bgp_gr_functionality_topo3.py
diff --git a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py
index eb0f30f84a..7c2c7cfdaa 100644
--- a/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py
+++ b/tests/topotests/bgp_srv6l3vpn_to_bgp_vrf3/test_bgp_srv6l3vpn_to_bgp_vrf3.py
@@ -98,7 +98,8 @@ def check_ping4(name, dest_addr, expect_connected):
tgen = get_topogen()
output = tgen.gears[name].run("ping {} -c 1 -w 1".format(dest_addr))
logger.info(output)
- assert match in output, "ping fail"
+ if match not in output:
+ return "ping fail"
match = ", {} packet loss".format("0%" if expect_connected else "100%")
logger.info("[+] check {} {} {}".format(name, dest_addr, match))
diff --git a/tests/topotests/conftest.py b/tests/topotests/conftest.py
index b78a2f1052..cb25d63a36 100755
--- a/tests/topotests/conftest.py
+++ b/tests/topotests/conftest.py
@@ -4,6 +4,7 @@ Topotest conftest.py file.
"""
# pylint: disable=consider-using-f-string
+import contextlib
import glob
import logging
import os
@@ -12,6 +13,7 @@ import resource
import subprocess
import sys
import time
+from pathlib import Path
import lib.fixtures
import pytest
@@ -41,6 +43,30 @@ except (AttributeError, ImportError):
pass
+# Remove this and use munet version when we move to pytest_asyncio
+@contextlib.contextmanager
+def chdir(ndir, desc=""):
+    """Context manager: chdir into ``ndir`` and restore the old cwd on exit.
+
+    ``desc`` is an optional label used only for debug logging.
+    """
+    odir = os.getcwd()
+    os.chdir(ndir)
+    if desc:
+        logging.debug("%s: chdir from %s to %s", desc, odir, ndir)
+    try:
+        yield
+    finally:
+        if desc:
+            logging.debug("%s: chdir back from %s to %s", desc, ndir, odir)
+        os.chdir(odir)
+
+
+@contextlib.contextmanager
+def log_handler(basename, logpath):
+    """Pair topolog.logstart() with a guaranteed topolog.logfinish()."""
+    topolog.logstart(basename, logpath)
+    try:
+        yield
+    finally:
+        topolog.logfinish(basename, logpath)
+
+
def pytest_addoption(parser):
"""
Add topology-only option to the topology tester. This option makes pytest
@@ -272,6 +298,20 @@ def check_for_memleaks():
@pytest.fixture(autouse=True, scope="module")
+def module_autouse(request):
+    # Per-module exec.log under the rundir, named after the test module.
+    basename = get_test_logdir(request.node.nodeid, True)
+    logdir = Path(topotest.g_pytest_config.option.rundir) / basename
+    logpath = logdir / "exec.log"
+
+    # Mode 1777 (sticky + world-writable) so unprivileged router processes
+    # can also write into the log directory.
+    subprocess.check_call("mkdir -p -m 1777 {}".format(logdir), shell=True)
+
+    # Log to the module's file and run each module from its own source
+    # directory for the duration of the module's tests.
+    with log_handler(basename, logpath):
+        sdir = os.path.dirname(os.path.realpath(request.fspath))
+        with chdir(sdir, "module autouse fixture"):
+            yield
+
+
+@pytest.fixture(autouse=True, scope="module")
def module_check_memtest(request):
yield
if request.config.option.valgrind_memleaks:
@@ -282,14 +322,19 @@ def module_check_memtest(request):
check_for_memleaks()
-def pytest_runtest_logstart(nodeid, location):
- # location is (filename, lineno, testname)
- topolog.logstart(nodeid, location, topotest.g_pytest_config.option.rundir)
-
-
-def pytest_runtest_logfinish(nodeid, location):
- # location is (filename, lineno, testname)
- topolog.logfinish(nodeid, location)
+#
+# Disable per test function logging as FRR CI system can't handle it.
+#
+# @pytest.fixture(autouse=True, scope="function")
+# def function_autouse(request):
+# # For tests we actually use the logdir name as the logfile base
+# logbase = get_test_logdir(nodeid=request.node.nodeid, module=False)
+# logbase = os.path.join(topotest.g_pytest_config.option.rundir, logbase)
+# logpath = Path(logbase)
+# path = Path(f"{logpath.parent}/exec-{logpath.name}.log")
+# subprocess.check_call("mkdir -p -m 1777 {}".format(logpath.parent), shell=True)
+# with log_handler(request.node.nodeid, path):
+# yield
@pytest.hookimpl(hookwrapper=True)
@@ -340,8 +385,10 @@ def pytest_configure(config):
os.environ["PYTEST_TOPOTEST_WORKER"] = ""
is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no"
is_worker = False
+ wname = ""
else:
- os.environ["PYTEST_TOPOTEST_WORKER"] = os.environ["PYTEST_XDIST_WORKER"]
+ wname = os.environ["PYTEST_XDIST_WORKER"]
+ os.environ["PYTEST_TOPOTEST_WORKER"] = wname
is_xdist = True
is_worker = True
@@ -375,6 +422,16 @@ def pytest_configure(config):
if not config.getoption("--log-file") and not config.getini("log_file"):
config.option.log_file = os.path.join(rundir, "exec.log")
+ # Handle pytest-xdist each worker get's it's own top level log file
+ # `exec-worker-N.log`
+ if wname:
+ wname = wname.replace("gw", "worker-")
+ cpath = Path(config.option.log_file).absolute()
+ config.option.log_file = f"{cpath.parent}/{cpath.stem}-{wname}{cpath.suffix}"
+ elif is_xdist:
+ cpath = Path(config.option.log_file).absolute()
+ config.option.log_file = f"{cpath.parent}/{cpath.stem}-xdist{cpath.suffix}"
+
# Turn on live logging if user specified verbose and the config has a CLI level set
if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"):
if config.getoption("--log-cli-level", None) is None:
@@ -433,6 +490,10 @@ def pytest_configure(config):
@pytest.fixture(autouse=True, scope="session")
def setup_session_auto():
+ # Aligns logs nicely
+ logging.addLevelName(logging.WARNING, " WARN")
+ logging.addLevelName(logging.INFO, " INFO")
+
if "PYTEST_TOPOTEST_WORKER" not in os.environ:
is_worker = False
elif not os.environ["PYTEST_TOPOTEST_WORKER"]:
diff --git a/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py b/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py
index 85600beb0e..c81f63942b 100755
--- a/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py
+++ b/tests/topotests/isis_sr_flex_algo_topo1/test_isis_sr_flex_algo_topo1.py
@@ -38,6 +38,7 @@ import sys
import pytest
import json
import tempfile
+from copy import deepcopy
from functools import partial
# Save the Current Working Directory to find configuration files.
@@ -111,8 +112,12 @@ def setup_module(mod):
# For all registered routers, load the zebra configuration file
for rname, router in router_list.items():
- router.load_config( TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)))
- router.load_config( TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)))
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
+ )
tgen.start_router()
@@ -130,6 +135,30 @@ def setup_testcase(msg):
return tgen
+def router_json_cmp_exact_filter(router, cmd, expected):
+    """Run ``cmd`` on ``router``, strip dynamic fields from the JSON output
+    of "show mpls table", then exact-compare against ``expected``."""
+    output = router.vtysh_cmd(cmd)
+    logger.info("{}: {}\n{}".format(router.name, cmd, output))
+
+    json_output = json.loads(output)
+    # Iterate the original while mutating a copy, so deletions don't
+    # invalidate the iteration.
+    router_output = deepcopy(json_output)
+
+    # filter out dynamic data from "show mpls table"
+    for label, data in json_output.items():
+        if "1500" in label:
+            # filter out SR local labels
+            # NOTE(review): substring match, so any label containing "1500"
+            # (e.g. "11500") is dropped too -- confirm this is intended.
+            router_output.pop(label)
+            continue
+        nexthops = data.get("nexthops", [])
+        for i in range(len(nexthops)):
+            # NOTE(review): nexthops[i].get("nexthop") would be None if the
+            # key is absent, making the `in` test raise TypeError -- assumes
+            # every nexthop entry carries a "nexthop" string.
+            if "fe80::" in nexthops[i].get("nexthop"):
+                # IPv6 link-local next hops are dynamic: drop the address,
+                # keep the interface for comparison.
+                router_output.get(label).get("nexthops")[i].pop("nexthop")
+            elif "." in nexthops[i].get("nexthop"):
+                # IPv4, just checking the nexthop
+                router_output.get(label).get("nexthops")[i].pop("interface")
+
+    return topotest.json_cmp(router_output, expected, exact=True)
+
+
def router_compare_json_output(rname, command, reference):
"Compare router JSON output"
@@ -139,7 +168,9 @@ def router_compare_json_output(rname, command, reference):
expected = json.loads(reference)
# Run test function until we get an result. Wait at most 60 seconds.
- test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
+ test_func = partial(
+ router_json_cmp_exact_filter, tgen.gears[rname], command, expected
+ )
_, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
@@ -153,9 +184,13 @@ def router_compare_output(rname, command, reference):
tgen = get_topogen()
# Run test function until we get an result. Wait at most 60 seconds.
- test_func = partial(topotest.router_output_cmp, tgen.gears[rname], command, reference)
+ test_func = partial(
+ topotest.router_output_cmp, tgen.gears[rname], command, reference
+ )
result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5)
- assertmsg = '{} command "{}" output mismatches the expected result:\n{}'.format(rname, command, diff)
+ assertmsg = '{} command "{}" output mismatches the expected result:\n{}'.format(
+ rname, command, diff
+ )
assert result, assertmsg
@@ -176,11 +211,11 @@ def test_step1_mpls_lfib():
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][1]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][1]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][1]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][1]["show_mpls_table.ref"]
+ )
#
@@ -207,17 +242,18 @@ def test_step2_mpls_lfib():
router isis 1
flex-algo 203
no advertise-definition
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][2]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][2]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][2]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][2]["show_mpls_table.ref"]
+ )
#
@@ -244,17 +280,18 @@ def test_step3_mpls_lfib():
router isis 1
flex-algo 203
no advertise-definition
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][3]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][3]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][3]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][3]["show_mpls_table.ref"]
+ )
#
@@ -281,17 +318,18 @@ def test_step4_mpls_lfib():
router isis 1
flex-algo 203
advertise-definition
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][4]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][4]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][4]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][4]["show_mpls_table.ref"]
+ )
#
@@ -319,17 +357,18 @@ def test_step5_mpls_lfib():
router isis 1
flex-algo 203
advertise-definition
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][5]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][5]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][5]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][5]["show_mpls_table.ref"]
+ )
#
@@ -360,17 +399,18 @@ def test_step6_mpls_lfib():
router isis 1
flex-algo 203
no dataplane sr-mpls
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][6]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][6]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][6]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][6]["show_mpls_table.ref"]
+ )
#
@@ -400,17 +440,19 @@ def test_step7_mpls_lfib():
configure terminal
router isis 1
no flex-algo 203
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][7]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][7]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][7]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][7]["show_mpls_table.ref"]
+ )
+
#
# Step 8
@@ -440,7 +482,8 @@ def test_step8_mpls_lfib():
advertise-definition
affinity exclude-any green
dataplane sr-mpls
- """)
+ """
+ )
tgen.gears["rt2"].vtysh_cmd(
"""
@@ -450,7 +493,8 @@ def test_step8_mpls_lfib():
advertise-definition
affinity exclude-any green
dataplane sr-mpls
- """)
+ """
+ )
tgen.gears["rt3"].vtysh_cmd(
"""
@@ -458,17 +502,18 @@ def test_step8_mpls_lfib():
router isis 1
flex-algo 203
dataplane sr-mpls
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][8]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][8]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][8]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][8]["show_mpls_table.ref"]
+ )
#
@@ -494,17 +539,18 @@ def test_step9_mpls_lfib():
router isis 1
no segment-routing prefix 1.1.1.1/32 algorithm 203 index 301
no segment-routing prefix 2001:db8:1000::1/128 algorithm 203 index 1301
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][9]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][9]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][9]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][9]["show_mpls_table.ref"]
+ )
#
@@ -530,17 +576,18 @@ def test_step10_mpls_lfib():
router isis 1
segment-routing prefix 1.1.1.1/32 algorithm 203 index 301
segment-routing prefix 2001:db8:1000::1/128 algorithm 203 index 1301
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][10]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][10]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][10]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][10]["show_mpls_table.ref"]
+ )
#
@@ -565,17 +612,18 @@ def test_step11_mpls_lfib():
router isis 1
segment-routing prefix 1.1.1.1/32 algorithm 203 index 311
segment-routing prefix 2001:db8:1000::1/128 algorithm 203 index 1311
- """)
+ """
+ )
# For Developers
# tgen.mininet_cli()
for rname in ["rt1", "rt2", "rt3"]:
router_compare_output(
- rname, "show isis flex-algo",
- outputs[rname][11]["show_isis_flex_algo.ref"])
+ rname, "show isis flex-algo", outputs[rname][11]["show_isis_flex_algo.ref"]
+ )
router_compare_json_output(
- rname, "show mpls table json",
- outputs[rname][11]["show_mpls_table.ref"])
+ rname, "show mpls table json", outputs[rname][11]["show_mpls_table.ref"]
+ )
if __name__ == "__main__":
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 0bd9408c28..21d4567d6b 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -50,6 +50,7 @@ def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config
"bgp": {
"local_as": "200",
"router_id": "22.22.22.22",
+ "bgp_always_compare_med": True,
"graceful-restart": {
"graceful-restart": True,
"preserve-fw-state": True,
@@ -343,6 +344,13 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
config_data.append(cmd)
+ if "bgp_always_compare_med" in bgp_data:
+ bgp_always_compare_med = bgp_data["bgp_always_compare_med"]
+ if bgp_always_compare_med == True:
+ config_data.append("bgp always-compare-med")
+ elif bgp_always_compare_med == False:
+ config_data.append("no bgp always-compare-med")
+
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return config_data
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index a85b86668c..5d37b062ac 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -33,6 +33,7 @@ from lib.topogen import TopoRouter, get_topogen
from lib.topolog import get_logger, logger
from lib.topotest import frr_unicode, interface_set_status, version_cmp
from lib import topotest
+from munet.testing.util import pause_test
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
@@ -2069,6 +2070,8 @@ def step(msg, reset=False):
* ` msg` : Step message body.
* `reset` : Reset step count to 1 when set to True.
"""
+ if bool(topotest.g_pytest_config.get_option("--pause")):
+ pause_test("before :" + msg)
_step = Stepper()
_step(msg, reset)
diff --git a/tests/topotests/lib/mcast-tester.py b/tests/topotests/lib/mcast-tester.py
index 8a8251010c..5efbecd5e5 100755
--- a/tests/topotests/lib/mcast-tester.py
+++ b/tests/topotests/lib/mcast-tester.py
@@ -11,6 +11,7 @@ for the multicast group we subscribed to.
import argparse
import json
+import ipaddress
import os
import socket
import struct
@@ -35,13 +36,16 @@ def interface_name_to_index(name):
def multicast_join(sock, ifindex, group, port):
"Joins a multicast group."
- mreq = struct.pack(
- "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex
- )
-
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.bind((group, port))
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
+
+ if ip_version == 4:
+ mreq = group.packed + struct.pack("@II", socket.INADDR_ANY, ifindex)
+ opt = socket.IP_ADD_MEMBERSHIP
+ else:
+ mreq = group.packed + struct.pack("@I", ifindex)
+ opt = socket.IPV6_JOIN_GROUP
+ sock.bind((str(group), port))
+ sock.setsockopt(ip_proto, opt, mreq)
#
@@ -50,15 +54,14 @@ def multicast_join(sock, ifindex, group, port):
parser = argparse.ArgumentParser(description="Multicast RX utility")
parser.add_argument("group", help="Multicast IP")
parser.add_argument("interface", help="Interface name")
+parser.add_argument("--port", type=int, default=1000, help="port to send to")
+parser.add_argument("--ttl", type=int, default=16, help="TTL/hops for sending packets")
parser.add_argument("--socket", help="Point to topotest UNIX socket")
parser.add_argument(
"--send", help="Transmit instead of join with interval", type=float, default=0
)
args = parser.parse_args()
-ttl = 16
-port = 1000
-
# Get interface index/validate.
ifindex = interface_name_to_index(args.interface)
if ifindex is None:
@@ -85,7 +88,12 @@ else:
# Set topotest socket non blocking so we can multiplex the main loop.
toposock.setblocking(False)
-msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+args.group = ipaddress.ip_address(args.group)
+ip_version = args.group.version
+ip_family = socket.AF_INET if ip_version == 4 else socket.AF_INET6
+ip_proto = socket.IPPROTO_IP if ip_version == 4 else socket.IPPROTO_IPV6
+
+msock = socket.socket(ip_family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
if args.send > 0:
# Prepare multicast bit in that interface.
msock.setsockopt(
@@ -93,12 +101,18 @@ if args.send > 0:
25,
struct.pack("%ds" % len(args.interface), args.interface.encode("utf-8")),
)
- # Set packets TTL.
- msock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl))
+
+ # Set packets TTL/hops.
+ ttlopt = socket.IP_MULTICAST_TTL if ip_version == 4 else socket.IPV6_MULTICAST_HOPS
+ if ip_version == 4:
+ msock.setsockopt(ip_proto, ttlopt, struct.pack("B", args.ttl))
+ else:
+ msock.setsockopt(ip_proto, ttlopt, struct.pack("I", args.ttl))
+
# Block to ensure packet send.
msock.setblocking(True)
else:
- multicast_join(msock, ifindex, args.group, port)
+ multicast_join(msock, ifindex, args.group, args.port)
def should_exit():
@@ -120,7 +134,7 @@ def should_exit():
counter = 0
while not should_exit():
if args.send > 0:
- msock.sendto(b"test %d" % counter, (args.group, port))
+ msock.sendto(b"test %d" % counter, (str(args.group), args.port))
counter += 1
time.sleep(args.send)
diff --git a/tests/topotests/lib/micronet_compat.py b/tests/topotests/lib/micronet_compat.py
index d648a120ab..b348c85988 100644
--- a/tests/topotests/lib/micronet_compat.py
+++ b/tests/topotests/lib/micronet_compat.py
@@ -121,7 +121,7 @@ class Mininet(BaseMunet):
g_mnet_inst = None
- def __init__(self, rundir=None, pytestconfig=None):
+ def __init__(self, rundir=None, pytestconfig=None, logger=None):
"""
Create a Micronet.
"""
@@ -140,7 +140,7 @@ class Mininet(BaseMunet):
# os.umask(0)
super(Mininet, self).__init__(
- pid=False, rundir=rundir, pytestconfig=pytestconfig
+ pid=False, rundir=rundir, pytestconfig=pytestconfig, logger=logger
)
# From munet/munet/native.py
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index e26bdb3af3..f69718a5bd 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -1,35 +1,35 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
# Copyright (c) 2019 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
# ("NetDEF") in this file.
import datetime
+import functools
import os
import re
import sys
import traceback
-import functools
from copy import deepcopy
from time import sleep
-from lib import topotest
-
# Import common_config to use commomnly used APIs
from lib.common_config import (
- create_common_configurations,
HostApplicationHelper,
InvalidCLIError,
create_common_configuration,
- InvalidCLIError,
+ create_common_configurations,
+ get_frr_ipv6_linklocal,
retry,
run_frr_cmd,
validate_ip_address,
- get_frr_ipv6_linklocal,
)
from lib.micronet import get_exec_path
from lib.topolog import logger
from lib.topotest import frr_unicode
+from lib import topotest
+
####
CWD = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 2d6138990e..6ddd223e25 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -84,7 +84,7 @@ def get_exabgp_cmd(commander=None):
"""Return the command to use for ExaBGP version < 4."""
if commander is None:
- commander = Commander("topogen")
+ commander = Commander("exabgp", logger=logging.getLogger("exabgp"))
def exacmd_version_ok(exacmd):
logger.debug("checking %s for exabgp < version 4", exacmd)
@@ -107,7 +107,7 @@ def get_exabgp_cmd(commander=None):
exacmd = py2_path + " -m exabgp"
if exacmd_version_ok(exacmd):
return exacmd
- py2_path = commander.get_exec_path("python")
+ py2_path = commander.get_exec_path("python")
if py2_path:
exacmd = py2_path + " -m exabgp"
if exacmd_version_ok(exacmd):
@@ -209,7 +209,11 @@ class Topogen(object):
# Mininet(Micronet) to build the actual topology.
assert not inspect.isclass(topodef)
- self.net = Mininet(rundir=self.logdir, pytestconfig=topotest.g_pytest_config)
+ self.net = Mininet(
+ rundir=self.logdir,
+ pytestconfig=topotest.g_pytest_config,
+ logger=topolog.get_logger("mu", log_level="debug"),
+ )
# Adjust the parent namespace
topotest.fix_netns_limits(self.net)
@@ -798,23 +802,23 @@ class TopoRouter(TopoGear):
Start the daemons in the list
If daemons is None, try to infer daemons from the config file
"""
- self.load_config(self.RD_FRR, source)
+ source_path = self.load_config(self.RD_FRR, source)
if not daemons:
# Always add zebra
- self.load_config(self.RD_ZEBRA)
+ self.load_config(self.RD_ZEBRA, "")
for daemon in self.RD:
# This will not work for all daemons
daemonstr = self.RD.get(daemon).rstrip("d")
if daemonstr == "pim":
- grep_cmd = "grep 'ip {}' {}".format(daemonstr, source)
+ grep_cmd = "grep 'ip {}' {}".format(daemonstr, source_path)
else:
- grep_cmd = "grep 'router {}' {}".format(daemonstr, source)
+ grep_cmd = "grep 'router {}' {}".format(daemonstr, source_path)
result = self.run(grep_cmd, warn=False).strip()
if result:
- self.load_config(daemon)
+ self.load_config(daemon, "")
else:
for daemon in daemons:
- self.load_config(daemon)
+ self.load_config(daemon, "")
def load_config(self, daemon, source=None, param=None):
"""Loads daemon configuration from the specified source
@@ -833,7 +837,7 @@ class TopoRouter(TopoGear):
"""
daemonstr = self.RD.get(daemon)
self.logger.debug('loading "{}" configuration: {}'.format(daemonstr, source))
- self.net.loadConf(daemonstr, source, param)
+ return self.net.loadConf(daemonstr, source, param)
def check_router_running(self):
"""
@@ -1090,8 +1094,9 @@ class TopoSwitch(TopoGear):
# pylint: disable=too-few-public-methods
def __init__(self, tgen, name, **params):
+ logger = topolog.get_logger(name, log_level="debug")
super(TopoSwitch, self).__init__(tgen, name, **params)
- tgen.net.add_switch(name)
+ tgen.net.add_switch(name, logger=logger)
def __str__(self):
gear = super(TopoSwitch, self).__str__()
diff --git a/tests/topotests/lib/topolog.py b/tests/topotests/lib/topolog.py
index b501670789..aceb2cb031 100644
--- a/tests/topotests/lib/topolog.py
+++ b/tests/topotests/lib/topolog.py
@@ -15,13 +15,6 @@ This file defines our logging abstraction.
import logging
import os
-import subprocess
-import sys
-
-if sys.version_info[0] > 2:
- pass
-else:
- pass
try:
from xdist import is_xdist_controller
@@ -31,8 +24,6 @@ except ImportError:
return False
-BASENAME = "topolog"
-
# Helper dictionary to convert Topogen logging levels to Python's logging.
DEBUG_TOPO2LOGGING = {
"debug": logging.DEBUG,
@@ -42,13 +33,43 @@ DEBUG_TOPO2LOGGING = {
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
-FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s: %(name)s: %(message)s"
+FORMAT = "%(asctime)s %(levelname)s: %(name)s: %(message)s"
handlers = {}
-logger = logging.getLogger("topolog")
+logger = logging.getLogger("topo")
+
+
+# Remove this and use munet version when we move to pytest_asyncio
+def get_test_logdir(nodeid=None, module=False):
+ """Get log directory relative pathname."""
+ xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
+
+ # nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
+ # may be missing "::testname" if module is True
+ if not nodeid:
+ nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
+
+ cur_test = nodeid.replace("[", "_").replace("]", "_")
+ if module:
+ idx = cur_test.rfind("::")
+ path = cur_test if idx == -1 else cur_test[:idx]
+ testname = ""
+ else:
+ path, testname = cur_test.split("::")
+ testname = testname.replace("/", ".")
+ path = path[:-3].replace("/", ".")
+ # We use different logdir paths based on how xdist is running.
+ if mode == "each":
+ if module:
+ return os.path.join(path, "worker-logs", xdist_worker)
+ return os.path.join(path, testname, xdist_worker)
+ assert mode in ("no", "load", "loadfile", "loadscope"), f"Unknown dist mode {mode}"
+ return path if module else os.path.join(path, testname)
-def set_handler(l, target=None):
+
+def set_handler(lg, target=None):
if target is None:
h = logging.NullHandler()
else:
@@ -59,106 +80,81 @@ def set_handler(l, target=None):
h.setFormatter(logging.Formatter(fmt=FORMAT))
# Don't filter anything at the handler level
h.setLevel(logging.DEBUG)
- l.addHandler(h)
+ lg.addHandler(h)
return h
-def set_log_level(l, level):
+def set_log_level(lg, level):
"Set the logging level."
# Messages sent to this logger only are created if this level or above.
log_level = DEBUG_TOPO2LOGGING.get(level, level)
- l.setLevel(log_level)
+ lg.setLevel(log_level)
-def get_logger(name, log_level=None, target=None):
- l = logging.getLogger("{}.{}".format(BASENAME, name))
+def reset_logger(lg):
+ while lg.handlers:
+ x = lg.handlers.pop()
+ x.close()
+ lg.removeHandler(x)
- if log_level is not None:
- set_log_level(l, log_level)
- if target is not None:
- set_handler(l, target)
-
- return l
-
-
-# nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
-
-
-def get_test_logdir(nodeid=None):
- """Get log directory relative pathname."""
- xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
- mode = os.getenv("PYTEST_XDIST_MODE", "no")
+def get_logger(name, log_level=None, target=None, reset=True):
+ lg = logging.getLogger(name)
- if not nodeid:
- nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
+ if reset:
+ reset_logger(lg)
- cur_test = nodeid.replace("[", "_").replace("]", "_")
- path, testname = cur_test.split("::")
- path = path[:-3].replace("/", ".")
+ if log_level is not None:
+ set_log_level(lg, log_level)
- # We use different logdir paths based on how xdist is running.
- if mode == "each":
- return os.path.join(path, testname, xdist_worker)
- elif mode == "load":
- return os.path.join(path, testname)
- else:
- assert (
- mode == "no" or mode == "loadfile" or mode == "loadscope"
- ), "Unknown dist mode {}".format(mode)
+ if target is not None:
+ set_handler(lg, target)
- return path
+ return lg
-def logstart(nodeid, location, rundir):
+def logstart(nodeid, logpath):
"""Called from pytest before module setup."""
-
- mode = os.getenv("PYTEST_XDIST_MODE", "no")
worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
+ wstr = f" on worker {worker}" if worker else ""
+ handler_id = nodeid + worker
+ logpath = logpath.absolute()
- # We only per-test log in the workers (or non-dist)
- if not worker and mode != "no":
- return
+ logging.debug("logstart: adding logging for %s%s at %s", nodeid, wstr, logpath)
+ root_logger = logging.getLogger()
+ handler = logging.FileHandler(logpath, mode="w")
+ handler.setFormatter(logging.Formatter(FORMAT))
- handler_id = nodeid + worker
- assert handler_id not in handlers
-
- rel_log_dir = get_test_logdir(nodeid)
- exec_log_dir = os.path.join(rundir, rel_log_dir)
- subprocess.check_call(
- "mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True
- )
- exec_log_path = os.path.join(exec_log_dir, "exec.log")
-
- # Add test based exec log handler
- h = set_handler(logger, exec_log_path)
- handlers[handler_id] = h
-
- if worker:
- logger.info(
- "Logging on worker %s for %s into %s", worker, handler_id, exec_log_path
- )
- else:
- logger.info("Logging for %s into %s", handler_id, exec_log_path)
+ root_logger.addHandler(handler)
+ handlers[handler_id] = handler
+ logging.debug("logstart: added logging for %s%s at %s", nodeid, wstr, logpath)
+ return handler
-def logfinish(nodeid, location):
- """Called from pytest after module teardown."""
- # This function may not be called if pytest is interrupted.
+def logfinish(nodeid, logpath):
+ """Called from pytest after module teardown."""
worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
- handler_id = nodeid + worker
+ wstr = f" on worker {worker}" if worker else ""
+
+ root_logger = logging.getLogger()
- if handler_id in handlers:
- # Remove test based exec log handler
- if worker:
- logger.info("Closing logs for %s", handler_id)
+ handler_id = nodeid + worker
+ if handler_id not in handlers:
+ logging.critical("can't find log handler to remove")
+ else:
+ logging.debug(
+ "logfinish: removing logging for %s%s at %s", nodeid, wstr, logpath
+ )
h = handlers[handler_id]
- logger.removeHandler(handlers[handler_id])
+ root_logger.removeHandler(h)
h.flush()
h.close()
del handlers[handler_id]
+ logging.debug(
+ "logfinish: removed logging for %s%s at %s", nodeid, wstr, logpath
+ )
console_handler = set_handler(logger, None)
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 9cfeb8e1de..845d3e3b53 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -24,6 +24,7 @@ import subprocess
import sys
import tempfile
import time
+import logging
from collections.abc import Mapping
from copy import deepcopy
@@ -38,7 +39,7 @@ g_pytest_config = None
def get_logs_path(rundir):
- logspath = topolog.get_test_logdir()
+ logspath = topolog.get_test_logdir(module=True)
return os.path.join(rundir, logspath)
@@ -988,7 +989,7 @@ def checkAddressSanitizerError(output, router, component, logdir=""):
)
if addressSanitizerLog:
# Find Calling Test. Could be multiple steps back
- testframe = sys._current_frames().values()[0]
+ testframe = list(sys._current_frames().values())[0]
level = 0
while level < 10:
test = os.path.splitext(
@@ -1137,7 +1138,9 @@ def _sysctl_assure(commander, variable, value):
def sysctl_atleast(commander, variable, min_value, raises=False):
try:
if commander is None:
- commander = micronet.Commander("topotest")
+ logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=logger)
+
return _sysctl_atleast(commander, variable, min_value)
except subprocess.CalledProcessError as error:
logger.warning(
@@ -1153,7 +1156,8 @@ def sysctl_atleast(commander, variable, min_value, raises=False):
def sysctl_assure(commander, variable, value, raises=False):
try:
if commander is None:
- commander = micronet.Commander("topotest")
+ logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=logger)
return _sysctl_assure(commander, variable, value)
except subprocess.CalledProcessError as error:
logger.warning(
@@ -1527,9 +1531,11 @@ class Router(Node):
"""
# Unfortunately this API allowsfor source to not exist for any and all routers.
- if source is None:
+ source_was_none = source is None
+ if source_was_none:
source = f"{daemon}.conf"
+ # "" to avoid loading a default config which is present in router dir
if source:
head, tail = os.path.split(source)
if not head and not self.path_exists(tail):
@@ -1550,18 +1556,40 @@ class Router(Node):
if param is not None:
self.daemons_options[daemon] = param
conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
- if source is None or not os.path.exists(source):
+ if source and not os.path.exists(source):
+ logger.warning(
+ "missing config '%s' for '%s' creating empty file '%s'",
+ self.name,
+ source,
+ conf_file,
+ )
if daemon == "frr" or not self.unified_config:
self.cmd_raises("rm -f " + conf_file)
self.cmd_raises("touch " + conf_file)
- else:
+ self.cmd_raises(
+ "chown {0}:{0} {1}".format(self.routertype, conf_file)
+ )
+ self.cmd_raises("chmod 664 {}".format(conf_file))
+ elif source:
# copy zebra.conf to mgmtd folder, which can be used during startup
- if daemon == "zebra":
+ if daemon == "zebra" and not self.unified_config:
conf_file_mgmt = "/etc/{}/{}.conf".format(self.routertype, "mgmtd")
+ logger.debug(
+ "copying '%s' as '%s' on '%s'",
+ source,
+ conf_file_mgmt,
+ self.name,
+ )
self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
- self.cmd_raises("cp {} {}".format(source, conf_file))
+ self.cmd_raises(
+ "chown {0}:{0} {1}".format(self.routertype, conf_file_mgmt)
+ )
+ self.cmd_raises("chmod 664 {}".format(conf_file_mgmt))
- if not (self.unified_config or daemon == "frr"):
+ logger.debug(
+ "copying '%s' as '%s' on '%s'", source, conf_file, self.name
+ )
+ self.cmd_raises("cp {} {}".format(source, conf_file))
self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
self.cmd_raises("chmod 664 {}".format(conf_file))
@@ -1588,7 +1616,8 @@ class Router(Node):
else:
logger.warning("No daemon {} known".format(daemon))
- # print "Daemons after:", self.daemons
+
+ return source if os.path.exists(source) else ""
def runInWindow(self, cmd, title=None):
return self.run_in_window(cmd, title)
diff --git a/tests/topotests/mgmt_startup/r1/mgmtd.conf b/tests/topotests/mgmt_startup/r1/mgmtd.conf
new file mode 100644
index 0000000000..ecc829c662
--- /dev/null
+++ b/tests/topotests/mgmt_startup/r1/mgmtd.conf
@@ -0,0 +1,13 @@
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client backend
+debug mgmt client frontend
+
+ip route 12.0.0.0/24 101.0.0.2
+ip route 13.0.0.0/24 101.0.0.3
+
+ipv6 route 2012::/48 2101::2
+ipv6 route 2013::/48 2101::3
diff --git a/tests/topotests/mgmt_startup/r1/zebra.conf b/tests/topotests/mgmt_startup/r1/zebra.conf
new file mode 100644
index 0000000000..98cc95ba2b
--- /dev/null
+++ b/tests/topotests/mgmt_startup/r1/zebra.conf
@@ -0,0 +1,7 @@
+log timestamp precision 3
+log file frr2.log
+
+interface r1-eth0
+ ip address 101.0.0.1/24
+ ipv6 address 2101::1/64
+exit \ No newline at end of file
diff --git a/tests/topotests/mgmt_startup/r2/staticd.conf b/tests/topotests/mgmt_startup/r2/staticd.conf
new file mode 100644
index 0000000000..b581ed2dc3
--- /dev/null
+++ b/tests/topotests/mgmt_startup/r2/staticd.conf
@@ -0,0 +1,7 @@
+log timestamp precision 3
+
+ip route 11.0.0.0/24 101.0.0.1
+ip route 13.0.0.0/24 101.0.0.3
+
+ipv6 route 2011::/48 2102::1
+ipv6 route 2013::/48 2102::3 \ No newline at end of file
diff --git a/tests/topotests/mgmt_startup/r2/zebra.conf b/tests/topotests/mgmt_startup/r2/zebra.conf
new file mode 100644
index 0000000000..1d37a65737
--- /dev/null
+++ b/tests/topotests/mgmt_startup/r2/zebra.conf
@@ -0,0 +1,12 @@
+log timestamp precision 3
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client backend
+debug mgmt client frontend
+
+interface r2-eth0
+ ip address 101.0.0.2/24
+ ipv6 address 2101::2/64
diff --git a/tests/topotests/mgmt_startup/r3/zebra.conf b/tests/topotests/mgmt_startup/r3/zebra.conf
new file mode 100644
index 0000000000..8419d74975
--- /dev/null
+++ b/tests/topotests/mgmt_startup/r3/zebra.conf
@@ -0,0 +1,18 @@
+log timestamp precision 3
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client backend
+debug mgmt client frontend
+
+interface r3-eth0
+ ip address 101.0.0.3/24
+ ipv6 address 2101::3/64
+
+ip route 11.0.0.0/24 101.0.0.1
+ip route 12.0.0.0/24 101.0.0.2
+
+ipv6 route 2011::/48 2101::1
+ipv6 route 2012::/48 2101::2
diff --git a/tests/topotests/mgmt_startup/r4/frr.conf b/tests/topotests/mgmt_startup/r4/frr.conf
new file mode 100644
index 0000000000..5f3b35d9ce
--- /dev/null
+++ b/tests/topotests/mgmt_startup/r4/frr.conf
@@ -0,0 +1,21 @@
+log timestamp precision 6
+log file frr.log
+
+debug northbound notifications
+debug northbound libyang
+debug northbound events
+debug northbound callbacks
+debug mgmt backend datastore frontend transaction
+debug mgmt client backend
+debug mgmt client frontend
+
+interface r4-eth0
+ ip address 101.0.0.4/24
+ ipv6 address 2101::4/64
+exit
+
+ip route 11.0.0.0/24 101.0.0.1
+ip route 12.0.0.0/24 101.0.0.2
+
+ipv6 route 2012::/48 2101::2
+ipv6 route 2013::/48 2101::3
diff --git a/tests/topotests/mgmt_startup/test_bigconf.py b/tests/topotests/mgmt_startup/test_bigconf.py
new file mode 100644
index 0000000000..4f46c8fabd
--- /dev/null
+++ b/tests/topotests/mgmt_startup/test_bigconf.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# May 2 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+"""
+Test static route startup functionality
+"""
+
+import datetime
+import logging
+import os
+
+import pytest
+from lib.common_config import step
+from lib.topogen import Topogen, TopoRouter
+from munet.base import Timeout
+from util import check_kernel, check_vtysh_up, write_big_route_conf
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+# pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd]
+pytestmark = [pytest.mark.staticd]
+
+
+track = Timeout(0)
+ROUTE_COUNT = 2500
+ROUTE_RANGE = [None, None]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ global start_time
+ topodef = {
+ "s1": ("r1",),
+ }
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ prologue = open(f"{CWD}/r1/mgmtd.conf").read()
+
+ confpath = f"{tgen.gears['r1'].gearlogdir}/r1-late-big.conf"
+ start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath, prologue)
+ ROUTE_RANGE[0] = start
+ ROUTE_RANGE[1] = end
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r1"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ tgen.gears["r1"].load_config(TopoRouter.RD_MGMTD, confpath)
+
+ track.started_on = datetime.datetime.now()
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_staticd_latestart(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.routers()["r1"]
+
+ step(f"Verifying {ROUTE_COUNT} startup routes are present")
+
+ check_vtysh_up(r1)
+ logging.info("r1: vtysh connected after %ss", track.elapsed())
+
+ result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60)
+ assert result is None
+ logging.info("r1: first route installed after %ss", track.elapsed())
+
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60)
+ assert result is None
+ logging.info("r1: last route installed after %ss", track.elapsed())
diff --git a/tests/topotests/mgmt_startup/test_config.py b/tests/topotests/mgmt_startup/test_config.py
new file mode 100644
index 0000000000..6a54f71910
--- /dev/null
+++ b/tests/topotests/mgmt_startup/test_config.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# May 2 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+"""
+Test static route functionality using old or new configuration files.
+
+User compat:
+
+ - mgmtd split config will first look to `/etc/frr/zebra.conf`
+ then `/etc/frr/staticd.conf` and finally `/etc/frr/mgmtd.conf`
+
+ - When new components are converted to mgmtd their split config should be
+ added here too.
+
+Topotest compat:
+
+ - `mgmtd.conf` is copied to `/etc/frr/` for use by mgmtd when implicit load,
+ or explicit load no config specified.
+
+ - `staticd.conf` is copied to `/etc/frr/` for use by mgmtd when staticd
+ is explicit load implict config, and explicit config.
+
+"""
+
+import pytest
+from lib.common_config import step
+from lib.topogen import Topogen, TopoRouter
+from util import check_kernel
+
+# pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd]
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {
+ "s1": ("r1", "r2", "r3", "r4"),
+ }
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r1"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ tgen.gears["r1"].load_config(TopoRouter.RD_MGMTD, "mgmtd.conf")
+
+ # user/topotest compat:
+ # configure mgmtd using old staticd config file, with explicity staticd
+ # load.
+ tgen.gears["r2"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ tgen.gears["r2"].load_config(TopoRouter.RD_STATIC, "staticd.conf")
+
+ # user compat:
+ # configure mgmtd using backup config file `zebra.conf`
+ tgen.gears["r3"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r4"].load_frr_config("frr.conf")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_staticd_routes_present(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for x in ["r1", "r2", "r3", "r4"]:
+ tgen.gears[x].net.cmd_nostatus(
+ "vtysh -c 'debug mgmt client frontend' "
+ "-c 'debug mgmt client backend' "
+ "-c 'debug mgmt backend frontend datastore transaction'"
+ )
+
+ r1 = tgen.routers()["r1"]
+ r2 = tgen.routers()["r2"]
+ r3 = tgen.routers()["r3"]
+ r4 = tgen.routers()["r4"]
+
+ step("Verifying routes are present on r1")
+ result = check_kernel(r1, "12.0.0.0/24")
+ assert result is None
+ result = check_kernel(r1, "13.0.0.0/24")
+ assert result is None
+
+ step("Verifying routes are present on r2")
+ result = check_kernel(r2, "11.0.0.0/24")
+ assert result is None
+ result = check_kernel(r2, "13.0.0.0/24")
+ assert result is None
+
+ step("Verifying routes are present on r3")
+ result = check_kernel(r3, "11.0.0.0/24")
+ assert result is None
+ result = check_kernel(r3, "12.0.0.0/24")
+ assert result is None
+
+ step("Verifying routes are present on r4")
+ result = check_kernel(r4, "11.0.0.0/24")
+ assert result is None
+ result = check_kernel(r4, "12.0.0.0/24")
+ assert result is None
diff --git a/tests/topotests/mgmt_startup/test_late_bigconf.py b/tests/topotests/mgmt_startup/test_late_bigconf.py
new file mode 100644
index 0000000000..0b5bf38d10
--- /dev/null
+++ b/tests/topotests/mgmt_startup/test_late_bigconf.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# May 2 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+
+"""
+Verify large set of routes present when staticd (backend client) is started after its
+startup config is present during launch.
+"""
+
+import logging
+import os
+
+import pytest
+from lib.common_config import step
+from lib.topogen import Topogen, TopoRouter
+from munet.base import Timeout
+from util import check_kernel, check_vtysh_up, write_big_route_conf
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+
+# pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd]
+pytestmark = [pytest.mark.staticd]
+
+track = Timeout(0)
+ROUTE_COUNT = 2500
+ROUTE_RANGE = [None, None]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ global start_time
+ topodef = {
+ "s1": ("r1",),
+ }
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ prologue = open(f"{CWD}/r1/mgmtd.conf").read()
+
+ confpath = f"{tgen.gears['r1'].gearlogdir}/r1-late-big.conf"
+ start, end = write_big_route_conf("10.0.0.0/8", ROUTE_COUNT, confpath, prologue)
+ ROUTE_RANGE[0] = start
+ ROUTE_RANGE[1] = end
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r1"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ tgen.gears["r1"].load_config(TopoRouter.RD_MGMTD, confpath)
+
+ # Explicitly disable staticd now.
+ tgen.gears["r1"].net.daemons["staticd"] = 0
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_staticd_latestart(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.routers()["r1"]
+
+ check_vtysh_up(r1)
+ logging.info("r1: vtysh connected after %ss", track.elapsed())
+
+ result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60, expected=False)
+ assert result is not None, "first route present and should not be"
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60, expected=False)
+ assert result is not None, "last route present and should not be"
+
+ step("Starting staticd")
+ t2 = Timeout(0)
+ r1.startDaemons(["staticd"])
+
+ result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60)
+ assert result is None, "first route not present and should be"
+ logging.info("r1: elapsed time for first route %ss", t2.elapsed())
+
+ count = 0
+ ocount = 0
+ while count < ROUTE_COUNT:
+ rc, o, e = r1.net.cmd_status("ip -o route | wc -l")
+ if not rc:
+ if count > ocount + 100:
+ ocount = count
+ logging.info("r1: elapsed time for %d routes %s", count, t2.elapsed())
+ count = int(o)
+
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=1200)
+ assert result is None, "last route not present and should be"
+ logging.info("r1: elapsed time for last route %ss", t2.elapsed())
diff --git a/tests/topotests/mgmt_startup/test_late_uniconf.py b/tests/topotests/mgmt_startup/test_late_uniconf.py
new file mode 100644
index 0000000000..d4e7e07ad6
--- /dev/null
+++ b/tests/topotests/mgmt_startup/test_late_uniconf.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# May 2 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+
+"""
+Verify routes present when staticd (backend client) is started after its startup
+config, contained inside a unified configuration file, is present during launch.
+"""
+import pytest
+from lib.topogen import Topogen
+from util import _test_staticd_late_start
+
+# pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd]
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {
+ "s1": ("r4",),
+ }
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r4"].load_frr_config("frr.conf")
+
+ # Explicitly disable staticd now.
+ tgen.gears["r4"].net.daemons["staticd"] = 0
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_staticd_late_start(tgen):
+ return _test_staticd_late_start(tgen, tgen.routers()["r4"])
diff --git a/tests/topotests/mgmt_startup/test_latestart.py b/tests/topotests/mgmt_startup/test_latestart.py
new file mode 100644
index 0000000000..1c97b9dd0f
--- /dev/null
+++ b/tests/topotests/mgmt_startup/test_latestart.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# May 2 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+"""
+Verify routes present when staticd (backend client) is started after its startup config
+is present during launch.
+"""
+
+import pytest
+from lib.topogen import Topogen, TopoRouter
+from util import _test_staticd_late_start
+
+# pytestmark = [pytest.mark.staticd, pytest.mark.mgmtd]
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {
+ "s1": ("r1",),
+ }
+
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ # configure mgmtd using current mgmtd config file
+ tgen.gears["r1"].load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ tgen.gears["r1"].load_config(TopoRouter.RD_MGMTD)
+
+ # Explicitly disable staticd now.
+ tgen.gears["r1"].net.daemons["staticd"] = 0
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+def test_staticd_late_start(tgen):
+ return _test_staticd_late_start(tgen, tgen.routers()["r1"])
diff --git a/tests/topotests/mgmt_startup/util.py b/tests/topotests/mgmt_startup/util.py
new file mode 100644
index 0000000000..e366351326
--- /dev/null
+++ b/tests/topotests/mgmt_startup/util.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: ISC
+#
+# May 28 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+
+import ipaddress
+import math
+import re
+
+import pytest
+from lib.common_config import retry, step
+from lib.topolog import logger
+from munet.base import proc_error
+
+
+@retry(retry_timeout=30)
+def check_vtysh_up(router):
+ rc, o, e = router.net.cmd_status("vtysh -c 'show version'")
+ return None if not rc else proc_error(rc, o, e)
+
+
+@retry(retry_timeout=3, initial_wait=0.1)
+def check_kernel(r1, prefix, expected=True):
+ net = ipaddress.ip_network(prefix)
+ if net.version == 6:
+ kernel = r1.net.cmd_nostatus("ip -6 route show", warn=not expected)
+ else:
+ kernel = r1.net.cmd_nostatus("ip -4 route show", warn=not expected)
+
+ logger.debug("checking kernel routing table:\n%0.1920s", kernel)
+ route = f"{str(net)}(?: nhid [0-9]+)?.*proto (static|196)"
+ m = re.search(route, kernel)
+ if expected and not m:
+ return f"Failed to find \n'{route}'\n in \n'{kernel:.1920}'"
+ elif not expected and m:
+ return f"Failed found \n'{route}'\n in \n'{kernel:.1920}'"
+ return None
+
+
+def get_ip_networks(super_prefix, count):
+ count_log2 = math.log(count, 2)
+ if count_log2 != int(count_log2):
+ count_log2 = int(count_log2) + 1
+ else:
+ count_log2 = int(count_log2)
+ network = ipaddress.ip_network(super_prefix)
+ return tuple(network.subnets(count_log2))[0:count]
+
+
+def write_big_route_conf(super_prefix, count, confpath, prologue=""):
+ start = None
+ end = None
+
+ with open(confpath, "w+", encoding="ascii") as f:
+ if prologue:
+ f.write(prologue + "\n")
+ for net in get_ip_networks(super_prefix, count):
+ end = net
+ if not start:
+ start = net
+ f.write(f"ip route {net} lo\n")
+
+ return start, end
+
+
+def _test_staticd_late_start(tgen, router):
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # for x in ["r1"]:
+ # tgen.gears[x].net.cmd_nostatus(
+ # "vtysh -c 'debug mgmt client frontend' "
+ # "-c 'debug mgmt client backend' "
+ # "-c 'debug mgmt backend frontend datastore transaction'"
+ # )
+
+ step("Verifying startup route is not present w/o staticd running")
+ result = check_kernel(router, "12.0.0.0/24", expected=False)
+ assert result is not None
+
+ step("Configure another static route verify is not present w/o staticd running")
+ router.net.cmd_nostatus("vtysh -c 'config t' -c 'ip route 12.1.0.0/24 101.0.0.2'")
+ result = check_kernel(router, "12.0.0.0/24", expected=False)
+ assert result is not None
+ result = check_kernel(router, "12.1.0.0/24", expected=False)
+ assert result is not None
+
+ step("Starting staticd")
+ router.startDaemons(["staticd"])
+
+ step("Verifying both routes are present")
+ result = check_kernel(router, "12.0.0.0/24")
+ assert result is None
+ result = check_kernel(router, "12.1.0.0/24")
+ assert result is None
diff --git a/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py b/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py
index 2c4fb4e998..826d6e2941 100644
--- a/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py
+++ b/tests/topotests/multicast_mld_join_topo1/test_multicast_mld_local_join.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
# Copyright (c) 2023 by VMware, Inc. ("VMware")
@@ -20,52 +20,31 @@ Following tests are covered:
5. Verify static MLD groups after removing and adding MLD config
"""
-import os
import sys
import time
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
-from re import search as re_search
-from re import findall as findall
+import pytest
from lib.common_config import (
+ reset_config_on_routers,
start_topology,
- write_test_header,
- write_test_footer,
step,
- kill_router_daemons,
- start_router_daemons,
- reset_config_on_routers,
- do_countdown,
- apply_raw_config,
- socat_send_pim6_traffic,
+ write_test_footer,
+ write_test_header,
)
-
from lib.pim import (
- create_pim_config,
- verify_mroutes,
- verify_upstream_iif,
- verify_mld_groups,
- clear_pim6_mroute,
McastTesterHelper,
- verify_pim_neighbors,
create_mld_config,
- verify_mld_groups,
+ create_pim_config,
verify_local_mld_groups,
+ verify_mld_groups,
+ verify_mroutes,
+ verify_pim_neighbors,
verify_pim_rp_info,
+ verify_upstream_iif,
)
-from lib.topolog import logger
+from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
+from lib.topolog import logger
r1_r2_links = []
r1_r3_links = []
@@ -131,7 +110,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- json_file = "{}/multicast_mld_local_join.json".format(CWD)
+ json_file = "multicast_mld_local_join.json"
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
@@ -151,6 +130,9 @@ def setup_module(mod):
result = verify_pim_neighbors(tgen, topo)
assert result is True, " Verify PIM neighbor: Failed Error: {}".format(result)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -161,6 +143,8 @@ def teardown_module():
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -265,6 +249,8 @@ def test_mroute_with_mld_local_joins_p0(request):
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
@@ -330,9 +316,7 @@ def test_mroute_with_mld_local_joins_p0(request):
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
- intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -458,6 +442,8 @@ def test_remove_add_mld_local_joins_p1(request):
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
@@ -517,9 +503,7 @@ def test_remove_add_mld_local_joins_p1(request):
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
- intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -710,6 +694,8 @@ def test_remove_add_mld_config_with_local_joins_p1(request):
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable the PIM on all the interfaces of R1, R2, R3, R4")
step("configure BGP on R1, R2, R3, R4 and enable redistribute static/connected")
step("Enable the MLD on R11 interfac of R1 and configure local mld groups")
@@ -759,9 +745,7 @@ def test_remove_add_mld_config_with_local_joins_p1(request):
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Send traffic from R4 to all the groups ( ffaa::1 to ffaa::5)")
- intf_ip = topo["routers"]["i4"]["links"]["r4"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i4"]["links"]["r4"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i4", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i4", MLD_JOIN_RANGE_1, "r4")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
diff --git a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py
index 87b04b41be..aff623705c 100644
--- a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py
+++ b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm1.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -30,61 +30,40 @@ should get update accordingly
data traffic
"""
-import os
+import datetime
import sys
-import json
import time
-import datetime
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
- step,
+ get_frr_ipv6_linklocal,
+ required_linux_kernel_version,
reset_config_on_routers,
shutdown_bringup_interface,
- start_router,
- stop_router,
- create_static_routes,
- required_linux_kernel_version,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- get_frr_ipv6_linklocal,
- kill_socat,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
-from lib.bgp import create_router_bgp
from lib.pim import (
- create_pim_config,
+ McastTesterHelper,
+ clear_pim6_mroute,
create_mld_config,
+ create_pim_config,
+ verify_mld_config,
verify_mld_groups,
+ verify_mroute_summary,
verify_mroutes,
- clear_pim6_interface_traffic,
- verify_upstream_iif,
- clear_pim6_mroute,
verify_pim_interface_traffic,
- verify_pim_state,
- McastTesterHelper,
verify_pim_join,
- verify_mroute_summary,
verify_pim_nexthop,
+ verify_pim_state,
verify_sg_traffic,
- verify_mld_config,
+ verify_upstream_iif,
)
-
-from lib.topolog import logger
+from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
+from lib.topolog import logger
# Global variables
GROUP_RANGE = "ff00::/8"
@@ -141,8 +120,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
- testdir = os.path.dirname(os.path.realpath(__file__))
- json_file = "{}/multicast_pim6_sm_topo1.json".format(testdir)
+ json_file = "multicast_pim6_sm_topo1.json"
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
@@ -159,6 +137,9 @@ def setup_module(mod):
# Creating configuration from JSON
build_config_from_json(tgen, tgen.json_topo)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -169,8 +150,7 @@ def teardown_module():
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
+ app_helper.cleanup()
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -296,6 +276,8 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -334,9 +316,7 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
source = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
@@ -375,11 +355,7 @@ def test_multicast_data_traffic_static_RP_send_traffic_then_join_p0(request):
)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -532,11 +508,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", _MLD_JOIN_RANGE, intf, intf_ip
- )
+ result = app_helper.run_join("i1", _MLD_JOIN_RANGE, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("verify MLD joins received on r1")
@@ -546,9 +518,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", _MLD_JOIN_RANGE, intf)
+ result = app_helper.run_traffic("i2", _MLD_JOIN_RANGE, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -561,11 +531,7 @@ def test_verify_mroute_when_receiver_is_outside_frr_p0(request):
result = create_mld_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- i5_r5 = topo["routers"]["i5"]["links"]["r5"]["interface"]
- intf_ip = topo["routers"]["i5"]["links"]["r5"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i5", "UDP6-RECV", _MLD_JOIN_RANGE, i5_r5, intf_ip
- )
+ result = app_helper.run_join("i5", _MLD_JOIN_RANGE, "r5")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("FRR1 has 10 (*.G) and 10 (S,G) verify using 'show ipv6 mroute'")
@@ -682,6 +648,8 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -708,11 +676,7 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request):
step("Enable mld on FRR1 interface and send mld join ")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("verify mld groups received on R1")
@@ -722,9 +686,7 @@ def test_verify_mroute_when_frr_is_transit_router_p2(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("shut the direct link to R1 ")
@@ -841,6 +803,8 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -868,17 +832,11 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
step("Enable mld on FRR1 interface and send mld join ffaa::1-5")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure one MLD interface on FRR3 node and send MLD" " join (ffcc::1)")
@@ -888,11 +846,7 @@ def test_verify_mroute_when_RP_unreachable_p1(request):
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i8"]["links"]["r3"]["interface"]
- intf_ip = topo["routers"]["i8"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i8", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i8", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("verify MLD groups received ")
@@ -975,16 +929,14 @@ def test_modify_mld_query_timer_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i8"]["links"]["r3"]["interface"]
- intf_ip = topo["routers"]["i8"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i8", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i8", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Enable MLD on receiver interface")
@@ -1023,9 +975,7 @@ def test_modify_mld_query_timer_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -1158,17 +1108,15 @@ def test_modify_mld_max_query_response_timer_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
step("Enable mld on FRR1 interface and send MLD join")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
r1_i1 = topo["routers"]["r1"]["links"]["i1"]["interface"]
@@ -1214,9 +1162,7 @@ def test_modify_mld_max_query_response_timer_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
@@ -1431,6 +1377,8 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -1438,9 +1386,7 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request):
step("send multicast traffic for group range ffaa::1-5")
step("Send multicast traffic from FRR3 to ffaa::1-5 receivers")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for group (ffaa::1) on r5")
@@ -1464,11 +1410,7 @@ def test_verify_impact_on_multicast_traffic_when_RP_removed_p0(request):
step("Enable mld on FRR1 interface and send MLD join")
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
diff --git a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py
index 788a839918..767264a7c0 100644
--- a/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py
+++ b/tests/topotests/multicast_pim6_sm_topo1/test_multicast_pim6_sm2.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -21,61 +21,31 @@ PIM nbr and mroute from FRR node
different
"""
-import os
import sys
-import json
import time
-import datetime
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
- step,
+ required_linux_kernel_version,
reset_config_on_routers,
shutdown_bringup_interface,
- start_router,
- stop_router,
- create_static_routes,
- required_linux_kernel_version,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- get_frr_ipv6_linklocal,
- kill_socat,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
-from lib.bgp import create_router_bgp
from lib.pim import (
+ McastTesterHelper,
+ clear_pim6_mroute,
create_pim_config,
- create_mld_config,
- verify_mld_groups,
verify_mroutes,
- clear_pim6_interface_traffic,
- verify_upstream_iif,
- clear_pim6_mroute,
verify_pim_interface_traffic,
- verify_pim_state,
- McastTesterHelper,
- verify_pim_join,
- verify_mroute_summary,
- verify_pim_nexthop,
verify_sg_traffic,
- verify_mld_config,
+ verify_upstream_iif,
)
-
-from lib.topolog import logger
+from lib.topogen import Topogen, get_topogen
from lib.topojson import build_config_from_json
+from lib.topolog import logger
# Global variables
GROUP_RANGE = "ff00::/8"
@@ -114,6 +84,16 @@ ASSERT_MSG = "Testcase {} : Failed Error: {}"
pytestmark = [pytest.mark.pim6d]
+@pytest.fixture(scope="function")
+def app_helper():
+ # helper = McastTesterHelper(get_topogen())
+ # yield helper
+ # helper.cleanup()
+ # Even better use contextmanager functionality:
+ with McastTesterHelper(get_topogen()) as ah:
+ yield ah
+
+
def setup_module(mod):
"""
Sets up the pytest environment
@@ -132,8 +112,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
- testdir = os.path.dirname(os.path.realpath(__file__))
- json_file = "{}/multicast_pim6_sm_topo1.json".format(testdir)
+ json_file = "multicast_pim6_sm_topo1.json"
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
@@ -160,9 +139,6 @@ def teardown_module():
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
-
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -225,7 +201,7 @@ def verify_state_incremented(state_before, state_after):
#####################################################
-def test_clear_mroute_and_verify_multicast_data_p0(request):
+def test_clear_mroute_and_verify_multicast_data_p0(request, app_helper):
"""
Verify (*,G) and (S,G) entry populated again after clear the
PIM nbr and mroute from FRR node
@@ -237,6 +213,8 @@ def test_clear_mroute_and_verify_multicast_data_p0(request):
# Creating configuration from JSON
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
@@ -266,18 +244,12 @@ def test_clear_mroute_and_verify_multicast_data_p0(request):
)
step("send mld join (ffaa::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", MLD_JOIN_RANGE_1, intf, intf_ip
- )
+ result = app_helper.run_join("i1", MLD_JOIN_RANGE_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to all the receivers" "ffaa::1-5")
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", MLD_JOIN_RANGE_1, intf)
+ result = app_helper.run_traffic("i2", MLD_JOIN_RANGE_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Clear the mroute on r1, wait for 5 sec")
@@ -457,7 +429,9 @@ def test_clear_mroute_and_verify_multicast_data_p0(request):
write_test_footer(tc_name)
-def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
+def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(
+ request, app_helper
+):
"""
Verify SPT switchover working when RPT and SPT path is
different
@@ -498,11 +472,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("send mld join (ffbb::1-5, ffcc::1-5) to R1")
- intf = topo["routers"]["i1"]["links"]["r1"]["interface"]
- intf_ip = topo["routers"]["i1"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "i1", "UDP6-RECV", _MLD_JOIN_RANGE, intf, intf_ip
- )
+ result = app_helper.run_join("i1", _MLD_JOIN_RANGE, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("registerRx and registerStopTx value before traffic sent")
@@ -518,9 +488,7 @@ def test_verify_SPT_switchover_when_RPT_and_SPT_path_is_different_p0(request):
step(
"Send multicast traffic from FRR3 to all the receivers" "ffbb::1-5 , ffcc::1-5"
)
- intf_ip = topo["routers"]["i2"]["links"]["r3"]["ipv6"].split("/")[0]
- intf = topo["routers"]["i2"]["links"]["r3"]["interface"]
- result = socat_send_pim6_traffic(tgen, "i2", "UDP6-SEND", _MLD_JOIN_RANGE, intf)
+ result = app_helper.run_traffic("i2", _MLD_JOIN_RANGE, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
index 977cd477c8..23326337d6 100755
--- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp1.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -41,57 +41,36 @@ Test steps
8. Verify PIM6 join send towards the higher preferred RP
9. Verify PIM6 prune send towards the lower preferred RP
"""
-
-import os
import sys
-import json
import time
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
+ check_router_status,
reset_config_on_routers,
- step,
shutdown_bringup_interface,
- kill_router_daemons,
- start_router_daemons,
- create_static_routes,
- check_router_status,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- kill_socat,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
from lib.pim import (
+ McastTesterHelper,
+ clear_pim6_interface_traffic,
create_pim_config,
- verify_upstream_iif,
+ get_pim6_interface_traffic,
verify_join_state_and_timer,
+ verify_mld_groups,
verify_mroutes,
- verify_pim_neighbors,
+ verify_pim6_neighbors,
verify_pim_interface_traffic,
verify_pim_rp_info,
verify_pim_state,
- clear_pim6_interface_traffic,
- clear_pim6_mroute,
- verify_pim6_neighbors,
- get_pim6_interface_traffic,
- clear_pim6_interfaces,
- verify_mld_groups,
+ verify_upstream_iif,
)
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json, build_topo_from_json
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
# Global variables
GROUP_RANGE_1 = "ff08::/64"
@@ -141,7 +120,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- json_file = "{}/multicast_pim6_static_rp.json".format(CWD)
+ json_file = "multicast_pim6_static_rp.json"
tgen = Topogen(json_file, mod.__name__)
global TOPO
TOPO = tgen.json_topo
@@ -163,6 +142,9 @@ def setup_module(mod):
result = verify_pim6_neighbors(tgen, TOPO)
assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -172,8 +154,7 @@ def teardown_module():
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
+ app_helper.cleanup()
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -260,6 +241,8 @@ def test_pim6_add_delete_static_RP_p0(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Shut link b/w R1 and R3 and R1 and R4 as per testcase topology")
intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
@@ -313,11 +296,7 @@ def test_pim6_add_delete_static_RP_p0(request):
)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -457,6 +436,8 @@ def test_pim6_SPT_RPT_path_same_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Shut link b/w R1->R3, R1->R4 and R3->R1, R3->R4 as per " "testcase topology")
intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
@@ -494,11 +475,7 @@ def test_pim6_SPT_RPT_path_same_p1(request):
step(
"Enable MLD on r1 interface and send MLD join {} to R1".format(GROUP_ADDRESS_1)
)
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -508,9 +485,8 @@ def test_pim6_SPT_RPT_path_same_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("Send multicast traffic from R5")
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r2: Verify RP info")
@@ -630,6 +606,8 @@ def test_pim6_RP_configured_as_LHR_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
@@ -665,11 +643,7 @@ def test_pim6_RP_configured_as_LHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -679,9 +653,8 @@ def test_pim6_RP_configured_as_LHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -762,6 +735,8 @@ def test_pim6_RP_configured_as_FHR_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r3: Configure r3(FHR) as RP")
@@ -792,11 +767,7 @@ def test_pim6_RP_configured_as_FHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -806,9 +777,8 @@ def test_pim6_RP_configured_as_FHR_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -890,6 +860,8 @@ def test_pim6_SPT_RPT_path_different_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r2: Configure r2 as RP")
@@ -921,11 +893,7 @@ def test_pim6_SPT_RPT_path_different_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("send mld join {} to R1".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -935,9 +903,8 @@ def test_pim6_SPT_RPT_path_different_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(GROUP_ADDRESS_1))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", GROUP_ADDRESS_1, intf)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS_1, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -1060,6 +1027,8 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM66 on all the interfaces of r1, r2, r3 and r4 routers")
step(
@@ -1109,11 +1078,7 @@ def test_pim6_send_join_on_higher_preffered_rp_p1(request):
)
step("r0: send mld join {} to R1".format(GROUP_ADDRESS_3))
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_3, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_3, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
diff --git a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
index a61164baa2..39497e91ed 100755
--- a/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
+++ b/tests/topotests/multicast_pim6_static_rp_topo1/test_multicast_pim6_static_rp2.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: ISC
#
@@ -33,55 +33,31 @@ Test steps
import os
import sys
-import json
import time
-import pytest
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-sys.path.append(os.path.join(CWD, "../lib/"))
-
-# Required to instantiate the topology builder class.
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+import pytest
from lib.common_config import (
- start_topology,
- write_test_header,
- write_test_footer,
+ create_debug_log_config,
reset_config_on_routers,
- step,
shutdown_bringup_interface,
- kill_router_daemons,
- start_router_daemons,
- create_static_routes,
- check_router_status,
- socat_send_mld_join,
- socat_send_pim6_traffic,
- kill_socat,
- create_debug_log_config,
+ start_topology,
+ step,
+ write_test_footer,
+ write_test_header,
)
from lib.pim import (
+ McastTesterHelper,
create_pim_config,
- verify_upstream_iif,
verify_join_state_and_timer,
+ verify_mld_groups,
verify_mroutes,
- verify_pim_neighbors,
- verify_pim_interface_traffic,
- verify_pim_rp_info,
- verify_pim_state,
- clear_pim6_interface_traffic,
- clear_pim6_mroute,
verify_pim6_neighbors,
- get_pim6_interface_traffic,
- clear_pim6_interfaces,
- verify_mld_groups,
+ verify_pim_rp_info,
+ verify_upstream_iif,
)
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json, build_topo_from_json
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
# Global variables
GROUP_RANGE_1 = "ff08::/64"
@@ -145,7 +121,7 @@ def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- json_file = "{}/multicast_pim6_static_rp.json".format(CWD)
+ json_file = "multicast_pim6_static_rp.json"
tgen = Topogen(json_file, mod.__name__)
global TOPO
TOPO = tgen.json_topo
@@ -167,6 +143,9 @@ def setup_module(mod):
result = verify_pim6_neighbors(tgen, TOPO)
assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
@@ -176,8 +155,7 @@ def teardown_module():
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
- # Clean up socat
- kill_socat(tgen)
+ app_helper.cleanup()
# Stop toplogy and Remove tmp files
tgen.stop_topology()
@@ -265,6 +243,8 @@ def test_pim6_multiple_groups_same_RP_address_p2(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
input_dict = {
"r1": {"debug": {"log_file": "r1_debug.log", "enable": ["pim6d"]}},
"r2": {"debug": {"log_file": "r2_debug.log", "enable": ["pim6d"]}},
@@ -305,10 +285,7 @@ def test_pim6_multiple_groups_same_RP_address_p2(request):
group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
step("r0: Send MLD join for 10 groups")
intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip
- )
+ result = app_helper.run_join("r0", group_address_list, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -318,9 +295,8 @@ def test_pim6_multiple_groups_same_RP_address_p2(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(group_address_list))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf)
+ result = app_helper.run_traffic("r5", group_address_list, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -593,6 +569,8 @@ def test_pim6_multiple_groups_different_RP_address_p2(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r2: Configure r2 as RP")
@@ -646,11 +624,7 @@ def test_pim6_multiple_groups_different_RP_address_p2(request):
group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
step("r0: Send MLD join for 10 groups")
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", group_address_list, intf, intf_ip
- )
+ result = app_helper.run_join("r0", group_address_list, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
@@ -660,9 +634,8 @@ def test_pim6_multiple_groups_different_RP_address_p2(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r5: Send multicast traffic for group {}".format(group_address_list))
- intf = TOPO["routers"]["r5"]["links"]["r3"]["interface"]
SOURCE_ADDRESS = TOPO["routers"]["r5"]["links"]["r3"]["ipv6"].split("/")[0]
- result = socat_send_pim6_traffic(tgen, "r5", "UDP6-SEND", group_address_list, intf)
+ result = app_helper.run_traffic("r5", group_address_list, "r3")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
@@ -1189,6 +1162,8 @@ def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request):
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
+ app_helper.stop_all_hosts()
+
step("Enable MLD on r1 interface")
step("Enable the PIM6 on all the interfaces of r1, r2, r3 and r4 routers")
step("r2: Configure r2 as RP")
@@ -1220,11 +1195,7 @@ def test_pim6_delete_RP_shut_noshut_upstream_interface_p1(request):
assert result is True, ASSERT_MSG.format(tc_name, result)
step("r0: Send MLD join")
- intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
- intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
- result = socat_send_mld_join(
- tgen, "r0", "UDP6-RECV", GROUP_ADDRESS_1, intf, intf_ip
- )
+ result = app_helper.run_join("r0", GROUP_ADDRESS_1, "r1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("r1: Verify MLD groups")
diff --git a/tests/topotests/ospfapi/test_ospf_clientapi.py b/tests/topotests/ospfapi/test_ospf_clientapi.py
index 2e8bea2651..39ebbcfb62 100644
--- a/tests/topotests/ospfapi/test_ospf_clientapi.py
+++ b/tests/topotests/ospfapi/test_ospf_clientapi.py
@@ -19,16 +19,14 @@ import time
from datetime import datetime, timedelta
import pytest
-
from lib.common_config import (
+ kill_router_daemons,
retry,
run_frr_cmd,
- step,
- kill_router_daemons,
- start_router_daemons,
shutdown_bringup_interface,
+ start_router_daemons,
+ step,
)
-
from lib.micronet import Timeout, comm_error
from lib.topogen import Topogen, TopoRouter
from lib.topotest import interface_set_status, json_cmp
@@ -967,12 +965,9 @@ def _test_opaque_add_restart_add(tgen, apibin):
[
apibin,
"-v",
- "add,10,1.2.3.4,231,1",
- "add,10,1.2.3.4,231,1,feedaceebeef",
- "wait, 5",
- "add,10,1.2.3.4,231,1,feedaceedeadbeef",
+ "add,10,1.2.3.4,231,1", # seq = 80000001
"wait, 5",
- "add,10,1.2.3.4,231,1,feedaceebaddbeef",
+ "add,10,1.2.3.4,231,1,feedaceebeef", # seq = 80000002
"wait, 5",
]
)
@@ -983,15 +978,15 @@ def _test_opaque_add_restart_add(tgen, apibin):
{
"lsId": "231.0.0.1",
"advertisedRouter": "1.0.0.0",
- "sequenceNumber": "80000004",
- "checksum": "3128",
+ "sequenceNumber": "80000002",
+ "checksum": "cd26",
},
],
"areaLocalOpaqueLsaCount": 1,
},
},
}
- step("Check for add LSAs")
+ step("Wait for the Opaque LSA to be distributed")
json_cmd = "show ip ospf da json"
assert verify_ospf_database(tgen, r1, add_input_dict, json_cmd) is None
assert verify_ospf_database(tgen, r2, add_input_dict, json_cmd) is None
@@ -1006,6 +1001,9 @@ def _test_opaque_add_restart_add(tgen, apibin):
p.wait()
p = None
+ # Verify the OLD LSA is still there unchanged on R2
+ assert verify_ospf_database(tgen, r2, add_input_dict, json_cmd) is None
+
step("Kill ospfd on R1")
kill_router_daemons(tgen, "r1", ["ospfd"])
time.sleep(2)
@@ -1013,30 +1011,31 @@ def _test_opaque_add_restart_add(tgen, apibin):
step("Bring ospfd on R1 back up")
start_router_daemons(tgen, "r1", ["ospfd"])
+ # This will start off with sequence num 80000001
+ # But should advance to 80000003 when we reestablish with r2
p = r1.popen(
[
apibin,
"-v",
- "add,10,1.2.3.4,231,1",
- "add,10,1.2.3.4,231,1,feedaceecafebeef",
+ "add,10,1.2.3.4,231,1,feedaceecafebeef", # seq=80000001
"wait, 5",
]
)
- step("Bring the interface on r1 back up for connection to r2")
- shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+ # verify the old value on r2 doesn't change yet
+ time.sleep(2)
+ assert verify_ospf_database(tgen, r2, add_input_dict, json_cmd) is None
- step("Verify area opaque LSA refresh")
json_cmd = "show ip ospf da opaque-area json"
- add_detail_input_dict = {
+ new_add_input_dict = {
"areaLocalOpaqueLsa": {
"areas": {
"1.2.3.4": [
{
"linkStateId": "231.0.0.1",
"advertisingRouter": "1.0.0.0",
- "lsaSeqNumber": "80000005",
- "checksum": "a87e",
+ "lsaSeqNumber": "80000001",
+ "checksum": "b07a",
"length": 28,
"opaqueDataLength": 8,
},
@@ -1044,8 +1043,22 @@ def _test_opaque_add_restart_add(tgen, apibin):
},
},
}
- assert verify_ospf_database(tgen, r1, add_detail_input_dict, json_cmd) is None
- assert verify_ospf_database(tgen, r2, add_detail_input_dict, json_cmd) is None
+ # verify new value with initial seq number on r1
+ assert verify_ospf_database(tgen, r1, new_add_input_dict, json_cmd) is None
+
+ step("Bring the interface on r1 back up for connection to r2")
+ shutdown_bringup_interface(tgen, "r1", "r1-eth0", True)
+
+ step("Verify area opaque LSA refresh")
+
+ # Update the expected value to sequence number rev and new checksum
+ update_dict = new_add_input_dict["areaLocalOpaqueLsa"]["areas"]["1.2.3.4"][0]
+ update_dict["lsaSeqNumber"] = "80000003"
+ update_dict["checksum"] = "cb27"
+
+ # should settle on the same value now.
+ assert verify_ospf_database(tgen, r1, new_add_input_dict, json_cmd) is None
+ assert verify_ospf_database(tgen, r2, new_add_input_dict, json_cmd) is None
step("Shutdown the interface on r1 to isolate it for r2")
shutdown_bringup_interface(tgen, "r1", "r1-eth0", False)
@@ -1077,8 +1090,8 @@ def _test_opaque_add_restart_add(tgen, apibin):
"lsaAge": 3600,
"linkStateId": "231.0.0.1",
"advertisingRouter": "1.0.0.0",
- "lsaSeqNumber": "80000005",
- "checksum": "a87e",
+ "lsaSeqNumber": "80000003",
+ "checksum": "cb27",
"length": 28,
"opaqueDataLength": 8,
},
diff --git a/tools/frr-reload.py b/tools/frr-reload.py
index 0e0aec9839..c2be9f78eb 100755
--- a/tools/frr-reload.py
+++ b/tools/frr-reload.py
@@ -1480,12 +1480,17 @@ def ignore_delete_re_add_lines(lines_to_add, lines_to_del):
lines_to_add_to_del.append((tmp_ctx_keys, line))
for (ctx_keys, line) in lines_to_del_to_del:
- if line is not None:
+ try:
lines_to_del.remove((ctx_keys, line))
+ except ValueError:
+ pass
for (ctx_keys, line) in lines_to_add_to_del:
- if line is not None:
+ try:
lines_to_add.remove((ctx_keys, line))
+ except ValueError:
+ pass
+
return (lines_to_add, lines_to_del)
diff --git a/vtysh/vtysh_user.c b/vtysh/vtysh_user.c
index a0667acc7e..111bda868d 100644
--- a/vtysh/vtysh_user.c
+++ b/vtysh/vtysh_user.c
@@ -42,7 +42,7 @@ static struct pam_conv conv = {PAM_CONV_FUNC, NULL};
static int vtysh_pam(const char *user)
{
- int ret;
+ int ret, second_ret;
pam_handle_t *pamh = NULL;
/* Start PAM. */
@@ -56,15 +56,18 @@ static int vtysh_pam(const char *user)
fprintf(stderr, "vtysh_pam: Failure to initialize pam: %s(%d)",
pam_strerror(pamh, ret), ret);
- if (pam_acct_mgmt(pamh, 0) != PAM_SUCCESS)
+ second_ret = pam_acct_mgmt(pamh, 0);
+ if (second_ret != PAM_SUCCESS)
fprintf(stderr, "%s: Failed in account validation: %s(%d)",
- __func__, pam_strerror(pamh, ret), ret);
+ __func__, pam_strerror(pamh, second_ret), second_ret);
/* close Linux-PAM */
- if (pam_end(pamh, ret) != PAM_SUCCESS) {
+ second_ret = pam_end(pamh, ret);
+ if (second_ret != PAM_SUCCESS) {
pamh = NULL;
- fprintf(stderr, "vtysh_pam: failed to release authenticator: %s(%d)\n",
- pam_strerror(pamh, ret), ret);
+ fprintf(stderr,
+ "vtysh_pam: failed to release authenticator: %s(%d)\n",
+ pam_strerror(pamh, second_ret), second_ret);
exit(1);
}
diff --git a/zebra/interface.c b/zebra/interface.c
index 231ddc51db..ccf1a0a204 100644
--- a/zebra/interface.c
+++ b/zebra/interface.c
@@ -267,6 +267,9 @@ struct interface *if_link_per_ns(struct zebra_ns *ns, struct interface *ifp)
/* Delete a VRF. This is called in vrf_terminate(). */
void if_unlink_per_ns(struct interface *ifp)
{
+ if (!ifp->node)
+ return;
+
ifp->node->info = NULL;
route_unlock_node(ifp->node);
ifp->node = NULL;
diff --git a/zebra/rib.h b/zebra/rib.h
index a56bb05d68..65cc1ffab9 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -465,6 +465,13 @@ extern uint8_t route_distance(int type);
extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
bool rt_delete);
+/*
+ * rib_find_rn_from_ctx
+ *
+ * Returns a lock increased route_node for the appropriate
+ * table and prefix specified by the context. Developer
+ * should unlock the node when done.
+ */
extern struct route_node *
rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx);
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index 4c6c336d41..4bc9f4acfa 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -801,11 +801,17 @@ int zsend_route_notify_owner(const struct route_node *rn,
int zsend_route_notify_owner_ctx(const struct zebra_dplane_ctx *ctx,
enum zapi_route_notify_owner note)
{
- return (route_notify_internal(
- rib_find_rn_from_ctx(ctx), dplane_ctx_get_type(ctx),
- dplane_ctx_get_instance(ctx), dplane_ctx_get_vrf(ctx),
- dplane_ctx_get_table(ctx), note, dplane_ctx_get_afi(ctx),
- dplane_ctx_get_safi(ctx)));
+ int result;
+ struct route_node *rn = rib_find_rn_from_ctx(ctx);
+
+ result = route_notify_internal(
+ rn, dplane_ctx_get_type(ctx), dplane_ctx_get_instance(ctx),
+ dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx), note,
+ dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx));
+
+ route_unlock_node(rn);
+
+ return result;
}
static void zread_route_notify_request(ZAPI_HANDLER_ARGS)
diff --git a/zebra/zebra_evpn_mh.c b/zebra/zebra_evpn_mh.c
index 49120c2877..a5092c629a 100644
--- a/zebra/zebra_evpn_mh.c
+++ b/zebra/zebra_evpn_mh.c
@@ -535,19 +535,26 @@ static bool zebra_evpn_acc_vl_cmp(const void *p1, const void *p2)
}
/* Lookup VLAN based broadcast domain */
-struct zebra_evpn_access_bd *zebra_evpn_acc_vl_find(vlanid_t vid,
- struct interface *br_if)
+struct zebra_evpn_access_bd *
+zebra_evpn_acc_vl_find_index(vlanid_t vid, ifindex_t bridge_ifindex)
{
struct zebra_evpn_access_bd *acc_bd;
struct zebra_evpn_access_bd tmp;
tmp.vid = vid;
- tmp.bridge_ifindex = br_if->ifindex;
+ tmp.bridge_ifindex = bridge_ifindex;
acc_bd = hash_lookup(zmh_info->evpn_vlan_table, &tmp);
return acc_bd;
}
+/* Lookup VLAN based broadcast domain */
+struct zebra_evpn_access_bd *zebra_evpn_acc_vl_find(vlanid_t vid,
+ struct interface *br_if)
+{
+ return zebra_evpn_acc_vl_find_index(vid, br_if->ifindex);
+}
+
/* A new broadcast domain can be created when a VLAN member or VLAN<=>VxLAN_IF
* mapping is added.
*/
@@ -842,9 +849,9 @@ void zebra_evpn_access_bd_bridge_cleanup(vlanid_t vid, struct interface *br_if,
void zebra_evpn_vxl_evpn_set(struct zebra_if *zif, struct zebra_evpn *zevpn,
bool set)
{
- struct interface *br_if;
struct zebra_vxlan_vni *vni;
struct zebra_evpn_access_bd *acc_bd;
+ ifindex_t br_ifindex;
if (!zif)
return;
@@ -854,11 +861,12 @@ void zebra_evpn_vxl_evpn_set(struct zebra_if *zif, struct zebra_evpn *zevpn,
if (!vni)
return;
- br_if = zif->brslave_info.br_if;
- if (!br_if)
+ /* Use the index as the pointer can be stale (deleted) */
+ br_ifindex = zif->brslave_info.bridge_ifindex;
+ if (!zif->brslave_info.br_if || br_ifindex == IFINDEX_INTERNAL)
return;
- acc_bd = zebra_evpn_acc_vl_find(vni->access_vlan, br_if);
+ acc_bd = zebra_evpn_acc_vl_find_index(vni->access_vlan, br_ifindex);
if (!acc_bd)
return;
diff --git a/zebra/zebra_evpn_mh.h b/zebra/zebra_evpn_mh.h
index 6dda30a57f..59a41d0606 100644
--- a/zebra/zebra_evpn_mh.h
+++ b/zebra/zebra_evpn_mh.h
@@ -344,6 +344,8 @@ extern void zebra_evpn_if_es_print(struct vty *vty, json_object *json,
struct zebra_if *zif);
extern struct zebra_evpn_access_bd *
zebra_evpn_acc_vl_find(vlanid_t vid, struct interface *br_if);
+struct zebra_evpn_access_bd *
+zebra_evpn_acc_vl_find_index(vlanid_t vid, ifindex_t bridge_ifindex);
extern void zebra_evpn_acc_vl_show_vid(struct vty *vty, bool uj, vlanid_t vid,
struct interface *br_if);
extern void zebra_evpn_es_cleanup(void);
diff --git a/zebra/zebra_mlag.c b/zebra/zebra_mlag.c
index 6713dbc967..7715eab0a8 100644
--- a/zebra/zebra_mlag.c
+++ b/zebra/zebra_mlag.c
@@ -338,8 +338,6 @@ static void zebra_mlag_post_data_from_main_thread(struct event *thread)
}
}
- stream_free(s);
- return;
stream_failure:
stream_free(s);
if (zebra_s)
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index 36290f99e0..babd93ab20 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -5274,7 +5274,9 @@ int zebra_vxlan_vrf_delete(struct zebra_vrf *zvrf)
vni = zl3vni->vni;
zl3vni_del(zl3vni);
- zebra_vxlan_handle_vni_transition(zvrf, vni, 0);
+
+ if (!zrouter.in_shutdown)
+ zebra_vxlan_handle_vni_transition(zvrf, vni, 0);
return 0;
}
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 6abd49310c..d2367007cf 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -507,8 +507,6 @@ static void zserv_process_messages(struct event *thread)
stream_fifo_push(cache, msg);
}
- msg = NULL;
-
/* Need to reschedule processing work if there are still
* packets in the fifo.
*/