-rw-r--r--  bgpd/bgp_clist.c                                    |  14
-rw-r--r--  bgpd/bgp_fsm.c                                      |   6
-rw-r--r--  bgpd/bgp_mplsvpn.c                                  |  13
-rw-r--r--  bgpd/bgp_nht.c                                      |   6
-rw-r--r--  bgpd/bgp_vty.c                                      |   4
-rw-r--r--  doc/developer/workflow.rst                          |  18
-rw-r--r--  eigrpd/eigrp_update.c                               |   3
-rw-r--r--  ldpd/l2vpn.c                                        |  72
-rw-r--r--  ldpd/ldp_zebra.c                                    |  15
-rw-r--r--  ldpd/socket.c                                       |  42
-rw-r--r--  lib/command.c                                       |   8
-rw-r--r--  lib/mgmt_be_client.c                                | 284
-rw-r--r--  lib/mgmt_be_client.h                                | 145
-rw-r--r--  lib/mgmt_fe_client.c                                | 466
-rw-r--r--  lib/mgmt_fe_client.h                                | 179
-rw-r--r--  lib/vty.c                                           | 115
-rw-r--r--  mgmtd/mgmt_txn.c                                    |  11
-rw-r--r--  ospf6d/ospf6_interface.c                            |   1
-rw-r--r--  staticd/static_main.c                               |  56
-rw-r--r--  tests/topotests/lib/common_config.py                |   3
-rw-r--r--  tests/topotests/mgmt_startup/test_bigconf.py        |   4
-rw-r--r--  tests/topotests/mgmt_startup/test_late_bigconf.py   |   6
-rw-r--r--  zebra/zebra_mlag.c                                  |   2
-rw-r--r--  zebra/zserv.c                                       |   2
24 files changed, 532 insertions(+), 943 deletions(-)
diff --git a/bgpd/bgp_clist.c b/bgpd/bgp_clist.c
index 1d2ba3bf58..ac5cdd6acb 100644
--- a/bgpd/bgp_clist.c
+++ b/bgpd/bgp_clist.c
@@ -899,15 +899,13 @@ int community_list_set(struct community_list_handler *ch, const char *name,
}
}
- if (str) {
- if (style == COMMUNITY_LIST_STANDARD)
- com = community_str2com(str);
- else
- regex = bgp_regcomp(str);
+ if (style == COMMUNITY_LIST_STANDARD)
+ com = community_str2com(str);
+ else
+ regex = bgp_regcomp(str);
- if (!com && !regex)
- return COMMUNITY_LIST_ERR_MALFORMED_VAL;
- }
+ if (!com && !regex)
+ return COMMUNITY_LIST_ERR_MALFORMED_VAL;
entry = community_entry_new();
entry->direct = direct;
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index a289d3d67a..ad6906d092 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -2656,12 +2656,12 @@ int bgp_event_update(struct peer *peer, enum bgp_fsm_events event)
ret != BGP_FSM_FAILURE_AND_DELETE) {
flog_err(
EC_BGP_FSM,
- "%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d",
+ "%s [FSM] Failure handling event %s in state %s, prior events %s, %s, fd %d, last reset: %s",
peer->host, bgp_event_str[peer->cur_event],
lookup_msg(bgp_status_msg, peer->status, NULL),
bgp_event_str[peer->last_event],
- bgp_event_str[peer->last_major_event],
- peer->fd);
+ bgp_event_str[peer->last_major_event], peer->fd,
+ peer_down_str[peer->last_reset]);
bgp_stop(peer);
bgp_fsm_change_status(peer, Idle);
bgp_timer_set(peer);
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index ecc84533b0..dc9bd3cff5 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1467,13 +1467,12 @@ static mpls_label_t _vpn_leak_from_vrf_get_per_nexthop_label(
/* Unlink from any existing nexthop cache. Free the entry if unused.
*/
bgp_mplsvpn_path_nh_label_unlink(pi);
- if (blnc) {
- /* updates NHT pi list reference */
- LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
- pi->label_nexthop_cache = blnc;
- pi->label_nexthop_cache->path_count++;
- blnc->last_update = monotime(NULL);
- }
+
+ /* updates NHT pi list reference */
+ LIST_INSERT_HEAD(&(blnc->paths), pi, label_nh_thread);
+ pi->label_nexthop_cache = blnc;
+ pi->label_nexthop_cache->path_count++;
+ blnc->last_update = monotime(NULL);
/* then add or update the selected nexthop */
if (!blnc->nh)
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index bda163d7a5..d7b1429881 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -547,7 +547,7 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
char bnc_buf[BNC_FLAG_DUMP_SIZE];
zlog_debug(
- "%s(%u): Rcvd NH update %pFX(%u)%u) - metric %d/%d #nhops %d/%d flags %s",
+ "%s(%u): Rcvd NH update %pFX(%u)(%u) - metric %d/%d #nhops %d/%d flags %s",
bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix,
bnc->ifindex, bnc->srte_color, nhr->metric, bnc->metric,
nhr->nexthop_num, bnc->nexthop_num,
@@ -849,7 +849,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
if (!bnc_nhc) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "parse nexthop update(%pFX(%u)(%s)): bnc info not found for nexthop cache",
+ "parse nexthop update %pFX(%u)(%s): bnc info not found for nexthop cache",
&nhr.prefix, nhr.srte_color, bgp->name_pretty);
} else
bgp_process_nexthop_update(bnc_nhc, &nhr, false);
@@ -860,7 +860,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
if (!bnc_import) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
- "parse nexthop update(%pFX(%u)(%s)): bnc info not found for import check",
+ "parse nexthop update %pFX(%u)(%s): bnc info not found for import check",
&nhr.prefix, nhr.srte_color, bgp->name_pretty);
} else
bgp_process_nexthop_update(bnc_import, &nhr, true);
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 7ef9db9f0d..04bdba1345 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -9199,6 +9199,8 @@ DEFPY(af_label_vpn_export_allocation_mode,
bool old_per_nexthop, new_per_nexthop;
afi = vpn_policy_getafi(vty, bgp, false);
+ if (afi == AFI_MAX)
+ return CMD_WARNING_CONFIG_FAILED;
old_per_nexthop = !!CHECK_FLAG(bgp->vpn_policy[afi].flags,
BGP_VPN_POLICY_TOVPN_LABEL_PER_NEXTHOP);
@@ -20663,6 +20665,7 @@ DEFUN (community_list_standard,
argv_find(argv, argc, "AA:NN", &idx);
char *str = argv_concat(argv, argc, idx);
+ assert(str);
int ret = community_list_set(bgp_clist, cl_name_or_number, str, seq,
direct, style);
@@ -20775,6 +20778,7 @@ DEFUN (community_list_expanded_all,
argv_find(argv, argc, "AA:NN", &idx);
char *str = argv_concat(argv, argc, idx);
+ assert(str);
int ret = community_list_set(bgp_clist, cl_name_or_number, str, seq,
direct, style);
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 28cf5d0ab1..65befaccba 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -166,15 +166,15 @@ as early as possible, i.e. the first 2-week window.
For reference, the expected release schedule according to the above is:
-+---------+------------+------------+------------+------------+------------+
-| Release | 2023-03-07 | 2023-07-04 | 2023-10-31 | 2024-02-27 | 2024-06-25 |
-+---------+------------+------------+------------+------------+------------+
-| RC | 2023-02-21 | 2023-06-20 | 2023-10-17 | 2024-02-13 | 2024-06-11 |
-+---------+------------+------------+------------+------------+------------+
-| dev/X.Y | 2023-02-07 | 2023-06-06 | 2023-10-03 | 2024-01-30 | 2024-05-28 |
-+---------+------------+------------+------------+------------+------------+
-| freeze | 2023-01-24 | 2023-05-23 | 2023-09-19 | 2024-01-16 | 2024-05-14 |
-+---------+------------+------------+------------+------------+------------+
++---------+------------+------------+------------+
+| Release | 2023-07-04 | 2023-10-31 | 2024-02-27 |
++---------+------------+------------+------------+
+| RC | 2023-06-20 | 2023-10-17 | 2024-02-13 |
++---------+------------+------------+------------+
+| dev/X.Y | 2023-06-06 | 2023-10-03 | 2024-01-30 |
++---------+------------+------------+------------+
+| freeze | 2023-05-23 | 2023-09-19 | 2024-01-16 |
++---------+------------+------------+------------+
Here is the hint on how to get the dates easily:
diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c
index 2237a611e8..a056267bf7 100644
--- a/eigrpd/eigrp_update.c
+++ b/eigrpd/eigrp_update.c
@@ -842,9 +842,6 @@ static void eigrp_update_send_GR_part(struct eigrp_neighbor *nbr)
eigrp_fsm_event(&fsm_msg);
}
- /* NULL the pointer */
- dest_addr = NULL;
-
/* delete processed prefix from list */
listnode_delete(prefixes, pe);
diff --git a/ldpd/l2vpn.c b/ldpd/l2vpn.c
index 4664b1f894..ce038acdcb 100644
--- a/ldpd/l2vpn.c
+++ b/ldpd/l2vpn.c
@@ -161,7 +161,7 @@ l2vpn_if_update(struct l2vpn_if *lif)
fec.type = MAP_TYPE_PWID;
fec.fec.pwid.type = l2vpn->pw_type;
fec.fec.pwid.group_id = 0;
- fec.flags |= F_MAP_PW_ID;
+ SET_FLAG(fec.flags, F_MAP_PW_ID);
fec.fec.pwid.pwid = pw->pwid;
send_mac_withdrawal(nbr, &fec, lif->mac);
@@ -274,17 +274,17 @@ l2vpn_pw_reset(struct l2vpn_pw *pw)
pw->local_status = PW_FORWARDING;
pw->remote_status = PW_NOT_FORWARDING;
- if (pw->flags & F_PW_CWORD_CONF)
- pw->flags |= F_PW_CWORD;
+ if (CHECK_FLAG(pw->flags, F_PW_CWORD_CONF))
+ SET_FLAG(pw->flags, F_PW_CWORD);
else
- pw->flags &= ~F_PW_CWORD;
+ UNSET_FLAG(pw->flags, F_PW_CWORD);
- if (pw->flags & F_PW_STATUSTLV_CONF)
- pw->flags |= F_PW_STATUSTLV;
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV_CONF))
+ SET_FLAG(pw->flags, F_PW_STATUSTLV);
else
- pw->flags &= ~F_PW_STATUSTLV;
+ UNSET_FLAG(pw->flags, F_PW_STATUSTLV);
- if (pw->flags & F_PW_STATUSTLV_CONF) {
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV_CONF)) {
struct fec_node *fn;
struct fec fec;
l2vpn_pw_fec(pw, &fec);
@@ -300,8 +300,7 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
{
/* check for a remote label */
if (fnh->remote_label == NO_LABEL) {
- log_warnx("%s: pseudowire %s: no remote label", __func__,
- pw->ifname);
+ log_warnx("%s: pseudowire %s: no remote label", __func__, pw->ifname);
pw->reason = F_PW_NO_REMOTE_LABEL;
return (0);
}
@@ -315,10 +314,9 @@ l2vpn_pw_ok(struct l2vpn_pw *pw, struct fec_nh *fnh)
}
/* check pw status if applicable */
- if ((pw->flags & F_PW_STATUSTLV) &&
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV) &&
pw->remote_status != PW_FORWARDING) {
- log_warnx("%s: pseudowire %s: remote end is down", __func__,
- pw->ifname);
+ log_warnx("%s: pseudowire %s: remote end is down", __func__, pw->ifname);
pw->reason = F_PW_REMOTE_NOT_FWD;
return (0);
}
@@ -345,34 +343,34 @@ l2vpn_pw_negotiate(struct lde_nbr *ln, struct fec_node *fn, struct map *map)
/* RFC4447 - Section 6.2: control word negotiation */
if (fec_find(&ln->sent_map, &fn->fec)) {
- if ((map->flags & F_MAP_PW_CWORD) &&
- !(pw->flags & F_PW_CWORD_CONF)) {
+ if (CHECK_FLAG(map->flags, F_MAP_PW_CWORD) &&
+ !CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) {
/* ignore the received label mapping */
return (1);
- } else if (!(map->flags & F_MAP_PW_CWORD) &&
- (pw->flags & F_PW_CWORD_CONF)) {
+ } else if (!CHECK_FLAG(map->flags, F_MAP_PW_CWORD) &&
+ CHECK_FLAG(pw->flags, F_PW_CWORD_CONF)) {
/* append a "Wrong C-bit" status code */
st.status_code = S_WRONG_CBIT;
st.msg_id = map->msg_id;
st.msg_type = htons(MSG_TYPE_LABELMAPPING);
lde_send_labelwithdraw(ln, fn, NULL, &st);
- pw->flags &= ~F_PW_CWORD;
+ UNSET_FLAG(pw->flags, F_PW_CWORD);
lde_send_labelmapping(ln, fn, 1);
}
- } else if (map->flags & F_MAP_PW_CWORD) {
- if (pw->flags & F_PW_CWORD_CONF)
- pw->flags |= F_PW_CWORD;
+ } else if (CHECK_FLAG(map->flags, F_MAP_PW_CWORD)) {
+ if (CHECK_FLAG(pw->flags, F_PW_CWORD_CONF))
+ SET_FLAG(pw->flags, F_PW_CWORD);
else
/* act as if no label mapping had been received */
return (1);
} else
- pw->flags &= ~F_PW_CWORD;
+ UNSET_FLAG(pw->flags, F_PW_CWORD);
/* RFC4447 - Section 5.4.3: pseudowire status negotiation */
if (fec_find(&ln->recv_map, &fn->fec) == NULL &&
- !(map->flags & F_MAP_PW_STATUS))
- pw->flags &= ~F_PW_STATUSTLV;
+ !CHECK_FLAG(map->flags, F_MAP_PW_STATUS))
+ UNSET_FLAG(pw->flags, F_PW_STATUSTLV);
return (0);
}
@@ -385,12 +383,11 @@ l2vpn_send_pw_status(struct lde_nbr *ln, uint32_t status, struct fec *fec)
memset(&nm, 0, sizeof(nm));
nm.status_code = S_PW_STATUS;
nm.pw_status = status;
- nm.flags |= F_NOTIF_PW_STATUS;
+ SET_FLAG(nm.flags, F_NOTIF_PW_STATUS);
lde_fec2map(fec, &nm.fec);
- nm.flags |= F_NOTIF_FEC;
+ SET_FLAG(nm.flags, F_NOTIF_FEC);
- lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm,
- sizeof(nm));
+ lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, sizeof(nm));
}
void
@@ -402,14 +399,13 @@ l2vpn_send_pw_status_wcard(struct lde_nbr *ln, uint32_t status,
memset(&nm, 0, sizeof(nm));
nm.status_code = S_PW_STATUS;
nm.pw_status = status;
- nm.flags |= F_NOTIF_PW_STATUS;
+ SET_FLAG(nm.flags, F_NOTIF_PW_STATUS);
nm.fec.type = MAP_TYPE_PWID;
nm.fec.fec.pwid.type = pw_type;
nm.fec.fec.pwid.group_id = group_id;
- nm.flags |= F_NOTIF_FEC;
+ SET_FLAG(nm.flags, F_NOTIF_FEC);
- lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm,
- sizeof(nm));
+ lde_imsg_compose_ldpe(IMSG_NOTIFICATION_SEND, ln->peerid, 0, &nm, sizeof(nm));
}
void
@@ -421,7 +417,7 @@ l2vpn_recv_pw_status(struct lde_nbr *ln, struct notify_msg *nm)
struct l2vpn_pw *pw;
if (nm->fec.type == MAP_TYPE_TYPED_WCARD ||
- !(nm->fec.flags & F_MAP_PW_ID)) {
+ !CHECK_FLAG(nm->fec.flags, F_MAP_PW_ID)) {
l2vpn_recv_pw_status_wcard(ln, nm);
return;
}
@@ -540,7 +536,7 @@ l2vpn_pw_status_update(struct zapi_pw_status *zpw)
if (ln == NULL)
return (0);
l2vpn_pw_fec(pw, &fec);
- if (pw->flags & F_PW_STATUSTLV)
+ if (CHECK_FLAG(pw->flags, F_PW_STATUSTLV))
l2vpn_send_pw_status(ln, local_status, &fec);
else {
struct fec_node *fn;
@@ -611,8 +607,7 @@ l2vpn_binding_ctl(pid_t pid)
pwctl.local_label = fn->local_label;
pwctl.local_gid = 0;
pwctl.local_ifmtu = pw->l2vpn->mtu;
- pwctl.local_cword = (pw->flags & F_PW_CWORD_CONF) ?
- 1 : 0;
+ pwctl.local_cword = CHECK_FLAG(pw->flags, F_PW_CWORD_CONF) ? 1 : 0;
pwctl.reason = pw->reason;
} else
pwctl.local_label = NO_LABEL;
@@ -624,11 +619,10 @@ l2vpn_binding_ctl(pid_t pid)
if (me) {
pwctl.remote_label = me->map.label;
pwctl.remote_gid = me->map.fec.pwid.group_id;
- if (me->map.flags & F_MAP_PW_IFMTU)
+ if (CHECK_FLAG(me->map.flags, F_MAP_PW_IFMTU))
pwctl.remote_ifmtu = me->map.fec.pwid.ifmtu;
if (pw)
- pwctl.remote_cword = (pw->flags & F_PW_CWORD) ?
- 1 : 0;
+ pwctl.remote_cword = CHECK_FLAG(pw->flags, F_PW_CWORD) ? 1 : 0;
lde_imsg_compose_ldpe(IMSG_CTL_SHOW_L2VPN_BINDING,
0, pid, &pwctl, sizeof(pwctl));
diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c
index e3ace30582..2010829035 100644
--- a/ldpd/ldp_zebra.c
+++ b/ldpd/ldp_zebra.c
@@ -22,8 +22,7 @@
#include "ldp_debug.h"
static void ifp2kif(struct interface *, struct kif *);
-static void ifc2kaddr(struct interface *, struct connected *,
- struct kaddr *);
+static void ifc2kaddr(struct interface *, struct connected *, struct kaddr *);
static int ldp_zebra_send_mpls_labels(int, struct kroute *);
static int ldp_router_id_update(ZAPI_CALLBACK_ARGS);
static int ldp_interface_address_add(ZAPI_CALLBACK_ARGS);
@@ -295,8 +294,7 @@ kmpw_add(struct zapi_pw *zpw)
debug_zebra_out("pseudowire %s nexthop %s (add)",
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop));
- return zebra_send_pw(zclient, ZEBRA_PW_ADD, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_ADD, zpw) == ZCLIENT_SEND_FAILURE;
}
int
@@ -305,8 +303,7 @@ kmpw_del(struct zapi_pw *zpw)
debug_zebra_out("pseudowire %s nexthop %s (del)",
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop));
- return zebra_send_pw(zclient, ZEBRA_PW_DELETE, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_DELETE, zpw) == ZCLIENT_SEND_FAILURE;
}
int
@@ -316,8 +313,7 @@ kmpw_set(struct zapi_pw *zpw)
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop),
zpw->local_label, zpw->remote_label);
- return zebra_send_pw(zclient, ZEBRA_PW_SET, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_SET, zpw) == ZCLIENT_SEND_FAILURE;
}
int
@@ -326,8 +322,7 @@ kmpw_unset(struct zapi_pw *zpw)
debug_zebra_out("pseudowire %s nexthop %s (unset)",
zpw->ifname, log_addr(zpw->af, (union ldpd_addr *)&zpw->nexthop));
- return zebra_send_pw(zclient, ZEBRA_PW_UNSET, zpw)
- == ZCLIENT_SEND_FAILURE;
+ return zebra_send_pw(zclient, ZEBRA_PW_UNSET, zpw) == ZCLIENT_SEND_FAILURE;
}
void
diff --git a/ldpd/socket.c b/ldpd/socket.c
index ec6d8be3d5..6b7e475d7f 100644
--- a/ldpd/socket.c
+++ b/ldpd/socket.c
@@ -89,8 +89,7 @@ ldp_create_socket(int af, enum socket_type type)
return (-1);
}
if (type == LDP_SOCKET_DISC) {
- if (sock_set_ipv4_mcast_ttl(fd,
- IP_DEFAULT_MULTICAST_TTL) == -1) {
+ if (sock_set_ipv4_mcast_ttl(fd, IP_DEFAULT_MULTICAST_TTL) == -1) {
close(fd);
return (-1);
}
@@ -141,7 +140,7 @@ ldp_create_socket(int af, enum socket_type type)
close(fd);
return (-1);
}
- if (!(ldpd_conf->ipv6.flags & F_LDPD_AF_NO_GTSM)) {
+ if (!CHECK_FLAG(ldpd_conf->ipv6.flags, F_LDPD_AF_NO_GTSM)) {
/* ignore any possible error */
sock_set_ipv6_minhopcount(fd, 255);
}
@@ -171,8 +170,7 @@ ldp_create_socket(int af, enum socket_type type)
#ifdef __OpenBSD__
opt = 1;
- if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt,
- sizeof(opt)) == -1) {
+ if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &opt, sizeof(opt)) == -1) {
if (errno == ENOPROTOOPT) { /* system w/o md5sig */
log_warnx("md5sig not available, disabling");
sysdep.no_md5sig = 1;
@@ -196,7 +194,7 @@ sock_set_nonblock(int fd)
if ((flags = fcntl(fd, F_GETFL, 0)) == -1)
fatal("fcntl F_GETFL");
- flags |= O_NONBLOCK;
+ SET_FLAG(flags, O_NONBLOCK);
if (fcntl(fd, F_SETFL, flags) == -1)
fatal("fcntl F_SETFL");
@@ -210,7 +208,7 @@ sock_set_cloexec(int fd)
if ((flags = fcntl(fd, F_GETFD, 0)) == -1)
fatal("fcntl F_GETFD");
- flags |= FD_CLOEXEC;
+ SET_FLAG(flags, FD_CLOEXEC);
if (fcntl(fd, F_SETFD, flags) == -1)
fatal("fcntl F_SETFD");
@@ -222,16 +220,14 @@ sock_set_recvbuf(int fd)
int bsize;
bsize = 65535;
- while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize,
- sizeof(bsize)) == -1)
+ while (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bsize, sizeof(bsize)) == -1)
bsize /= 2;
}
int
sock_set_reuse(int fd, int enable)
{
- if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable,
- sizeof(int)) < 0) {
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int)) < 0) {
log_warn("%s: error setting SO_REUSEADDR", __func__);
return (-1);
}
@@ -244,8 +240,7 @@ sock_set_bindany(int fd, int enable)
{
#ifdef HAVE_SO_BINDANY
frr_with_privs(&ldpd_privs) {
- if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable,
- sizeof(int)) < 0) {
+ if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable, sizeof(int)) < 0) {
log_warn("%s: error setting SO_BINDANY", __func__);
return (-1);
}
@@ -259,8 +254,7 @@ sock_set_bindany(int fd, int enable)
return (0);
#elif defined(IP_BINDANY)
frr_with_privs(&ldpd_privs) {
- if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int))
- < 0) {
+ if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int)) < 0) {
log_warn("%s: error setting IP_BINDANY", __func__);
return (-1);
}
@@ -343,10 +337,8 @@ sock_set_ipv4_ucast_ttl(int fd, int ttl)
int
sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl)
{
- if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL,
- (char *)&ttl, sizeof(ttl)) < 0) {
- log_warn("%s: error setting IP_MULTICAST_TTL to %d",
- __func__, ttl);
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, (char *)&ttl, sizeof(ttl)) < 0) {
+ log_warn("%s: error setting IP_MULTICAST_TTL to %d", __func__, ttl);
return (-1);
}
@@ -358,8 +350,7 @@ sock_set_ipv4_mcast_ttl(int fd, uint8_t ttl)
int
sock_set_ipv4_pktinfo(int fd, int enable)
{
- if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable,
- sizeof(enable)) < 0) {
+ if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &enable, sizeof(enable)) < 0) {
log_warn("%s: error setting IP_PKTINFO", __func__);
return (-1);
}
@@ -370,8 +361,7 @@ sock_set_ipv4_pktinfo(int fd, int enable)
int
sock_set_ipv4_recvdstaddr(int fd, int enable)
{
- if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable,
- sizeof(enable)) < 0) {
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &enable, sizeof(enable)) < 0) {
log_warn("%s: error setting IP_RECVDSTADDR", __func__);
return (-1);
}
@@ -409,8 +399,7 @@ sock_set_ipv4_mcast_loop(int fd)
int
sock_set_ipv6_dscp(int fd, int dscp)
{
- if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp,
- sizeof(dscp)) < 0) {
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &dscp, sizeof(dscp)) < 0) {
log_warn("%s: error setting IPV6_TCLASS", __func__);
return (-1);
}
@@ -421,8 +410,7 @@ sock_set_ipv6_dscp(int fd, int dscp)
int
sock_set_ipv6_pktinfo(int fd, int enable)
{
- if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable,
- sizeof(enable)) < 0) {
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &enable, sizeof(enable)) < 0) {
log_warn("%s: error setting IPV6_RECVPKTINFO", __func__);
return (-1);
}
diff --git a/lib/command.c b/lib/command.c
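For reference, the ldpd hunks above replace open-coded bitwise tests and assignments with FRR's flag helpers. The following is a minimal, self-contained sketch of the same semantics, assuming the usual macro definitions from FRR's lib/zebra.h; the F_PW_* values here are illustrative only and are not the real ldpd bit assignments.

/* Sketch: semantics of the flag helpers used in the ldpd changes above. */
#include <stdio.h>
#include <stdint.h>

#define CHECK_FLAG(V, F) ((V) & (F))   /* test: V & F            */
#define SET_FLAG(V, F)   (V) |= (F)    /* set:  V |= F           */
#define UNSET_FLAG(V, F) (V) &= ~(F)   /* clear: V &= ~F         */

#define F_PW_CWORD_CONF (1U << 0)      /* illustrative values only */
#define F_PW_CWORD      (1U << 1)

int main(void)
{
	uint32_t flags = F_PW_CWORD_CONF;

	/* mirrors the l2vpn_pw_reset() control-word logic rewritten above */
	if (CHECK_FLAG(flags, F_PW_CWORD_CONF))
		SET_FLAG(flags, F_PW_CWORD);   /* was: flags |= F_PW_CWORD  */
	else
		UNSET_FLAG(flags, F_PW_CWORD); /* was: flags &= ~F_PW_CWORD */

	printf("flags = 0x%x\n", (unsigned int)flags); /* prints 0x3 */
	return 0;
}

The behavior is unchanged; the macros only make the intent (test, set, clear) explicit at each call site.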
index e92251160f..0995637219 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -735,9 +735,13 @@ char *cmd_variable_comp2str(vector comps, unsigned short cols)
char *item = vector_slot(comps, j);
itemlen = strlen(item);
- if (cs + itemlen + AUTOCOMP_INDENT + 3 >= bsz)
- buf = XREALLOC(MTYPE_TMP, buf, (bsz *= 2));
+ size_t next_sz = cs + itemlen + AUTOCOMP_INDENT + 3;
+ if (next_sz > bsz) {
+ /* Make sure the buf size is large enough */
+ bsz = next_sz;
+ buf = XREALLOC(MTYPE_TMP, buf, bsz);
+ }
if (lc + itemlen + 1 >= cols) {
cs += snprintf(&buf[cs], bsz - cs, "\n%*s",
AUTOCOMP_INDENT, "");
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
index 534dc43405..5c875204f7 100644
--- a/lib/mgmt_be_client.c
+++ b/lib/mgmt_be_client.c
@@ -28,6 +28,8 @@
#define MGMTD_DBG_BE_CLIENT_CHECK() \
DEBUG_MODE_CHECK(&mgmt_dbg_be_client, DEBUG_MODE_ALL)
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT, "backend client");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_CLIENT_NAME, "backend client name");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH, "backend transaction batch data");
DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_TXN, "backend transaction data");
@@ -70,8 +72,6 @@ struct mgmt_be_batch_ctx {
#define MGMTD_BE_TXN_FLAGS_CFG_APPLIED (1U << 1)
DECLARE_LIST(mgmt_be_batches, struct mgmt_be_batch_ctx, list_linkage);
-struct mgmt_be_client_ctx;
-
PREDECL_LIST(mgmt_be_txns);
struct mgmt_be_txn_ctx {
/* Txn-Id as assigned by MGMTD */
@@ -79,7 +79,7 @@ struct mgmt_be_txn_ctx {
uint32_t flags;
struct mgmt_be_client_txn_ctx client_data;
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client;
/* List of batches belonging to this transaction */
struct mgmt_be_batches_head cfg_batches;
@@ -100,9 +100,11 @@ DECLARE_LIST(mgmt_be_txns, struct mgmt_be_txn_ctx, list_linkage);
#define FOREACH_BE_APPLY_BATCH_IN_LIST(txn, batch) \
frr_each_safe (mgmt_be_batches, &(txn)->apply_cfgs, (batch))
-struct mgmt_be_client_ctx {
+struct mgmt_be_client {
struct msg_client client;
+ char *name;
+
struct nb_config *candidate_config;
struct nb_config *running_config;
@@ -114,7 +116,9 @@ struct mgmt_be_client_ctx {
unsigned long avg_apply_nb_cfg_tm;
struct mgmt_be_txns_head txn_head;
- struct mgmt_be_client_params client_params;
+
+ struct mgmt_be_client_cbs cbs;
+ uintptr_t user_data;
};
#define FOREACH_BE_TXN_IN_LIST(client_ctx, txn) \
@@ -122,9 +126,6 @@ struct mgmt_be_client_ctx {
struct debug mgmt_dbg_be_client = {0, "Management backend client operations"};
-static struct mgmt_be_client_ctx mgmt_be_client_ctx = {
- .client = {.conn = {.fd = -1}}};
-
const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
#ifdef HAVE_STATICD
[MGMTD_BE_CLIENT_ID_STATICD] = "staticd",
@@ -132,7 +133,7 @@ const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
[MGMTD_BE_CLIENT_ID_MAX] = "Unknown/Invalid",
};
-static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
+static int mgmt_be_client_send_msg(struct mgmt_be_client *client_ctx,
Mgmtd__BeMessage *be_msg)
{
return msg_conn_send_msg(
@@ -216,8 +217,7 @@ static void mgmt_be_cleanup_all_batches(struct mgmt_be_txn_ctx *txn)
}
static struct mgmt_be_txn_ctx *
-mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id)
+mgmt_be_find_txn_by_id(struct mgmt_be_client *client_ctx, uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn = NULL;
@@ -230,8 +230,7 @@ mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx,
}
static struct mgmt_be_txn_ctx *
-mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id)
+mgmt_be_txn_create(struct mgmt_be_client *client_ctx, uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn = NULL;
@@ -242,7 +241,7 @@ mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
assert(txn);
txn->txn_id = txn_id;
- txn->client_ctx = client_ctx;
+ txn->client = client_ctx;
mgmt_be_batches_init(&txn->cfg_batches);
mgmt_be_batches_init(&txn->apply_cfgs);
mgmt_be_txns_add_tail(&client_ctx->txn_head, txn);
@@ -253,8 +252,8 @@ mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
return txn;
}
-static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
- struct mgmt_be_txn_ctx **txn)
+static void mgmt_be_txn_delete(struct mgmt_be_client *client_ctx,
+ struct mgmt_be_txn_ctx **txn)
{
char err_msg[] = "MGMT Transaction Delete";
@@ -274,12 +273,10 @@ static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
* CFGDATA_CREATE_REQs. But first notify the client
* about the transaction delete.
*/
- if (client_ctx->client_params.txn_notify)
- (void)(*client_ctx->client_params
- .txn_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- &(*txn)->client_data, true);
+ if (client_ctx->cbs.txn_notify)
+ (void)(*client_ctx->cbs.txn_notify)(client_ctx,
+ client_ctx->user_data,
+ &(*txn)->client_data, true);
mgmt_be_cleanup_all_batches(*txn);
if ((*txn)->nb_txn)
@@ -290,8 +287,7 @@ static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
*txn = NULL;
}
-static void
-mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx)
+static void mgmt_be_cleanup_all_txns(struct mgmt_be_client *client_ctx)
{
struct mgmt_be_txn_ctx *txn = NULL;
@@ -300,9 +296,8 @@ mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx)
}
}
-static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, bool create,
- bool success)
+static int mgmt_be_send_txn_reply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, bool create, bool success)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeTxnReply txn_reply;
@@ -321,8 +316,8 @@ static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx,
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
-static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, bool create)
+static int mgmt_be_process_txn_req(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, bool create)
{
struct mgmt_be_txn_ctx *txn;
@@ -342,11 +337,9 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
MGMTD_BE_CLIENT_DBG("Created new txn-id %" PRIu64, txn_id);
txn = mgmt_be_txn_create(client_ctx, txn_id);
- if (client_ctx->client_params.txn_notify)
- (void)(*client_ctx->client_params
- .txn_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
+ if (client_ctx->cbs.txn_notify)
+ (void)(*client_ctx->cbs.txn_notify)(
+ client_ctx, client_ctx->user_data,
&txn->client_data, false);
} else {
if (!txn) {
@@ -368,10 +361,10 @@ static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int
-mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, uint64_t batch_id,
- bool success, const char *error_if_any)
+static int mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, uint64_t batch_id,
+ bool success,
+ const char *error_if_any)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataCreateReply cfgdata_reply;
@@ -398,7 +391,7 @@ static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn)
{
char errmsg[BUFSIZ] = {0};
- assert(txn && txn->client_ctx);
+ assert(txn && txn->client);
if (txn->nb_txn) {
MGMTD_BE_CLIENT_ERR(
"Aborting configs after prep for txn-id: %" PRIu64,
@@ -416,13 +409,13 @@ static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn)
MGMTD_BE_CLIENT_DBG(
"Reset candidate configurations after abort of txn-id: %" PRIu64,
txn->txn_id);
- nb_config_replace(txn->client_ctx->candidate_config,
- txn->client_ctx->running_config, true);
+ nb_config_replace(txn->client->candidate_config,
+ txn->client->running_config, true);
}
static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
{
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client_ctx;
struct mgmt_be_txn_req *txn_req = NULL;
struct nb_context nb_ctx = {0};
struct timeval edit_nb_cfg_start;
@@ -437,15 +430,15 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
size_t num_processed;
int err;
- assert(txn && txn->client_ctx);
- client_ctx = txn->client_ctx;
+ assert(txn && txn->client);
+ client_ctx = txn->client;
num_processed = 0;
FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
txn_req = &batch->txn_req;
error = false;
nb_ctx.client = NB_CLIENT_CLI;
- nb_ctx.user = (void *)client_ctx->client_params.user_data;
+ nb_ctx.user = (void *)client_ctx->user_data;
if (!txn->nb_txn) {
/*
@@ -492,7 +485,7 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
* Now prepare all the batches we have applied in one go.
*/
nb_ctx.client = NB_CLIENT_CLI;
- nb_ctx.user = (void *)client_ctx->client_params.user_data;
+ nb_ctx.user = (void *)client_ctx->user_data;
gettimeofday(&prep_nb_cfg_start, NULL);
err = nb_candidate_commit_prepare(nb_ctx, client_ctx->candidate_config,
@@ -556,12 +549,11 @@ static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
/*
* Process all CFG_DATA_REQs received so far and prepare them all in one go.
*/
-static int
-mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
- struct mgmt_be_txn_ctx *txn,
- uint64_t batch_id,
- Mgmtd__YangCfgDataReq * cfg_req[],
- int num_req)
+static int mgmt_be_update_setcfg_in_batch(struct mgmt_be_client *client_ctx,
+ struct mgmt_be_txn_ctx *txn,
+ uint64_t batch_id,
+ Mgmtd__YangCfgDataReq *cfg_req[],
+ int num_req)
{
struct mgmt_be_batch_ctx *batch = NULL;
struct mgmt_be_txn_req *txn_req = NULL;
@@ -611,11 +603,10 @@ mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int
-mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, uint64_t batch_id,
- Mgmtd__YangCfgDataReq * cfg_req[], int num_req,
- bool end_of_data)
+static int mgmt_be_process_cfgdata_req(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq *cfg_req[],
+ int num_req, bool end_of_data)
{
struct mgmt_be_txn_ctx *txn;
@@ -640,10 +631,10 @@ mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id, uint64_t batch_ids[],
- size_t num_batch_ids, bool success,
- const char *error_if_any)
+static int mgmt_be_send_apply_reply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id, uint64_t batch_ids[],
+ size_t num_batch_ids, bool success,
+ const char *error_if_any)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeCfgDataApplyReply apply_reply;
@@ -673,7 +664,7 @@ static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx,
static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
{
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client_ctx;
struct timeval apply_nb_cfg_start;
struct timeval apply_nb_cfg_end;
unsigned long apply_nb_cfg_tm;
@@ -682,8 +673,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
size_t num_processed;
static uint64_t batch_ids[MGMTD_BE_MAX_BATCH_IDS_IN_REQ];
- assert(txn && txn->client_ctx);
- client_ctx = txn->client_ctx;
+ assert(txn && txn->client);
+ client_ctx = txn->client;
assert(txn->nb_txn);
num_processed = 0;
@@ -735,9 +726,8 @@ static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
return 0;
}
-static int
-mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx,
- uint64_t txn_id)
+static int mgmt_be_process_cfg_apply(struct mgmt_be_client *client_ctx,
+ uint64_t txn_id)
{
struct mgmt_be_txn_ctx *txn;
@@ -754,9 +744,8 @@ mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx,
return 0;
}
-static int
-mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
- Mgmtd__BeMessage *be_msg)
+static int mgmt_be_client_handle_msg(struct mgmt_be_client *client_ctx,
+ Mgmtd__BeMessage *be_msg)
{
/*
* protobuf-c adds a max size enum with an internal, and changing by
@@ -833,12 +822,12 @@ mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data,
size_t len, struct msg_conn *conn)
{
- struct mgmt_be_client_ctx *client_ctx;
+ struct mgmt_be_client *client_ctx;
struct msg_client *client;
Mgmtd__BeMessage *be_msg;
client = container_of(conn, struct msg_client, conn);
- client_ctx = container_of(client, struct mgmt_be_client_ctx, client);
+ client_ctx = container_of(client, struct mgmt_be_client, client);
be_msg = mgmtd__be_message__unpack(NULL, len, data);
if (!be_msg) {
@@ -853,17 +842,17 @@ static void mgmt_be_client_process_msg(uint8_t version, uint8_t *data,
mgmtd__be_message__free_unpacked(be_msg, NULL);
}
-static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx,
- bool subscr_xpaths, uint16_t num_reg_xpaths,
- char **reg_xpaths)
+int mgmt_be_send_subscr_req(struct mgmt_be_client *client_ctx,
+ bool subscr_xpaths, int num_xpaths,
+ char **reg_xpaths)
{
Mgmtd__BeMessage be_msg;
Mgmtd__BeSubscribeReq subscr_req;
mgmtd__be_subscribe_req__init(&subscr_req);
- subscr_req.client_name = client_ctx->client_params.name;
- subscr_req.n_xpath_reg = num_reg_xpaths;
- if (num_reg_xpaths)
+ subscr_req.client_name = client_ctx->name;
+ subscr_req.n_xpath_reg = num_xpaths;
+ if (num_xpaths)
subscr_req.xpath_reg = reg_xpaths;
else
subscr_req.xpath_reg = NULL;
@@ -881,24 +870,24 @@ static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx,
return mgmt_be_client_send_msg(client_ctx, &be_msg);
}
-static int _notify_conenct_disconnect(struct msg_client *client, bool connected)
+static int _notify_conenct_disconnect(struct msg_client *msg_client,
+ bool connected)
{
- struct mgmt_be_client_ctx *client_ctx =
- container_of(client, struct mgmt_be_client_ctx, client);
+ struct mgmt_be_client *client =
+ container_of(msg_client, struct mgmt_be_client, client);
int ret;
if (connected) {
- assert(client->conn.fd != -1);
- ret = mgmt_be_send_subscr_req(client_ctx, false, 0, NULL);
+ assert(msg_client->conn.fd != -1);
+ ret = mgmt_be_send_subscr_req(client, false, 0, NULL);
if (ret)
return ret;
}
/* Notify BE client through registered callback (if any) */
- if (client_ctx->client_params.client_connect_notify)
- (void)(*client_ctx->client_params.client_connect_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data, connected);
+ if (client->cbs.client_connect_notify)
+ (void)(*client->cbs.client_connect_notify)(
+ client, client->user_data, connected);
return 0;
}
@@ -914,6 +903,10 @@ static int mgmt_be_client_notify_disconenct(struct msg_conn *conn)
return _notify_conenct_disconnect(client, false);
}
+/*
+ * Debug Flags
+ */
+
DEFPY(debug_mgmt_client_be, debug_mgmt_client_be_cmd,
"[no] debug mgmt client backend",
NO_STR DEBUG_STR MGMTD_STR
@@ -956,32 +949,33 @@ static struct cmd_node mgmt_dbg_node = {
.config_write = mgmt_debug_be_client_config_write,
};
-/*
- * Initialize library and try connecting with MGMTD.
- */
-uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
- struct event_loop *master_thread)
+struct mgmt_be_client *mgmt_be_client_create(const char *client_name,
+ struct mgmt_be_client_cbs *cbs,
+ uintptr_t user_data,
+ struct event_loop *event_loop)
{
- /* Don't call twice */
- assert(!mgmt_be_client_ctx.client.conn.loop);
+ struct mgmt_be_client *client =
+ XCALLOC(MTYPE_MGMTD_BE_CLIENT, sizeof(*client));
/* Only call after frr_init() */
assert(running_config);
- mgmt_be_client_ctx.running_config = running_config;
- mgmt_be_client_ctx.candidate_config = nb_config_new(NULL);
- mgmt_be_client_ctx.client_params = *params;
- mgmt_be_txns_init(&mgmt_be_client_ctx.txn_head);
- msg_client_init(&mgmt_be_client_ctx.client, master_thread,
- MGMTD_BE_SERVER_PATH, mgmt_be_client_notify_conenct,
+ client->name = XSTRDUP(MTYPE_MGMTD_BE_CLIENT_NAME, client_name);
+ client->running_config = running_config;
+ client->candidate_config = nb_config_new(NULL);
+ if (cbs)
+ client->cbs = *cbs;
+ mgmt_be_txns_init(&client->txn_head);
+ msg_client_init(&client->client, event_loop, MGMTD_BE_SERVER_PATH,
+ mgmt_be_client_notify_conenct,
mgmt_be_client_notify_disconenct,
mgmt_be_client_process_msg, MGMTD_BE_MAX_NUM_MSG_PROC,
MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN, false,
"BE-client", MGMTD_DBG_BE_CLIENT_CHECK());
- MGMTD_BE_CLIENT_DBG("Initialized client '%s'", params->name);
+ MGMTD_BE_CLIENT_DBG("Initialized client '%s'", client_name);
- return (uintptr_t)&mgmt_be_client_ctx;
+ return client;
}
@@ -993,86 +987,16 @@ void mgmt_be_client_lib_vty_init(void)
install_element(CONFIG_NODE, &debug_mgmt_client_be_cmd);
}
-
-/*
- * Subscribe with MGMTD for one or more YANG subtree(s).
- */
-enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
- char *reg_yang_xpaths[],
- int num_reg_xpaths)
+void mgmt_be_client_destroy(struct mgmt_be_client *client)
{
- struct mgmt_be_client_ctx *client_ctx;
-
- if (!num_reg_xpaths)
- return MGMTD_SUCCESS;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_be_send_subscr_req(client_ctx, true, num_reg_xpaths,
- reg_yang_xpaths)
- != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Unsubscribe with MGMTD for one or more YANG subtree(s).
- */
-enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
- char *reg_yang_xpaths[],
- int num_reg_xpaths)
-{
- struct mgmt_be_client_ctx *client_ctx;
-
- if (!num_reg_xpaths)
- return MGMTD_SUCCESS;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
-
- if (mgmt_be_send_subscr_req(client_ctx, false, num_reg_xpaths,
- reg_yang_xpaths)
- < 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send one or more YANG notifications to MGMTD daemon.
- */
-enum mgmt_result mgmt_be_send_yang_notify(uintptr_t lib_hndl,
- Mgmtd__YangData * data_elems[],
- int num_elems)
-{
- struct mgmt_be_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Destroy library and cleanup everything.
- */
-void mgmt_be_client_lib_destroy(void)
-{
- struct mgmt_be_client_ctx *client_ctx = &mgmt_be_client_ctx;
-
MGMTD_BE_CLIENT_DBG("Destroying MGMTD Backend Client '%s'",
- client_ctx->client_params.name);
+ client->name);
- msg_client_cleanup(&client_ctx->client);
- mgmt_be_cleanup_all_txns(client_ctx);
- mgmt_be_txns_fini(&client_ctx->txn_head);
- nb_config_free(client_ctx->candidate_config);
+ msg_client_cleanup(&client->client);
+ mgmt_be_cleanup_all_txns(client);
+ mgmt_be_txns_fini(&client->txn_head);
+ nb_config_free(client->candidate_config);
- memset(client_ctx, 0, sizeof(*client_ctx));
+ XFREE(MTYPE_MGMTD_BE_CLIENT_NAME, client->name);
+ XFREE(MTYPE_MGMTD_BE_CLIENT, client);
}
diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h
index bbe938b5b4..4d8a1f51a1 100644
--- a/lib/mgmt_be_client.h
+++ b/lib/mgmt_be_client.h
@@ -82,67 +82,26 @@ enum mgmt_be_client_id {
#define MGMTD_BE_MAX_CLIENTS_PER_XPATH_REG 32
+struct mgmt_be_client;
+
struct mgmt_be_client_txn_ctx {
uintptr_t *user_ctx;
};
-/*
- * All the client-specific information this library needs to
- * initialize itself, setup connection with MGMTD BackEnd interface
- * and carry on all required procedures appropriately.
+/**
+ * Backend client callbacks.
*
- * BackEnd clients need to initialise a instance of this structure
- * with appropriate data and pass it while calling the API
- * to initialize the library (See mgmt_be_client_lib_init for
- * more details).
+ * Callbacks:
+ * client_connect_notify: called when connection is made/lost to mgmtd.
+ * txn_notify: called when a txn has been created
*/
-struct mgmt_be_client_params {
- char name[MGMTD_CLIENT_NAME_MAX_LEN];
- uintptr_t user_data;
- unsigned long conn_retry_intvl_sec;
-
- void (*client_connect_notify)(uintptr_t lib_hndl,
- uintptr_t usr_data,
- bool connected);
-
- void (*client_subscribe_notify)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct nb_yang_xpath **xpath,
- enum mgmt_result subscribe_result[], int num_paths);
-
- void (*txn_notify)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx, bool destroyed);
-
- enum mgmt_result (*data_validate)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, struct nb_yang_value *data,
- bool delete, char *error_if_any);
-
- enum mgmt_result (*data_apply)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, struct nb_yang_value *data,
- bool delete);
-
- enum mgmt_result (*get_data_elem)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, struct nb_yang_xpath_elem *elem);
-
- enum mgmt_result (*get_data)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, bool keys_only,
- struct nb_yang_xpath_elem **elems, int *num_elems,
- int *next_key);
-
- enum mgmt_result (*get_next_data)(
- uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- struct nb_yang_xpath *xpath, bool keys_only,
- struct nb_yang_xpath_elem **elems, int *num_elems);
+struct mgmt_be_client_cbs {
+ void (*client_connect_notify)(struct mgmt_be_client *client,
+ uintptr_t usr_data, bool connected);
+
+ void (*txn_notify)(struct mgmt_be_client *client, uintptr_t usr_data,
+ struct mgmt_be_client_txn_ctx *txn_ctx,
+ bool destroyed);
};
/***************************************************************
@@ -176,20 +135,20 @@ mgmt_be_client_name2id(const char *name)
* API prototypes
***************************************************************/
-/*
- * Initialize library and try connecting with MGMTD.
- *
- * params
- * Backend client parameters.
+/**
+ * Create backend client and connect to MGMTD.
*
- * master_thread
- * Thread master.
+ * Args:
+ * client_name: the name of the client
+ * cbs: callbacks for various events.
+ * event_loop: the main event loop.
*
* Returns:
- * Backend client lib handler (nothing but address of mgmt_be_client_ctx)
+ * Backend client object.
*/
-extern uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
- struct event_loop *master_thread);
+extern struct mgmt_be_client *
+mgmt_be_client_create(const char *name, struct mgmt_be_client_cbs *cbs,
+ uintptr_t user_data, struct event_loop *event_loop);
/*
* Initialize library vty (adds debug support).
@@ -206,13 +165,13 @@ extern void mgmt_be_client_lib_vty_init(void);
extern void mgmt_debug_be_client_show_debug(struct vty *vty);
/*
- * Subscribe with MGMTD for one or more YANG subtree(s).
+ * [Un]-subscribe with MGMTD for one or more YANG subtree(s).
*
- * lib_hndl
- * Client library handler.
+ * client
+ * The client object.
*
* reg_yang_xpaths
- * Yang xpath(s) that needs to be subscribed to.
+ * Yang xpath(s) that needs to be [un]-subscribed from/to
*
* num_xpaths
* Number of xpaths
@@ -220,52 +179,14 @@ extern void mgmt_debug_be_client_show_debug(struct vty *vty);
* Returns:
* MGMTD_SUCCESS on success, MGMTD_* otherwise.
*/
-extern enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
- char **reg_yang_xpaths,
- int num_xpaths);
-
-/*
- * Send one or more YANG notifications to MGMTD daemon.
- *
- * lib_hndl
- * Client library handler.
- *
- * data_elems
- * Yang data elements from data tree.
- *
- * num_elems
- * Number of data elements.
- *
- * Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
- */
-extern enum mgmt_result
-mgmt_be_send_yang_notify(uintptr_t lib_hndl, Mgmtd__YangData **data_elems,
- int num_elems);
-
-/*
- * Un-subscribe with MGMTD for one or more YANG subtree(s).
- *
- * lib_hndl
- * Client library handler.
- *
- * reg_yang_xpaths
- * Yang xpath(s) that needs to be un-subscribed from.
- *
- * num_reg_xpaths
- * Number of subscribed xpaths
- *
- * Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
- */
-enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
- char **reg_yang_xpaths,
- int num_reg_xpaths);
+extern int mgmt_be_send_subscr_req(struct mgmt_be_client *client,
+ bool subscr_xpaths, int num_xpaths,
+ char **reg_xpaths);
/*
- * Destroy library and cleanup everything.
+ * Destroy backend client and cleanup everything.
*/
-extern void mgmt_be_client_lib_destroy(void);
+extern void mgmt_be_client_destroy(struct mgmt_be_client *client);
#ifdef __cplusplus
}
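The reworked backend-client API above takes an explicit client object plus a small callbacks struct instead of the old mgmt_be_client_params/opaque-handle pair. A minimal usage sketch based only on the prototypes in this header follows; the daemon name "mydaemon", the my_* function names, and the empty callback bodies are hypothetical.

/* Sketch: daemon-side use of the new backend client API (assumptions noted). */
#include "mgmt_be_client.h" /* include path is an assumption */

static struct mgmt_be_client *be_client;

static void my_connect_cb(struct mgmt_be_client *client, uintptr_t usr_data,
			  bool connected)
{
	/* e.g. re-register state on connect, flush pending work on disconnect */
}

static void my_txn_cb(struct mgmt_be_client *client, uintptr_t usr_data,
		      struct mgmt_be_client_txn_ctx *txn_ctx, bool destroyed)
{
	/* allocate or release per-transaction state via txn_ctx->user_ctx */
}

static struct mgmt_be_client_cbs my_cbs = {
	.client_connect_notify = my_connect_cb,
	.txn_notify = my_txn_cb,
};

void my_daemon_mgmt_init(struct event_loop *loop)
{
	be_client = mgmt_be_client_create("mydaemon", &my_cbs, 0, loop);
}

void my_daemon_mgmt_fini(void)
{
	mgmt_be_client_destroy(be_client);
	be_client = NULL;
}

Compared with the removed mgmt_be_client_lib_init()/lib_destroy() pair, this allows more than one client instance per process and replaces the uintptr_t library handle with a typed struct mgmt_be_client pointer.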
diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c
index 83f60ea58b..35a6d7d909 100644
--- a/lib/mgmt_fe_client.c
+++ b/lib/mgmt_fe_client.c
@@ -19,14 +19,12 @@
#include "lib/mgmt_fe_client_clippy.c"
-struct mgmt_fe_client_ctx;
-
PREDECL_LIST(mgmt_sessions);
struct mgmt_fe_client_session {
uint64_t client_id; /* FE client identifies itself with this ID */
uint64_t session_id; /* FE adapter identified session with this ID */
- struct mgmt_fe_client_ctx *client_ctx;
+ struct mgmt_fe_client *client;
uintptr_t user_ctx;
struct mgmt_sessions_item list_linkage;
@@ -34,29 +32,31 @@ struct mgmt_fe_client_session {
DECLARE_LIST(mgmt_sessions, struct mgmt_fe_client_session, list_linkage);
-DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "MGMTD Frontend session");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_CLIENT, "frontend client");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_CLIENT_NAME, "frontend client name");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "frontend session");
-struct mgmt_fe_client_ctx {
+struct mgmt_fe_client {
struct msg_client client;
- struct mgmt_fe_client_params client_params;
- struct mgmt_sessions_head client_sessions;
+ char *name;
+ struct mgmt_fe_client_cbs cbs;
+ uintptr_t user_data;
+ struct mgmt_sessions_head sessions;
};
-#define FOREACH_SESSION_IN_LIST(client_ctx, session) \
- frr_each_safe (mgmt_sessions, &(client_ctx)->client_sessions, (session))
+#define FOREACH_SESSION_IN_LIST(client, session) \
+ frr_each_safe (mgmt_sessions, &(client)->sessions, (session))
struct debug mgmt_dbg_fe_client = {0, "Management frontend client operations"};
-static struct mgmt_fe_client_ctx mgmt_fe_client_ctx = {
- .client = {.conn = {.fd = -1}}};
static struct mgmt_fe_client_session *
-mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx,
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client *client,
uint64_t client_id)
{
struct mgmt_fe_client_session *session;
- FOREACH_SESSION_IN_LIST (client_ctx, session) {
+ FOREACH_SESSION_IN_LIST (client, session) {
if (session->client_id == client_id) {
MGMTD_FE_CLIENT_DBG("Found session-id %" PRIu64
" using client-id %" PRIu64,
@@ -70,12 +70,12 @@ mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx,
}
static struct mgmt_fe_client_session *
-mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx,
+mgmt_fe_find_session_by_session_id(struct mgmt_fe_client *client,
uint64_t session_id)
{
struct mgmt_fe_client_session *session;
- FOREACH_SESSION_IN_LIST (client_ctx, session) {
+ FOREACH_SESSION_IN_LIST (client, session) {
if (session->session_id == session_id) {
MGMTD_FE_CLIENT_DBG(
"Found session of client-id %" PRIu64
@@ -89,24 +89,24 @@ mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx,
return NULL;
}
-static int mgmt_fe_client_send_msg(struct mgmt_fe_client_ctx *client_ctx,
+static int mgmt_fe_client_send_msg(struct mgmt_fe_client *client,
Mgmtd__FeMessage *fe_msg,
bool short_circuit_ok)
{
return msg_conn_send_msg(
- &client_ctx->client.conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
+ &client->client.conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
mgmtd__fe_message__get_packed_size(fe_msg),
(size_t(*)(void *, void *))mgmtd__fe_message__pack,
short_circuit_ok);
}
-static int mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
+static int mgmt_fe_send_register_req(struct mgmt_fe_client *client)
{
Mgmtd__FeMessage fe_msg;
Mgmtd__FeRegisterReq rgstr_req;
mgmtd__fe_register_req__init(&rgstr_req);
- rgstr_req.client_name = client_ctx->client_params.name;
+ rgstr_req.client_name = client->name;
mgmtd__fe_message__init(&fe_msg);
fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ;
@@ -115,10 +115,10 @@ static int mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
MGMTD_FE_CLIENT_DBG(
"Sending REGISTER_REQ message to MGMTD Frontend server");
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, true);
+ return mgmt_fe_client_send_msg(client, &fe_msg, true);
}
-static int mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
+static int mgmt_fe_send_session_req(struct mgmt_fe_client *client,
struct mgmt_fe_client_session *session,
bool create)
{
@@ -146,12 +146,12 @@ static int mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
"Sending SESSION_REQ %s message for client-id %" PRIu64,
create ? "create" : "destroy", session->client_id);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, scok);
+ return mgmt_fe_client_send_msg(client, &fe_msg, scok);
}
-static int mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id, bool lock,
- uint64_t req_id, Mgmtd__DatastoreId ds_id)
+int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client, uint64_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ bool lock)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
@@ -171,15 +171,13 @@ static int mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
"Sending %sLOCK_REQ message for Ds:%d session-id %" PRIu64,
lock ? "" : "UN", ds_id, session_id);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangCfgDataReq **data_req,
- int num_data_reqs, bool implicit_commit,
- Mgmtd__DatastoreId dst_ds_id)
+int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **data_req, int num_data_reqs,
+ bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
@@ -203,14 +201,14 @@ static int mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
" (#xpaths:%d)",
ds_id, session_id, num_data_reqs);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dest_ds_id,
- bool validate_only, bool abort)
+int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dest_ds_id,
+ bool validate_only, bool abort)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
@@ -232,14 +230,13 @@ static int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
"Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session-id %" PRIu64,
src_ds_id, dest_ds_id, session_id);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq *data_req[],
- int num_data_reqs)
+int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client, uint64_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq *data_req[],
+ int num_data_reqs)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
@@ -261,14 +258,13 @@ static int mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
" (#xpaths:%d)",
ds_id, session_id, num_data_reqs);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq *data_req[],
- int num_data_reqs)
+int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client, uint64_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq *data_req[],
+ int num_data_reqs)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
@@ -290,15 +286,14 @@ static int mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
" (#xpaths:%d)",
ds_id, session_id, num_data_reqs);
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_send_regnotify_req(struct mgmt_fe_client_ctx *client_ctx,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- bool register_req,
- Mgmtd__YangDataXPath *data_req[],
- int num_data_reqs)
+int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool register_req,
+ Mgmtd__YangDataXPath *data_req[],
+ int num_data_reqs)
{
(void)req_id;
Mgmtd__FeMessage fe_msg;
@@ -315,10 +310,10 @@ static int mgmt_fe_send_regnotify_req(struct mgmt_fe_client_ctx *client_ctx,
fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ;
fe_msg.regnotify_req = &regntfy_req;
- return mgmt_fe_client_send_msg(client_ctx, &fe_msg, false);
+ return mgmt_fe_client_send_msg(client, &fe_msg, false);
}
-static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
+static int mgmt_fe_client_handle_msg(struct mgmt_fe_client *client,
Mgmtd__FeMessage *fe_msg)
{
struct mgmt_fe_client_session *session = NULL;
@@ -338,8 +333,7 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->session_reply->session_id);
session = mgmt_fe_find_session_by_client_id(
- client_ctx,
- fe_msg->session_reply->client_conn_id);
+ client, fe_msg->session_reply->client_conn_id);
if (session && fe_msg->session_reply->success) {
MGMTD_FE_CLIENT_DBG(
@@ -358,17 +352,14 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->session_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->session_req->session_id);
+ client, fe_msg->session_req->session_id);
}
/* The session state may be deleted by the callback */
- if (session && session->client_ctx &&
- session->client_ctx->client_params.client_session_notify)
- (*session->client_ctx->client_params
- .client_session_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ if (session && session->client &&
+ session->client->cbs.client_session_notify)
+ (*session->client->cbs.client_session_notify)(
+ client, client->user_data, session->client_id,
fe_msg->session_reply->create,
fe_msg->session_reply->success,
fe_msg->session_reply->session_id,
@@ -378,14 +369,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
MGMTD_FE_CLIENT_DBG("Got LOCKDS_REPLY for session-id %" PRIu64,
fe_msg->lockds_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->lockds_reply->session_id);
-
- if (session && session->client_ctx &&
- session->client_ctx->client_params.lock_ds_notify)
- (*session->client_ctx->client_params.lock_ds_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ client, fe_msg->lockds_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.lock_ds_notify)
+ (*session->client->cbs.lock_ds_notify)(
+ client, client->user_data, session->client_id,
fe_msg->lockds_reply->session_id,
session->user_ctx, fe_msg->lockds_reply->req_id,
fe_msg->lockds_reply->lock,
@@ -398,14 +387,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->setcfg_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->setcfg_reply->session_id);
-
- if (session && session->client_ctx &&
- session->client_ctx->client_params.set_config_notify)
- (*session->client_ctx->client_params.set_config_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ client, fe_msg->setcfg_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.set_config_notify)
+ (*session->client->cbs.set_config_notify)(
+ client, client->user_data, session->client_id,
fe_msg->setcfg_reply->session_id,
session->user_ctx, fe_msg->setcfg_reply->req_id,
fe_msg->setcfg_reply->success,
@@ -417,15 +404,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->commcfg_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->commcfg_reply->session_id);
-
- if (session && session->client_ctx &&
- session->client_ctx->client_params.commit_config_notify)
- (*session->client_ctx->client_params
- .commit_config_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ client, fe_msg->commcfg_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.commit_config_notify)
+ (*session->client->cbs.commit_config_notify)(
+ client, client->user_data, session->client_id,
fe_msg->commcfg_reply->session_id,
session->user_ctx,
fe_msg->commcfg_reply->req_id,
@@ -440,14 +424,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->getcfg_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->getcfg_reply->session_id);
-
- if (session && session->client_ctx &&
- session->client_ctx->client_params.get_data_notify)
- (*session->client_ctx->client_params.get_data_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ client, fe_msg->getcfg_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.get_data_notify)
+ (*session->client->cbs.get_data_notify)(
+ client, client->user_data, session->client_id,
fe_msg->getcfg_reply->session_id,
session->user_ctx, fe_msg->getcfg_reply->req_id,
fe_msg->getcfg_reply->success,
@@ -468,14 +450,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
fe_msg->getdata_reply->session_id);
session = mgmt_fe_find_session_by_session_id(
- client_ctx, fe_msg->getdata_reply->session_id);
-
- if (session && session->client_ctx &&
- session->client_ctx->client_params.get_data_notify)
- (*session->client_ctx->client_params.get_data_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
- session->client_id,
+ client, fe_msg->getdata_reply->session_id);
+
+ if (session && session->client &&
+ session->client->cbs.get_data_notify)
+ (*session->client->cbs.get_data_notify)(
+ client, client->user_data, session->client_id,
fe_msg->getdata_reply->session_id,
session->user_ctx,
fe_msg->getdata_reply->req_id,
@@ -526,12 +506,12 @@ static int mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data,
size_t len, struct msg_conn *conn)
{
- struct mgmt_fe_client_ctx *client_ctx;
- struct msg_client *client;
+ struct mgmt_fe_client *client;
+ struct msg_client *msg_client;
Mgmtd__FeMessage *fe_msg;
- client = container_of(conn, struct msg_client, conn);
- client_ctx = container_of(client, struct mgmt_fe_client_ctx, client);
+ msg_client = container_of(conn, struct msg_client, conn);
+ client = container_of(msg_client, struct mgmt_fe_client, client);
fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
if (!fe_msg) {
@@ -542,41 +522,38 @@ static void mgmt_fe_client_process_msg(uint8_t version, uint8_t *data,
MGMTD_FE_CLIENT_DBG(
"Decoded %zu bytes of message(msg: %u/%u) from server", len,
fe_msg->message_case, fe_msg->message_case);
- (void)mgmt_fe_client_handle_msg(client_ctx, fe_msg);
+ (void)mgmt_fe_client_handle_msg(client, fe_msg);
mgmtd__fe_message__free_unpacked(fe_msg, NULL);
}
-static int _notify_connect_disconnect(struct msg_client *client, bool connected)
+static int _notify_connect_disconnect(struct msg_client *msg_client,
+ bool connected)
{
- struct mgmt_fe_client_ctx *client_ctx =
- container_of(client, struct mgmt_fe_client_ctx, client);
+ struct mgmt_fe_client *client =
+ container_of(msg_client, struct mgmt_fe_client, client);
struct mgmt_fe_client_session *session;
int ret;
/* Send REGISTER_REQ message */
if (connected) {
- if ((ret = mgmt_fe_send_register_req(client_ctx)) != 0)
+ if ((ret = mgmt_fe_send_register_req(client)) != 0)
return ret;
}
/* Walk list of sessions for this FE client deleting them */
- if (!connected && mgmt_sessions_count(&client_ctx->client_sessions)) {
+ if (!connected && mgmt_sessions_count(&client->sessions)) {
MGMTD_FE_CLIENT_DBG("Cleaning up existing sessions");
- FOREACH_SESSION_IN_LIST (client_ctx, session) {
- assert(session->client_ctx);
+ FOREACH_SESSION_IN_LIST (client, session) {
+ assert(session->client);
/* unlink from list first this avoids double free */
- mgmt_sessions_del(&client_ctx->client_sessions,
- session);
+ mgmt_sessions_del(&client->sessions, session);
/* notify FE client the session is being deleted */
- if (session->client_ctx->client_params
- .client_session_notify) {
- (*session->client_ctx->client_params
- .client_session_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data,
+ if (session->client->cbs.client_session_notify) {
+ (*session->client->cbs.client_session_notify)(
+ client, client->user_data,
session->client_id, false, true,
session->session_id, session->user_ctx);
}
@@ -586,10 +563,9 @@ static int _notify_connect_disconnect(struct msg_client *client, bool connected)
}
/* Notify FE client through registered callback (if any). */
- if (client_ctx->client_params.client_connect_notify)
- (void)(*client_ctx->client_params.client_connect_notify)(
- (uintptr_t)client_ctx,
- client_ctx->client_params.user_data, connected);
+ if (client->cbs.client_connect_notify)
+ (void)(*client->cbs.client_connect_notify)(
+ client, client->user_data, connected);
return 0;
}
@@ -651,26 +627,31 @@ static struct cmd_node mgmt_dbg_node = {
/*
* Initialize library and try connecting with MGMTD.
*/
-uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
- struct event_loop *master_thread)
+struct mgmt_fe_client *mgmt_fe_client_create(const char *client_name,
+ struct mgmt_fe_client_cbs *cbs,
+ uintptr_t user_data,
+ struct event_loop *event_loop)
{
- /* Don't call twice */
- assert(!mgmt_fe_client_ctx.client.conn.loop);
+ struct mgmt_fe_client *client =
+ XCALLOC(MTYPE_MGMTD_FE_CLIENT, sizeof(*client));
- mgmt_fe_client_ctx.client_params = *params;
+ client->name = XSTRDUP(MTYPE_MGMTD_FE_CLIENT_NAME, client_name);
+ client->user_data = user_data;
+ if (cbs)
+ client->cbs = *cbs;
- mgmt_sessions_init(&mgmt_fe_client_ctx.client_sessions);
+ mgmt_sessions_init(&client->sessions);
- msg_client_init(&mgmt_fe_client_ctx.client, master_thread,
- MGMTD_FE_SERVER_PATH, mgmt_fe_client_notify_connect,
+ msg_client_init(&client->client, event_loop, MGMTD_FE_SERVER_PATH,
+ mgmt_fe_client_notify_connect,
mgmt_fe_client_notify_disconnect,
mgmt_fe_client_process_msg, MGMTD_FE_MAX_NUM_MSG_PROC,
MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN, true,
"FE-client", MGMTD_DBG_FE_CLIENT_CHECK());
- MGMTD_FE_CLIENT_DBG("Initialized client '%s'", params->name);
+ MGMTD_FE_CLIENT_DBG("Initialized client '%s'", client_name);
- return (uintptr_t)&mgmt_fe_client_ctx;
+ return client;
}
void mgmt_fe_client_lib_vty_init(void)
@@ -681,39 +662,31 @@ void mgmt_fe_client_lib_vty_init(void)
install_element(CONFIG_NODE, &debug_mgmt_client_fe_cmd);
}
-uint mgmt_fe_client_session_count(uintptr_t lib_hndl)
+uint mgmt_fe_client_session_count(struct mgmt_fe_client *client)
{
- struct mgmt_fe_client_ctx *client_ctx =
- (struct mgmt_fe_client_ctx *)lib_hndl;
-
- return mgmt_sessions_count(&client_ctx->client_sessions);
+ return mgmt_sessions_count(&client->sessions);
}
/*
* Create a new Session for a Frontend Client connection.
*/
-enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+enum mgmt_result mgmt_fe_create_client_session(struct mgmt_fe_client *client,
uint64_t client_id,
uintptr_t user_ctx)
{
- struct mgmt_fe_client_ctx *client_ctx;
struct mgmt_fe_client_session *session;
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
sizeof(struct mgmt_fe_client_session));
assert(session);
session->user_ctx = user_ctx;
session->client_id = client_id;
- session->client_ctx = client_ctx;
+ session->client = client;
session->session_id = 0;
- mgmt_sessions_add_tail(&client_ctx->client_sessions, session);
+ mgmt_sessions_add_tail(&client->sessions, session);
- if (mgmt_fe_send_session_req(client_ctx, session, true) != 0) {
+ if (mgmt_fe_send_session_req(client, session, true) != 0) {
XFREE(MTYPE_MGMTD_FE_SESSION, session);
return MGMTD_INTERNAL_ERROR;
}
@@ -724,189 +697,42 @@ enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
/*
* Delete an existing Session for a Frontend Client connection.
*/
-enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+enum mgmt_result mgmt_fe_destroy_client_session(struct mgmt_fe_client *client,
uint64_t client_id)
{
- struct mgmt_fe_client_ctx *client_ctx;
struct mgmt_fe_client_session *session;
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- session = mgmt_fe_find_session_by_client_id(client_ctx, client_id);
- if (!session || session->client_ctx != client_ctx)
+ session = mgmt_fe_find_session_by_client_id(client, client_id);
+ if (!session || session->client != client)
return MGMTD_INVALID_PARAM;
if (session->session_id &&
- mgmt_fe_send_session_req(client_ctx, session, false) != 0)
+ mgmt_fe_send_session_req(client, session, false) != 0)
MGMTD_FE_CLIENT_ERR(
"Failed to send session destroy request for the session-id %" PRIu64,
session->session_id);
- mgmt_sessions_del(&client_ctx->client_sessions, session);
+ mgmt_sessions_del(&client->sessions, session);
XFREE(MTYPE_MGMTD_FE_SESSION, session);
return MGMTD_SUCCESS;
}
-static void mgmt_fe_destroy_client_sessions(uintptr_t lib_hndl)
-{
- struct mgmt_fe_client_ctx *client_ctx;
- struct mgmt_fe_client_session *session;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return;
-
- FOREACH_SESSION_IN_LIST (client_ctx, session)
- mgmt_fe_destroy_client_session(lib_hndl, session->client_id);
-}
-
-/*
- * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
- */
-enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- bool lock_ds)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_lockds_req(client_ctx, session_id, lock_ds, req_id,
- ds_id) != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
- */
-enum mgmt_result mgmt_fe_set_config_data(uintptr_t lib_hndl,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangCfgDataReq **config_req,
- int num_reqs, bool implicit_commit,
- Mgmtd__DatastoreId dst_ds_id)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_setcfg_req(client_ctx, session_id, req_id, ds_id,
- config_req, num_reqs, implicit_commit,
- dst_ds_id) != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
- */
-enum mgmt_result mgmt_fe_commit_config_data(uintptr_t lib_hndl,
- uint64_t session_id,
- uint64_t req_id,
- Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dst_ds_id,
- bool validate_only, bool abort)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_commitcfg_req(client_ctx, session_id, req_id,
- src_ds_id, dst_ds_id, validate_only,
- abort) != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
- */
-enum mgmt_result mgmt_fe_get_config_data(uintptr_t lib_hndl,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq *data_req[],
- int num_reqs)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_getcfg_req(client_ctx, session_id, req_id, ds_id,
- data_req, num_reqs) != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send GET_DATA_REQ to MGMTD for one or more config data item(s).
- */
-enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq *data_req[],
- int num_reqs)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_getdata_req(client_ctx, session_id, req_id, ds_id,
- data_req, num_reqs) != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
-/*
- * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
- */
-enum mgmt_result
-mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- bool register_req,
- Mgmtd__YangDataXPath *data_req[], int num_reqs)
-{
- struct mgmt_fe_client_ctx *client_ctx;
-
- client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
- if (!client_ctx)
- return MGMTD_INVALID_PARAM;
-
- if (mgmt_fe_send_regnotify_req(client_ctx, session_id, req_id, ds_id,
- register_req, data_req, num_reqs) != 0)
- return MGMTD_INTERNAL_ERROR;
-
- return MGMTD_SUCCESS;
-}
-
/*
* Destroy library and cleanup everything.
*/
-void mgmt_fe_client_lib_destroy(void)
+void mgmt_fe_client_destroy(struct mgmt_fe_client *client)
{
- struct mgmt_fe_client_ctx *client_ctx = &mgmt_fe_client_ctx;
+ struct mgmt_fe_client_session *session;
MGMTD_FE_CLIENT_DBG("Destroying MGMTD Frontend Client '%s'",
- client_ctx->client_params.name);
+ client->name);
+
+ FOREACH_SESSION_IN_LIST (client, session)
+ mgmt_fe_destroy_client_session(client, session->client_id);
+
+ msg_client_cleanup(&client->client);
- mgmt_fe_destroy_client_sessions((uintptr_t)client_ctx);
- msg_client_cleanup(&client_ctx->client);
- memset(client_ctx, 0, sizeof(*client_ctx));
+ XFREE(MTYPE_MGMTD_FE_CLIENT_NAME, client->name);
+ XFREE(MTYPE_MGMTD_FE_CLIENT, client);
}
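
For orientation, here is a minimal usage sketch of the refactored frontend-client API implemented above; it is not part of the commit. It relies only on signatures visible in this diff (mgmt_fe_client_create, mgmt_fe_client_destroy, struct mgmt_fe_client_cbs); the "my_*" names and the include path are illustrative assumptions.

#include "mgmt_fe_client.h"	/* assumed include path (lib/mgmt_fe_client.h) */

static void my_connect_notify(struct mgmt_fe_client *client,
			      uintptr_t user_data, bool connected)
{
	/* e.g. create client sessions once the MGMTD connection is up */
}

static struct mgmt_fe_client_cbs my_cbs = {
	.client_connect_notify = my_connect_notify,
};

static struct mgmt_fe_client *my_client;

void my_daemon_mgmt_init(struct event_loop *loop)
{
	/* The client context is now heap-allocated and returned as a typed
	 * pointer instead of an opaque uintptr_t handle. */
	my_client = mgmt_fe_client_create("my-daemon", &my_cbs, 0, loop);
}

void my_daemon_mgmt_fini(void)
{
	/* Tears down remaining sessions, cleans up the msg client and frees
	 * the context in one call. */
	mgmt_fe_client_destroy(my_client);
	my_client = NULL;
}
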
diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h
index 7ce6c5eef5..edf861746c 100644
--- a/lib/mgmt_fe_client.h
+++ b/lib/mgmt_fe_client.h
@@ -56,6 +56,9 @@ extern "C" {
#define MGMTD_DS_OPERATIONAL MGMTD__DATASTORE_ID__OPERATIONAL_DS
#define MGMTD_DS_MAX_ID MGMTD_DS_OPERATIONAL + 1
+struct mgmt_fe_client;
+
+
/*
* All the client specific information this library needs to
* initialize itself, setup connection with MGMTD FrontEnd interface
@@ -66,52 +69,52 @@ extern "C" {
* to initialize the library (See mgmt_fe_client_lib_init for
* more details).
*/
-struct mgmt_fe_client_params {
- char name[MGMTD_CLIENT_NAME_MAX_LEN];
- uintptr_t user_data;
- unsigned long conn_retry_intvl_sec;
-
- void (*client_connect_notify)(uintptr_t lib_hndl,
- uintptr_t user_data,
- bool connected);
-
- void (*client_session_notify)(uintptr_t lib_hndl,
- uintptr_t user_data,
- uint64_t client_id,
+struct mgmt_fe_client_cbs {
+ void (*client_connect_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, bool connected);
+
+ void (*client_session_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
bool create, bool success,
uintptr_t session_id,
- uintptr_t user_session_ctx);
+ uintptr_t user_session_client);
- void (*lock_ds_notify)(uintptr_t lib_hndl, uintptr_t user_data,
- uint64_t client_id, uintptr_t session_id,
- uintptr_t user_session_ctx, uint64_t req_id,
+ void (*lock_ds_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client, uint64_t req_id,
bool lock_ds, bool success,
Mgmtd__DatastoreId ds_id, char *errmsg_if_any);
- void (*set_config_notify)(uintptr_t lib_hndl, uintptr_t user_data,
- uint64_t client_id, uintptr_t session_id,
- uintptr_t user_session_ctx, uint64_t req_id,
- bool success, Mgmtd__DatastoreId ds_id,
+ void (*set_config_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client,
+ uint64_t req_id, bool success,
+ Mgmtd__DatastoreId ds_id,
char *errmsg_if_any);
- void (*commit_config_notify)(
- uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
- uintptr_t session_id, uintptr_t user_session_ctx,
- uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dst_ds_id, bool validate_only,
- char *errmsg_if_any);
-
- enum mgmt_result (*get_data_notify)(
- uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
- uintptr_t session_id, uintptr_t user_session_ctx,
- uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangData **yang_data, size_t num_data, int next_key,
- char *errmsg_if_any);
-
- enum mgmt_result (*data_notify)(
- uint64_t client_id, uint64_t session_id, uintptr_t user_data,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangData **yang_data, size_t num_data);
+ void (*commit_config_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client,
+ uint64_t req_id, bool success,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id,
+ bool validate_only, char *errmsg_if_any);
+
+ int (*get_data_notify)(struct mgmt_fe_client *client,
+ uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id,
+ uintptr_t user_session_client, uint64_t req_id,
+ bool success, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData **yang_data, size_t num_data,
+ int next_key, char *errmsg_if_any);
+
+ int (*data_notify)(uint64_t client_id, uint64_t session_id,
+ uintptr_t user_data, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData **yang_data, size_t num_data);
};
extern struct debug mgmt_dbg_fe_client;
@@ -139,17 +142,18 @@ extern struct debug mgmt_dbg_fe_client;
* Thread master.
*
* Returns:
- * Frontend client lib handler (nothing but address of mgmt_fe_client_ctx)
+ * Frontend client lib handler (nothing but address of mgmt_fe_client)
*/
-extern uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
- struct event_loop *master_thread);
+extern struct mgmt_fe_client *
+mgmt_fe_client_create(const char *client_name, struct mgmt_fe_client_cbs *cbs,
+ uintptr_t user_data, struct event_loop *event_loop);
/*
* Initialize library vty (adds debug support).
*
- * This call should be added to your component when enabling other vty code to
- * enable mgmtd client debugs. When adding, one needs to also add a their
- * component in `xref2vtysh.py` as well.
+ * This call should be added to your component when enabling other vty
+ * code to enable mgmtd client debugs. When adding, one needs to also
+ * add their component in `xref2vtysh.py` as well.
*/
extern void mgmt_fe_client_lib_vty_init(void);
@@ -167,15 +171,15 @@ extern void mgmt_debug_fe_client_show_debug(struct vty *vty);
* client_id
* Unique identifier of client.
*
- * user_ctx
+ * user_client
* Client context.
*
* Returns:
* MGMTD_SUCCESS on success, MGMTD_* otherwise.
*/
-extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
- uint64_t client_id,
- uintptr_t user_ctx);
+extern enum mgmt_result
+mgmt_fe_create_client_session(struct mgmt_fe_client *client, uint64_t client_id,
+ uintptr_t user_client);
/*
* Delete an existing Session for a Frontend Client connection.
@@ -187,10 +191,11 @@ extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
* Unique identifier of client.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
- uint64_t client_id);
+extern enum mgmt_result
+mgmt_fe_destroy_client_session(struct mgmt_fe_client *client,
+ uint64_t client_id);
/*
* Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
@@ -211,11 +216,11 @@ extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
* TRUE for lock request, FALSE for unlock request.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id,
- Mgmtd__DatastoreId ds_id, bool lock_ds);
+extern int mgmt_fe_send_lockds_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool lock_ds);
/*
* Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
@@ -245,13 +250,15 @@ extern enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uint64_t session_id,
* Destination Datastore ID where data needs to be set.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_set_config_data(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangCfgDataReq **config_req, int num_req,
- bool implicit_commit, Mgmtd__DatastoreId dst_ds_id);
+
+extern int mgmt_fe_send_setcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **config_req,
+ int num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id);
/*
* Send SET_COMMMIT_REQ to MGMTD for one or more config data(s).
@@ -278,13 +285,13 @@ mgmt_fe_set_config_data(uintptr_t lib_hndl, uint64_t session_id,
* TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_commit_config_data(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
- Mgmtd__DatastoreId dst_ds_id, bool validate_only,
- bool abort);
+extern int mgmt_fe_send_commitcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id,
+ bool validate_only, bool abort);
/*
* Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
@@ -308,12 +315,13 @@ mgmt_fe_commit_config_data(uintptr_t lib_hndl, uint64_t session_id,
* Number of get config requests.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_get_config_data(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req, int num_reqs);
+extern int mgmt_fe_send_getcfg_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req,
+ int num_reqs);
/*
* Send GET_DATA_REQ to MGMTD for one or more data item(s).
@@ -321,11 +329,11 @@ mgmt_fe_get_config_data(uintptr_t lib_hndl, uint64_t session_id,
* Similar to get config request but supports getting data
* from operational ds aka backend clients directly.
*/
-extern enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl,
- uint64_t session_id, uint64_t req_id,
- Mgmtd__DatastoreId ds_id,
- Mgmtd__YangGetDataReq **data_req,
- int num_reqs);
+extern int mgmt_fe_send_getdata_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req,
+ int num_reqs);
/*
* Send NOTIFY_REGISTER_REQ to MGMTD daemon.
@@ -352,23 +360,24 @@ extern enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl,
* Number of data requests.
*
* Returns:
- * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ * 0 on success, otherwise msg_conn_send_msg() return values.
*/
-extern enum mgmt_result
-mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uint64_t session_id,
- uint64_t req_id, Mgmtd__DatastoreId ds_id,
- bool register_req, Mgmtd__YangDataXPath **data_req,
- int num_reqs);
+extern int mgmt_fe_send_regnotify_req(struct mgmt_fe_client *client,
+ uint64_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ bool register_req,
+ Mgmtd__YangDataXPath **data_req,
+ int num_reqs);
/*
* Destroy library and cleanup everything.
*/
-extern void mgmt_fe_client_lib_destroy(void);
+extern void mgmt_fe_client_destroy(struct mgmt_fe_client *client);
/*
* Get count of open sessions.
*/
-extern uint mgmt_fe_client_session_count(uintptr_t lib_hndl);
+extern uint mgmt_fe_client_session_count(struct mgmt_fe_client *client);
#ifdef __cplusplus
}
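
The send helpers declared above now return 0 on success or a msg_conn_send_msg() error instead of enum mgmt_result, so callers treat the result as a plain int; the vty.c hunks below are the in-tree example of this. A hedged sketch of the same idiom (the my_* wrapper is hypothetical):

/* Illustrative only: checks the new int return of a renamed send helper. */
static int my_lock_candidate_ds(struct mgmt_fe_client *client,
				uint64_t session_id, uint64_t req_id)
{
	if (mgmt_fe_send_lockds_req(client, session_id, req_id,
				    MGMTD_DS_CANDIDATE, true)) {
		/* nonzero: the request could not be queued/sent */
		return -1;
	}
	return 0;
}
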
diff --git a/lib/vty.c b/lib/vty.c
index 4cf63508bf..b701f3bc0f 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -68,7 +68,7 @@ enum vty_event {
struct nb_config *vty_mgmt_candidate_config;
-static uintptr_t mgmt_lib_hndl;
+static struct mgmt_fe_client *mgmt_fe_client;
static bool mgmt_fe_connected;
static bool mgmt_candidate_ds_wr_locked;
static uint64_t mgmt_client_id_next;
@@ -1640,12 +1640,12 @@ struct vty *vty_new(void)
new->max = VTY_BUFSIZ;
new->pass_fd = -1;
- if (mgmt_lib_hndl) {
+ if (mgmt_fe_client) {
if (!mgmt_client_id_next)
mgmt_client_id_next++;
new->mgmt_client_id = mgmt_client_id_next++;
if (mgmt_fe_create_client_session(
- mgmt_lib_hndl, new->mgmt_client_id,
+ mgmt_fe_client, new->mgmt_client_id,
(uintptr_t) new) != MGMTD_SUCCESS)
zlog_err(
"Failed to open a MGMTD Frontend session for VTY session %p!!",
@@ -2419,8 +2419,8 @@ void vty_close(struct vty *vty)
vty->status = VTY_CLOSE;
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
- mgmt_fe_destroy_client_session(mgmt_lib_hndl,
+ if (mgmt_fe_client && vty->mgmt_session_id) {
+ mgmt_fe_destroy_client_session(mgmt_fe_client,
vty->mgmt_client_id);
vty->mgmt_session_id = 0;
}
@@ -3391,8 +3391,8 @@ void vty_init_vtysh(void)
* functionality linked into it. This design choice was taken for efficiency.
*/
-static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
- bool connected)
+static void vty_mgmt_server_connected(struct mgmt_fe_client *client,
+ uintptr_t usr_data, bool connected)
{
MGMTD_FE_CLIENT_DBG("Got %sconnected %s MGMTD Frontend Server",
!connected ? "dis: " : "",
@@ -3403,7 +3403,7 @@ static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
* The fe client library will delete all session on disconnect before
* calling us.
*/
- assert(mgmt_fe_client_session_count(lib_hndl) == 0);
+ assert(mgmt_fe_client_session_count(client) == 0);
mgmt_fe_connected = connected;
@@ -3417,10 +3417,10 @@ static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
/*
* A session has successfully been created for a vty.
*/
-static void vty_mgmt_session_notify(uintptr_t lib_hndl, uintptr_t usr_data,
- uint64_t client_id, bool create,
- bool success, uintptr_t session_id,
- uintptr_t session_ctx)
+static void vty_mgmt_session_notify(struct mgmt_fe_client *client,
+ uintptr_t usr_data, uint64_t client_id,
+ bool create, bool success,
+ uintptr_t session_id, uintptr_t session_ctx)
{
struct vty *vty;
@@ -3444,8 +3444,9 @@ static void vty_mgmt_session_notify(uintptr_t lib_hndl, uintptr_t usr_data,
}
}
-static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
- uint64_t client_id, uintptr_t session_id,
+static void vty_mgmt_ds_lock_notified(struct mgmt_fe_client *client,
+ uintptr_t usr_data, uint64_t client_id,
+ uintptr_t session_id,
uintptr_t session_ctx, uint64_t req_id,
bool lock_ds, bool success,
Mgmtd__DatastoreId ds_id,
@@ -3469,7 +3470,7 @@ static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
}
static void vty_mgmt_set_config_result_notified(
- uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+ struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id,
uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any)
{
@@ -3493,7 +3494,7 @@ static void vty_mgmt_set_config_result_notified(
}
static void vty_mgmt_commit_config_result_notified(
- uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+ struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id,
uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
bool success, Mgmtd__DatastoreId src_ds_id,
Mgmtd__DatastoreId dst_ds_id, bool validate_only, char *errmsg_if_any)
@@ -3520,8 +3521,8 @@ static void vty_mgmt_commit_config_result_notified(
vty_mgmt_resume_response(vty, success);
}
-static enum mgmt_result vty_mgmt_get_data_result_notified(
- uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+static int vty_mgmt_get_data_result_notified(
+ struct mgmt_fe_client *client, uintptr_t usr_data, uint64_t client_id,
uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
bool success, Mgmtd__DatastoreId ds_id, Mgmtd__YangData **yang_data,
size_t num_data, int next_key, char *errmsg_if_any)
@@ -3538,7 +3539,7 @@ static enum mgmt_result vty_mgmt_get_data_result_notified(
vty_out(vty, "ERROR: GET_DATA request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
vty_mgmt_resume_response(vty, success);
- return MGMTD_INTERNAL_ERROR;
+ return -1;
}
MGMTD_FE_CLIENT_DBG("GET_DATA request succeeded, client 0x%" PRIx64
@@ -3559,10 +3560,10 @@ static enum mgmt_result vty_mgmt_get_data_result_notified(
vty_mgmt_resume_response(vty, success);
}
- return MGMTD_SUCCESS;
+ return 0;
}
-static struct mgmt_fe_client_params client_params = {
+static struct mgmt_fe_client_cbs mgmt_cbs = {
.client_connect_notify = vty_mgmt_server_connected,
.client_session_notify = vty_mgmt_session_notify,
.lock_ds_notify = vty_mgmt_ds_lock_notified,
@@ -3573,21 +3574,19 @@ static struct mgmt_fe_client_params client_params = {
void vty_init_mgmt_fe(void)
{
- if (!vty_master) {
- zlog_err("Always call vty_mgmt_init_fe() after vty_init()!!");
- return;
- }
+ char name[40];
- assert(!mgmt_lib_hndl);
- snprintf(client_params.name, sizeof(client_params.name), "%s-%lld",
- frr_get_progname(), (long long)getpid());
- mgmt_lib_hndl = mgmt_fe_client_lib_init(&client_params, vty_master);
- assert(mgmt_lib_hndl);
+ assert(vty_master);
+ assert(!mgmt_fe_client);
+ snprintf(name, sizeof(name), "vty-%s-%ld", frr_get_progname(),
+ (long)getpid());
+ mgmt_fe_client = mgmt_fe_client_create(name, &mgmt_cbs, 0, vty_master);
+ assert(mgmt_fe_client);
}
bool vty_mgmt_fe_enabled(void)
{
- return mgmt_lib_hndl && mgmt_fe_connected;
+ return mgmt_fe_client && mgmt_fe_connected;
}
bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty)
@@ -3598,13 +3597,11 @@ bool vty_mgmt_should_process_cli_apply_changes(struct vty *vty)
int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
bool lock)
{
- enum mgmt_result ret;
-
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
+ if (mgmt_fe_client && vty->mgmt_session_id) {
vty->mgmt_req_id++;
- ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id,
- vty->mgmt_req_id, ds_id, lock);
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_lockds_req(mgmt_fe_client,
+ vty->mgmt_session_id,
+ vty->mgmt_req_id, ds_id, lock)) {
zlog_err("Failed sending %sLOCK-DS-REQ req-id %" PRIu64,
lock ? "" : "UN", vty->mgmt_req_id);
vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!\n",
@@ -3641,7 +3638,7 @@ int vty_mgmt_send_config_data(struct vty *vty)
}
- if (mgmt_lib_hndl && vty->mgmt_client_id && !vty->mgmt_session_id) {
+ if (mgmt_fe_client && vty->mgmt_client_id && !vty->mgmt_session_id) {
/*
* We are connected to mgmtd but we do not yet have an
* established session. this means we need to send any changes
@@ -3652,7 +3649,7 @@ int vty_mgmt_send_config_data(struct vty *vty)
return 0;
}
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
+ if (mgmt_fe_client && vty->mgmt_session_id) {
cnt = 0;
for (indx = 0; indx < vty->num_cfg_changes; indx++) {
mgmt_yang_data_init(&cfg_data[cnt]);
@@ -3701,8 +3698,8 @@ int vty_mgmt_send_config_data(struct vty *vty)
vty->mgmt_req_id++;
implicit_commit = vty_needs_implicit_commit(vty);
- if (cnt && mgmt_fe_set_config_data(
- mgmt_lib_hndl, vty->mgmt_session_id,
+ if (cnt && mgmt_fe_send_setcfg_req(
+ mgmt_fe_client, vty->mgmt_session_id,
vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq,
cnt, implicit_commit,
MGMTD_DS_RUNNING) != MGMTD_SUCCESS) {
@@ -3720,15 +3717,12 @@ int vty_mgmt_send_config_data(struct vty *vty)
int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
{
- enum mgmt_result ret;
-
- if (mgmt_lib_hndl && vty->mgmt_session_id) {
+ if (mgmt_fe_client && vty->mgmt_session_id) {
vty->mgmt_req_id++;
- ret = mgmt_fe_commit_config_data(
- mgmt_lib_hndl, vty->mgmt_session_id, vty->mgmt_req_id,
- MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only,
- abort);
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_commitcfg_req(
+ mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, MGMTD_DS_CANDIDATE,
+ MGMTD_DS_RUNNING, validate_only, abort)) {
zlog_err("Failed sending COMMIT-REQ req-id %" PRIu64,
vty->mgmt_req_id);
vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!\n");
@@ -3745,7 +3739,6 @@ int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
const char **xpath_list, int num_req)
{
- enum mgmt_result ret;
Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
@@ -3762,11 +3755,9 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
get_req[i].data = &yang_data[i];
getreq[i] = &get_req[i];
}
- ret = mgmt_fe_get_config_data(mgmt_lib_hndl, vty->mgmt_session_id,
- vty->mgmt_req_id, datastore, getreq,
- num_req);
-
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_getcfg_req(mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, datastore, getreq,
+ num_req)) {
zlog_err(
"Failed to send GET-CONFIG to MGMTD for req-id %" PRIu64
".",
@@ -3783,7 +3774,6 @@ int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
const char **xpath_list, int num_req)
{
- enum mgmt_result ret;
Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
@@ -3800,10 +3790,9 @@ int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
get_req[i].data = &yang_data[i];
getreq[i] = &get_req[i];
}
- ret = mgmt_fe_get_data(mgmt_lib_hndl, vty->mgmt_session_id,
- vty->mgmt_req_id, datastore, getreq, num_req);
-
- if (ret != MGMTD_SUCCESS) {
+ if (mgmt_fe_send_getdata_req(mgmt_fe_client, vty->mgmt_session_id,
+ vty->mgmt_req_id, datastore, getreq,
+ num_req)) {
zlog_err("Failed to send GET-DATA to MGMTD for req-id %" PRIu64
".",
vty->mgmt_req_id);
@@ -3862,9 +3851,9 @@ void vty_terminate(void)
{
struct vty *vty;
- if (mgmt_lib_hndl) {
- mgmt_fe_client_lib_destroy();
- mgmt_lib_hndl = 0;
+ if (mgmt_fe_client) {
+ mgmt_fe_client_destroy(mgmt_fe_client);
+ mgmt_fe_client = 0;
}
memset(vty_cwd, 0x00, sizeof(vty_cwd));
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
index bf59224338..3d818cb4c2 100644
--- a/mgmtd/mgmt_txn.c
+++ b/mgmtd/mgmt_txn.c
@@ -1022,7 +1022,7 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
(void)mgmt_txn_send_commit_cfg_reply(
txn_req->txn, MGMTD_INTERNAL_ERROR,
"Internal error! Could not get Xpath from Ds node!");
- goto mgmt_txn_create_config_batches_failed;
+ return -1;
}
value = (char *)lyd_get_value(chg->cb.dnode);
@@ -1130,18 +1130,11 @@ static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
(void)mgmt_txn_send_commit_cfg_reply(
txn_req->txn, MGMTD_NO_CFG_CHANGES,
"No changes found to commit!");
- goto mgmt_txn_create_config_batches_failed;
+ return -1;
}
cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
return 0;
-
-mgmt_txn_create_config_batches_failed:
-
- if (xpath)
- free(xpath);
-
- return -1;
}
static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
diff --git a/ospf6d/ospf6_interface.c b/ospf6d/ospf6_interface.c
index ea059c4be6..0fb3d29e25 100644
--- a/ospf6d/ospf6_interface.c
+++ b/ospf6d/ospf6_interface.c
@@ -516,7 +516,6 @@ static int ospf6_interface_state_change(uint8_t next_state,
OSPF6_NETWORK_LSA_EXECUTE(oi);
OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi);
OSPF6_INTRA_PREFIX_LSA_SCHEDULE_STUB(oi->area);
- OSPF6_INTRA_PREFIX_LSA_EXECUTE_TRANSIT(oi);
} else if (prev_state == OSPF6_INTERFACE_DR
|| next_state == OSPF6_INTERFACE_DR) {
OSPF6_NETWORK_LSA_SCHEDULE(oi);
diff --git a/staticd/static_main.c b/staticd/static_main.c
index 464c42ecab..f6b7847602 100644
--- a/staticd/static_main.c
+++ b/staticd/static_main.c
@@ -53,7 +53,7 @@ struct option longopts[] = { { 0 } };
/* Master of threads. */
struct event_loop *master;
-uintptr_t mgmt_lib_hndl;
+struct mgmt_be_client *mgmt_be_client;
static struct frr_daemon_info staticd_di;
/* SIGHUP handler. */
@@ -71,7 +71,7 @@ static void sigint(void)
/* Disable BFD events to avoid wasting processing. */
bfd_protocol_integration_set_shutdown(true);
- mgmt_be_client_lib_destroy();
+ mgmt_be_client_destroy(mgmt_be_client);
static_vrf_terminate();
@@ -106,56 +106,6 @@ struct frr_signal_t static_signals[] = {
},
};
-#if 0
-static void static_mgmt_be_client_connect(uintptr_t lib_hndl,
- uintptr_t usr_data, bool connected)
-{
- (void)usr_data;
-
- assert(lib_hndl == mgmt_lib_hndl);
-
- zlog_debug("Got %s %s MGMTD Backend Client Server",
- connected ? "connected" : "disconnected",
- connected ? "to" : "from");
-
- /* unless we are subscribing to xpaths we don't need to do this */
- if (connected)
- (void)mgmt_be_subscribe_yang_data(mgmt_lib_hndl, NULL, 0);
-}
-
-static void
-static_mgmt_txn_notify(uintptr_t lib_hndl, uintptr_t usr_data,
- struct mgmt_be_client_txn_ctx *txn_ctx,
- bool destroyed)
-{
- zlog_debug("Got Txn %s Notify from MGMTD server",
- destroyed ? "DESTROY" : "CREATE");
-
- if (!destroyed) {
- /*
- * TODO: Allocate and install a private scratchpad for this
- * transaction if required
- */
- } else {
- /*
- * TODO: Uninstall and deallocate the private scratchpad for
- * this transaction if installed earlier.
- */
- }
-}
-#endif
-
-static struct mgmt_be_client_params mgmt_params = {
- .name = "staticd",
- .conn_retry_intvl_sec = 3,
- /*
- * instead of a connect routine maybe just put xpaths to subcribe to
- * here
- */
- .client_connect_notify = NULL, /* static_mgmt_be_client_connect, */
- .txn_notify = NULL, /* static_mgmt_txn_notify */
-};
-
static const struct frr_yang_module_info *const staticd_yang_modules[] = {
&frr_filter_info,
&frr_interface_info,
@@ -212,7 +162,7 @@ int main(int argc, char **argv, char **envp)
static_vty_init();
/* Initialize MGMT backend functionalities */
- mgmt_lib_hndl = mgmt_be_client_lib_init(&mgmt_params, master);
+ mgmt_be_client = mgmt_be_client_create("staticd", NULL, 0, master);
hook_register(routing_conf_event,
routing_control_plane_protocols_name_validate);
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index a85b86668c..5d37b062ac 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -33,6 +33,7 @@ from lib.topogen import TopoRouter, get_topogen
from lib.topolog import get_logger, logger
from lib.topotest import frr_unicode, interface_set_status, version_cmp
from lib import topotest
+from munet.testing.util import pause_test
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
@@ -2069,6 +2070,8 @@ def step(msg, reset=False):
* ` msg` : Step message body.
* `reset` : Reset step count to 1 when set to True.
"""
+ if bool(topotest.g_pytest_config.get_option("--pause")):
+ pause_test("before :" + msg)
_step = Stepper()
_step(msg, reset)
diff --git a/tests/topotests/mgmt_startup/test_bigconf.py b/tests/topotests/mgmt_startup/test_bigconf.py
index 465f646b6e..3b13229af5 100644
--- a/tests/topotests/mgmt_startup/test_bigconf.py
+++ b/tests/topotests/mgmt_startup/test_bigconf.py
@@ -69,10 +69,10 @@ def test_staticd_latestart(tgen):
check_vtysh_up(r1)
logging.info("r1: vtysh connected after %ss", track.elapsed())
- result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=20)
+ result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60)
assert result is None
logging.info("r1: first route installed after %ss", track.elapsed())
- result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=20)
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60)
assert result is None
logging.info("r1: last route installed after %ss", track.elapsed())
diff --git a/tests/topotests/mgmt_startup/test_late_bigconf.py b/tests/topotests/mgmt_startup/test_late_bigconf.py
index ac7ac57cf8..5e594aba6c 100644
--- a/tests/topotests/mgmt_startup/test_late_bigconf.py
+++ b/tests/topotests/mgmt_startup/test_late_bigconf.py
@@ -68,9 +68,9 @@ def test_staticd_latestart(tgen):
check_vtysh_up(r1)
logging.info("r1: vtysh connected after %ss", track.elapsed())
- result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=20, expected=False)
+ result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60, expected=False)
assert result is not None, "first route present and should not be"
- result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=20, expected=False)
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60, expected=False)
assert result is not None, "last route present and should not be"
step("Starting staticd")
@@ -78,5 +78,5 @@ def test_staticd_latestart(tgen):
result = check_kernel(r1, ROUTE_RANGE[0], retry_timeout=60)
assert result is None, "first route not present and should be"
- result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=20)
+ result = check_kernel(r1, ROUTE_RANGE[1], retry_timeout=60)
assert result is None, "last route not present and should be"
diff --git a/zebra/zebra_mlag.c b/zebra/zebra_mlag.c
index 6713dbc967..7715eab0a8 100644
--- a/zebra/zebra_mlag.c
+++ b/zebra/zebra_mlag.c
@@ -338,8 +338,6 @@ static void zebra_mlag_post_data_from_main_thread(struct event *thread)
}
}
- stream_free(s);
- return;
stream_failure:
stream_free(s);
if (zebra_s)
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 6abd49310c..d2367007cf 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -507,8 +507,6 @@ static void zserv_process_messages(struct event *thread)
stream_fifo_push(cache, msg);
}
- msg = NULL;
-
/* Need to reschedule processing work if there are still
* packets in the fifo.
*/