- FOREACH_SAFI
# ospfd
- LSDB_LOOP
+ # mgmtd
+ - FOREACH_MGMTD_DB_ID
+ - FOREACH_ADAPTER_IN_LIST
+ - FOREACH_SESSION_IN_LIST
+ - FOREACH_SESSION_IN_LIST_SAFE
zlog_info("Configuration Read in Took: %s", readin_time_str);
+ if (vty_mgmt_fe_enabled())
+ vty_mgmt_send_commit_config(vty, false, false);
+
if (callback.end_config)
(*callback.end_config)();
--- /dev/null
+// SPDX-License-Identifier: ISC
+//
+// mgmt.proto
+//
+// @copyright Copyright (C) 2021 Vmware, Inc.
+//
+// @author Pushpasis Sarkar <spushpasis@vmware.com>
+//
+
+syntax = "proto2";
+
+//
+// Protobuf definitions pertaining to the MGMTD component.
+//
+
+package mgmtd;
+
+//
+// Common Sub-Messages
+//
+
+// A single YANG data node identified by xpath only (no value).
+message YangDataXPath {
+	required string xpath = 1;
+}
+
+// Value of a YANG data node. Only a stringized encoding is carried for
+// now; the commented-out fields below reserve the intended numbers for
+// a future native-format enhancement.
+message YangDataValue {
+	oneof value {
+		//
+		// NOTE: For now let's use stringized value ONLY.
+		// We will enhance it later to pass native-format
+		// if needed.
+		//
+		// bool bool_val = 2;
+		// double double_val = 3;
+		// float float_val = 4;
+		// string string_val = 5;
+		// bytes bytes_val = 6;
+		// int32 int32_val = 7;
+		// int64 int64_val = 8;
+		// uint32 uint32_val = 9;
+		// uint64 uint64_val = 10;
+		// int32 int8_val = 11;
+		// uint32 uint8_val = 12;
+		// int32 int16_val = 13;
+		// uint32 uint16_val = 14;
+		string encoded_str_val = 100;
+	}
+}
+
+// A YANG data node: xpath plus (optionally) its value.
+message YangData {
+	required string xpath = 1;
+	optional YangDataValue value = 2;
+}
+
+// Kind of a single configuration-change request.
+enum CfgDataReqType {
+	REQ_TYPE_NONE = 0;
+	SET_DATA = 1;
+	DELETE_DATA = 2;
+}
+
+// One configuration change: set or delete one data node.
+message YangCfgDataReq {
+	required YangData data = 1;
+	required CfgDataReqType req_type = 2;
+}
+
+// One data-retrieval request. 'next_indx' is the continuation index
+// from a previous partial reply (see YangDataReply.next_indx).
+message YangGetDataReq {
+	required YangData data = 1;
+	required int64 next_indx = 2;
+}
+
+//
+// Backend Interface Messages
+//
+// Backend client registers itself and (optionally) the xpaths it owns.
+message BeSubscribeReq {
+	required string client_name = 1;
+	required bool subscribe_xpaths = 2;
+	repeated string xpath_reg = 3;
+}
+
+message BeSubscribeReply {
+	required bool success = 1;
+}
+
+// Create (create=true) or delete a configuration transaction.
+message BeTxnReq {
+	required uint64 txn_id = 1;
+	required bool create = 2;
+}
+
+message BeTxnReply {
+	required uint64 txn_id = 1;
+	required bool create = 2;
+	required bool success = 3;
+}
+
+// One batch of configuration changes for a transaction.
+// 'end_of_data' marks the last batch of the transaction.
+message BeCfgDataCreateReq {
+	required uint64 txn_id = 1;
+	required uint64 batch_id = 2;
+	repeated YangCfgDataReq data_req = 3;
+	required bool end_of_data = 4;
+}
+
+message BeCfgDataCreateReply {
+	required uint64 txn_id = 1;
+	required uint64 batch_id = 2;
+	required bool success = 3;
+	optional string error_if_any = 4;
+}
+
+// Validate previously created batches of a transaction.
+message BeCfgDataValidateReq {
+	required uint64 txn_id = 1;
+	repeated uint64 batch_ids = 2;
+}
+
+message BeCfgDataValidateReply {
+	required uint64 txn_id = 1;
+	repeated uint64 batch_ids = 2;
+	required bool success = 3;
+	optional string error_if_any = 4;
+}
+
+// Apply all validated batches of a transaction.
+message BeCfgDataApplyReq {
+	required uint64 txn_id = 1;
+}
+
+message BeCfgDataApplyReply {
+	required uint64 txn_id = 1;
+	repeated uint64 batch_ids = 2;
+	required bool success = 3;
+	optional string error_if_any = 4;
+}
+
+// Operational-data get request for one batch of a transaction.
+message BeOperDataGetReq {
+	required uint64 txn_id = 1;
+	required uint64 batch_id = 2;
+	repeated YangGetDataReq data = 3;
+}
+
+// A set of data nodes plus the continuation index for retrieving the
+// remainder of a partial reply.
+message YangDataReply {
+	repeated YangData data = 1;
+	required int64 next_indx = 2;
+}
+
+message BeOperDataGetReply {
+	required uint64 txn_id = 1;
+	required uint64 batch_id = 2;
+	required bool success = 3;
+	optional string error = 4;
+	optional YangDataReply data = 5;
+}
+
+// Unsolicited operational-data notification from a backend client.
+// NOTE(review): field number is 5 (not 1), presumably to mirror
+// BeOperDataGetReply.data -- confirm intent before changing.
+message BeOperDataNotify {
+	required YangDataReply data = 5;
+}
+
+// Pass a raw CLI configuration command through to a backend client.
+message BeConfigCmdReq {
+	required string cmd = 1;
+}
+
+message BeConfigCmdReply {
+	required bool success = 1;
+	required string error_if_any = 2;
+}
+
+// Pass a raw CLI show command through to a backend client.
+message BeShowCmdReq {
+	required string cmd = 1;
+}
+
+message BeShowCmdReply {
+	required bool success = 1;
+	// Fixed typo: was 'cmd_ouput'. Renaming is safe here because this
+	// file is newly added in this patch and no generated code or wire
+	// peers depend on the old name yet.
+	required string cmd_output = 2;
+}
+
+//
+// Any message on the MGMTD Backend Interface.
+//
+// NOTE(review): oneof field numbers start at 2; number 1 is unused,
+// presumably held back for future use -- confirm before reusing it.
+message BeMessage {
+	oneof message {
+		BeSubscribeReq subscr_req = 2;
+		BeSubscribeReply subscr_reply = 3;
+		BeTxnReq txn_req = 4;
+		BeTxnReply txn_reply = 5;
+		BeCfgDataCreateReq cfg_data_req = 6;
+		BeCfgDataCreateReply cfg_data_reply = 7;
+		BeCfgDataValidateReq cfg_validate_req = 8;
+		BeCfgDataValidateReply cfg_validate_reply = 9;
+		BeCfgDataApplyReq cfg_apply_req = 10;
+		BeCfgDataApplyReply cfg_apply_reply = 11;
+		BeOperDataGetReq get_req = 12;
+		BeOperDataGetReply get_reply = 13;
+		BeOperDataNotify notify_data = 14;
+		BeConfigCmdReq cfg_cmd_req = 15;
+		BeConfigCmdReply cfg_cmd_reply = 16;
+		BeShowCmdReq show_cmd_req = 17;
+		BeShowCmdReply show_cmd_reply = 18;
+	}
+}
+
+
+//
+// Frontend Interface Messages
+//
+
+// Registers a frontend client (by name) with MGMTD.
+message FeRegisterReq {
+	required string client_name = 1;
+}
+
+// Create or delete a frontend session. Exactly one id is supplied
+// depending on direction (see per-field comments).
+message FeSessionReq {
+	required bool create = 1;
+	oneof id {
+		uint64 client_conn_id = 2; // Applicable for create request only
+		uint64 session_id = 3; // Applicable for delete request only
+	}
+}
+
+// On successful create, 'session_id' is the server-assigned id the
+// client must use on all subsequent requests.
+message FeSessionReply {
+	required bool create = 1;
+	required bool success = 2;
+	optional uint64 client_conn_id = 3; // Applicable for create request only
+	required uint64 session_id = 4;
+}
+
+// Datastores managed by MGMTD.
+enum DatastoreId {
+	DS_NONE = 0;
+	RUNNING_DS = 1;
+	CANDIDATE_DS = 2;
+	OPERATIONAL_DS = 3;
+	STARTUP_DS = 4;
+}
+
+// Lock (lock=true) or unlock a datastore for this session.
+message FeLockDsReq {
+	required uint64 session_id = 1;
+	required uint64 req_id = 2;
+	required DatastoreId ds_id = 3;
+	required bool lock = 4;
+}
+
+message FeLockDsReply {
+	required uint64 session_id = 1;
+	required uint64 req_id = 2;
+	required DatastoreId ds_id = 3;
+	required bool lock = 4;
+	required bool success = 5;
+	optional string error_if_any = 6;
+}
+
+// Apply configuration changes to 'ds_id'. When 'implicit_commit' is
+// set the changes are also committed to 'commit_ds_id' in one step.
+message FeSetConfigReq {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required uint64 req_id = 3;
+	repeated YangCfgDataReq data = 4;
+	required bool implicit_commit = 5;
+	required DatastoreId commit_ds_id = 6;
+}
+
+message FeSetConfigReply {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required uint64 req_id = 3;
+	required bool success = 4;
+	optional string error_if_any = 5;
+}
+
+// Commit src_ds -> dst_ds. 'validate_only' checks without applying;
+// 'abort' discards the pending changes instead of committing them.
+message FeCommitConfigReq {
+	required uint64 session_id = 1;
+	required DatastoreId src_ds_id = 2;
+	required DatastoreId dst_ds_id = 3;
+	required uint64 req_id = 4;
+	required bool validate_only = 5;
+	required bool abort = 6;
+}
+
+message FeCommitConfigReply {
+	required uint64 session_id = 1;
+	required DatastoreId src_ds_id = 2;
+	required DatastoreId dst_ds_id = 3;
+	required uint64 req_id = 4;
+	required bool validate_only = 5;
+	required bool success = 6;
+	required bool abort = 7;
+	optional string error_if_any = 8;
+}
+
+// Retrieve configuration data from a datastore.
+message FeGetConfigReq {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required uint64 req_id = 3;
+	repeated YangGetDataReq data = 4;
+}
+
+message FeGetConfigReply {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required uint64 req_id = 3;
+	required bool success = 4;
+	optional string error_if_any = 5;
+	optional YangDataReply data = 6;
+}
+
+// Retrieve (operational) data; same shape as FeGetConfigReq but a
+// distinct message so the two RPC flows can diverge independently.
+message FeGetDataReq {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required uint64 req_id = 3;
+	repeated YangGetDataReq data = 4;
+}
+
+message FeGetDataReply {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required uint64 req_id = 3;
+	required bool success = 4;
+	optional string error_if_any = 5;
+	optional YangDataReply data = 6;
+}
+
+// Unsolicited data notification pushed to the frontend client.
+message FeNotifyDataReq {
+	repeated YangData data = 1;
+}
+
+// Register (or unregister) interest in notifications for the given
+// xpaths on one datastore.
+message FeRegisterNotifyReq {
+	required uint64 session_id = 1;
+	required DatastoreId ds_id = 2;
+	required bool register_req = 3;
+	required uint64 req_id = 4;
+	repeated YangDataXPath data_xpath = 5;
+}
+
+// Any message on the MGMTD Frontend Interface.
+// NOTE(review): oneof numbering starts at 2, matching BeMessage --
+// number 1 is unused; confirm before reusing it.
+message FeMessage {
+	oneof message {
+		FeRegisterReq register_req = 2;
+		FeSessionReq session_req = 3;
+		FeSessionReply session_reply = 4;
+		FeLockDsReq lockds_req = 5;
+		FeLockDsReply lockds_reply = 6;
+		FeSetConfigReq setcfg_req = 7;
+		FeSetConfigReply setcfg_reply = 8;
+		FeCommitConfigReq commcfg_req = 9;
+		FeCommitConfigReply commcfg_reply = 10;
+		FeGetConfigReq getcfg_req = 11;
+		FeGetConfigReply getcfg_reply = 12;
+		FeGetDataReq getdata_req = 13;
+		FeGetDataReply getdata_reply = 14;
+		FeNotifyDataReq notify_data_req = 15;
+		FeRegisterNotifyReq regnotify_req = 16;
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "memory.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_pb.h"
+#include "network.h"
+#include "stream.h"
+#include "sockopt.h"
+
+/*
+ * Logging helpers. With REDIRECT_DEBUG_TO_STDERR defined (test builds)
+ * both macros print directly to stderr; otherwise debug output goes
+ * through zlog_debug() gated by the runtime 'mgmt_debug_fe_client'
+ * flag, and errors always go through zlog_err().
+ */
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_CLIENT_DBG(fmt, ...)                                          \
+	fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...)                                          \
+	fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_CLIENT_DBG(fmt, ...)                                          \
+	do {                                                                   \
+		if (mgmt_debug_fe_client)                                      \
+			zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);       \
+	} while (0)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...)                                          \
+	zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+struct mgmt_fe_client_ctx;
+
+/* Typesafe list of sessions owned by one client connection. */
+PREDECL_LIST(mgmt_sessions);
+
+/*
+ * One frontend session created by this client. 'client_id' is chosen
+ * by the caller; 'session_id' is assigned by the MGMTD server and
+ * filled in when the SESSION_REPLY arrives.
+ */
+struct mgmt_fe_client_session {
+	uint64_t client_id;
+	uint64_t session_id;
+	struct mgmt_fe_client_ctx *client_ctx;
+	uintptr_t user_ctx;
+
+	struct mgmt_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_sessions, struct mgmt_fe_client_session, list_linkage);
+
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "MGMTD Frontend session");
+
+/*
+ * Per-connection state for the frontend client: event handles,
+ * counters, inbound/outbound stream buffers and the session list.
+ */
+struct mgmt_fe_client_ctx {
+	int conn_fd;
+	struct thread_master *tm;
+	struct thread *conn_retry_tmr;
+	struct thread *conn_read_ev;
+	struct thread *conn_write_ev;
+	struct thread *conn_writes_on;
+	struct thread *msg_proc_ev;
+	uint32_t flags;
+	uint32_t num_msg_tx;
+	uint32_t num_msg_rx;
+
+	struct stream_fifo *ibuf_fifo;
+	struct stream *ibuf_work;
+	struct stream_fifo *obuf_fifo;
+	struct stream *obuf_work;
+
+	struct mgmt_fe_client_params client_params;
+
+	struct mgmt_sessions_head client_sessions;
+};
+
+/* When set, outbound writes are paused (flow control). */
+#define MGMTD_FE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
+
+/*
+ * NOTE(review): despite the name, this expands to frr_each_safe, so it
+ * is safe to delete the current session while iterating. A separate
+ * FOREACH_SESSION_IN_LIST_SAFE name is registered elsewhere in this
+ * patch -- confirm the intended naming.
+ */
+#define FOREACH_SESSION_IN_LIST(client_ctx, session)                           \
+	frr_each_safe (mgmt_sessions, &(client_ctx)->client_sessions, (session))
+
+static bool mgmt_debug_fe_client;
+
+/* Single process-wide client context (one connection per process). */
+static struct mgmt_fe_client_ctx mgmt_fe_client_ctx = {0};
+
+/* Forward declarations */
+static void
+mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
+ enum mgmt_fe_event event);
+static void mgmt_fe_client_schedule_conn_retry(
+ struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs);
+
+/*
+ * Linear search of the session list by caller-assigned client-id.
+ * Returns the session, or NULL if no session matches.
+ */
+static struct mgmt_fe_client_session *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx,
+				      uint64_t client_id)
+{
+	struct mgmt_fe_client_session *session;
+
+	FOREACH_SESSION_IN_LIST (client_ctx, session) {
+		if (session->client_id == client_id) {
+			MGMTD_FE_CLIENT_DBG(
+				"Found session %p for client-id %llu.", session,
+				(unsigned long long)client_id);
+			return session;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Linear search of the session list by server-assigned session-id.
+ * Returns the session, or NULL if no session matches.
+ */
+static struct mgmt_fe_client_session *
+mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx,
+				       uint64_t session_id)
+{
+	struct mgmt_fe_client_session *session;
+
+	FOREACH_SESSION_IN_LIST (client_ctx, session) {
+		if (session->session_id == session_id) {
+			MGMTD_FE_CLIENT_DBG(
+				"Found session %p for session-id %llu.", session,
+				(unsigned long long)session_id);
+			return session;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Close the connection to the MGMTD server and optionally schedule a
+ * reconnect after the configured retry interval.
+ *
+ * NOTE(review): fd 0 is used as the "no connection" sentinel here and
+ * in send_msg; a valid descriptor can be 0 -- consider using -1.
+ */
+static void
+mgmt_fe_server_disconnect(struct mgmt_fe_client_ctx *client_ctx,
+			      bool reconnect)
+{
+	if (client_ctx->conn_fd) {
+		close(client_ctx->conn_fd);
+		client_ctx->conn_fd = 0;
+	}
+
+	if (reconnect)
+		mgmt_fe_client_schedule_conn_retry(
+			client_ctx,
+			client_ctx->client_params.conn_retry_intvl_sec);
+}
+
+/* Schedule a socket write unless writes are currently paused. */
+static inline void
+mgmt_fe_client_sched_msg_write(struct mgmt_fe_client_ctx *client_ctx)
+{
+	if (!CHECK_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF))
+		mgmt_fe_client_register_event(client_ctx,
+						  MGMTD_FE_CONN_WRITE);
+}
+
+/*
+ * Resume writing: clear the pause flag and, if anything is pending in
+ * the work buffer or FIFO, schedule a write immediately.
+ */
+static inline void
+mgmt_fe_client_writes_on(struct mgmt_fe_client_ctx *client_ctx)
+{
+	MGMTD_FE_CLIENT_DBG("Resume writing msgs");
+	UNSET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
+	if (client_ctx->obuf_work
+	    || stream_fifo_count_safe(client_ctx->obuf_fifo))
+		mgmt_fe_client_sched_msg_write(client_ctx);
+}
+
+/* Pause writing (flow control); cleared by writes_on above. */
+static inline void
+mgmt_fe_client_writes_off(struct mgmt_fe_client_ctx *client_ctx)
+{
+	SET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
+	MGMTD_FE_CLIENT_DBG("Paused writing msgs");
+}
+
+/*
+ * Serialize one FeMessage (local header + packed protobuf payload)
+ * into the outbound buffers and schedule a socket write.
+ *
+ * Returns 0 on success, -1 if not connected or the packed message
+ * would exceed MGMTD_FE_MSG_MAX_LEN. Messages are appended to
+ * 'obuf_work'; when the current work stream cannot hold the message
+ * it is pushed to 'obuf_fifo' and a fresh stream is started.
+ */
+static int
+mgmt_fe_client_send_msg(struct mgmt_fe_client_ctx *client_ctx,
+			    Mgmtd__FeMessage *fe_msg)
+{
+	size_t msg_size;
+	uint8_t msg_buf[MGMTD_FE_MSG_MAX_LEN];
+	struct mgmt_fe_msg *msg;
+
+	if (client_ctx->conn_fd == 0)
+		return -1;
+
+	msg_size = mgmtd__fe_message__get_packed_size(fe_msg);
+	msg_size += MGMTD_FE_MSG_HDR_LEN;
+	if (msg_size > sizeof(msg_buf)) {
+		MGMTD_FE_CLIENT_ERR(
+			"Message size %d more than max size'%d. Not sending!'",
+			(int)msg_size, (int)sizeof(msg_buf));
+		return -1;
+	}
+
+	/* Local framing: marker + total length, then the protobuf bytes. */
+	msg = (struct mgmt_fe_msg *)msg_buf;
+	msg->hdr.marker = MGMTD_FE_MSG_MARKER;
+	msg->hdr.len = (uint16_t)msg_size;
+	mgmtd__fe_message__pack(fe_msg, msg->payload);
+
+	if (!client_ctx->obuf_work)
+		client_ctx->obuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	if (STREAM_WRITEABLE(client_ctx->obuf_work) < msg_size) {
+		stream_fifo_push(client_ctx->obuf_fifo, client_ctx->obuf_work);
+		client_ctx->obuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	}
+	stream_write(client_ctx->obuf_work, (void *)msg_buf, msg_size);
+
+	mgmt_fe_client_sched_msg_write(client_ctx);
+	client_ctx->num_msg_tx++;
+	return 0;
+}
+
+/*
+ * Socket-writable event handler: flush up to MGMTD_FE_MAX_NUM_MSG_WRITE
+ * buffered streams to the server. EAGAIN/EWOULDBLOCK reschedules the
+ * write; a short write rewinds the stream and reschedules; any other
+ * error disconnects (with reconnect). If streams remain after the
+ * per-invocation quota, writes are paused and a WRITES_ON event is
+ * scheduled to resume later (yields the event loop).
+ */
+static void mgmt_fe_client_write(struct thread *thread)
+{
+	int bytes_written = 0;
+	int processed = 0;
+	int msg_size = 0;
+	struct stream *s = NULL;
+	struct stream *free = NULL;
+	struct mgmt_fe_client_ctx *client_ctx;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+	assert(client_ctx && client_ctx->conn_fd);
+
+	/* Ensure pushing any pending write buffer to FIFO */
+	if (client_ctx->obuf_work) {
+		stream_fifo_push(client_ctx->obuf_fifo, client_ctx->obuf_work);
+		client_ctx->obuf_work = NULL;
+	}
+
+	for (s = stream_fifo_head(client_ctx->obuf_fifo);
+	     s && processed < MGMTD_FE_MAX_NUM_MSG_WRITE;
+	     s = stream_fifo_head(client_ctx->obuf_fifo)) {
+		/* msg_size = (int)stream_get_size(s); */
+		msg_size = (int)STREAM_READABLE(s);
+		bytes_written = stream_flush(s, client_ctx->conn_fd);
+		if (bytes_written == -1
+		    && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+			mgmt_fe_client_register_event(
+				client_ctx, MGMTD_FE_CONN_WRITE);
+			return;
+		} else if (bytes_written != msg_size) {
+			MGMTD_FE_CLIENT_ERR(
+				"Could not write all %d bytes (wrote: %d) to MGMTD Backend client socket. Err: '%s'",
+				msg_size, bytes_written, safe_strerror(errno));
+			if (bytes_written > 0) {
+				/* Partial write: drop what went out, retry rest. */
+				stream_forward_getp(s, (size_t)bytes_written);
+				stream_pulldown(s);
+				mgmt_fe_client_register_event(
+					client_ctx, MGMTD_FE_CONN_WRITE);
+				return;
+			}
+			mgmt_fe_server_disconnect(client_ctx, true);
+			return;
+		}
+
+		free = stream_fifo_pop(client_ctx->obuf_fifo);
+		stream_free(free);
+		MGMTD_FE_CLIENT_DBG(
+			"Wrote %d bytes of message to MGMTD Backend client socket.'",
+			bytes_written);
+		processed++;
+	}
+
+	if (s) {
+		/* Quota exhausted with data pending: pause, resume later. */
+		mgmt_fe_client_writes_off(client_ctx);
+		mgmt_fe_client_register_event(client_ctx,
+						  MGMTD_FE_CONN_WRITES_ON);
+	}
+}
+
+/* WRITES_ON event handler: un-pause and flush any pending output. */
+static void mgmt_fe_client_resume_writes(struct thread *thread)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+	assert(client_ctx && client_ctx->conn_fd);
+
+	mgmt_fe_client_writes_on(client_ctx);
+}
+
+/*
+ * Send a REGISTER_REQ announcing this client (by configured name) to
+ * the MGMTD frontend server. Returns the send_msg result.
+ */
+static int
+mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeRegisterReq rgstr_req;
+
+	mgmtd__fe_register_req__init(&rgstr_req);
+	rgstr_req.client_name = client_ctx->client_params.name;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ;
+	fe_msg.register_req = &rgstr_req;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Sending REGISTER_REQ message to MGMTD Frontend server");
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a SESSION_REQ to create (identified by our client_id) or
+ * destroy (identified by the server-assigned session_id) a session.
+ */
+static int
+mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
+			     struct mgmt_fe_client_session *session,
+			     bool create)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSessionReq sess_req;
+
+	mgmtd__fe_session_req__init(&sess_req);
+	sess_req.create = create;
+	if (create) {
+		sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID;
+		sess_req.client_conn_id = session->client_id;
+	} else {
+		sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_SESSION_ID;
+		sess_req.session_id = session->session_id;
+	}
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ;
+	fe_msg.session_req = &sess_req;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Sending SESSION_REQ message for %s session %llu to MGMTD Frontend server",
+		create ? "creating" : "destroying",
+		(unsigned long long)session->client_id);
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a LOCKDS_REQ to lock (lock=true) or unlock a datastore on
+ * behalf of this session.
+ *
+ * NOTE(review): the '(void)req_id;' cast is redundant -- req_id is
+ * used below; same in the other send helpers.
+ */
+static int
+mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
+			    struct mgmt_fe_client_session *session, bool lock,
+			    uint64_t req_id, Mgmtd__DatastoreId ds_id)
+{
+	(void)req_id;
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeLockDsReq lockds_req;
+
+	mgmtd__fe_lock_ds_req__init(&lockds_req);
+	lockds_req.session_id = session->session_id;
+	lockds_req.req_id = req_id;
+	lockds_req.ds_id = ds_id;
+	lockds_req.lock = lock;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ;
+	fe_msg.lockds_req = &lockds_req;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Sending %sLOCK_REQ message for Ds:%d session %llu to MGMTD Frontend server",
+		lock ? "" : "UN", ds_id, (unsigned long long)session->client_id);
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a SET_CONFIG_REQ carrying 'num_data_reqs' config changes for
+ * datastore 'ds_id'; with 'implicit_commit' set they are also
+ * committed to 'dst_ds_id' in the same operation.
+ */
+static int
+mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+			    struct mgmt_fe_client_session *session,
+			    uint64_t req_id, Mgmtd__DatastoreId ds_id,
+			    Mgmtd__YangCfgDataReq **data_req, int num_data_reqs,
+			    bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
+{
+	(void)req_id;
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSetConfigReq setcfg_req;
+
+	mgmtd__fe_set_config_req__init(&setcfg_req);
+	setcfg_req.session_id = session->session_id;
+	setcfg_req.ds_id = ds_id;
+	setcfg_req.req_id = req_id;
+	setcfg_req.data = data_req;
+	setcfg_req.n_data = (size_t)num_data_reqs;
+	setcfg_req.implicit_commit = implicit_commit;
+	setcfg_req.commit_ds_id = dst_ds_id;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ;
+	fe_msg.setcfg_req = &setcfg_req;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Sending SET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+		ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a COMMIT_CONFIG_REQ for src_ds -> dest_ds. 'validate_only'
+ * checks without applying; 'abort' discards pending changes.
+ */
+static int
+mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+			       struct mgmt_fe_client_session *session,
+			       uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+			       Mgmtd__DatastoreId dest_ds_id, bool validate_only,
+			       bool abort)
+{
+	(void)req_id;
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeCommitConfigReq commitcfg_req;
+
+	mgmtd__fe_commit_config_req__init(&commitcfg_req);
+	commitcfg_req.session_id = session->session_id;
+	commitcfg_req.src_ds_id = src_ds_id;
+	commitcfg_req.dst_ds_id = dest_ds_id;
+	commitcfg_req.req_id = req_id;
+	commitcfg_req.validate_only = validate_only;
+	commitcfg_req.abort = abort;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ;
+	fe_msg.commcfg_req = &commitcfg_req;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session %llu to MGMTD Frontend server",
+		src_ds_id, dest_ds_id, (unsigned long long)session->client_id);
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a GET_CONFIG_REQ for 'num_data_reqs' xpaths from datastore
+ * 'ds_id' on behalf of this session.
+ */
+static int
+mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+			    struct mgmt_fe_client_session *session,
+			    uint64_t req_id, Mgmtd__DatastoreId ds_id,
+			    Mgmtd__YangGetDataReq * data_req[],
+			    int num_data_reqs)
+{
+	(void)req_id;
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetConfigReq getcfg_req;
+
+	mgmtd__fe_get_config_req__init(&getcfg_req);
+	getcfg_req.session_id = session->session_id;
+	getcfg_req.ds_id = ds_id;
+	getcfg_req.req_id = req_id;
+	getcfg_req.data = data_req;
+	getcfg_req.n_data = (size_t)num_data_reqs;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ;
+	fe_msg.getcfg_req = &getcfg_req;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Sending GET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+		ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a GET_DATA_REQ (operational data) for 'num_data_reqs' xpaths
+ * from datastore 'ds_id' on behalf of this session.
+ */
+static int
+mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
+			     struct mgmt_fe_client_session *session,
+			     uint64_t req_id, Mgmtd__DatastoreId ds_id,
+			     Mgmtd__YangGetDataReq * data_req[],
+			     int num_data_reqs)
+{
+	(void)req_id;
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetDataReq getdata_req;
+
+	mgmtd__fe_get_data_req__init(&getdata_req);
+	getdata_req.session_id = session->session_id;
+	getdata_req.ds_id = ds_id;
+	getdata_req.req_id = req_id;
+	getdata_req.data = data_req;
+	getdata_req.n_data = (size_t)num_data_reqs;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ;
+	fe_msg.getdata_req = &getdata_req;
+
+	/* Fixed copy-paste in the log text: this path sends GET_DATA_REQ,
+	 * not GET_CONFIG_REQ. */
+	MGMTD_FE_CLIENT_DBG(
+		"Sending GET_DATA_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+		ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Send a REGNOTIFY_REQ to register (or unregister, per 'register_req')
+ * interest in notifications for the given xpaths on 'ds_id'.
+ */
+static int mgmt_fe_send_regnotify_req(
+	struct mgmt_fe_client_ctx *client_ctx,
+	struct mgmt_fe_client_session *session, uint64_t req_id,
+	Mgmtd__DatastoreId ds_id, bool register_req,
+	Mgmtd__YangDataXPath * data_req[], int num_data_reqs)
+{
+	(void)req_id;
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeRegisterNotifyReq regntfy_req;
+
+	mgmtd__fe_register_notify_req__init(&regntfy_req);
+	regntfy_req.session_id = session->session_id;
+	regntfy_req.ds_id = ds_id;
+	regntfy_req.register_req = register_req;
+	regntfy_req.data_xpath = data_req;
+	regntfy_req.n_data_xpath = (size_t)num_data_reqs;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ;
+	fe_msg.regnotify_req = &regntfy_req;
+
+	return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+/*
+ * Dispatch one decoded FeMessage from the server: resolve the session
+ * it refers to and invoke the matching user callback registered in
+ * client_params. Request-direction cases are listed but ignored (they
+ * are only ever sent client -> server). Always returns 0.
+ */
+static int
+mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
+			      Mgmtd__FeMessage *fe_msg)
+{
+	struct mgmt_fe_client_session *session = NULL;
+
+	switch (fe_msg->message_case) {
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+		if (fe_msg->session_reply->create
+		    && fe_msg->session_reply->has_client_conn_id) {
+			MGMTD_FE_CLIENT_DBG(
+				"Got Session Create Reply Msg for client-id %llu with session-id: %llu.",
+				(unsigned long long)
+					fe_msg->session_reply->client_conn_id,
+				(unsigned long long)
+					fe_msg->session_reply->session_id);
+
+			session = mgmt_fe_find_session_by_client_id(
+				client_ctx,
+				fe_msg->session_reply->client_conn_id);
+
+			if (session && fe_msg->session_reply->success) {
+				MGMTD_FE_CLIENT_DBG(
+					"Session Create for client-id %llu successful.",
+					(unsigned long long)fe_msg
+						->session_reply->client_conn_id);
+				session->session_id =
+					fe_msg->session_reply->session_id;
+			} else {
+				MGMTD_FE_CLIENT_ERR(
+					"Session Create for client-id %llu failed.",
+					(unsigned long long)fe_msg
+						->session_reply->client_conn_id);
+			}
+		} else if (!fe_msg->session_reply->create) {
+			MGMTD_FE_CLIENT_DBG(
+				"Got Session Destroy Reply Msg for session-id %llu",
+				(unsigned long long)
+					fe_msg->session_reply->session_id);
+
+			/*
+			 * BUGFIX: was fe_msg->session_req->session_id.
+			 * This message is a SESSION_REPLY; 'session_req'
+			 * aliases the same oneof union slot, so reading
+			 * through it dereferenced the wrong struct layout.
+			 */
+			session = mgmt_fe_find_session_by_session_id(
+				client_ctx, fe_msg->session_reply->session_id);
+		}
+
+		if (session && session->client_ctx
+		    && session->client_ctx->client_params
+			       .client_session_notify)
+			(*session->client_ctx->client_params
+				  .client_session_notify)(
+				(uintptr_t)client_ctx,
+				client_ctx->client_params.user_data,
+				session->client_id,
+				fe_msg->session_reply->create,
+				fe_msg->session_reply->success,
+				(uintptr_t)session, session->user_ctx);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+		MGMTD_FE_CLIENT_DBG(
+			"Got LockDs Reply Msg for session-id %llu",
+			(unsigned long long)
+				fe_msg->lockds_reply->session_id);
+		session = mgmt_fe_find_session_by_session_id(
+			client_ctx, fe_msg->lockds_reply->session_id);
+
+		if (session && session->client_ctx
+		    && session->client_ctx->client_params
+			       .lock_ds_notify)
+			(*session->client_ctx->client_params
+				  .lock_ds_notify)(
+				(uintptr_t)client_ctx,
+				client_ctx->client_params.user_data,
+				session->client_id, (uintptr_t)session,
+				session->user_ctx,
+				fe_msg->lockds_reply->req_id,
+				fe_msg->lockds_reply->lock,
+				fe_msg->lockds_reply->success,
+				fe_msg->lockds_reply->ds_id,
+				fe_msg->lockds_reply->error_if_any);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+		MGMTD_FE_CLIENT_DBG(
+			"Got Set Config Reply Msg for session-id %llu",
+			(unsigned long long)
+				fe_msg->setcfg_reply->session_id);
+
+		session = mgmt_fe_find_session_by_session_id(
+			client_ctx, fe_msg->setcfg_reply->session_id);
+
+		if (session && session->client_ctx
+		    && session->client_ctx->client_params
+			       .set_config_notify)
+			(*session->client_ctx->client_params
+				  .set_config_notify)(
+				(uintptr_t)client_ctx,
+				client_ctx->client_params.user_data,
+				session->client_id, (uintptr_t)session,
+				session->user_ctx,
+				fe_msg->setcfg_reply->req_id,
+				fe_msg->setcfg_reply->success,
+				fe_msg->setcfg_reply->ds_id,
+				fe_msg->setcfg_reply->error_if_any);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+		MGMTD_FE_CLIENT_DBG(
+			"Got Commit Config Reply Msg for session-id %llu",
+			(unsigned long long)
+				fe_msg->commcfg_reply->session_id);
+
+		session = mgmt_fe_find_session_by_session_id(
+			client_ctx, fe_msg->commcfg_reply->session_id);
+
+		if (session && session->client_ctx
+		    && session->client_ctx->client_params
+			       .commit_config_notify)
+			(*session->client_ctx->client_params
+				  .commit_config_notify)(
+				(uintptr_t)client_ctx,
+				client_ctx->client_params.user_data,
+				session->client_id, (uintptr_t)session,
+				session->user_ctx,
+				fe_msg->commcfg_reply->req_id,
+				fe_msg->commcfg_reply->success,
+				fe_msg->commcfg_reply->src_ds_id,
+				fe_msg->commcfg_reply->dst_ds_id,
+				fe_msg->commcfg_reply->validate_only,
+				fe_msg->commcfg_reply->error_if_any);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
+		MGMTD_FE_CLIENT_DBG(
+			"Got Get Config Reply Msg for session-id %llu",
+			(unsigned long long)
+				fe_msg->getcfg_reply->session_id);
+
+		session = mgmt_fe_find_session_by_session_id(
+			client_ctx, fe_msg->getcfg_reply->session_id);
+
+		if (session && session->client_ctx
+		    && session->client_ctx->client_params
+			       .get_data_notify)
+			(*session->client_ctx->client_params
+				  .get_data_notify)(
+				(uintptr_t)client_ctx,
+				client_ctx->client_params.user_data,
+				session->client_id, (uintptr_t)session,
+				session->user_ctx,
+				fe_msg->getcfg_reply->req_id,
+				fe_msg->getcfg_reply->success,
+				fe_msg->getcfg_reply->ds_id,
+				fe_msg->getcfg_reply->data
+					? fe_msg->getcfg_reply->data->data
+					: NULL,
+				fe_msg->getcfg_reply->data
+					? fe_msg->getcfg_reply->data->n_data
+					: 0,
+				fe_msg->getcfg_reply->data
+					? fe_msg->getcfg_reply->data
+						  ->next_indx
+					: 0,
+				fe_msg->getcfg_reply->error_if_any);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+		MGMTD_FE_CLIENT_DBG(
+			"Got Get Data Reply Msg for session-id %llu",
+			(unsigned long long)
+				fe_msg->getdata_reply->session_id);
+
+		session = mgmt_fe_find_session_by_session_id(
+			client_ctx, fe_msg->getdata_reply->session_id);
+
+		if (session && session->client_ctx
+		    && session->client_ctx->client_params
+			       .get_data_notify)
+			(*session->client_ctx->client_params
+				  .get_data_notify)(
+				(uintptr_t)client_ctx,
+				client_ctx->client_params.user_data,
+				session->client_id, (uintptr_t)session,
+				session->user_ctx,
+				fe_msg->getdata_reply->req_id,
+				fe_msg->getdata_reply->success,
+				fe_msg->getdata_reply->ds_id,
+				fe_msg->getdata_reply->data
+					? fe_msg->getdata_reply->data->data
+					: NULL,
+				fe_msg->getdata_reply->data
+					? fe_msg->getdata_reply->data
+						  ->n_data
+					: 0,
+				fe_msg->getdata_reply->data
+					? fe_msg->getdata_reply->data
+						  ->next_indx
+					: 0,
+				fe_msg->getdata_reply->error_if_any);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from Frontend
+	 * clients to MGMTd only and/or need not be handled here.
+	 */
+	case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+#if PROTOBUF_C_VERSION_NUMBER >= 1003000
+	case _MGMTD__FE_MESSAGE__MESSAGE_IS_INT_SIZE:
+#endif
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk a raw buffer of framed messages: validate each local header
+ * (marker + length), unpack the protobuf payload and hand it to
+ * handle_msg. Stops at a bad marker or a truncated trailing message.
+ * Returns the number of messages successfully processed.
+ *
+ * NOTE(review): the loop condition 'bytes_left > MGMTD_FE_MSG_HDR_LEN'
+ * skips a message whose total length equals the header length (empty
+ * payload) -- confirm that is intended.
+ */
+static int
+mgmt_fe_client_process_msg(struct mgmt_fe_client_ctx *client_ctx,
+			       uint8_t *msg_buf, int bytes_read)
+{
+	Mgmtd__FeMessage *fe_msg;
+	struct mgmt_fe_msg *msg;
+	uint16_t bytes_left;
+	uint16_t processed = 0;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Have %u bytes of messages from MGMTD Frontend server to .",
+		bytes_read);
+
+	bytes_left = bytes_read;
+	for (; bytes_left > MGMTD_FE_MSG_HDR_LEN;
+	     bytes_left -= msg->hdr.len, msg_buf += msg->hdr.len) {
+		msg = (struct mgmt_fe_msg *)msg_buf;
+		if (msg->hdr.marker != MGMTD_FE_MSG_MARKER) {
+			MGMTD_FE_CLIENT_DBG(
+				"Marker not found in message from MGMTD Frontend server.");
+			break;
+		}
+
+		if (bytes_left < msg->hdr.len) {
+			MGMTD_FE_CLIENT_DBG(
+				"Incomplete message of %d bytes (epxected: %u) from MGMTD Frontend server.",
+				bytes_left, msg->hdr.len);
+			break;
+		}
+
+		fe_msg = mgmtd__fe_message__unpack(
+			NULL, (size_t)(msg->hdr.len - MGMTD_FE_MSG_HDR_LEN),
+			msg->payload);
+		if (!fe_msg) {
+			MGMTD_FE_CLIENT_DBG(
+				"Failed to decode %d bytes from MGMTD Frontend server.",
+				msg->hdr.len);
+			/* 'continue' still advances past this message via
+			 * the for-loop update expression. */
+			continue;
+		}
+
+		MGMTD_FE_CLIENT_DBG(
+			"Decoded %d bytes of message(msg: %u/%u) from MGMTD Frontend server",
+			msg->hdr.len, fe_msg->message_case,
+			fe_msg->message_case);
+
+		(void)mgmt_fe_client_handle_msg(client_ctx, fe_msg);
+
+		mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+		processed++;
+		client_ctx->num_msg_rx++;
+	}
+
+	return processed;
+}
+
+/*
+ * PROC_MSG event handler: drain up to MGMTD_FE_MAX_NUM_MSG_PROC
+ * buffered input streams through process_msg. Streams other than the
+ * active 'ibuf_work' are freed; 'ibuf_work' itself is reset for the
+ * next read. If streams remain afterwards, reschedules itself.
+ */
+static void mgmt_fe_client_proc_msgbufs(struct thread *thread)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+	struct stream *work;
+	int processed = 0;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+	assert(client_ctx && client_ctx->conn_fd);
+
+	for (; processed < MGMTD_FE_MAX_NUM_MSG_PROC;) {
+		work = stream_fifo_pop_safe(client_ctx->ibuf_fifo);
+		if (!work)
+			break;
+
+		processed += mgmt_fe_client_process_msg(
+			client_ctx, STREAM_DATA(work), stream_get_endp(work));
+
+		if (work != client_ctx->ibuf_work) {
+			/* Free it up */
+			stream_free(work);
+		} else {
+			/* Reset stream buffer for next read */
+			stream_reset(work);
+		}
+	}
+
+	/*
+	 * If we have more to process, reschedule for processing it.
+	 */
+	if (stream_fifo_head(client_ctx->ibuf_fifo))
+		mgmt_fe_client_register_event(client_ctx,
+						  MGMTD_FE_PROC_MSG);
+}
+
+/*
+ * Read event handler for the MGMTD Frontend server connection.
+ *
+ * Phase 1 pulls whatever bytes are currently available into ibuf_work.
+ * Phase 2 walks the buffer header-by-header, validating each message
+ * marker, to count how many *complete* messages were received.  The
+ * complete portion is pushed onto ibuf_fifo for deferred processing;
+ * a trailing partial message (if any) is copied into a fresh
+ * ibuf_work so the next read can complete it.
+ */
+static void mgmt_fe_client_read(struct thread *thread)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+	int bytes_read, msg_cnt;
+	size_t total_bytes, bytes_left;
+	struct mgmt_fe_msg_hdr *msg_hdr;
+	bool incomplete = false;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+	assert(client_ctx && client_ctx->conn_fd);
+
+	/* Phase 1: read as much as the work buffer can hold. */
+	total_bytes = 0;
+	bytes_left = STREAM_SIZE(client_ctx->ibuf_work)
+		     - stream_get_endp(client_ctx->ibuf_work);
+	for (; bytes_left > MGMTD_FE_MSG_HDR_LEN;) {
+		bytes_read = stream_read_try(client_ctx->ibuf_work,
+					     client_ctx->conn_fd, bytes_left);
+		MGMTD_FE_CLIENT_DBG(
+			"Got %d bytes of message from MGMTD Frontend server",
+			bytes_read);
+		/* -2 is normal nothing read, and to retry */
+		if (bytes_read == -2)
+			break;
+		if (bytes_read <= 0) {
+			if (bytes_read == 0) {
+				MGMTD_FE_CLIENT_ERR(
+					"Got EOF/disconnect while reading from MGMTD Frontend server.");
+			} else {
+				/* Fatal error */
+				MGMTD_FE_CLIENT_ERR(
+					"Got error (%d) while reading from MGMTD Frontend server. Err: '%s'",
+					bytes_read, safe_strerror(errno));
+			}
+			mgmt_fe_server_disconnect(client_ctx, true);
+			return;
+		}
+		total_bytes += bytes_read;
+		bytes_left -= bytes_read;
+	}
+
+	/*
+	 * Phase 2: check if we have read complete messages or not,
+	 * verifying each header's marker and stepping by its length.
+	 */
+	stream_set_getp(client_ctx->ibuf_work, 0);
+	total_bytes = 0;
+	msg_cnt = 0;
+	bytes_left = stream_get_endp(client_ctx->ibuf_work);
+	for (; bytes_left > MGMTD_FE_MSG_HDR_LEN;) {
+		msg_hdr = (struct mgmt_fe_msg_hdr
+				   *)(STREAM_DATA(client_ctx->ibuf_work)
+				      + total_bytes);
+		if (msg_hdr->marker != MGMTD_FE_MSG_MARKER) {
+			/* Corrupted buffer. Force disconnect?? */
+			MGMTD_FE_CLIENT_ERR(
+				"Received corrupted buffer from MGMTD frontend server.");
+			mgmt_fe_server_disconnect(client_ctx, true);
+			return;
+		}
+		if (msg_hdr->len > bytes_left)
+			break;
+
+		total_bytes += msg_hdr->len;
+		bytes_left -= msg_hdr->len;
+		msg_cnt++;
+	}
+
+	if (!msg_cnt)
+		goto resched;
+
+	/* bytes_left now counts a trailing partial message, if any. */
+	if (bytes_left > 0)
+		incomplete = true;
+
+	/*
+	 * We have read one or several messages.
+	 * Schedule processing them now.  msg_hdr points at the start of
+	 * the partial tail, which must be copied into the fresh buffer.
+	 */
+	msg_hdr =
+		(struct mgmt_fe_msg_hdr *)(STREAM_DATA(client_ctx->ibuf_work)
+					   + total_bytes);
+	stream_set_endp(client_ctx->ibuf_work, total_bytes);
+	stream_fifo_push(client_ctx->ibuf_fifo, client_ctx->ibuf_work);
+	client_ctx->ibuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	if (incomplete) {
+		stream_put(client_ctx->ibuf_work, msg_hdr, bytes_left);
+		stream_set_endp(client_ctx->ibuf_work, bytes_left);
+	}
+
+	mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
+
+resched:
+	mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
+}
+
+/*
+ * Open a unix-domain connection to the MGMTD Frontend server, register
+ * for reads and send the initial REGISTER_REQ.
+ *
+ * Returns 0 on success, -1 on any failure (after triggering cleanup /
+ * reconnect scheduling via mgmt_fe_server_disconnect()).
+ */
+static int mgmt_fe_server_connect(struct mgmt_fe_client_ctx *client_ctx)
+{
+	int ret, sock, len;
+	struct sockaddr_un addr;
+
+	MGMTD_FE_CLIENT_DBG(
+		"Trying to connect to MGMTD Frontend server at %s",
+		MGMTD_FE_SERVER_PATH);
+
+	assert(!client_ctx->conn_fd);
+
+	sock = socket(AF_UNIX, SOCK_STREAM, 0);
+	if (sock < 0) {
+		MGMTD_FE_CLIENT_ERR("Failed to create socket");
+		goto mgmt_fe_server_connect_failed;
+	}
+
+	MGMTD_FE_CLIENT_DBG(
+		"Created MGMTD Frontend server socket successfully!");
+
+	memset(&addr, 0, sizeof(struct sockaddr_un));
+	addr.sun_family = AF_UNIX;
+	strlcpy(addr.sun_path, MGMTD_FE_SERVER_PATH, sizeof(addr.sun_path));
+#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
+	len = addr.sun_len = SUN_LEN(&addr);
+#else
+	len = sizeof(addr.sun_family) + strlen(addr.sun_path);
+#endif /* HAVE_STRUCT_SOCKADDR_UN_SUN_LEN */
+
+	ret = connect(sock, (struct sockaddr *)&addr, len);
+	if (ret < 0) {
+		MGMTD_FE_CLIENT_ERR(
+			"Failed to connect to MGMTD Frontend Server at %s. Err: %s",
+			addr.sun_path, safe_strerror(errno));
+		close(sock);
+		/* Invalidate so the failure label does not close() twice. */
+		sock = -1;
+		goto mgmt_fe_server_connect_failed;
+	}
+
+	MGMTD_FE_CLIENT_DBG(
+		"Connected to MGMTD Frontend Server at %s successfully!",
+		addr.sun_path);
+	client_ctx->conn_fd = sock;
+
+	/* Make client socket non-blocking. */
+	set_nonblocking(sock);
+	setsockopt_so_sendbuf(client_ctx->conn_fd,
+			      MGMTD_SOCKET_FE_SEND_BUF_SIZE);
+	setsockopt_so_recvbuf(client_ctx->conn_fd,
+			      MGMTD_SOCKET_FE_RECV_BUF_SIZE);
+
+	/* Register for reads, consistently with the other event setup. */
+	mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
+
+	/* Send REGISTER_REQ message */
+	if (mgmt_fe_send_register_req(client_ctx) != 0)
+		goto mgmt_fe_server_connect_failed;
+
+	/* Notify client through registered callback (if any) */
+	if (client_ctx->client_params.client_connect_notify)
+		(void)(*client_ctx->client_params.client_connect_notify)(
+			(uintptr_t)client_ctx,
+			client_ctx->client_params.user_data, true);
+
+	return 0;
+
+mgmt_fe_server_connect_failed:
+	/*
+	 * Close the socket only if it is open and has not been handed over
+	 * to client_ctx->conn_fd (mgmt_fe_server_disconnect() owns it from
+	 * that point).  Note fd 0 is a valid descriptor, hence ">= 0"
+	 * rather than a truthiness test.
+	 */
+	if (sock >= 0 && sock != client_ctx->conn_fd)
+		close(sock);
+
+	mgmt_fe_server_disconnect(client_ctx, true);
+	return -1;
+}
+
+/* Connection-retry timer handler: attempt (re)connecting to MGMTD. */
+static void mgmt_fe_client_conn_timeout(struct thread *thread)
+{
+	struct mgmt_fe_client_ctx *ctx =
+		(struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+
+	assert(ctx);
+	mgmt_fe_server_connect(ctx);
+}
+
+/*
+ * Arm the libfrr event corresponding to 'event' for this client.
+ * MGMTD_FE_SERVER is not a client-side event and must never be
+ * passed here.
+ */
+static void
+mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
+			      enum mgmt_fe_event event)
+{
+	struct timeval tv = {0};
+
+	switch (event) {
+	case MGMTD_FE_CONN_READ:
+		thread_add_read(client_ctx->tm, mgmt_fe_client_read,
+				client_ctx, client_ctx->conn_fd,
+				&client_ctx->conn_read_ev);
+		assert(client_ctx->conn_read_ev);
+		break;
+	case MGMTD_FE_CONN_WRITE:
+		thread_add_write(client_ctx->tm, mgmt_fe_client_write,
+				 client_ctx, client_ctx->conn_fd,
+				 &client_ctx->conn_write_ev);
+		assert(client_ctx->conn_write_ev);
+		break;
+	case MGMTD_FE_PROC_MSG:
+		/* Message processing runs off a short timer rather than
+		 * immediately — presumably to batch queued buffers. */
+		tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
+		thread_add_timer_tv(client_ctx->tm,
+				    mgmt_fe_client_proc_msgbufs, client_ctx,
+				    &tv, &client_ctx->msg_proc_ev);
+		assert(client_ctx->msg_proc_ev);
+		break;
+	case MGMTD_FE_CONN_WRITES_ON:
+		thread_add_timer_msec(
+			client_ctx->tm, mgmt_fe_client_resume_writes,
+			client_ctx, MGMTD_FE_MSG_WRITE_DELAY_MSEC,
+			&client_ctx->conn_writes_on);
+		assert(client_ctx->conn_writes_on);
+		break;
+	case MGMTD_FE_SERVER:
+		assert(!"mgmt_fe_client_ctx_post_event called incorrectly");
+		break;
+	}
+}
+
+/* Arm the connection-retry timer to fire after intvl_secs seconds. */
+static void mgmt_fe_client_schedule_conn_retry(
+	struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs)
+{
+	MGMTD_FE_CLIENT_DBG(
+		"Scheduling MGMTD Frontend server connection retry after %lu seconds",
+		intvl_secs);
+
+	thread_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
+			 client_ctx, intvl_secs,
+			 &client_ctx->conn_retry_tmr);
+}
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ *
+ * Sets up the single global client context (mgmt_fe_client_ctx),
+ * allocates the input/output stream FIFOs and schedules the first
+ * connection attempt one second from now.  May only be called once:
+ * the asserts trip if the context is already initialized.
+ *
+ * Returns the context address as an opaque handle for all other APIs.
+ */
+uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
+				  struct thread_master *master_thread)
+{
+	assert(master_thread && params && strlen(params->name)
+	       && !mgmt_fe_client_ctx.tm);
+
+	mgmt_fe_client_ctx.tm = master_thread;
+	memcpy(&mgmt_fe_client_ctx.client_params, params,
+	       sizeof(mgmt_fe_client_ctx.client_params));
+	/* Fall back to the default retry interval if none was given. */
+	if (!mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec)
+		mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec =
+			MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC;
+
+	assert(!mgmt_fe_client_ctx.ibuf_fifo
+	       && !mgmt_fe_client_ctx.ibuf_work
+	       && !mgmt_fe_client_ctx.obuf_fifo
+	       && !mgmt_fe_client_ctx.obuf_work);
+
+	mgmt_fe_client_ctx.ibuf_fifo = stream_fifo_new();
+	mgmt_fe_client_ctx.ibuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	mgmt_fe_client_ctx.obuf_fifo = stream_fifo_new();
+
+	/* obuf_work is left NULL here; presumably allocated on demand by
+	 * the send path (not visible in this file section). */
+	mgmt_fe_client_ctx.obuf_work = NULL;
+
+	mgmt_sessions_init(&mgmt_fe_client_ctx.client_sessions);
+
+	/* Start trying to connect to MGMTD frontend server immediately */
+	mgmt_fe_client_schedule_conn_retry(&mgmt_fe_client_ctx, 1);
+
+	MGMTD_FE_CLIENT_DBG("Initialized client '%s'", params->name);
+
+	return (uintptr_t)&mgmt_fe_client_ctx;
+}
+
+/*
+ * Open a new Session for a Frontend Client connection.
+ *
+ * Allocates the local session object and announces it to MGMTD with a
+ * session-create request; the session is tracked locally only when the
+ * request could actually be sent.
+ */
+enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+					       uint64_t client_id,
+					       uintptr_t user_ctx)
+{
+	struct mgmt_fe_client_ctx *client =
+		(struct mgmt_fe_client_ctx *)lib_hndl;
+	struct mgmt_fe_client_session *sess;
+
+	if (!client)
+		return MGMTD_INVALID_PARAM;
+
+	sess = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+		       sizeof(struct mgmt_fe_client_session));
+	assert(sess);
+	sess->user_ctx = user_ctx;
+	sess->client_id = client_id;
+	sess->client_ctx = client;
+	sess->session_id = 0;
+
+	if (mgmt_fe_send_session_req(client, sess, true) != 0) {
+		XFREE(MTYPE_MGMTD_FE_SESSION, sess);
+		return MGMTD_INTERNAL_ERROR;
+	}
+	mgmt_sessions_add_tail(&client->client_sessions, sess);
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Delete an existing Session for a Frontend Client connection.
+ *
+ * Sends a session-destroy request to MGMTD when the session was ever
+ * established (session_id != 0), then removes and frees the local
+ * session object unconditionally — even if the request failed to send.
+ */
+enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+						uint64_t client_id)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+	struct mgmt_fe_client_session *session;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+	if (!client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	session = mgmt_fe_find_session_by_client_id(client_ctx, client_id);
+	if (!session || session->client_ctx != client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	/* Best-effort notify to MGMTD; local cleanup proceeds regardless. */
+	if (session->session_id &&
+	    mgmt_fe_send_session_req(client_ctx, session, false) != 0)
+		MGMTD_FE_CLIENT_ERR(
+			"Failed to send session destroy request for the session-id %lu",
+			(unsigned long)session->session_id);
+
+	mgmt_sessions_del(&client_ctx->client_sessions, session);
+	XFREE(MTYPE_MGMTD_FE_SESSION, session);
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Tear down every session tracked for this client.
+ *
+ * mgmt_fe_destroy_client_session() removes the current element from the
+ * list and frees it, so a plain FOREACH_SESSION_IN_LIST here would step
+ * through freed memory; use the deletion-safe iterator instead.
+ */
+static void mgmt_fe_destroy_client_sessions(uintptr_t lib_hndl)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+	struct mgmt_fe_client_session *session;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+	if (!client_ctx)
+		return;
+
+	FOREACH_SESSION_IN_LIST_SAFE (client_ctx, session)
+		mgmt_fe_destroy_client_session(lib_hndl, session->client_id);
+}
+
+/*
+ * Send LOCK_DS_REQ / UNLOCK_DS_REQ to MGMTD for a specific datastore.
+ */
+enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id,
+				 uint64_t req_id, Mgmtd__DatastoreId ds_id,
+				 bool lock_ds)
+{
+	struct mgmt_fe_client_ctx *client =
+		(struct mgmt_fe_client_ctx *)lib_hndl;
+	struct mgmt_fe_client_session *sess =
+		(struct mgmt_fe_client_session *)session_id;
+
+	/* Both handles must be valid and the session must belong to
+	 * this client. */
+	if (!client || !sess || sess->client_ctx != client)
+		return MGMTD_INVALID_PARAM;
+
+	return mgmt_fe_send_lockds_req(client, sess, lock_ds, req_id,
+				       ds_id) == 0
+		       ? MGMTD_SUCCESS
+		       : MGMTD_INTERNAL_ERROR;
+}
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD carrying one or more config changes,
+ * optionally asking for an implicit commit into dst_ds_id.
+ */
+enum mgmt_result
+mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+			uint64_t req_id, Mgmtd__DatastoreId ds_id,
+			Mgmtd__YangCfgDataReq **config_req, int num_reqs,
+			bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
+{
+	struct mgmt_fe_client_ctx *client =
+		(struct mgmt_fe_client_ctx *)lib_hndl;
+	struct mgmt_fe_client_session *sess =
+		(struct mgmt_fe_client_session *)session_id;
+
+	/* Both handles must be valid and the session must belong to
+	 * this client. */
+	if (!client || !sess || sess->client_ctx != client)
+		return MGMTD_INVALID_PARAM;
+
+	return mgmt_fe_send_setcfg_req(client, sess, req_id, ds_id,
+				       config_req, num_reqs, implicit_commit,
+				       dst_ds_id) == 0
+		       ? MGMTD_SUCCESS
+		       : MGMTD_INTERNAL_ERROR;
+}
+
+/*
+ * Send COMMIT_CONFIG_REQ to MGMTD to commit (or validate-only / abort)
+ * configuration from the source datastore to the destination one.
+ */
+enum mgmt_result mgmt_fe_commit_config_data(uintptr_t lib_hndl,
+					    uintptr_t session_id,
+					    uint64_t req_id,
+					    Mgmtd__DatastoreId src_ds_id,
+					    Mgmtd__DatastoreId dst_ds_id,
+					    bool validate_only, bool abort)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+	struct mgmt_fe_client_session *session;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+	if (!client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	/* session_id carries the library-side session pointer. */
+	session = (struct mgmt_fe_client_session *)session_id;
+	if (!session || session->client_ctx != client_ctx)
+		return MGMTD_INVALID_PARAM;
+
+	if (mgmt_fe_send_commitcfg_req(client_ctx, session, req_id, src_ds_id,
+				       dst_ds_id, validate_only, abort)
+	    != 0)
+		return MGMTD_INTERNAL_ERROR;
+
+	return MGMTD_SUCCESS;
+}
+
+/*
+ * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ */
+enum mgmt_result
+mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+			uint64_t req_id, Mgmtd__DatastoreId ds_id,
+			Mgmtd__YangGetDataReq * data_req[], int num_reqs)
+{
+	struct mgmt_fe_client_ctx *client =
+		(struct mgmt_fe_client_ctx *)lib_hndl;
+	struct mgmt_fe_client_session *sess =
+		(struct mgmt_fe_client_session *)session_id;
+
+	/* Both handles must be valid and the session must belong to
+	 * this client. */
+	if (!client || !sess || sess->client_ctx != client)
+		return MGMTD_INVALID_PARAM;
+
+	return mgmt_fe_send_getcfg_req(client, sess, req_id, ds_id,
+				       data_req, num_reqs) == 0
+		       ? MGMTD_SUCCESS
+		       : MGMTD_INTERNAL_ERROR;
+}
+
+/*
+ * Send GET_DATA_REQ to MGMTD for one or more data item(s).
+ */
+enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id,
+				  uint64_t req_id, Mgmtd__DatastoreId ds_id,
+				  Mgmtd__YangGetDataReq * data_req[],
+				  int num_reqs)
+{
+	struct mgmt_fe_client_ctx *client =
+		(struct mgmt_fe_client_ctx *)lib_hndl;
+	struct mgmt_fe_client_session *sess =
+		(struct mgmt_fe_client_session *)session_id;
+
+	/* Both handles must be valid and the session must belong to
+	 * this client. */
+	if (!client || !sess || sess->client_ctx != client)
+		return MGMTD_INVALID_PARAM;
+
+	return mgmt_fe_send_getdata_req(client, sess, req_id, ds_id,
+					data_req, num_reqs) == 0
+		       ? MGMTD_SUCCESS
+		       : MGMTD_INTERNAL_ERROR;
+}
+
+/*
+ * Send NOTIFY_REGISTER_REQ to the MGMTD daemon to (de)register for
+ * YANG notifications on the given xpaths.
+ */
+enum mgmt_result
+mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
+			     uint64_t req_id, Mgmtd__DatastoreId ds_id,
+			     bool register_req,
+			     Mgmtd__YangDataXPath * data_req[],
+			     int num_reqs)
+{
+	struct mgmt_fe_client_ctx *client =
+		(struct mgmt_fe_client_ctx *)lib_hndl;
+	struct mgmt_fe_client_session *sess =
+		(struct mgmt_fe_client_session *)session_id;
+
+	/* Both handles must be valid and the session must belong to
+	 * this client. */
+	if (!client || !sess || sess->client_ctx != client)
+		return MGMTD_INVALID_PARAM;
+
+	return mgmt_fe_send_regnotify_req(client, sess, req_id, ds_id,
+					  register_req, data_req,
+					  num_reqs) == 0
+		       ? MGMTD_SUCCESS
+		       : MGMTD_INTERNAL_ERROR;
+}
+
+/*
+ * Destroy library and cleanup everything.
+ *
+ * Disconnects from the server, destroys all sessions, cancels every
+ * pending event and frees the stream buffers.  All freed pointers are
+ * reset to NULL (and tm cleared) so the asserts in
+ * mgmt_fe_client_lib_init() hold if the library is re-initialized.
+ */
+void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl)
+{
+	struct mgmt_fe_client_ctx *client_ctx;
+
+	client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+	assert(client_ctx);
+
+	MGMTD_FE_CLIENT_DBG("Destroying MGMTD Frontend Client '%s'",
+			    client_ctx->client_params.name);
+
+	mgmt_fe_server_disconnect(client_ctx, false);
+
+	assert(client_ctx->ibuf_fifo && client_ctx->obuf_fifo);
+
+	mgmt_fe_destroy_client_sessions(lib_hndl);
+
+	THREAD_OFF(client_ctx->conn_retry_tmr);
+	THREAD_OFF(client_ctx->conn_read_ev);
+	THREAD_OFF(client_ctx->conn_write_ev);
+	THREAD_OFF(client_ctx->conn_writes_on);
+	THREAD_OFF(client_ctx->msg_proc_ev);
+
+	stream_fifo_free(client_ctx->ibuf_fifo);
+	client_ctx->ibuf_fifo = NULL;
+	if (client_ctx->ibuf_work) {
+		stream_free(client_ctx->ibuf_work);
+		client_ctx->ibuf_work = NULL;
+	}
+	stream_fifo_free(client_ctx->obuf_fifo);
+	client_ctx->obuf_fifo = NULL;
+	if (client_ctx->obuf_work) {
+		stream_free(client_ctx->obuf_work);
+		client_ctx->obuf_work = NULL;
+	}
+
+	client_ctx->tm = NULL;
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_CLIENT_H_
+#define _FRR_MGMTD_FE_CLIENT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mgmtd/mgmt_defines.h"
+#include "mgmt_pb.h"
+
+/***************************************************************
+ * Macros
+ ***************************************************************/
+
+/*
+ * The server socket path on which the MGMTD daemon listens for
+ * Frontend Client connections.
+ */
+
+#define MGMTD_FE_CLIENT_ERROR_STRING_MAX_LEN 32
+
+#define MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC 5
+
+#define MGMTD_FE_MSG_PROC_DELAY_USEC 10
+#define MGMTD_FE_MAX_NUM_MSG_PROC 500
+
+#define MGMTD_FE_MSG_WRITE_DELAY_MSEC 1
+#define MGMTD_FE_MAX_NUM_MSG_WRITE 100
+
+#define GMGD_FE_MAX_NUM_REQ_ITEMS 64
+
+#define MGMTD_FE_MSG_MAX_LEN 9000
+
+#define MGMTD_SOCKET_FE_SEND_BUF_SIZE 65535
+#define MGMTD_SOCKET_FE_RECV_BUF_SIZE MGMTD_SOCKET_FE_SEND_BUF_SIZE
+
+/***************************************************************
+ * Data-structures
+ ***************************************************************/
+
+#define MGMTD_SESSION_ID_NONE 0
+
+#define MGMTD_CLIENT_ID_NONE 0
+
+#define MGMTD_DS_NONE MGMTD__DATASTORE_ID__DS_NONE
+#define MGMTD_DS_RUNNING MGMTD__DATASTORE_ID__RUNNING_DS
+#define MGMTD_DS_CANDIDATE MGMTD__DATASTORE_ID__CANDIDATE_DS
+#define MGMTD_DS_OPERATIONAL MGMTD__DATASTORE_ID__OPERATIONAL_DS
+#define MGMTD_DS_MAX_ID MGMTD_DS_OPERATIONAL + 1
+
+/* Fixed header preceding every message on the Frontend channel. */
+struct mgmt_fe_msg_hdr {
+	uint16_t marker;	/* Must be MGMTD_FE_MSG_MARKER */
+	uint16_t len;	/* Includes header */
+};
+#define MGMTD_FE_MSG_HDR_LEN sizeof(struct mgmt_fe_msg_hdr)
+#define MGMTD_FE_MSG_MARKER 0xdeaf
+
+/* On-wire message layout: header followed by the protobuf-encoded body. */
+struct mgmt_fe_msg {
+	struct mgmt_fe_msg_hdr hdr;
+	uint8_t payload[];
+};
+
+/*
+ * All the client specific information this library needs to
+ * initialize itself, setup connection with MGMTD FrontEnd interface
+ * and carry on all required procedures appropriately.
+ *
+ * FrontEnd clients need to initialize an instance of this structure
+ * with appropriate data and pass it while calling the API
+ * to initialize the library (See mgmt_fe_client_lib_init for
+ * more details).
+ */
+struct mgmt_fe_client_params {
+ char name[MGMTD_CLIENT_NAME_MAX_LEN];
+ uintptr_t user_data;
+ unsigned long conn_retry_intvl_sec;
+
+ void (*client_connect_notify)(uintptr_t lib_hndl,
+ uintptr_t user_data,
+ bool connected);
+
+ void (*client_session_notify)(uintptr_t lib_hndl,
+ uintptr_t user_data,
+ uint64_t client_id,
+ bool create, bool success,
+ uintptr_t session_id,
+ uintptr_t user_session_ctx);
+
+ void (*lock_ds_notify)(uintptr_t lib_hndl, uintptr_t user_data,
+ uint64_t client_id, uintptr_t session_id,
+ uintptr_t user_session_ctx, uint64_t req_id,
+ bool lock_ds, bool success,
+ Mgmtd__DatastoreId ds_id, char *errmsg_if_any);
+
+ void (*set_config_notify)(uintptr_t lib_hndl, uintptr_t user_data,
+ uint64_t client_id, uintptr_t session_id,
+ uintptr_t user_session_ctx, uint64_t req_id,
+ bool success, Mgmtd__DatastoreId ds_id,
+ char *errmsg_if_any);
+
+ void (*commit_config_notify)(
+ uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id, uintptr_t user_session_ctx,
+ uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, bool validate_only,
+ char *errmsg_if_any);
+
+ enum mgmt_result (*get_data_notify)(
+ uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
+ uintptr_t session_id, uintptr_t user_session_ctx,
+ uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData **yang_data, size_t num_data, int next_key,
+ char *errmsg_if_any);
+
+ enum mgmt_result (*data_notify)(
+ uint64_t client_id, uint64_t session_id, uintptr_t user_data,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData **yang_data, size_t num_data);
+};
+
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
+/*
+ * Initialize library and try connecting with MGMTD FrontEnd interface.
+ *
+ * params
+ * Frontend client parameters.
+ *
+ * master_thread
+ * Thread master.
+ *
+ * Returns:
+ * Frontend client lib handler (nothing but address of mgmt_fe_client_ctx)
+ */
+extern uintptr_t
+mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
+ struct thread_master *master_thread);
+
+/*
+ * Create a new Session for a Frontend Client connection.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * client_id
+ * Unique identifier of client.
+ *
+ * user_ctx
+ * Client context.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+ uint64_t client_id,
+ uintptr_t user_ctx);
+
+/*
+ * Delete an existing Session for a Frontend Client connection.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * client_id
+ * Unique identifier of client.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+ uint64_t client_id);
+
+/*
+ * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID (Running/Candidate/Oper/Startup)
+ *
+ * lock_ds
+ * TRUE for lock request, FALSE for unlock request.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, bool lock_ds);
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID (Running/Candidate/Oper/Startup)
+ *
+ * conf_req
+ * Details regarding the SET_CONFIG_REQ.
+ *
+ * num_req
+ * Number of config requests.
+ *
+ * implicit_commit
+ * TRUE for implicit commit, FALSE otherwise.
+ *
+ * dst_ds_id
+ * Destination Datastore ID where data needs to be set.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangCfgDataReq **config_req, int num_req,
+ bool implicit_commit, Mgmtd__DatastoreId dst_ds_id);
+
+/*
+ * Send COMMIT_CONFIG_REQ to MGMTD for committing one or more config data(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * src_ds_id
+ * Source datastore ID from where data needs to be committed from.
+ *
+ * dst_ds_id
+ * Destination datastore ID where data needs to be committed to.
+ *
+ * validate_only
+ * TRUE if data needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ * TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_commit_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, bool validate_only,
+ bool abort);
+
+/*
+ * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID (Running/Candidate)
+ *
+ * data_req
+ * Get config requested.
+ *
+ * num_req
+ * Number of get config requests.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangGetDataReq **data_req, int num_reqs);
+
+/*
+ * Send GET_DATA_REQ to MGMTD for one or more data item(s).
+ *
+ * Similar to get config request but supports getting data
+ * from operational ds aka backend clients directly.
+ */
+extern enum mgmt_result
+mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, Mgmtd__YangGetDataReq **data_req,
+ int num_reqs);
+
+/*
+ * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * session_id
+ * Client session ID.
+ *
+ * req_id
+ * Client request ID.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * register_req
+ * TRUE if registering, FALSE otherwise.
+ *
+ * data_req
+ * Details of the YANG notification data.
+ *
+ * num_reqs
+ * Number of data requests.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
+ uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ bool register_req,
+ Mgmtd__YangDataXPath **data_req, int num_reqs);
+
+/*
+ * Destroy library and cleanup everything.
+ */
+extern void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMTD_FE_CLIENT_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD protobuf main header file
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_PB_H_
+#define _FRR_MGMTD_PB_H_
+
+#include "lib/mgmt.pb-c.h"
+
+/*
+ * Thin wrappers mapping short mgmt_* initializer names onto the
+ * protobuf-c generated mgmtd__* init functions.
+ */
+#define mgmt_yang_data_xpath_init(ptr) mgmtd__yang_data_xpath__init(ptr)
+
+#define mgmt_yang_data_value_init(ptr) mgmtd__yang_data_value__init(ptr)
+
+#define mgmt_yang_data_init(ptr) mgmtd__yang_data__init(ptr)
+
+#define mgmt_yang_data_reply_init(ptr) mgmtd__yang_data_reply__init(ptr)
+
+#define mgmt_yang_cfg_data_req_init(ptr) mgmtd__yang_cfg_data_req__init(ptr)
+
+#define mgmt_yang_get_data_req_init(ptr) mgmtd__yang_get_data_req__init(ptr)
+
+#endif /* _FRR_MGMTD_PB_H_ */
NB_OP_RPC,
};
+/* One queued candidate-configuration edit on a vty session. */
+struct nb_cfg_change {
+	char xpath[XPATH_MAXLEN];    /* YANG data path of the change */
+	enum nb_operation operation; /* northbound operation to apply */
+	const char *value;	     /* new value (semantics depend on op) */
+};
+
union nb_resource {
int fd;
void *ptr;
void nb_cli_enqueue_change(struct vty *vty, const char *xpath,
enum nb_operation operation, const char *value)
{
- struct vty_cfg_change *change;
+ struct nb_cfg_change *change;
if (vty->num_cfg_changes == VTY_MAXCFGCHANGES) {
/* Not expected to happen. */
/* Edit candidate configuration. */
for (size_t i = 0; i < vty->num_cfg_changes; i++) {
- struct vty_cfg_change *change = &vty->cfg_changes[i];
+ struct nb_cfg_change *change = &vty->cfg_changes[i];
struct nb_node *nb_node;
char xpath[XPATH_MAXLEN];
struct yang_data *data;
lib/log_vty.c \
lib/md5.c \
lib/memory.c \
+ lib/mgmt_fe_client.c \
lib/mlag.c \
lib/module.c \
lib/mpls.c \
yang/frr-module-translator.yang.c \
# end
+# Add logic to build mgmt.proto
+lib_libfrr_la_LIBADD += $(PROTOBUF_C_LIBS)
+
+BUILT_SOURCES += \
+ lib/mgmt.pb-c.c \
+ lib/mgmt.pb-c.h \
+ # end
+
+CLEANFILES += \
+ lib/mgmt.pb-c.h \
+ lib/mgmt.pb-c.c \
+ # end
+
+lib_libfrr_la_SOURCES += \
+ lib/mgmt.pb-c.c \
+ #end
+
if SQLITE3
lib_libfrr_la_LIBADD += $(SQLITE3_LIBS)
lib_libfrr_la_SOURCES += lib/db.c
lib/log_vty.h \
lib/md5.h \
lib/memory.h \
+ lib/mgmt.pb-c.h \
+ lib/mgmt_fe_client.h \
+ lib/mgmt_pb.h \
lib/module.h \
lib/monotime.h \
lib/mpls.h \
#endif /* VTYSH */
};
+struct nb_config *vty_mgmt_candidate_config;
+
+static uintptr_t mgmt_lib_hndl;
+static bool mgmt_fe_connected;
+static bool mgmt_candidate_ds_wr_locked;
+static uint64_t mgmt_client_id_next;
+static uint64_t mgmt_last_req_id = UINT64_MAX;
+
PREDECL_DLIST(vtyservs);
struct vty_serv {
static void vty_event_serv(enum vty_event event, struct vty_serv *);
static void vty_event(enum vty_event, struct vty *);
+static int vtysh_flush(struct vty *vty);
/* Extern host structure from command.c */
extern struct host host;
static bool do_log_commands;
static bool do_log_commands_perm;
+/*
+ * Complete a pending MGMTD request on a vtysh connection: emit the
+ * 4-byte result header (last byte carries the CMD_* return code),
+ * flush it, then resume reading from — or close — the vty.
+ */
+static void vty_mgmt_resume_response(struct vty *vty, bool success)
+{
+	uint8_t header[4] = {0, 0, 0, 0};
+	int ret = CMD_SUCCESS;
+
+	if (!vty->mgmt_req_pending) {
+		zlog_err(
+			"vty response called without setting mgmt_req_pending");
+		return;
+	}
+
+	if (!success)
+		ret = CMD_WARNING_CONFIG_FAILED;
+
+	vty->mgmt_req_pending = false;
+	header[3] = ret;
+	buffer_put(vty->obuf, header, 4);
+
+	if (!vty->t_write && (vtysh_flush(vty) < 0))
+		/* Try to flush results; exit if a write
+		 * error occurs.
+		 */
+		return;
+
+	if (vty->status == VTY_CLOSE)
+		vty_close(vty);
+	else
+		vty_event(VTYSH_READ, vty);
+}
+
void vty_frame(struct vty *vty, const char *format, ...)
{
va_list args;
new->max = VTY_BUFSIZ;
new->pass_fd = -1;
+ if (mgmt_lib_hndl) {
+ new->mgmt_client_id = mgmt_client_id_next++;
+ if (mgmt_fe_create_client_session(mgmt_lib_hndl,
+ new->mgmt_client_id,
+ (uintptr_t)new)
+ != MGMTD_SUCCESS)
+ zlog_err(
+ "Failed to open a MGMTD Frontend session for VTY session %p!!",
+ new);
+ }
+
return new;
}
if (ret == CMD_SUSPEND)
break;
+ /* with new infra we need to stop response till
+ * we get response through callback.
+ */
+ if (vty->mgmt_req_pending)
+ return;
+
/* warning: watchfrr hardcodes this result write
*/
header[3] = ret;
int i;
bool was_stdio = false;
+ if (mgmt_lib_hndl) {
+ mgmt_fe_destroy_client_session(mgmt_lib_hndl,
+ vty->mgmt_client_id);
+ vty->mgmt_session_id = 0;
+ }
+
/* Drop out of configure / transaction if needed. */
vty_config_exit(vty);
return CMD_WARNING;
}
+ if (vty_mgmt_fe_enabled()) {
+ if (!mgmt_candidate_ds_wr_locked) {
+ if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE,
+ true)
+ != 0) {
+ vty_out(vty, "Not able to lock candidate DS\n");
+ return CMD_WARNING;
+ }
+ } else {
+ vty_out(vty,
+ "Candidate DS already locked by different session\n");
+ return CMD_WARNING;
+ }
+
+ vty->mgmt_locked_candidate_ds = true;
+ mgmt_candidate_ds_wr_locked = true;
+ }
+
vty->node = CONFIG_NODE;
vty->config = true;
vty->private_config = private_config;
vty_out(vty,
"Warning: uncommitted changes will be discarded on exit.\n\n");
} else {
- vty->candidate_config = vty_shared_candidate_config;
+ /*
+ * NOTE: On the MGMTD daemon we point the VTY candidate DS to
+ * the global MGMTD candidate DS. Else we point to the VTY
+ * Shared Candidate Config.
+ */
+ vty->candidate_config = vty_mgmt_candidate_config
+ ? vty_mgmt_candidate_config
+ : vty_shared_candidate_config;
if (frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL)
vty->candidate_config_base =
nb_config_dup(running_config);
{
vty->xpath_index = 0;
+ if (vty_mgmt_fe_enabled() && mgmt_candidate_ds_wr_locked
+ && vty->mgmt_locked_candidate_ds) {
+ if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE, false)
+ != 0) {
+ vty_out(vty, "Not able to unlock candidate DS\n");
+ return CMD_WARNING;
+ }
+
+ vty->mgmt_locked_candidate_ds = false;
+ mgmt_candidate_ds_wr_locked = false;
+ }
+
/* Perform any pending commits. */
(void)nb_cli_pending_commit_check(vty);
/* currently nothing to do, but likely to have future use */
}
+/*
+ * Callback from the MGMTD frontend client library on (dis)connect to
+ * the MGMTD server.  Records the global connection state consulted by
+ * vty_mgmt_fe_enabled().
+ */
+static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
+				      bool connected)
+{
+	/*
+	 * Fix: the disconnect infix was "dis: ", which rendered as
+	 * "Got dis: connected from ..."; use "dis" so the message reads
+	 * "Got disconnected from ..." / "Got connected to ...".
+	 */
+	zlog_err("%sGot %sconnected %s MGMTD Frontend Server",
+		 !connected ? "ERROR: " : "", !connected ? "dis" : "",
+		 !connected ? "from" : "to");
+
+	mgmt_fe_connected = connected;
+
+	/*
+	 * TODO: Setup or teardown front-end sessions for existing
+	 * VTY connections.
+	 */
+}
+
+/*
+ * Callback on completion of a frontend session create/destroy request.
+ * On a successful create, stores the MGMTD-allocated session-id on the
+ * vty so subsequent requests can reference it.
+ */
+static void vty_mgmt_session_created(uintptr_t lib_hndl, uintptr_t usr_data,
+				     uint64_t client_id, bool create,
+				     bool success, uintptr_t session_id,
+				     uintptr_t session_ctx)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err("%s session for client %llu failed!",
+			 create ? "Creating" : "Destroying",
+			 (unsigned long long)client_id);
+		return;
+	}
+
+	/* Fix: success is a normal event -- log at debug, not error. */
+	zlog_debug("%s session for client %llu successfully!",
+		   create ? "Created" : "Destroyed",
+		   (unsigned long long)client_id);
+	if (create)
+		vty->mgmt_session_id = session_id;
+}
+
+/*
+ * Callback on completion of a LOCK/UNLOCK-DS request.  Reports errors
+ * to both the log and the vty, then resumes the CLI command blocked on
+ * this reply.
+ */
+static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
+				      uint64_t client_id, uintptr_t session_id,
+				      uintptr_t session_ctx, uint64_t req_id,
+				      bool lock_ds, bool success,
+				      Mgmtd__DatastoreId ds_id,
+				      char *errmsg_if_any)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err("%socking for DS %u failed! Err: '%s'",
+			 lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
+		vty_out(vty, "ERROR: %socking for DS %u failed! Err: '%s'\n",
+			lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
+	} else {
+		/* Fix: success is a normal event -- log at debug level. */
+		zlog_debug("%socked DS %u successfully!",
+			   lock_ds ? "L" : "Unl", ds_id);
+	}
+
+	vty_mgmt_resume_response(vty, success);
+}
+
+/*
+ * Callback on completion of a SET_CONFIG request.  Reports the result
+ * and resumes the CLI command blocked on this reply.
+ */
+static void vty_mgmt_set_config_result_notified(
+	uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+	uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+	bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err(
+			"SET_CONFIG request for client 0x%llx failed! Error: '%s'",
+			(unsigned long long)client_id,
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_out(vty, "ERROR: SET_CONFIG request failed! Error: %s\n",
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+	} else {
+		/* Fix: success at debug level; also "successfull" typo. */
+		zlog_debug(
+			"SET_CONFIG request for client 0x%llx req-id %llu was successful!",
+			(unsigned long long)client_id,
+			(unsigned long long)req_id);
+	}
+
+	vty_mgmt_resume_response(vty, success);
+}
+
+/*
+ * Callback on completion of a COMMIT_CONFIG request.  Reports the
+ * result (echoing any informational message from MGMTD to the vty on
+ * success) and resumes the CLI command blocked on this reply.
+ */
+static void vty_mgmt_commit_config_result_notified(
+	uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+	uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+	bool success, Mgmtd__DatastoreId src_ds_id, Mgmtd__DatastoreId dst_ds_id,
+	bool validate_only, char *errmsg_if_any)
+{
+	struct vty *vty;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err(
+			"COMMIT_CONFIG request for client 0x%llx failed! Error: '%s'",
+			(unsigned long long)client_id,
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_out(vty, "ERROR: COMMIT_CONFIG request failed! Error: %s\n",
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+	} else {
+		/* Fix: success at debug level; also "successfull" typo. */
+		zlog_debug(
+			"COMMIT_CONFIG request for client 0x%llx req-id %llu was successful!",
+			(unsigned long long)client_id,
+			(unsigned long long)req_id);
+		if (errmsg_if_any)
+			vty_out(vty, "MGMTD: %s\n", errmsg_if_any);
+	}
+
+	vty_mgmt_resume_response(vty, success);
+}
+
+/*
+ * Callback delivering (a batch of) GET_DATA results.  Streams the
+ * key/value pairs to the vty; the blocked CLI command is resumed only
+ * when the final batch arrives (next_key < 0) or on error.
+ *
+ * NOTE(review): the "[" header is emitted once per req-id and "]" on
+ * the last batch, but entries are printed as `"xpath": "value"` lines
+ * without separating commas -- presumably not intended to be strict
+ * JSON; confirm against consumers.
+ */
+static enum mgmt_result vty_mgmt_get_data_result_notified(
+	uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+	uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+	bool success, Mgmtd__DatastoreId ds_id, Mgmtd__YangData **yang_data,
+	size_t num_data, int next_key, char *errmsg_if_any)
+{
+	struct vty *vty;
+	size_t indx;
+
+	vty = (struct vty *)session_ctx;
+
+	if (!success) {
+		zlog_err(
+			"GET_DATA request for client 0x%llx failed! Error: '%s'",
+			(unsigned long long)client_id,
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_out(vty, "ERROR: GET_DATA request failed! Error: %s\n",
+			errmsg_if_any ? errmsg_if_any : "Unknown");
+		vty_mgmt_resume_response(vty, success);
+		return MGMTD_INTERNAL_ERROR;
+	}
+
+	/* Fix: "successfull" -> "successful" typo in the log message. */
+	zlog_debug(
+		"GET_DATA request for client 0x%llx req-id %llu was successful!",
+		(unsigned long long)client_id, (unsigned long long)req_id);
+
+	/* First batch for a new request: open the output block. */
+	if (req_id != mgmt_last_req_id) {
+		mgmt_last_req_id = req_id;
+		vty_out(vty, "[\n");
+	}
+
+	for (indx = 0; indx < num_data; indx++) {
+		vty_out(vty, "  \"%s\": \"%s\"\n", yang_data[indx]->xpath,
+			yang_data[indx]->value->encoded_str_val);
+	}
+	/* Negative next_key marks the final batch: close and resume. */
+	if (next_key < 0) {
+		vty_out(vty, "]\n");
+		vty_mgmt_resume_response(vty, success);
+	}
+
+	return MGMTD_SUCCESS;
+}
+
+static struct mgmt_fe_client_params client_params = {
+ .client_connect_notify = vty_mgmt_server_connected,
+ .client_session_notify = vty_mgmt_session_created,
+ .lock_ds_notify = vty_mgmt_ds_lock_notified,
+ .set_config_notify =
+ vty_mgmt_set_config_result_notified,
+ .commit_config_notify =
+ vty_mgmt_commit_config_result_notified,
+ .get_data_notify = vty_mgmt_get_data_result_notified,
+};
+
+/*
+ * Initialize the MGMTD frontend client library for this process.
+ * Must be called after vty_init() (needs vty_master) and at most once
+ * (asserts mgmt_lib_hndl is unset).
+ */
+void vty_init_mgmt_fe(void)
+{
+	if (!vty_master) {
+		/* Fix: message referred to nonexistent vty_mgmt_init_fe(). */
+		zlog_err(
+			"Always call vty_init_mgmt_fe() after vty_init()!!");
+		return;
+	}
+
+	assert(!mgmt_lib_hndl);
+	/* Unique client name: program name + pid. */
+	snprintf(client_params.name, sizeof(client_params.name), "%s-%lld",
+		 frr_get_progname(), (long long)getpid());
+	mgmt_lib_hndl = mgmt_fe_client_lib_init(&client_params, vty_master);
+	assert(mgmt_lib_hndl);
+}
+
+/* True when the frontend library is initialized AND connected to MGMTD. */
+bool vty_mgmt_fe_enabled(void)
+{
+	/* The `? true : false` ternary was redundant on a bool expression. */
+	return mgmt_lib_hndl && mgmt_fe_connected;
+}
+
+/*
+ * Send a LOCK-DS (lock=true) or UNLOCK-DS (lock=false) request for the
+ * given datastore to MGMTD.  On success the vty is marked as having a
+ * request pending (the reply arrives via vty_mgmt_ds_lock_notified).
+ *
+ * Returns 0 on success or when no MGMTD session exists; -1 on send
+ * failure.
+ */
+int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
+			     bool lock)
+{
+	enum mgmt_result ret;
+
+	/* Silently a no-op unless a frontend session is established. */
+	if (mgmt_lib_hndl && vty->mgmt_session_id) {
+		vty->mgmt_req_id++;
+		ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id,
+				      vty->mgmt_req_id, ds_id, lock);
+		if (ret != MGMTD_SUCCESS) {
+			zlog_err(
+				"Failed to send %sLOCK-DS-REQ to MGMTD for req-id %llu.",
+				lock ? "" : "UN",
+				(unsigned long long)vty->mgmt_req_id);
+			vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!",
+				lock ? "" : "UN");
+			return -1;
+		}
+
+		vty->mgmt_req_pending = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Send the config changes queued on this vty
+ * (vty->cfg_changes[0..num_cfg_changes)) to MGMTD as one SET_CONFIG
+ * request against the candidate datastore.  In classic CLI mode an
+ * implicit commit into the running datastore is requested, unless a
+ * pending commit is allowed or implicit commit is suppressed.
+ *
+ * Returns 0 on success (or when no MGMTD session exists), -1 on send
+ * failure.
+ */
+int vty_mgmt_send_config_data(struct vty *vty)
+{
+	Mgmtd__YangDataValue value[VTY_MAXCFGCHANGES];
+	Mgmtd__YangData cfg_data[VTY_MAXCFGCHANGES];
+	Mgmtd__YangCfgDataReq cfg_req[VTY_MAXCFGCHANGES];
+	Mgmtd__YangCfgDataReq * cfgreq[VTY_MAXCFGCHANGES] = {0};
+	size_t indx;
+	int cnt;
+
+	if (mgmt_lib_hndl && vty->mgmt_session_id) {
+		cnt = 0;
+		for (indx = 0; indx < vty->num_cfg_changes; indx++) {
+			mgmt_yang_data_init(&cfg_data[cnt]);
+
+			/* The value is optional (absent e.g. for deletes). */
+			if (vty->cfg_changes[indx].value) {
+				mgmt_yang_data_value_init(&value[cnt]);
+				value[cnt].encoded_str_val =
+					(char *)vty->cfg_changes[indx].value;
+				value[cnt].value_case =
+					MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+				cfg_data[cnt].value = &value[cnt];
+			}
+
+			cfg_data[cnt].xpath = vty->cfg_changes[indx].xpath;
+
+			mgmt_yang_cfg_data_req_init(&cfg_req[cnt]);
+			cfg_req[cnt].data = &cfg_data[cnt];
+			/* Map the northbound operation onto the MGMTD
+			 * config request type.
+			 */
+			switch (vty->cfg_changes[indx].operation) {
+			case NB_OP_DESTROY:
+				cfg_req[cnt].req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
+				break;
+
+			case NB_OP_CREATE:
+			case NB_OP_MODIFY:
+			case NB_OP_MOVE:
+			case NB_OP_PRE_VALIDATE:
+			case NB_OP_APPLY_FINISH:
+				cfg_req[cnt].req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+				break;
+			/* Read/RPC operations can never be queued as
+			 * config changes.
+			 */
+			case NB_OP_GET_ELEM:
+			case NB_OP_GET_NEXT:
+			case NB_OP_GET_KEYS:
+			case NB_OP_LOOKUP_ENTRY:
+			case NB_OP_RPC:
+				assert(!"Invalid type of operation");
+				break;
+			default:
+				assert(!"non-enum value, invalid");
+			}
+
+			cfgreq[cnt] = &cfg_req[cnt];
+			cnt++;
+		}
+
+		vty->mgmt_req_id++;
+		/* implicit-commit argument: classic CLI mode only, and
+		 * only when neither pending commits are allowed nor
+		 * implicit commit is suppressed.
+		 */
+		if (cnt
+		    && mgmt_fe_set_config_data(
+			       mgmt_lib_hndl, vty->mgmt_session_id,
+			       vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq,
+			       cnt,
+			       frr_get_cli_mode() == FRR_CLI_CLASSIC
+				       ? ((vty->pending_allowed
+					   || vty->no_implicit_commit)
+						  ? false
+						  : true)
+				       : false,
+			       MGMTD_DS_RUNNING)
+		       != MGMTD_SUCCESS) {
+			/* NOTE(review): logs indx (== num_cfg_changes), not
+			 * cnt, as the count of xpaths sent.
+			 */
+			zlog_err("Failed to send %d Config Xpaths to MGMTD!!",
+				 (int)indx);
+			return -1;
+		}
+
+		vty->mgmt_req_pending = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Send a COMMIT_CONFIG request (candidate -> running) to MGMTD.
+ * validate_only requests validation without applying; abort discards
+ * the uncommitted candidate changes instead.
+ *
+ * Returns 0 on success (or when no MGMTD session exists), -1 on send
+ * failure.
+ */
+int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
+{
+	enum mgmt_result ret;
+
+	/* Silently a no-op unless a frontend session is established. */
+	if (mgmt_lib_hndl && vty->mgmt_session_id) {
+		vty->mgmt_req_id++;
+		ret = mgmt_fe_commit_config_data(
+			mgmt_lib_hndl, vty->mgmt_session_id, vty->mgmt_req_id,
+			MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only,
+			abort);
+		if (ret != MGMTD_SUCCESS) {
+			zlog_err(
+				"Failed to send COMMIT-REQ to MGMTD for req-id %llu.",
+				(unsigned long long)vty->mgmt_req_id);
+			vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!");
+			return -1;
+		}
+
+		vty->mgmt_req_pending = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Send a GET-CONFIG request for up to VTY_MAXCFGCHANGES xpaths against
+ * the given datastore.  Results arrive via
+ * vty_mgmt_get_data_result_notified().
+ *
+ * Returns 0 on success, -1 on send failure.
+ */
+int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
+			     const char **xpath_list, int num_req)
+{
+	enum mgmt_result ret;
+	Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
+	int i;
+
+	vty->mgmt_req_id++;
+
+	for (i = 0; i < num_req; i++) {
+		mgmt_yang_get_data_req_init(&get_req[i]);
+		mgmt_yang_data_init(&yang_data[i]);
+
+		/*
+		 * Fix: index the per-request element.  The original
+		 * `yang_data->xpath = ...` overwrote yang_data[0] on
+		 * every iteration, leaving entries 1..n-1 without an
+		 * xpath.
+		 */
+		yang_data[i].xpath = (char *)xpath_list[i];
+
+		get_req[i].data = &yang_data[i];
+		getreq[i] = &get_req[i];
+	}
+	ret = mgmt_fe_get_config_data(mgmt_lib_hndl, vty->mgmt_session_id,
+				      vty->mgmt_req_id, datastore, getreq,
+				      num_req);
+
+	if (ret != MGMTD_SUCCESS) {
+		zlog_err("Failed to send GET-CONFIG to MGMTD for req-id %llu.",
+			 (unsigned long long)vty->mgmt_req_id);
+		vty_out(vty, "Failed to send GET-CONFIG to MGMTD!");
+		return -1;
+	}
+
+	vty->mgmt_req_pending = true;
+
+	return 0;
+}
+
+/*
+ * Send a GET-DATA (operational state) request for up to
+ * VTY_MAXCFGCHANGES xpaths against the given datastore.  Results
+ * arrive via vty_mgmt_get_data_result_notified().
+ *
+ * Returns 0 on success, -1 on send failure.
+ */
+int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
+			   const char **xpath_list, int num_req)
+{
+	enum mgmt_result ret;
+	Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
+	Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
+	int i;
+
+	vty->mgmt_req_id++;
+
+	for (i = 0; i < num_req; i++) {
+		mgmt_yang_get_data_req_init(&get_req[i]);
+		mgmt_yang_data_init(&yang_data[i]);
+
+		/*
+		 * Fix: index the per-request element.  The original
+		 * `yang_data->xpath = ...` overwrote yang_data[0] on
+		 * every iteration, leaving entries 1..n-1 without an
+		 * xpath.
+		 */
+		yang_data[i].xpath = (char *)xpath_list[i];
+
+		get_req[i].data = &yang_data[i];
+		getreq[i] = &get_req[i];
+	}
+	ret = mgmt_fe_get_data(mgmt_lib_hndl, vty->mgmt_session_id,
+			       vty->mgmt_req_id, datastore, getreq, num_req);
+
+	if (ret != MGMTD_SUCCESS) {
+		zlog_err("Failed to send GET-DATA to MGMTD for req-id %llu.",
+			 (unsigned long long)vty->mgmt_req_id);
+		vty_out(vty, "Failed to send GET-DATA to MGMTD!");
+		return -1;
+	}
+
+	vty->mgmt_req_pending = true;
+
+	return 0;
+}
+
/* Install vty's own commands like `who' command. */
void vty_init(struct thread_master *master_thread, bool do_command_logging)
{
struct vty *vty;
struct vty_serv *vtyserv;
+ if (mgmt_lib_hndl) {
+ mgmt_fe_client_lib_destroy(mgmt_lib_hndl);
+ mgmt_lib_hndl = 0;
+ }
+
memset(vty_cwd, 0x00, sizeof(vty_cwd));
vty_reset();
#include "compiler.h"
#include "northbound.h"
#include "zlog_live.h"
+#include "mgmt_fe_client.h"
#ifdef __cplusplus
extern "C" {
/* Changes enqueued to be applied in the candidate configuration. */
size_t num_cfg_changes;
- struct vty_cfg_change cfg_changes[VTY_MAXCFGCHANGES];
+ struct nb_cfg_change cfg_changes[VTY_MAXCFGCHANGES];
/* XPath of the current node */
int xpath_index;
/* Dynamic transaction information. */
bool pending_allowed;
bool pending_commit;
+ bool no_implicit_commit;
char *pending_cmds_buf;
size_t pending_cmds_buflen;
size_t pending_cmds_bufpos;
* without any output. */
size_t frame_pos;
char frame[1024];
+
+ uintptr_t mgmt_session_id;
+ uint64_t mgmt_client_id;
+ uint64_t mgmt_req_id;
+ bool mgmt_req_pending;
+ bool mgmt_locked_candidate_ds;
};
static inline void vty_push_context(struct vty *vty, int node, uint64_t id)
#define IS_DIRECTORY_SEP(c) ((c) == DIRECTORY_SEP)
#endif
+extern struct nb_config *vty_mgmt_candidate_config;
+
/* Prototypes. */
extern void vty_init(struct thread_master *, bool do_command_logging);
extern void vty_init_vtysh(void);
extern void vty_stdio_resume(void);
extern void vty_stdio_close(void);
+extern void vty_init_mgmt_fe(void);
+extern bool vty_mgmt_fe_enabled(void);
+extern int vty_mgmt_send_config_data(struct vty *vty);
+extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
+ bool abort);
+extern int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
+ const char **xpath_list, int num_req);
+extern int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
+ const char **xpath_list, int num_req);
+extern int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
+ bool lock);
+
#ifdef __cplusplus
}
#endif
#include <zebra.h>
#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
#include "mgmtd/mgmt_ds.h"
#include "mgmtd/mgmt_memory.h"
void mgmt_init(void)
{
+ /*
+ * Allocates some vital data structures used by peer commands in
+ * vty_init
+ */
+ vty_init_mgmt_fe();
+
/* Initialize datastores */
mgmt_ds_init(mm);
+ /* Initialize the MGMTD Frontend Adapter Module */
+ mgmt_fe_adapter_init(mm->master, mm);
+
+ /* Start the MGMTD Frontend Server for clients to connect */
+ mgmt_fe_server_init(mm->master);
+
/* MGMTD VTY commands installation. */
mgmt_vty_init();
}
void mgmt_terminate(void)
{
+ mgmt_fe_server_destroy();
+ mgmt_fe_adapter_destroy();
mgmt_ds_destroy();
}
#include "vrf.h"
#include "defaults.h"
+#include "stream.h"
#include "mgmtd/mgmt_memory.h"
#include "mgmtd/mgmt_ds.h"
extern struct mgmt_master *mm;
+/* Inline functions */
+static inline unsigned long timeval_elapsed(struct timeval a, struct timeval b)
+{
+ return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
+ + (a.tv_usec - b.tv_usec));
+}
+
/*
* Remove trailing separator from a string.
*
#define MGMTD_MAX_YANG_VALUE_LEN YANG_VALUE_MAXLEN
+enum mgmt_result {
+ MGMTD_SUCCESS = 0,
+ MGMTD_INVALID_PARAM,
+ MGMTD_INTERNAL_ERROR,
+ MGMTD_NO_CFG_CHANGES,
+ MGMTD_DS_LOCK_FAILED,
+ MGMTD_DS_UNLOCK_FAILED,
+ MGMTD_UNKNOWN_FAILURE
+};
+
+enum mgmt_fe_event {
+ MGMTD_FE_SERVER = 1,
+ MGMTD_FE_CONN_READ,
+ MGMTD_FE_CONN_WRITE,
+ MGMTD_FE_CONN_WRITES_ON,
+ MGMTD_FE_PROC_MSG
+};
+
+#define MGMTD_TXN_ID_NONE 0
+
#endif /* _FRR_MGMTD_DEFINES_H */
#endif /* REDIRECT_DEBUG_TO_STDERR */
struct mgmt_ds_ctx {
- enum mgmt_datastore_id ds_id;
+ Mgmtd__DatastoreId ds_id;
int lock; /* 0 unlocked, >0 read locked < write locked */
bool config_ds;
/*
* TODO: Free the datastores.
*/
-
}
struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
- enum mgmt_datastore_id ds_id)
+ Mgmtd__DatastoreId ds_id)
{
switch (ds_id) {
case MGMTD_DS_CANDIDATE:
case MGMTD_DS_OPERATIONAL:
return (mm->oper_ds);
case MGMTD_DS_NONE:
- case MGMTD_DS_MAX_ID:
- default:
+ case MGMTD__DATASTORE_ID__STARTUP_DS:
+ case _MGMTD__DATASTORE_ID_IS_INT_SIZE:
return 0;
}
#ifndef _FRR_MGMTD_DS_H_
#define _FRR_MGMTD_DS_H_
+#include "mgmt_fe_client.h"
#include "northbound.h"
#include "mgmtd/mgmt_defines.h"
struct mgmt_ds_ctx;
-/*
- * Datastore-Id: For now defined here. Eventually will be
- * defined as part of MGMTD Front-End interface.
- */
-enum mgmt_datastore_id {
- MGMTD_DS_NONE = 0,
- MGMTD_DS_RUNNING,
- MGMTD_DS_CANDIDATE,
- MGMTD_DS_OPERATIONAL,
- MGMTD_DS_MAX_ID
-};
-
typedef void (*mgmt_ds_node_iter_fn)(uint64_t ds_hndl, char *xpath,
struct lyd_node *node,
struct nb_node *nb_node, void *ctx);
* Returns:
* Datastore name.
*/
-static inline const char *mgmt_ds_id2name(enum mgmt_datastore_id id)
+static inline const char *mgmt_ds_id2name(Mgmtd__DatastoreId id)
{
if (id > MGMTD_DS_MAX_ID)
id = MGMTD_DS_MAX_ID;
* Returns:
* Datastore ID.
*/
-static inline enum mgmt_datastore_id mgmt_ds_name2id(const char *name)
+static inline Mgmtd__DatastoreId mgmt_ds_name2id(const char *name)
{
- enum mgmt_datastore_id id;
+ Mgmtd__DatastoreId id;
FOREACH_MGMTD_DS_ID (id) {
if (!strncmp(mgmt_ds_names[id], name, MGMTD_DS_NAME_MAX_LEN))
*
* similar to above funtion.
*/
-static inline enum mgmt_datastore_id mgmt_get_ds_id_by_name(const char *ds_name)
+static inline Mgmtd__DatastoreId mgmt_get_ds_id_by_name(const char *ds_name)
{
if (!strncmp(ds_name, "candidate", sizeof("candidate")))
return MGMTD_DS_CANDIDATE;
* Datastore context (Holds info about ID, lock, root node etc).
*/
extern struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
- enum mgmt_datastore_id ds_id);
+ Mgmtd__DatastoreId ds_id);
/*
* Check if a given datastore is config ds
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_pb.h"
+#include "hash.h"
+#include "jhash.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_fe) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define FOREACH_ADAPTER_IN_LIST(adapter) \
+ frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter))
+
+enum mgmt_session_event {
+ MGMTD_FE_SESSION_CFG_TXN_CLNUP = 1,
+ MGMTD_FE_SESSION_SHOW_TXN_CLNUP,
+};
+
+/* Per-session state kept by MGMTD for one frontend client session. */
+struct mgmt_fe_session_ctx {
+	struct mgmt_fe_client_adapter *adapter;	/* owning client adapter */
+	uint64_t session_id;	/* MGMTD-allocated unique id (hash key) */
+	uint64_t client_id;	/* id chosen by the frontend client */
+	uint64_t txn_id;	/* current show/get transaction */
+	uint64_t cfg_txn_id;	/* current config transaction */
+	uint8_t ds_write_locked[MGMTD_DS_MAX_ID]; /* per-DS write-lock held */
+	uint8_t ds_read_locked[MGMTD_DS_MAX_ID];  /* per-DS read-lock held */
+	/* lock taken implicitly on the session's behalf (note: field name
+	 * carries an "implict" typo; renaming would touch all users).
+	 */
+	uint8_t ds_locked_implict[MGMTD_DS_MAX_ID];
+	struct thread *proc_cfg_txn_clnp;  /* deferred cfg-txn cleanup */
+	struct thread *proc_show_txn_clnp; /* deferred show-txn cleanup */
+
+	struct mgmt_fe_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage);
+
+#define FOREACH_SESSION_IN_LIST(adapter, session) \
+ frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session))
+
+static struct thread_master *mgmt_fe_adapter_tm;
+static struct mgmt_master *mgmt_fe_adapter_mm;
+
+static struct mgmt_fe_adapters_head mgmt_fe_adapters;
+
+static struct hash *mgmt_fe_sessions;
+static uint64_t mgmt_fe_next_session_id;
+
+/* Forward declarations */
+static void
+mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
+ enum mgmt_fe_event event);
+static void
+mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter);
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+ enum mgmt_session_event event);
+
+/*
+ * Acquire the write lock on a datastore for a session, if not already
+ * held by it.  Returns 0 on success (including already-held), -1 on
+ * lock failure.
+ */
+static int
+mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
+			      struct mgmt_ds_ctx *ds_ctx,
+			      struct mgmt_fe_session_ctx *session)
+{
+	if (!session->ds_write_locked[ds_id]) {
+		if (mgmt_ds_write_lock(ds_ctx) != 0) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Failed to lock the DS %u for Sessn: %p from %s!",
+				ds_id, session, session->adapter->name);
+			return -1;
+		}
+
+		session->ds_write_locked[ds_id] = true;
+		MGMTD_FE_ADAPTER_DBG(
+			"Write-Locked the DS %u for Sessn: %p from %s!", ds_id,
+			session, session->adapter->name);
+	}
+
+	return 0;
+}
+
+/*
+ * Acquire the read lock on a datastore for a session, if not already
+ * held by it.  Returns 0 on success (including already-held), -1 on
+ * lock failure.
+ */
+static int
+mgmt_fe_session_read_lock_ds(Mgmtd__DatastoreId ds_id,
+			     struct mgmt_ds_ctx *ds_ctx,
+			     struct mgmt_fe_session_ctx *session)
+{
+	if (!session->ds_read_locked[ds_id]) {
+		if (mgmt_ds_read_lock(ds_ctx) != 0) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Failed to lock the DS %u for Sessn: %p from %s!",
+				ds_id, session, session->adapter->name);
+			return -1;
+		}
+
+		session->ds_read_locked[ds_id] = true;
+		MGMTD_FE_ADAPTER_DBG(
+			"Read-Locked the DS %u for Sessn: %p from %s!", ds_id,
+			session, session->adapter->name);
+	}
+
+	return 0;
+}
+
+/*
+ * Release a session's lock on a datastore.  At most one unlock is
+ * performed: write lock first if unlock_write and held, otherwise read
+ * lock if unlock_read and held.  Clears the implicit-lock marker along
+ * with the lock flag.  Returns 0 on success or no-op, -1 on unlock
+ * failure (note: flags are cleared before the unlock attempt, so on
+ * failure the session no longer records the lock).
+ */
+static int mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
+				     struct mgmt_ds_ctx *ds_ctx,
+				     struct mgmt_fe_session_ctx *session,
+				     bool unlock_write, bool unlock_read)
+{
+	if (unlock_write && session->ds_write_locked[ds_id]) {
+		session->ds_write_locked[ds_id] = false;
+		session->ds_locked_implict[ds_id] = false;
+		if (mgmt_ds_unlock(ds_ctx) != 0) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
+				ds_id, session, session->adapter->name);
+			return -1;
+		}
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Unlocked DS %u write-locked earlier by Sessn: %p from %s",
+			ds_id, session, session->adapter->name);
+	} else if (unlock_read && session->ds_read_locked[ds_id]) {
+		session->ds_read_locked[ds_id] = false;
+		session->ds_locked_implict[ds_id] = false;
+		if (mgmt_ds_unlock(ds_ctx) != 0) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
+				ds_id, session, session->adapter->name);
+			return -1;
+		}
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Unlocked DS %u read-locked earlier by Sessn: %p from %s",
+			ds_id, session, session->adapter->name);
+	}
+
+	return 0;
+}
+
+/*
+ * Tear down a session's config transaction: revert the candidate
+ * datastore to match running, then release any implicitly-taken write
+ * locks.
+ */
+static void
+mgmt_fe_session_cfg_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+	Mgmtd__DatastoreId ds_id;
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Ensure any uncommitted changes in Candidate DS
+	 * is discarded.
+	 */
+	/* NOTE(review): uses the global `mm` here but
+	 * `mgmt_fe_adapter_mm` below -- presumably the same master;
+	 * confirm and unify.
+	 */
+	mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
+
+	for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+		ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+		if (ds_ctx) {
+			/* Only locks taken implicitly for the txn are
+			 * released; explicit client locks are kept.
+			 */
+			if (session->ds_locked_implict[ds_id])
+				mgmt_fe_session_unlock_ds(
+					ds_id, ds_ctx, session, true, false);
+		}
+	}
+
+	/* TODO: Destroy the actual transaction created earlier.
+	 * if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+	 *	mgmt_destroy_txn(&session->cfg_txn_id);
+	 */
+}
+
+/*
+ * Tear down a session's show/get transaction: release all read locks
+ * the session holds on every datastore.
+ */
+static void
+mgmt_fe_session_show_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+	Mgmtd__DatastoreId ds_id;
+	struct mgmt_ds_ctx *ds_ctx;
+
+	for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+		ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+		if (ds_ctx) {
+			mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session,
+						  false, true);
+		}
+	}
+
+	/* TODO: Destroy the transaction created recently.
+	 * if (session->txn_id != MGMTD_TXN_ID_NONE)
+	 *	mgmt_destroy_txn(&session->txn_id);
+	 */
+}
+
+/*
+ * Fold the latest SET_CONFIG duration (last_start..last_end) into the
+ * running min/max/avg statistics.
+ *
+ * NOTE(review): divides by set_cfg_count -- assumes the counter was
+ * incremented before this is called (otherwise divide-by-zero);
+ * confirm at the call sites.
+ */
+static void
+mgmt_fe_adapter_compute_set_cfg_timers(struct mgmt_setcfg_stats *setcfg_stats)
+{
+	setcfg_stats->last_exec_tm = timeval_elapsed(setcfg_stats->last_end,
+						     setcfg_stats->last_start);
+	if (setcfg_stats->last_exec_tm > setcfg_stats->max_tm)
+		setcfg_stats->max_tm = setcfg_stats->last_exec_tm;
+
+	if (setcfg_stats->last_exec_tm < setcfg_stats->min_tm)
+		setcfg_stats->min_tm = setcfg_stats->last_exec_tm;
+
+	/* Incremental average over all set-config requests so far. */
+	setcfg_stats->avg_tm =
+		(((setcfg_stats->avg_tm * (setcfg_stats->set_cfg_count - 1))
+		  + setcfg_stats->last_exec_tm)
+		 / setcfg_stats->set_cfg_count);
+}
+
+/*
+ * Fold the latest COMMIT duration (last_start..last_end) into the
+ * running min/max statistics, tracking the batch count alongside each
+ * extreme.
+ */
+static void
+mgmt_fe_session_compute_commit_timers(struct mgmt_commit_stats *cmt_stats)
+{
+	cmt_stats->last_exec_tm =
+		timeval_elapsed(cmt_stats->last_end, cmt_stats->last_start);
+	if (cmt_stats->last_exec_tm > cmt_stats->max_tm) {
+		cmt_stats->max_tm = cmt_stats->last_exec_tm;
+		cmt_stats->max_batch_cnt = cmt_stats->last_batch_cnt;
+	}
+
+	if (cmt_stats->last_exec_tm < cmt_stats->min_tm) {
+		cmt_stats->min_tm = cmt_stats->last_exec_tm;
+		cmt_stats->min_batch_cnt = cmt_stats->last_batch_cnt;
+	}
+}
+
+/*
+ * Fully dispose of a frontend session: abort its transactions, release
+ * all candidate/running DS locks, unlink it from its adapter, remove it
+ * from the session hash and free it.  Nulls *session on return.
+ */
+static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **session)
+{
+	if ((*session)->adapter) {
+		mgmt_fe_session_cfg_txn_cleanup((*session));
+		mgmt_fe_session_show_txn_cleanup((*session));
+		mgmt_fe_session_unlock_ds(MGMTD_DS_CANDIDATE,
+					  mgmt_fe_adapter_mm->candidate_ds,
+					  *session, true, true);
+		mgmt_fe_session_unlock_ds(MGMTD_DS_RUNNING,
+					  mgmt_fe_adapter_mm->running_ds,
+					  *session, true, true);
+
+		mgmt_fe_sessions_del(&(*session)->adapter->fe_sessions,
+				     *session);
+		/* Drop the adapter reference taken at session creation. */
+		mgmt_fe_adapter_unlock(&(*session)->adapter);
+	}
+
+	hash_release(mgmt_fe_sessions, *session);
+	XFREE(MTYPE_MGMTD_FE_SESSION, *session);
+	*session = NULL;
+}
+
+/*
+ * Look up a session on this adapter by the client-supplied id.
+ * Returns NULL when no session matches.
+ */
+static struct mgmt_fe_session_ctx *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_adapter *adapter,
+				  uint64_t client_id)
+{
+	struct mgmt_fe_session_ctx *iter;
+
+	FOREACH_SESSION_IN_LIST (adapter, iter) {
+		if (iter->client_id != client_id)
+			continue;
+		return iter;
+	}
+
+	return NULL;
+}
+
+/*
+ * Hash a session context by its 64-bit session-id; jhash2 consumes it
+ * as two 32-bit words.
+ */
+static unsigned int mgmt_fe_session_hash_key(const void *data)
+{
+	const struct mgmt_fe_session_ctx *session = data;
+
+	return jhash2((uint32_t *) &session->session_id,
+		      sizeof(session->session_id) / sizeof(uint32_t), 0);
+}
+
+/* Hash equality: sessions are keyed solely by their unique session-id. */
+static bool mgmt_fe_session_hash_cmp(const void *d1, const void *d2)
+{
+	const struct mgmt_fe_session_ctx *sess_a = d1;
+	const struct mgmt_fe_session_ctx *sess_b = d2;
+
+	return sess_a->session_id == sess_b->session_id;
+}
+
+/* hash_clean() callback: destroy one session left in the hash. */
+static void mgmt_fe_session_hash_free(void *data)
+{
+	struct mgmt_fe_session_ctx *session = data;
+
+	mgmt_fe_cleanup_session(&session);
+}
+
+/* Destroy the global session hash, cleaning up any remaining sessions. */
+static void mgmt_fe_session_hash_destroy(void)
+{
+	/* Idempotent: safe to call when the hash was never created. */
+	if (mgmt_fe_sessions == NULL)
+		return;
+
+	hash_clean(mgmt_fe_sessions,
+		   mgmt_fe_session_hash_free);
+	hash_free(mgmt_fe_sessions);
+	mgmt_fe_sessions = NULL;
+}
+
+/*
+ * Resolve a session-id to its context via the global session hash.
+ * Returns NULL if the hash doesn't exist yet or the id is unknown.
+ */
+static inline struct mgmt_fe_session_ctx *
+mgmt_session_id2ctx(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx key = {0};
+	struct mgmt_fe_session_ctx *session;
+
+	if (!mgmt_fe_sessions)
+		return NULL;
+
+	key.session_id = session_id;
+	session = hash_lookup(mgmt_fe_sessions, &key);
+
+	return session;
+}
+
+/*
+ * Create (or re-create) a frontend session for the given client-id on
+ * an adapter.  Any existing session with the same client-id is torn
+ * down first.  Allocates a fresh non-zero session-id, links the session
+ * to the adapter (taking a reference) and inserts it into the global
+ * session hash.
+ */
+static struct mgmt_fe_session_ctx *
+mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
+		       uint64_t client_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_fe_find_session_by_client_id(adapter, client_id);
+	if (session)
+		mgmt_fe_cleanup_session(&session);
+
+	session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+			  sizeof(struct mgmt_fe_session_ctx));
+	assert(session);
+	session->client_id = client_id;
+	session->adapter = adapter;
+	session->txn_id = MGMTD_TXN_ID_NONE;
+	session->cfg_txn_id = MGMTD_TXN_ID_NONE;
+	mgmt_fe_adapter_lock(adapter);
+	mgmt_fe_sessions_add_tail(&adapter->fe_sessions, session);
+	/* Skip 0 as a session-id (0 means "no session") even on
+	 * counter wrap-around.
+	 */
+	if (!mgmt_fe_next_session_id)
+		mgmt_fe_next_session_id++;
+	session->session_id = mgmt_fe_next_session_id++;
+	hash_get(mgmt_fe_sessions, session, hash_alloc_intern);
+
+	return session;
+}
+
+/*
+ * Destroy every session on an adapter.  Uses the _SAFE list iterator
+ * since each cleanup removes the session from the list.
+ */
+static void
+mgmt_fe_cleanup_sessions(struct mgmt_fe_client_adapter *adapter)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	FOREACH_SESSION_IN_LIST (adapter, session)
+		mgmt_fe_cleanup_session(&session);
+}
+
+/* Schedule a write event on the adapter unless writes are paused. */
+static inline void
+mgmt_fe_adapter_sched_msg_write(struct mgmt_fe_client_adapter *adapter)
+{
+	if (!CHECK_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF))
+		mgmt_fe_adapter_register_event(adapter,
+					       MGMTD_FE_CONN_WRITE);
+}
+
+/*
+ * Un-pause writes on an adapter and reschedule the write event if any
+ * output is queued (in the work stream or the fifo).
+ */
+static inline void
+mgmt_fe_adapter_writes_on(struct mgmt_fe_client_adapter *adapter)
+{
+	MGMTD_FE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
+	UNSET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
+	if (adapter->obuf_work || stream_fifo_count_safe(adapter->obuf_fifo))
+		mgmt_fe_adapter_sched_msg_write(adapter);
+}
+
+/* Pause writes on an adapter; queued output stays buffered. */
+static inline void
+mgmt_fe_adapter_writes_off(struct mgmt_fe_client_adapter *adapter)
+{
+	SET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
+	MGMTD_FE_ADAPTER_DBG("Paused writing msgs for '%s'", adapter->name);
+}
+
+/*
+ * Serialize a frontend protobuf message (marker + length header, then
+ * the packed payload) into the adapter's output stream and schedule a
+ * write.  Returns 0 on success, -1 if the connection is down or the
+ * message exceeds MGMTD_FE_MSG_MAX_LEN.
+ */
+static int
+mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+			 Mgmtd__FeMessage *fe_msg)
+{
+	size_t msg_size;
+	uint8_t msg_buf[MGMTD_FE_MSG_MAX_LEN];
+	struct mgmt_fe_msg *msg;
+
+	if (adapter->conn_fd < 0) {
+		MGMTD_FE_ADAPTER_ERR("Connection already reset");
+		return -1;
+	}
+
+	msg_size = mgmtd__fe_message__get_packed_size(fe_msg);
+	msg_size += MGMTD_FE_MSG_HDR_LEN;
+	if (msg_size > sizeof(msg_buf)) {
+		MGMTD_FE_ADAPTER_ERR(
+			"Message size %d more than max size'%d. Not sending!'",
+			(int)msg_size, (int)sizeof(msg_buf));
+		return -1;
+	}
+
+	/* Frame: marker + total length header, then packed payload. */
+	msg = (struct mgmt_fe_msg *)msg_buf;
+	msg->hdr.marker = MGMTD_FE_MSG_MARKER;
+	msg->hdr.len = (uint16_t)msg_size;
+	mgmtd__fe_message__pack(fe_msg, msg->payload);
+
+	/* Append to the current work stream; if it can't hold the whole
+	 * message, push it to the fifo and start a fresh stream.
+	 */
+	if (!adapter->obuf_work)
+		adapter->obuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	if (STREAM_WRITEABLE(adapter->obuf_work) < msg_size) {
+		stream_fifo_push(adapter->obuf_fifo, adapter->obuf_work);
+		adapter->obuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	}
+	stream_write(adapter->obuf_work, (void *)msg_buf, msg_size);
+
+	mgmt_fe_adapter_sched_msg_write(adapter);
+	adapter->num_msg_tx++;
+	return 0;
+}
+
+/*
+ * Send a SESSION_REPLY (for a create or destroy request) back to the
+ * frontend client.  On create, echoes the client's connection id so it
+ * can correlate the reply.
+ */
+static int
+mgmt_fe_send_session_reply(struct mgmt_fe_client_adapter *adapter,
+			   struct mgmt_fe_session_ctx *session,
+			   bool create, bool success)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSessionReply session_reply;
+
+	mgmtd__fe_session_reply__init(&session_reply);
+	session_reply.create = create;
+	if (create) {
+		session_reply.has_client_conn_id = 1;
+		session_reply.client_conn_id = session->client_id;
+	}
+	session_reply.session_id = session->session_id;
+	session_reply.success = success;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY;
+	fe_msg.session_reply = &session_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
+		adapter->name);
+
+	return mgmt_fe_adapter_send_msg(adapter, &fe_msg);
+}
+
+/*
+ * Send a LOCK_DS_REPLY for a lock/unlock request back to the frontend
+ * client owning the session.
+ */
+static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
+				     Mgmtd__DatastoreId ds_id,
+				     uint64_t req_id, bool lock_ds,
+				     bool success, const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeLockDsReply lockds_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_lock_ds_reply__init(&lockds_reply);
+	lockds_reply.session_id = session->session_id;
+	lockds_reply.ds_id = ds_id;
+	lockds_reply.req_id = req_id;
+	lockds_reply.lock = lock_ds;
+	lockds_reply.success = success;
+	if (error_if_any)
+		lockds_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY;
+	fe_msg.lockds_reply = &lockds_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a SET_CONFIG_REPLY to the frontend client owning the session.
+ * If this set carried an implicit commit, the config transaction
+ * cleanup is scheduled and commit perf-stats are finalized; set-config
+ * perf-stats are finalized in all cases.
+ */
+static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
+				     Mgmtd__DatastoreId ds_id,
+				     uint64_t req_id, bool success,
+				     const char *error_if_any,
+				     bool implicit_commit)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSetConfigReply setcfg_reply;
+
+	assert(session->adapter);
+
+	/* An implicit commit ends the config transaction: schedule its
+	 * cleanup before replying.
+	 */
+	if (implicit_commit && session->cfg_txn_id)
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+	mgmtd__fe_set_config_reply__init(&setcfg_reply);
+	setcfg_reply.session_id = session->session_id;
+	setcfg_reply.ds_id = ds_id;
+	setcfg_reply.req_id = req_id;
+	setcfg_reply.success = success;
+	if (error_if_any)
+		setcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY;
+	fe_msg.setcfg_reply = &setcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending SET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	if (implicit_commit) {
+		/* Close out commit timing for the implicit commit. */
+		if (mm->perf_stats_en)
+			gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+		mgmt_fe_session_compute_commit_timers(
+			&session->adapter->cmt_stats);
+	}
+
+	/* Close out set-config timing. */
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
+	mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a COMMIT_CONFIG_REPLY to the frontend client.
+ *
+ * 'result' is mapped to the wire-level success flag: both MGMTD_SUCCESS and
+ * MGMTD_NO_CFG_CHANGES count as success. On a successful (non-validate-only)
+ * commit, or when there were no changes, the session's CONFIG transaction is
+ * scheduled for cleanup. Commit timing statistics are always folded in.
+ * Returns the result of queueing the message.
+ */
+static int mgmt_fe_send_commitcfg_reply(
+	struct mgmt_fe_session_ctx *session, Mgmtd__DatastoreId src_ds_id,
+	Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, enum mgmt_result result,
+	bool validate_only, const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeCommitConfigReply commcfg_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_commit_config_reply__init(&commcfg_reply);
+	commcfg_reply.session_id = session->session_id;
+	commcfg_reply.src_ds_id = src_ds_id;
+	commcfg_reply.dst_ds_id = dst_ds_id;
+	commcfg_reply.req_id = req_id;
+	commcfg_reply.success =
+		(result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES)
+			? true
+			: false;
+	commcfg_reply.validate_only = validate_only;
+	if (error_if_any)
+		commcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY;
+	fe_msg.commcfg_reply = &commcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending COMMIT_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the CONFIG transaction associated with this session.
+	 */
+	if (session->cfg_txn_id
+	    && ((result == MGMTD_SUCCESS && !validate_only)
+		|| (result == MGMTD_NO_CFG_CHANGES)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+	mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a GET_CONFIG_REPLY (possibly one batch of many) to the frontend
+ * client.
+ *
+ * 'data' carries the YANG data batch; data->next_indx < 0 marks the final
+ * batch, which — like a failure — schedules cleanup of the session's SHOW
+ * transaction. Returns the result of queueing the message.
+ */
+static int mgmt_fe_send_getcfg_reply(struct mgmt_fe_session_ctx *session,
+				       Mgmtd__DatastoreId ds_id,
+				       uint64_t req_id, bool success,
+				       Mgmtd__YangDataReply *data,
+				       const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetConfigReply getcfg_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_get_config_reply__init(&getcfg_reply);
+	getcfg_reply.session_id = session->session_id;
+	getcfg_reply.ds_id = ds_id;
+	getcfg_reply.req_id = req_id;
+	getcfg_reply.success = success;
+	getcfg_reply.data = data;
+	if (error_if_any)
+		getcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY;
+	fe_msg.getcfg_reply = &getcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending GET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the SHOW transaction associated with this session.
+	 */
+	if (session->txn_id && (!success || (data && data->next_indx < 0)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/*
+ * Send a GET_DATA_REPLY (possibly one batch of many) to the frontend
+ * client. Mirrors mgmt_fe_send_getcfg_reply(): data->next_indx < 0 marks
+ * the last batch and, like a failure, schedules cleanup of the session's
+ * SHOW transaction. Returns the result of queueing the message.
+ */
+static int mgmt_fe_send_getdata_reply(struct mgmt_fe_session_ctx *session,
+					Mgmtd__DatastoreId ds_id,
+					uint64_t req_id, bool success,
+					Mgmtd__YangDataReply *data,
+					const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetDataReply getdata_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_get_data_reply__init(&getdata_reply);
+	getdata_reply.session_id = session->session_id;
+	getdata_reply.ds_id = ds_id;
+	getdata_reply.req_id = req_id;
+	getdata_reply.success = success;
+	getdata_reply.data = data;
+	if (error_if_any)
+		getdata_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY;
+	fe_msg.getdata_reply = &getdata_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending GET_DATA_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the SHOW transaction associated with this session.
+	 */
+	if (session->txn_id && (!success || (data && data->next_indx < 0)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+	return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+/* Timer callback: tear down the session's CONFIG transaction. */
+static void mgmt_fe_session_cfg_txn_clnup(struct thread *thread)
+{
+	struct mgmt_fe_session_ctx *session = THREAD_ARG(thread);
+
+	mgmt_fe_session_cfg_txn_cleanup(session);
+}
+
+/* Timer callback: tear down the session's SHOW transaction. */
+static void mgmt_fe_session_show_txn_clnup(struct thread *thread)
+{
+	struct mgmt_fe_session_ctx *session = THREAD_ARG(thread);
+
+	mgmt_fe_session_show_txn_cleanup(session);
+}
+
+/*
+ * Schedule a deferred session event (transaction cleanup) on the frontend
+ * adapter's event loop, after a small fixed delay
+ * (MGMTD_FE_MSG_PROC_DELAY_USEC) so the triggering reply goes out first.
+ *
+ * Only CFG_TXN_CLNUP and SHOW_TXN_CLNUP are valid; anything else asserts.
+ */
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+				     enum mgmt_session_event event)
+{
+	struct timeval tv = {.tv_sec = 0,
+			     .tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC};
+
+	switch (event) {
+	case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
+		thread_add_timer_tv(mgmt_fe_adapter_tm,
+				    mgmt_fe_session_cfg_txn_clnup, session,
+				    &tv, &session->proc_cfg_txn_clnp);
+		assert(session->proc_cfg_txn_clnp);
+		break;
+	case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
+		thread_add_timer_tv(mgmt_fe_adapter_tm,
+				    mgmt_fe_session_show_txn_clnup, session,
+				    &tv, &session->proc_show_txn_clnp);
+		assert(session->proc_show_txn_clnp);
+		break;
+	default:
+		/* Fixed stale copy-paste: the old message blamed
+		 * mgmt_fe_adapter_post_event(), which is not this function. */
+		assert(!"mgmt_fe_session_register_event() called incorrectly");
+		break;
+	}
+}
+
+/*
+ * Look up the frontend adapter owning the given connection fd.
+ * Returns NULL when no adapter matches.
+ */
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_fd(int conn_fd)
+{
+	struct mgmt_fe_client_adapter *iter;
+
+	FOREACH_ADAPTER_IN_LIST (iter) {
+		if (iter->conn_fd != conn_fd)
+			continue;
+		return iter;
+	}
+
+	return NULL;
+}
+
+/*
+ * Look up a frontend adapter by its client name.
+ * Returns NULL when no adapter matches.
+ */
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_name(const char *name)
+{
+	struct mgmt_fe_client_adapter *iter;
+
+	FOREACH_ADAPTER_IN_LIST (iter) {
+		if (strncmp(iter->name, name, sizeof(iter->name)) != 0)
+			continue;
+		return iter;
+	}
+
+	return NULL;
+}
+
+/*
+ * Tear down a frontend adapter: close its socket, destroy all of its
+ * sessions, remove it from the global adapter list and drop the list's
+ * reference (which may free the adapter). Order matters: sessions must be
+ * cleaned up before the session list is finalized, and the unlock must
+ * come last.
+ */
+static void mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter)
+{
+	if (adapter->conn_fd >= 0) {
+		close(adapter->conn_fd);
+		adapter->conn_fd = -1;
+	}
+
+	/* TODO: notify about client disconnect for appropriate cleanup */
+	mgmt_fe_cleanup_sessions(adapter);
+	mgmt_fe_sessions_fini(&adapter->fe_sessions);
+	mgmt_fe_adapters_del(&mgmt_fe_adapters, adapter);
+
+	mgmt_fe_adapter_unlock(&adapter);
+}
+
+/*
+ * A client that reconnects shows up as a second adapter with the same name.
+ * Walk the adapter list and disconnect any *other* adapter carrying this
+ * adapter's name (the zombie left over from the old connection).
+ *
+ * NOTE(review): mgmt_fe_adapter_disconnect() deletes 'old' from the list
+ * being iterated — this is only safe if FOREACH_ADAPTER_IN_LIST tolerates
+ * removal of the current element; confirm against its definition.
+ */
+static void
+mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
+{
+	struct mgmt_fe_client_adapter *old;
+
+	FOREACH_ADAPTER_IN_LIST (old) {
+		if (old != adapter
+		    && !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+			/*
+			 * We have a Zombie lingering around
+			 */
+			MGMTD_FE_ADAPTER_DBG(
+				"Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+				adapter->name, adapter->conn_fd, old->conn_fd);
+			mgmt_fe_adapter_disconnect(old);
+		}
+	}
+}
+
+/*
+ * Shutdown helper: destroy the sessions of every registered adapter and
+ * drop one reference on each.
+ *
+ * NOTE(review): mgmt_fe_adapter_unlock() may free the adapter while the
+ * list is still being walked — safe only if FOREACH_ADAPTER_IN_LIST is a
+ * removal-safe iterator; confirm against its definition.
+ */
+static void
+mgmt_fe_cleanup_adapters(void)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		mgmt_fe_cleanup_sessions(adapter);
+		mgmt_fe_adapter_unlock(&adapter);
+	}
+}
+
+/*
+ * Handle a LOCKDS_REQ from a frontend session: take or release the
+ * write-lock on the requested datastore and send a LOCK_DS_REPLY with the
+ * outcome. Returns 0 on success (including the benign "was not locked"
+ * unlock case), -1 when the request is rejected.
+ */
+static int
+mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__FeLockDsReq *lockds_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Check first if the LOCKDS_REQ is for the Candidate DS or not.
+	 * Report failure if it is not. MGMTD currently only supports
+	 * locking/unlocking the Candidate DS.
+	 */
+	if (lockds_req->ds_id != MGMTD_DS_CANDIDATE) {
+		mgmt_fe_send_lockds_reply(
+			session, lockds_req->ds_id, lockds_req->req_id,
+			lockds_req->lock, false,
+			"Lock/Unlock on datastores other than Candidate DS not permitted!");
+		return -1;
+	}
+
+	ds_ctx =
+		mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, lockds_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_lockds_reply(
+			session, lockds_req->ds_id, lockds_req->req_id,
+			lockds_req->lock, false,
+			"Failed to retrieve handle for DS!");
+		return -1;
+	}
+
+	if (lockds_req->lock) {
+		if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id,
+						      ds_ctx, session)
+		    != 0) {
+			mgmt_fe_send_lockds_reply(
+				session, lockds_req->ds_id, lockds_req->req_id,
+				lockds_req->lock, false,
+				"Lock already taken on DS by another session!");
+			return -1;
+		}
+
+		/* Explicitly requested lock, not one taken on the
+		 * session's behalf. */
+		session->ds_locked_implict[lockds_req->ds_id] = false;
+	} else {
+		if (!session->ds_write_locked[lockds_req->ds_id]) {
+			mgmt_fe_send_lockds_reply(
+				session, lockds_req->ds_id, lockds_req->req_id,
+				lockds_req->lock, false,
+				"Lock on DS was not taken by this session!");
+			return 0;
+		}
+
+		(void)mgmt_fe_session_unlock_ds(lockds_req->ds_id, ds_ctx,
+						    session, true, false);
+	}
+
+	/* Success: acknowledge the lock/unlock to the client. */
+	if (mgmt_fe_send_lockds_reply(session, lockds_req->ds_id,
+					  lockds_req->req_id, lockds_req->lock,
+					  true, NULL)
+	    != 0) {
+		MGMTD_FE_ADAPTER_DBG(
+			"Failed to send LOCK_DS_REPLY for DS %u Sessn: %p from %s",
+			lockds_req->ds_id, session, session->adapter->name);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a SET_CONFIG_REQ from a frontend session.
+ *
+ * Validates that the target is the Candidate DS, implicitly write-locks it
+ * for the session if needed and (eventually) hands the request to a CONFIG
+ * transaction. The transaction module does not exist yet, so today every
+ * request ends in a failure SET_CONFIG_REPLY; the TODO-commented code shows
+ * the intended flow. Always returns 0 (errors are reported to the client
+ * in the reply, not to the caller).
+ */
+static int
+mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__FeSetConfigReq *setcfg_req)
+{
+	/* uint64_t cfg_session_id; */
+	struct mgmt_ds_ctx *ds_ctx, *dst_ds_ctx;
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->setcfg_stats.last_start, NULL);
+
+	/*
+	 * Next check first if the SET_CONFIG_REQ is for Candidate DS
+	 * or not. Report failure if its not. MGMTD currently only
+	 * supports editing the Candidate DS.
+	 */
+	if (setcfg_req->ds_id != MGMTD_DS_CANDIDATE) {
+		mgmt_fe_send_setcfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"Set-Config on datastores other than Candidate DS not permitted!",
+			setcfg_req->implicit_commit);
+		return 0;
+	}
+
+	/*
+	 * Get the DS handle.
+	 */
+	ds_ctx =
+		mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, setcfg_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_setcfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"No such DS exists!", setcfg_req->implicit_commit);
+		return 0;
+	}
+
+	if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * TODO: Check first if the current session can run a CONFIG
+		 * transaction or not. Report failure if a CONFIG transaction
+		 * from another session is already in progress.
+		 * cfg_session_id = mgmt_config_txn_in_progress();
+		 * if (cfg_session_id != MGMTD_SESSION_ID_NONE
+		 *    && cfg_session_id != session->session_id) {
+		 *	mgmt_fe_send_setcfg_reply(
+		 *		session, setcfg_req->ds_id, setcfg_req->req_id,
+		 *		false,
+		 *		"Configuration already in-progress through a
+		 *different user session!", setcfg_req->implicit_commit); goto
+		 *mgmt_fe_sess_handle_setcfg_req_failed;
+		 *}
+		 */
+
+
+		/*
+		 * Try taking write-lock on the requested DS (if not already).
+		 */
+		if (!session->ds_write_locked[setcfg_req->ds_id]) {
+			if (mgmt_fe_session_write_lock_ds(setcfg_req->ds_id,
+							      ds_ctx, session)
+			    != 0) {
+				mgmt_fe_send_setcfg_reply(
+					session, setcfg_req->ds_id,
+					setcfg_req->req_id, false,
+					"Failed to lock the DS!",
+					setcfg_req->implicit_commit);
+				goto mgmt_fe_sess_handle_setcfg_req_failed;
+			}
+
+			/* Lock taken on the session's behalf, to be
+			 * released automatically. */
+			session->ds_locked_implict[setcfg_req->ds_id] = true;
+		}
+
+		/*
+		 * TODO: Start a CONFIG Transaction (if not started already)
+		 * session->cfg_txn_id = mgmt_create_txn(session->session_id,
+		 *				MGMTD_TXN_TYPE_CONFIG);
+		 * if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
+		 *	mgmt_fe_send_setcfg_reply(
+		 *		session, setcfg_req->ds_id, setcfg_req->req_id,
+		 *		false,
+		 *		"Failed to create a Configuration session!",
+		 *		setcfg_req->implicit_commit);
+		 *	goto mgmt_fe_sess_handle_setcfg_req_failed;
+		 * }
+		 */
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Created new Config Txn 0x%llx for session %p",
+			(unsigned long long)session->cfg_txn_id, session);
+	} else {
+		MGMTD_FE_ADAPTER_DBG(
+			"Config Txn 0x%llx for session %p already created",
+			(unsigned long long)session->cfg_txn_id, session);
+
+		if (setcfg_req->implicit_commit) {
+			/*
+			 * In this scenario need to skip cleanup of the txn,
+			 * so setting implicit commit to false.
+			 */
+			mgmt_fe_send_setcfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false,
+				"A Configuration transaction is already in progress!",
+				false);
+			return 0;
+		}
+	}
+
+	dst_ds_ctx = 0;
+	if (setcfg_req->implicit_commit) {
+		/* An implicit commit also needs the destination
+		 * (commit-to) DS handle. */
+		dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+						       setcfg_req->commit_ds_id);
+		if (!dst_ds_ctx) {
+			mgmt_fe_send_setcfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false, "No such commit DS exists!",
+				setcfg_req->implicit_commit);
+			return 0;
+		}
+	}
+
+	/* TODO: Create the SETConfig request under the transaction.
+	 * if (mgmt_txn_send_set_config_req(
+	 *	session->cfg_txn_id, setcfg_req->req_id, setcfg_req->ds_id,
+	 *	ds_ctx, setcfg_req->data, setcfg_req->n_data,
+	 *	setcfg_req->implicit_commit, setcfg_req->commit_ds_id,
+	 *	dst_ds_ctx)
+	 *    != 0) {
+	 *	mgmt_fe_send_setcfg_reply(
+	 *		session, setcfg_req->ds_id, setcfg_req->req_id, false,
+	 *		"Request processing for SET-CONFIG failed!",
+	 *		setcfg_req->implicit_commit);
+	 *	goto mgmt_fe_sess_handle_setcfg_req_failed;
+	 * }
+	 *
+	 * For now send a failure reply.
+	 */
+	mgmt_fe_send_setcfg_reply(
+		session, setcfg_req->ds_id, setcfg_req->req_id, false,
+		"Request processing for SET-CONFIG failed!",
+		setcfg_req->implicit_commit);
+	goto mgmt_fe_sess_handle_setcfg_req_failed;
+
+	/* Unreachable until the TODO above is implemented. */
+	return 0;
+
+mgmt_fe_sess_handle_setcfg_req_failed:
+
+	/* TODO: Delete transaction created recently.
+	 * if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+	 *	mgmt_destroy_txn(&session->cfg_txn_id);
+	 */
+	if (ds_ctx && session->ds_write_locked[setcfg_req->ds_id])
+		mgmt_fe_session_unlock_ds(setcfg_req->ds_id, ds_ctx, session,
+					      true, false);
+
+	return 0;
+}
+
+/*
+ * Handle a GET_CONFIG_REQ from a frontend session.
+ *
+ * Only the Candidate and Running datastores may be read. Implicitly
+ * read-locks the DS for the session if it holds neither a read nor a write
+ * lock on it. The SHOW transaction module does not exist yet, so today
+ * every request ends in a failure GET_CONFIG_REPLY; the TODO-commented
+ * code shows the intended flow. Returns 0 on success paths, -1 when the
+ * failure path releases an implicitly taken lock.
+ */
+static int
+mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
+					  Mgmtd__FeGetConfigReq *getcfg_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Get the DS handle.
+	 */
+	ds_ctx =
+		mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, getcfg_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_getcfg_reply(session, getcfg_req->ds_id,
+					      getcfg_req->req_id, false, NULL,
+					      "No such DS exists!");
+		return 0;
+	}
+
+	/*
+	 * Check next if the GET_CONFIG_REQ is for the Candidate or the
+	 * Running DS. Report failure if it is not. MGMTD currently only
+	 * supports reading configuration from these two datastores.
+	 */
+	if (getcfg_req->ds_id != MGMTD_DS_CANDIDATE
+	    && getcfg_req->ds_id != MGMTD_DS_RUNNING) {
+		mgmt_fe_send_getcfg_reply(
+			session, getcfg_req->ds_id, getcfg_req->req_id, false,
+			NULL,
+			"Get-Config on datastores other than Candidate or Running DS not permitted!");
+		return 0;
+	}
+
+	if (session->txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Try taking read-lock on the requested DS (if not already
+		 * locked). If the DS has already been write-locked by a ongoing
+		 * CONFIG transaction we may allow reading the contents of the
+		 * same DS.
+		 */
+		if (!session->ds_read_locked[getcfg_req->ds_id]
+		    && !session->ds_write_locked[getcfg_req->ds_id]) {
+			if (mgmt_fe_session_read_lock_ds(getcfg_req->ds_id,
+							     ds_ctx, session)
+			    != 0) {
+				mgmt_fe_send_getcfg_reply(
+					session, getcfg_req->ds_id,
+					getcfg_req->req_id, false, NULL,
+					"Failed to lock the DS! Another session might have locked it!");
+				goto mgmt_fe_sess_handle_getcfg_req_failed;
+			}
+
+			session->ds_locked_implict[getcfg_req->ds_id] = true;
+		}
+
+		/*
+		 * TODO: Start a SHOW Transaction (if not started already)
+		 * session->txn_id = mgmt_create_txn(session->session_id,
+		 *					MGMTD_TXN_TYPE_SHOW);
+		 * if (session->txn_id == MGMTD_SESSION_ID_NONE) {
+		 *	mgmt_fe_send_getcfg_reply(
+		 *		session, getcfg_req->ds_id, getcfg_req->req_id,
+		 *		false, NULL,
+		 *		"Failed to create a Show transaction!");
+		 *	goto mgmt_fe_sess_handle_getcfg_req_failed;
+		 * }
+		 */
+		/* Placeholder until the txn module exists: always fail. */
+		mgmt_fe_send_getcfg_reply(
+			session, getcfg_req->ds_id, getcfg_req->req_id, false,
+			NULL, "Failed to create a Show transaction!");
+		goto mgmt_fe_sess_handle_getcfg_req_failed;
+
+
+		/* Unreachable until the TODO above is implemented. */
+		MGMTD_FE_ADAPTER_DBG(
+			"Created new Show Txn 0x%llx for session %p",
+			(unsigned long long)session->txn_id, session);
+	} else {
+		MGMTD_FE_ADAPTER_DBG(
+			"Show Txn 0x%llx for session %p already created",
+			(unsigned long long)session->txn_id, session);
+	}
+
+	/* TODO: Create a GETConfig request under the transaction.
+	 * if (mgmt_txn_send_get_config_req(session->txn_id, getcfg_req->req_id,
+	 *				      getcfg_req->ds_id, ds_ctx,
+	 *				      getcfg_req->data, getcfg_req->n_data)
+	 *    != 0) {
+	 *	mgmt_fe_send_getcfg_reply(
+	 *		session, getcfg_req->ds_id, getcfg_req->req_id, false,
+	 *		NULL, "Request processing for GET-CONFIG failed!");
+	 *	goto mgmt_fe_sess_handle_getcfg_req_failed;
+	 * }
+	 *
+	 * For now send back a failure reply.
+	 */
+	mgmt_fe_send_getcfg_reply(
+		session, getcfg_req->ds_id, getcfg_req->req_id, false, NULL,
+		"Request processing for GET-CONFIG failed!");
+	goto mgmt_fe_sess_handle_getcfg_req_failed;
+
+	/* Unreachable until the TODO above is implemented. */
+	return 0;
+
+mgmt_fe_sess_handle_getcfg_req_failed:
+
+	/* TODO: Destroy the transaction created recently.
+	 * if (session->txn_id != MGMTD_TXN_ID_NONE)
+	 *	mgmt_destroy_txn(&session->txn_id);
+	 */
+	if (ds_ctx && session->ds_read_locked[getcfg_req->ds_id])
+		mgmt_fe_session_unlock_ds(getcfg_req->ds_id, ds_ctx, session,
+					      false, true);
+
+	return -1;
+}
+
+/*
+ * Handle a GET_DATA_REQ (operational data read) from a frontend session.
+ *
+ * Mirrors mgmt_fe_session_handle_getcfg_req_msg(): implicitly read-locks
+ * the DS when the session holds no lock on it, and — since the SHOW
+ * transaction module does not exist yet — always ends in a failure
+ * GET_DATA_REPLY (the TODO-commented code shows the intended flow).
+ * Returns 0 on success paths, -1 when the failure path releases an
+ * implicitly taken lock.
+ *
+ * Fix: the error string sent to the client said "GET-CONFIG" although this
+ * is the GET-DATA handler; it now says "GET-DATA".
+ */
+static int
+mgmt_fe_session_handle_getdata_req_msg(struct mgmt_fe_session_ctx *session,
+					   Mgmtd__FeGetDataReq *getdata_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	/*
+	 * Get the DS handle.
+	 */
+	ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+					   getdata_req->ds_id);
+	if (!ds_ctx) {
+		mgmt_fe_send_getdata_reply(session, getdata_req->ds_id,
+					       getdata_req->req_id, false, NULL,
+					       "No such DS exists!");
+		return 0;
+	}
+
+	if (session->txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Try taking read-lock on the requested DS (if not already
+		 * locked). If the DS has already been write-locked by a ongoing
+		 * CONFIG transaction we may allow reading the contents of the
+		 * same DS.
+		 */
+		if (!session->ds_read_locked[getdata_req->ds_id]
+		    && !session->ds_write_locked[getdata_req->ds_id]) {
+			if (mgmt_fe_session_read_lock_ds(getdata_req->ds_id,
+							     ds_ctx, session)
+			    != 0) {
+				mgmt_fe_send_getdata_reply(
+					session, getdata_req->ds_id,
+					getdata_req->req_id, false, NULL,
+					"Failed to lock the DS! Another session might have locked it!");
+				goto mgmt_fe_sess_handle_getdata_req_failed;
+			}
+
+			session->ds_locked_implict[getdata_req->ds_id] = true;
+		}
+
+		/*
+		 * TODO: Start a SHOW Transaction (if not started already)
+		 * session->txn_id =
+		 *	mgmt_create_txn(session->session_id,
+		 *			MGMTD_TXN_TYPE_SHOW);
+		 * if (session->txn_id == MGMTD_SESSION_ID_NONE) {
+		 *	mgmt_fe_send_getdata_reply(
+		 *		session, getdata_req->ds_id, getdata_req->req_id,
+		 *		false, NULL,
+		 *		"Failed to create a Show transaction!");
+		 *	goto mgmt_fe_sess_handle_getdata_req_failed;
+		 * }
+		 */
+		/* Placeholder until the txn module exists: always fail. */
+		mgmt_fe_send_getdata_reply(
+			session, getdata_req->ds_id, getdata_req->req_id, false,
+			NULL, "Failed to create a Show transaction!");
+		goto mgmt_fe_sess_handle_getdata_req_failed;
+
+
+		/* Unreachable until the TODO above is implemented. */
+		MGMTD_FE_ADAPTER_DBG(
+			"Created new Show Txn 0x%llx for session %p",
+			(unsigned long long)session->txn_id, session);
+	} else {
+		MGMTD_FE_ADAPTER_DBG(
+			"Show Txn 0x%llx for session %p already created",
+			(unsigned long long)session->txn_id, session);
+	}
+
+	/* TODO: Create a GETData request under the transaction.
+	 * if (mgmt_txn_send_get_data_req(session->txn_id, getdata_req->req_id,
+	 *				    getdata_req->ds_id, ds_ctx,
+	 *				    getdata_req->data, getdata_req->n_data)
+	 *    != 0) {
+	 *	mgmt_fe_send_getdata_reply(
+	 *		session, getdata_req->ds_id, getdata_req->req_id, false,
+	 *		NULL, "Request processing for GET-DATA failed!");
+	 *	goto mgmt_fe_sess_handle_getdata_req_failed;
+	 * }
+	 *
+	 * For now send back a failure reply.
+	 */
+	mgmt_fe_send_getdata_reply(
+		session, getdata_req->ds_id, getdata_req->req_id, false, NULL,
+		"Request processing for GET-DATA failed!");
+	goto mgmt_fe_sess_handle_getdata_req_failed;
+
+	/* Unreachable until the TODO above is implemented. */
+	return 0;
+
+mgmt_fe_sess_handle_getdata_req_failed:
+
+	/* TODO: Destroy the transaction created recently.
+	 * if (session->txn_id != MGMTD_TXN_ID_NONE)
+	 *	mgmt_destroy_txn(&session->txn_id);
+	 */
+
+	if (ds_ctx && session->ds_read_locked[getdata_req->ds_id])
+		mgmt_fe_session_unlock_ds(getdata_req->ds_id, ds_ctx,
+					      session, false, true);
+
+	return -1;
+}
+
+/*
+ * Handle a COMMIT_CONFIG_REQ from a frontend session: validate the source
+ * and destination datastores (destination must be Running), write-lock the
+ * destination on the session's behalf if needed and (eventually) hand the
+ * commit to the CONFIG transaction. The transaction module does not exist
+ * yet, so today every request — including any request arriving without an
+ * existing CONFIG txn — ends in a failure COMMIT_CONFIG_REPLY; the
+ * TODO-commented code shows the intended flow. Always returns 0.
+ */
+static int mgmt_fe_session_handle_commit_config_req_msg(
+	struct mgmt_fe_session_ctx *session,
+	Mgmtd__FeCommitConfigReq *commcfg_req)
+{
+	struct mgmt_ds_ctx *src_ds_ctx, *dst_ds_ctx;
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->cmt_stats.last_start, NULL);
+	session->adapter->cmt_stats.commit_cnt++;
+	/*
+	 * Get the source DS handle.
+	 */
+	src_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+					       commcfg_req->src_ds_id);
+	if (!src_ds_ctx) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"No such source DS exists!");
+		return 0;
+	}
+
+	/*
+	 * Get the destination DS handle.
+	 */
+	dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+					       commcfg_req->dst_ds_id);
+	if (!dst_ds_ctx) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"No such destination DS exists!");
+		return 0;
+	}
+
+	/*
+	 * Check next if the COMMIT_CONFIG_REQ destination is the Running
+	 * DS. Report failure if it is not. MGMTD currently only supports
+	 * committing into the Running DS.
+	 */
+	if (commcfg_req->dst_ds_id != MGMTD_DS_RUNNING) {
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"Set-Config on datastores other than Running DS not permitted!");
+		return 0;
+	}
+
+	if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * TODO: Start a CONFIG Transaction (if not started already)
+		 * session->cfg_txn_id = mgmt_create_txn(session->session_id,
+		 *					MGMTD_TXN_TYPE_CONFIG);
+		 * if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
+		 *	mgmt_fe_send_commitcfg_reply(
+		 *		session, commcfg_req->src_ds_id,
+		 *		commcfg_req->dst_ds_id, commcfg_req->req_id,
+		 *		MGMTD_INTERNAL_ERROR,
+		 *		commcfg_req->validate_only,
+		 *		"Failed to create a Configuration session!");
+		 *	return 0;
+		 * }
+		 */
+		/* Placeholder until the txn module exists: always fail. */
+		mgmt_fe_send_commitcfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"Failed to create a Configuration session!");
+		return 0;
+	}
+
+
+	/*
+	 * Try taking write-lock on the destination DS (if not already).
+	 */
+	if (!session->ds_write_locked[commcfg_req->dst_ds_id]) {
+		if (mgmt_fe_session_write_lock_ds(commcfg_req->dst_ds_id,
+						      dst_ds_ctx, session)
+		    != 0) {
+			mgmt_fe_send_commitcfg_reply(
+				session, commcfg_req->src_ds_id,
+				commcfg_req->dst_ds_id, commcfg_req->req_id,
+				MGMTD_DS_LOCK_FAILED,
+				commcfg_req->validate_only,
+				"Failed to lock the destination DS!");
+			return 0;
+		}
+
+		session->ds_locked_implict[commcfg_req->dst_ds_id] = true;
+	}
+
+	/* TODO: Create COMMITConfig request under the transaction
+	 * if (mgmt_txn_send_commit_config_req(
+	 *	session->cfg_txn_id, commcfg_req->req_id,
+	 *	commcfg_req->src_ds_id, src_ds_ctx, commcfg_req->dst_ds_id,
+	 *	dst_ds_ctx, commcfg_req->validate_only, commcfg_req->abort,
+	 *	false)
+	 *    != 0) {
+	 *	mgmt_fe_send_commitcfg_reply(
+	 *		session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+	 *		commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+	 *		commcfg_req->validate_only,
+	 *		"Request processing for COMMIT-CONFIG failed!");
+	 *	return 0;
+	 * }
+	 *
+	 * For now, due to lack of txn modules, send an unsuccessful reply.
+	 */
+	mgmt_fe_send_commitcfg_reply(
+		session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+		commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+		commcfg_req->validate_only,
+		"Request processing for COMMIT-CONFIG failed!");
+
+	return 0;
+}
+
+/*
+ * Dispatch one decoded frontend protobuf message to its handler based on
+ * the oneof 'message_case'. Replies (and unknown/future cases) are ignored
+ * here, as they are only ever sent by MGMTD, never received.
+ *
+ * NOTE(review): mgmt_session_id2ctx() results are passed straight to the
+ * per-request handlers (and dereferenced, e.g. for the SETCFG stats bump)
+ * without a NULL check — confirm it cannot return NULL for an id supplied
+ * by the client, or add guards.
+ */
+static int
+mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
+			       Mgmtd__FeMessage *fe_msg)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	switch (fe_msg->message_case) {
+	case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+		MGMTD_FE_ADAPTER_DBG("Got Register Req Msg from '%s'",
+				       fe_msg->register_req->client_name);
+
+		if (strlen(fe_msg->register_req->client_name)) {
+			/* Adopt the client's name, then reap any stale
+			 * adapter left by a previous connection. */
+			strlcpy(adapter->name,
+				fe_msg->register_req->client_name,
+				sizeof(adapter->name));
+			mgmt_fe_adapter_cleanup_old_conn(adapter);
+		}
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+		if (fe_msg->session_req->create
+		    && fe_msg->session_req->id_case
+			== MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Got Session Create Req Msg for client-id %llu from '%s'",
+				(unsigned long long)
+					fe_msg->session_req->client_conn_id,
+				adapter->name);
+
+			session = mgmt_fe_create_session(
+				adapter, fe_msg->session_req->client_conn_id);
+			mgmt_fe_send_session_reply(adapter, session, true,
+						       session ? true : false);
+		} else if (
+			!fe_msg->session_req->create
+			&& fe_msg->session_req->id_case
+				== MGMTD__FE_SESSION_REQ__ID_SESSION_ID) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Got Session Destroy Req Msg for session-id %llu from '%s'",
+				(unsigned long long)
+					fe_msg->session_req->session_id,
+				adapter->name);
+
+			session = mgmt_session_id2ctx(
+				fe_msg->session_req->session_id);
+			/* Reply before destroying: the session's ids are
+			 * needed to build the reply. */
+			mgmt_fe_send_session_reply(adapter, session, false,
+						       true);
+			mgmt_fe_cleanup_session(&session);
+		}
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->lockds_req->session_id);
+		MGMTD_FE_ADAPTER_DBG(
+			"Got %sockDS Req Msg for DS:%d for session-id %llx from '%s'",
+			fe_msg->lockds_req->lock ? "L" : "Unl",
+			fe_msg->lockds_req->ds_id,
+			(unsigned long long)fe_msg->lockds_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_lockds_req_msg(
+			session, fe_msg->lockds_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->setcfg_req->session_id);
+		session->adapter->setcfg_stats.set_cfg_count++;
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Set Config Req Msg (%d Xpaths) on DS:%d for session-id %llu from '%s'",
+			(int)fe_msg->setcfg_req->n_data,
+			fe_msg->setcfg_req->ds_id,
+			(unsigned long long)fe_msg->setcfg_req->session_id,
+			adapter->name);
+
+		mgmt_fe_session_handle_setcfg_req_msg(
+			session, fe_msg->setcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->commcfg_req->session_id);
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Commit Config Req Msg for src-DS:%d dst-DS:%d on session-id %llu from '%s'",
+			fe_msg->commcfg_req->src_ds_id,
+			fe_msg->commcfg_req->dst_ds_id,
+			(unsigned long long)fe_msg->commcfg_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_commit_config_req_msg(
+			session, fe_msg->commcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->getcfg_req->session_id);
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Get-Config Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+			fe_msg->getcfg_req->ds_id,
+			(int)fe_msg->getcfg_req->n_data,
+			(unsigned long long)fe_msg->getcfg_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_getcfg_req_msg(
+			session, fe_msg->getcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->getdata_req->session_id);
+		MGMTD_FE_ADAPTER_DBG(
+			"Got Get-Data Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+			fe_msg->getdata_req->ds_id,
+			(int)fe_msg->getdata_req->n_data,
+			(unsigned long long)fe_msg->getdata_req->session_id,
+			adapter->name);
+		mgmt_fe_session_handle_getdata_req_msg(
+			session, fe_msg->getdata_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from MGMTD to
+	 * Frontend clients only and/or need not be handled on MGMTd.
+	 */
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+#if PROTOBUF_C_VERSION_NUMBER >= 1003000
+	case _MGMTD__FE_MESSAGE__MESSAGE_IS_INT_SIZE:
+#endif
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk a buffer of framed frontend messages, protobuf-decode each one and
+ * dispatch it via mgmt_fe_adapter_handle_msg().
+ *
+ * Framing: each message starts with a struct mgmt_fe_msg header (marker +
+ * total length including header), followed by the protobuf payload.
+ * Processing stops at the first bad marker or truncated message; a payload
+ * that fails to unpack is skipped (the header length still advances the
+ * cursor). Returns the number of messages successfully decoded.
+ *
+ * Fix: corrected "epxected" -> "expected" in the truncation debug message.
+ */
+static uint16_t
+mgmt_fe_adapter_process_msg(struct mgmt_fe_client_adapter *adapter,
+				uint8_t *msg_buf, uint16_t bytes_read)
+{
+	Mgmtd__FeMessage *fe_msg;
+	struct mgmt_fe_msg *msg;
+	uint16_t bytes_left;
+	uint16_t processed = 0;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Have %u bytes of messages from client '%s' to process",
+		bytes_read, adapter->name);
+
+	bytes_left = bytes_read;
+	for (; bytes_left > MGMTD_FE_MSG_HDR_LEN;
+	     bytes_left -= msg->hdr.len, msg_buf += msg->hdr.len) {
+		msg = (struct mgmt_fe_msg *)msg_buf;
+		if (msg->hdr.marker != MGMTD_FE_MSG_MARKER) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Marker not found in message from MGMTD Frontend adapter '%s'",
+				adapter->name);
+			break;
+		}
+
+		if (bytes_left < msg->hdr.len) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Incomplete message of %d bytes (expected: %u) from MGMTD Frontend adapter '%s'",
+				bytes_left, msg->hdr.len, adapter->name);
+			break;
+		}
+
+		fe_msg = mgmtd__fe_message__unpack(
+			NULL, (size_t)(msg->hdr.len - MGMTD_FE_MSG_HDR_LEN),
+			msg->payload);
+		if (!fe_msg) {
+			/* Skip just this message; the loop header still
+			 * advances by msg->hdr.len. */
+			MGMTD_FE_ADAPTER_DBG(
+				"Failed to decode %d bytes from MGMTD Frontend adapter '%s'",
+				msg->hdr.len, adapter->name);
+			continue;
+		}
+
+		/* NOTE(review): message_case is printed for both %u slots;
+		 * one was likely meant to be a message counter. */
+		MGMTD_FE_ADAPTER_DBG(
+			"Decoded %d bytes of message(msg: %u/%u) from MGMTD Frontend adapter '%s'",
+			msg->hdr.len, fe_msg->message_case,
+			fe_msg->message_case, adapter->name);
+
+		(void)mgmt_fe_adapter_handle_msg(adapter, fe_msg);
+
+		mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+		processed++;
+		adapter->num_msg_rx++;
+	}
+
+	return processed;
+}
+
+/*
+ * Event-loop callback: drain queued input buffers for an adapter,
+ * decoding and dispatching the messages in each.
+ *
+ * At most MGMTD_FE_MAX_NUM_MSG_PROC messages are handled per invocation;
+ * if buffers remain afterwards the event re-arms itself so other work can
+ * run in between. The long-lived 'ibuf_work' stream is reset for reuse
+ * rather than freed.
+ */
+static void mgmt_fe_adapter_proc_msgbufs(struct thread *thread)
+{
+	struct mgmt_fe_client_adapter *adapter;
+	struct stream *work;
+	int processed = 0;
+
+	adapter = (struct mgmt_fe_client_adapter *)THREAD_ARG(thread);
+	assert(adapter && adapter->conn_fd >= 0);
+
+	MGMTD_FE_ADAPTER_DBG("Have %d ibufs for client '%s' to process",
+			       (int)stream_fifo_count_safe(adapter->ibuf_fifo),
+			       adapter->name);
+
+	for (; processed < MGMTD_FE_MAX_NUM_MSG_PROC;) {
+		work = stream_fifo_pop_safe(adapter->ibuf_fifo);
+		if (!work)
+			break;
+
+		processed += mgmt_fe_adapter_process_msg(
+			adapter, STREAM_DATA(work), stream_get_endp(work));
+
+		if (work != adapter->ibuf_work) {
+			/* Free it up */
+			stream_free(work);
+		} else {
+			/* Reset stream buffer for next read */
+			stream_reset(work);
+		}
+	}
+
+	/*
+	 * If we have more to process, reschedule for processing it.
+	 */
+	if (stream_fifo_head(adapter->ibuf_fifo))
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+}
+
+/*
+ * Read callback for a frontend client connection.
+ *
+ * Phase 1 fills the private ibuf_work stream from the socket.  Phase 2
+ * walks the buffered bytes validating each message header (marker and
+ * length).  All complete messages are queued on ibuf_fifo for the
+ * MGMTD_FE_PROC_MSG timer to decode; a trailing partial message is
+ * carried over into a freshly allocated ibuf_work for the next read.
+ */
+static void mgmt_fe_adapter_read(struct thread *thread)
+{
+	struct mgmt_fe_client_adapter *adapter;
+	int bytes_read, msg_cnt;
+	size_t total_bytes, bytes_left;
+	struct mgmt_fe_msg_hdr *msg_hdr;
+	bool incomplete = false;
+
+	adapter = (struct mgmt_fe_client_adapter *)THREAD_ARG(thread);
+	/* NOTE(review): sibling callbacks assert conn_fd >= 0; this only
+	 * rejects fd 0 — confirm which check is intended. */
+	assert(adapter && adapter->conn_fd);
+
+	total_bytes = 0;
+	bytes_left = STREAM_SIZE(adapter->ibuf_work)
+		     - stream_get_endp(adapter->ibuf_work);
+	for (; bytes_left > MGMTD_FE_MSG_HDR_LEN;) {
+		bytes_read = stream_read_try(adapter->ibuf_work, adapter->conn_fd,
+					     bytes_left);
+		MGMTD_FE_ADAPTER_DBG(
+			"Got %d bytes of message from MGMTD Frontend adapter '%s'",
+			bytes_read, adapter->name);
+		if (bytes_read <= 0) {
+			/* Would-block: socket drained for now, re-arm read. */
+			if (bytes_read == -1
+			    && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+				mgmt_fe_adapter_register_event(
+					adapter, MGMTD_FE_CONN_READ);
+				return;
+			}
+
+			/* NOTE(review): a 0-byte read (peer closed) is logged
+			 * as an "error" and disconnects, while a hard error
+			 * (negative return, errno not EAGAIN) falls through
+			 * to 'break' without disconnecting — confirm this is
+			 * the intended asymmetry. */
+			if (!bytes_read) {
+				/* Looks like connection closed */
+				MGMTD_FE_ADAPTER_ERR(
+					"Got error (%d) while reading from MGMTD Frontend adapter '%s'. Err: '%s'",
+					bytes_read, adapter->name,
+					safe_strerror(errno));
+				mgmt_fe_adapter_disconnect(adapter);
+				return;
+			}
+			break;
+		}
+
+		total_bytes += bytes_read;
+		bytes_left -= bytes_read;
+	}
+
+	/*
+	 * Check if we would have read incomplete messages or not.
+	 */
+	stream_set_getp(adapter->ibuf_work, 0);
+	total_bytes = 0;
+	msg_cnt = 0;
+	bytes_left = stream_get_endp(adapter->ibuf_work);
+	for (; bytes_left > MGMTD_FE_MSG_HDR_LEN;) {
+		msg_hdr =
+			(struct mgmt_fe_msg_hdr *)(STREAM_DATA(
+							   adapter->ibuf_work)
+						   + total_bytes);
+		if (msg_hdr->marker != MGMTD_FE_MSG_MARKER) {
+			/* Corrupted buffer. Force disconnect?? */
+			MGMTD_FE_ADAPTER_ERR(
+				"Received corrupted buffer from MGMTD frontend client.");
+			mgmt_fe_adapter_disconnect(adapter);
+			return;
+		}
+		if (msg_hdr->len > bytes_left)
+			break;
+
+		MGMTD_FE_ADAPTER_DBG("Got message (len: %u) from client '%s'",
+				     msg_hdr->len, adapter->name);
+
+		total_bytes += msg_hdr->len;
+		bytes_left -= msg_hdr->len;
+		msg_cnt++;
+	}
+
+	/* Anything left after the walk is a partial message (or a header
+	 * shorter than MGMTD_FE_MSG_HDR_LEN). */
+	if (bytes_left > 0)
+		incomplete = true;
+	/*
+	 * We would have read one or several messages.
+	 * Schedule processing them now.
+	 */
+	msg_hdr = (struct mgmt_fe_msg_hdr *)(STREAM_DATA(adapter->ibuf_work)
+					     + total_bytes);
+	stream_set_endp(adapter->ibuf_work, total_bytes);
+	stream_fifo_push(adapter->ibuf_fifo, adapter->ibuf_work);
+	adapter->ibuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+	if (incomplete) {
+		/* Copy the partial tail into the fresh work buffer so the
+		 * next read completes it. */
+		stream_put(adapter->ibuf_work, msg_hdr, bytes_left);
+		stream_set_endp(adapter->ibuf_work, bytes_left);
+	}
+
+	if (msg_cnt)
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+
+	mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+}
+
+/*
+ * Write callback: flush up to MGMTD_FE_MAX_NUM_MSG_WRITE queued output
+ * streams to the client socket.  On EAGAIN the write event is re-armed;
+ * on a partial write the stream is rewound/pulled-down and retried; on a
+ * hard error the adapter is disconnected.  If messages remain after the
+ * batch limit, writes are paused and resumed via a short timer.
+ */
+static void mgmt_fe_adapter_write(struct thread *thread)
+{
+	int bytes_written = 0;
+	int processed = 0;
+	int msg_size = 0;
+	struct stream *s = NULL;
+	struct stream *free = NULL;
+	struct mgmt_fe_client_adapter *adapter;
+
+	adapter = (struct mgmt_fe_client_adapter *)THREAD_ARG(thread);
+	assert(adapter && adapter->conn_fd);
+
+	/* Ensure pushing any pending write buffer to FIFO */
+	if (adapter->obuf_work) {
+		stream_fifo_push(adapter->obuf_fifo, adapter->obuf_work);
+		adapter->obuf_work = NULL;
+	}
+
+	for (s = stream_fifo_head(adapter->obuf_fifo);
+	     s && processed < MGMTD_FE_MAX_NUM_MSG_WRITE;
+	     s = stream_fifo_head(adapter->obuf_fifo)) {
+		/* msg_size = (int)stream_get_size(s); */
+		msg_size = (int)STREAM_READABLE(s);
+		bytes_written = stream_flush(s, adapter->conn_fd);
+		if (bytes_written == -1
+		    && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+			/* Socket full; try again when writable. */
+			mgmt_fe_adapter_register_event(
+				adapter, MGMTD_FE_CONN_WRITE);
+			return;
+		} else if (bytes_written != msg_size) {
+			MGMTD_FE_ADAPTER_ERR(
+				"Could not write all %d bytes (wrote: %d) to MGMTD Frontend client socket. Err: '%s'",
+				msg_size, bytes_written, safe_strerror(errno));
+			if (bytes_written > 0) {
+				/* Partial write: drop what was sent and keep
+				 * the remainder at the head of the stream. */
+				stream_forward_getp(s, (size_t)bytes_written);
+				stream_pulldown(s);
+				mgmt_fe_adapter_register_event(
+					adapter, MGMTD_FE_CONN_WRITE);
+				return;
+			}
+			mgmt_fe_adapter_disconnect(adapter);
+			return;
+		}
+
+		free = stream_fifo_pop(adapter->obuf_fifo);
+		stream_free(free);
+		MGMTD_FE_ADAPTER_DBG(
+			"Wrote %d bytes of message to MGMTD Frontend client socket.'",
+			bytes_written);
+		processed++;
+	}
+
+	/* Batch limit hit with data still queued: throttle, then resume. */
+	if (s) {
+		mgmt_fe_adapter_writes_off(adapter);
+		mgmt_fe_adapter_register_event(adapter,
+					       MGMTD_FE_CONN_WRITES_ON);
+	}
+}
+
+/* Timer callback (MGMTD_FE_CONN_WRITES_ON): re-enable writes after the
+ * throttle delay imposed by mgmt_fe_adapter_write(). */
+static void mgmt_fe_adapter_resume_writes(struct thread *thread)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	adapter = (struct mgmt_fe_client_adapter *)THREAD_ARG(thread);
+	assert(adapter && adapter->conn_fd);
+
+	mgmt_fe_adapter_writes_on(adapter);
+}
+
+/*
+ * Schedule one of the adapter's I/O events on the shared thread master.
+ * MGMTD_FE_SERVER is handled by the listen socket, never here.
+ */
+static void
+mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
+			       enum mgmt_fe_event event)
+{
+	struct timeval tv = {0};
+
+	switch (event) {
+	case MGMTD_FE_CONN_READ:
+		thread_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
+				adapter, adapter->conn_fd, &adapter->conn_read_ev);
+		assert(adapter->conn_read_ev);
+		break;
+	case MGMTD_FE_CONN_WRITE:
+		thread_add_write(mgmt_fe_adapter_tm,
+				 mgmt_fe_adapter_write, adapter,
+				 adapter->conn_fd, &adapter->conn_write_ev);
+		assert(adapter->conn_write_ev);
+		break;
+	case MGMTD_FE_PROC_MSG:
+		/* Slight delay so several reads can batch into one pass. */
+		tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
+		thread_add_timer_tv(mgmt_fe_adapter_tm,
+				    mgmt_fe_adapter_proc_msgbufs, adapter,
+				    &tv, &adapter->proc_msg_ev);
+		assert(adapter->proc_msg_ev);
+		break;
+	case MGMTD_FE_CONN_WRITES_ON:
+		thread_add_timer_msec(mgmt_fe_adapter_tm,
+				      mgmt_fe_adapter_resume_writes, adapter,
+				      MGMTD_FE_MSG_WRITE_DELAY_MSEC,
+				      &adapter->conn_writes_on);
+		assert(adapter->conn_writes_on);
+		break;
+	case MGMTD_FE_SERVER:
+		assert(!"mgmt_fe_adapter_post_event() called incorrectly");
+		break;
+	}
+}
+
+/* Take a reference on the adapter; released via mgmt_fe_adapter_unlock(). */
+void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
+{
+	adapter->refcount++;
+}
+
+/*
+ * Drop a reference on the adapter.  When the count reaches zero the
+ * adapter is removed from the global list, its I/O buffers and pending
+ * events are torn down, and the structure is freed.  The caller's
+ * pointer is always NULLed on return.
+ */
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
+{
+	assert(*adapter && (*adapter)->refcount);
+
+	(*adapter)->refcount--;
+	if (!(*adapter)->refcount) {
+		mgmt_fe_adapters_del(&mgmt_fe_adapters, *adapter);
+
+		stream_fifo_free((*adapter)->ibuf_fifo);
+		stream_free((*adapter)->ibuf_work);
+		stream_fifo_free((*adapter)->obuf_fifo);
+		stream_free((*adapter)->obuf_work);
+
+		THREAD_OFF((*adapter)->conn_read_ev);
+		THREAD_OFF((*adapter)->conn_write_ev);
+		THREAD_OFF((*adapter)->proc_msg_ev);
+		THREAD_OFF((*adapter)->conn_writes_on);
+		XFREE(MTYPE_MGMTD_FE_ADPATER, *adapter);
+	}
+
+	*adapter = NULL;
+}
+
+/*
+ * One-time initialization of the frontend adapter module: records the
+ * thread master and mgmt master, and creates the adapter list and the
+ * session-id hash.  Subsequent calls are no-ops.  Always returns 0.
+ */
+int mgmt_fe_adapter_init(struct thread_master *tm, struct mgmt_master *mm)
+{
+	if (!mgmt_fe_adapter_tm) {
+		mgmt_fe_adapter_tm = tm;
+		mgmt_fe_adapter_mm = mm;
+		mgmt_fe_adapters_init(&mgmt_fe_adapters);
+
+		assert(!mgmt_fe_sessions);
+		mgmt_fe_sessions = hash_create(mgmt_fe_session_hash_key,
+					       mgmt_fe_session_hash_cmp,
+					       "MGMT Frontend Sessions");
+	}
+
+	return 0;
+}
+
+/* Tear down all adapters and the session hash (shutdown path). */
+void mgmt_fe_adapter_destroy(void)
+{
+	mgmt_fe_cleanup_adapters();
+	mgmt_fe_session_hash_destroy();
+}
+
+/*
+ * Find-or-create an adapter for an accepted connection fd.  A new
+ * adapter starts with a placeholder name ("Unknown-FD-<fd>") until the
+ * client registers, holds one reference, and has its read event armed.
+ * Socket options (non-blocking, send/recv buffer sizes) are (re)applied
+ * on every call, even for an existing adapter.
+ */
+struct mgmt_fe_client_adapter *
+mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
+{
+	struct mgmt_fe_client_adapter *adapter = NULL;
+
+	adapter = mgmt_fe_find_adapter_by_fd(conn_fd);
+	if (!adapter) {
+		adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER,
+				  sizeof(struct mgmt_fe_client_adapter));
+		assert(adapter);
+
+		adapter->conn_fd = conn_fd;
+		memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
+		snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+			 adapter->conn_fd);
+		mgmt_fe_sessions_init(&adapter->fe_sessions);
+		adapter->ibuf_fifo = stream_fifo_new();
+		adapter->ibuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN);
+		adapter->obuf_fifo = stream_fifo_new();
+		/* adapter->obuf_work = stream_new(MGMTD_FE_MSG_MAX_LEN); */
+		adapter->obuf_work = NULL;
+		mgmt_fe_adapter_lock(adapter);
+
+		mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+		mgmt_fe_adapters_add_tail(&mgmt_fe_adapters, adapter);
+
+		/* Seed minima high so the first sample establishes them. */
+		adapter->setcfg_stats.min_tm = ULONG_MAX;
+		adapter->cmt_stats.min_tm = ULONG_MAX;
+		MGMTD_FE_ADAPTER_DBG("Added new MGMTD Frontend adapter '%s'",
+				     adapter->name);
+	}
+
+	/* Make client socket non-blocking. */
+	set_nonblocking(adapter->conn_fd);
+	setsockopt_so_sendbuf(adapter->conn_fd,
+			      MGMTD_SOCKET_FE_SEND_BUF_SIZE);
+	setsockopt_so_recvbuf(adapter->conn_fd,
+			      MGMTD_SOCKET_FE_RECV_BUF_SIZE);
+	return adapter;
+}
+
+/* Look up an adapter by its registered client name; NULL if not found. */
+struct mgmt_fe_client_adapter *mgmt_fe_get_adapter(const char *name)
+{
+	return mgmt_fe_find_adapter_by_name(name);
+}
+
+/*
+ * Forward a SET_CONFIG result to the owning frontend session.
+ *
+ * Validates that the session exists and that txn_id matches the
+ * session's current config transaction before replying.
+ *
+ * Returns 0 on success, -1 if the session is unknown or the
+ * transaction id does not match.
+ */
+int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+			       Mgmtd__DatastoreId ds_id, uint64_t req_id,
+			       enum mgmt_result result,
+			       const char *error_if_any,
+			       bool implicit_commit)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->cfg_txn_id != txn_id) {
+		if (session)
+			MGMTD_FE_ADAPTER_ERR(
+				"Txn_id does not match, session txn is 0x%llx, current txn 0x%llx",
+				(unsigned long long)session->cfg_txn_id,
+				(unsigned long long)txn_id);
+		return -1;
+	}
+
+	return mgmt_fe_send_setcfg_reply(
+		session, ds_id, req_id, result == MGMTD_SUCCESS ? true : false,
+		error_if_any, implicit_commit);
+}
+
+/*
+ * Forward a COMMIT_CONFIG result to the owning frontend session.
+ * Returns 0 on success, -1 if the session is unknown or txn_id does not
+ * match the session's config transaction.
+ */
+int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
+				  Mgmtd__DatastoreId src_ds_id,
+				  Mgmtd__DatastoreId dst_ds_id,
+				  uint64_t req_id, bool validate_only,
+				  enum mgmt_result result,
+				  const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->cfg_txn_id != txn_id)
+		return -1;
+
+	return mgmt_fe_send_commitcfg_reply(session, src_ds_id, dst_ds_id,
+					    req_id, result, validate_only,
+					    error_if_any);
+}
+
+/*
+ * Forward a GET_CONFIG result to the owning frontend session.  Note this
+ * checks session->txn_id (the show transaction), not cfg_txn_id.
+ * Returns 0 on success, -1 on unknown session or txn mismatch.
+ */
+int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
+			       Mgmtd__DatastoreId ds_id, uint64_t req_id,
+			       enum mgmt_result result,
+			       Mgmtd__YangDataReply *data_resp,
+			       const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->txn_id != txn_id)
+		return -1;
+
+	return mgmt_fe_send_getcfg_reply(session, ds_id, req_id,
+					 result == MGMTD_SUCCESS, data_resp,
+					 error_if_any);
+}
+
+/*
+ * Forward a GET_DATA (operational state) result to the owning frontend
+ * session.  Returns 0 on success, -1 on unknown session or txn mismatch.
+ */
+int mgmt_fe_send_get_data_reply(uint64_t session_id, uint64_t txn_id,
+				Mgmtd__DatastoreId ds_id, uint64_t req_id,
+				enum mgmt_result result,
+				Mgmtd__YangDataReply *data_resp,
+				const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->txn_id != txn_id)
+		return -1;
+
+	return mgmt_fe_send_getdata_reply(session, ds_id, req_id,
+					  result == MGMTD_SUCCESS,
+					  data_resp, error_if_any);
+}
+
+/*
+ * Push a data-change notification to frontend clients.
+ * Currently a stub: does nothing and returns 0.
+ */
+int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
+			     Mgmtd__YangData * data_resp[], int num_data)
+{
+	/* struct mgmt_fe_session_ctx *session; */
+
+	return 0;
+}
+
+/*
+ * Return the set-config stats of the adapter owning the given session,
+ * or NULL if the session (or its adapter) is unknown.  Stats are kept
+ * per-adapter, so all sessions of one client share the same block.
+ */
+struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || !session->adapter)
+		return NULL;
+
+	return &session->adapter->setcfg_stats;
+}
+
+/*
+ * Return the commit stats of the adapter owning the given session, or
+ * NULL if the session (or its adapter) is unknown.  Per-adapter, shared
+ * by all of the client's sessions.
+ */
+struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || !session->adapter)
+		return NULL;
+
+	return &session->adapter->cmt_stats;
+}
+
+/*
+ * Print an adapter's commit statistics to the vty.  Emits nothing when
+ * perf measurement is globally disabled.
+ *
+ * NOTE(review): the inner 'if (mm->perf_stats_en)' checks are redundant
+ * after the early return above — confirm whether a finer-grained gating
+ * was intended.
+ */
+static void
+mgmt_fe_adapter_cmt_stats_write(struct vty *vty,
+				struct mgmt_fe_client_adapter *adapter)
+{
+	char buf[100] = {0};
+
+	if (!mm->perf_stats_en)
+		return;
+
+	vty_out(vty, "    Num-Commits: \t\t\t%lu\n",
+		adapter->cmt_stats.commit_cnt);
+	if (adapter->cmt_stats.commit_cnt > 0) {
+		if (mm->perf_stats_en)
+			vty_out(vty, "    Max-Commit-Duration: \t\t%lu uSecs\n",
+				adapter->cmt_stats.max_tm);
+		vty_out(vty, "    Max-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.max_batch_cnt);
+		if (mm->perf_stats_en)
+			vty_out(vty, "    Min-Commit-Duration: \t\t%lu uSecs\n",
+				adapter->cmt_stats.min_tm);
+		vty_out(vty, "    Min-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.min_batch_cnt);
+		if (mm->perf_stats_en)
+			vty_out(vty,
+				"    Last-Commit-Duration: \t\t%lu uSecs\n",
+				adapter->cmt_stats.last_exec_tm);
+		vty_out(vty, "    Last-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.last_batch_cnt);
+		vty_out(vty, "    Last-Commit-CfgData-Reqs: \t\t%lu\n",
+			adapter->cmt_stats.last_num_cfgdata_reqs);
+		vty_out(vty, "    Last-Commit-CfgApply-Reqs: \t\t%lu\n",
+			adapter->cmt_stats.last_num_apply_reqs);
+		if (mm->perf_stats_en) {
+			vty_out(vty, "    Last-Commit-Details:\n");
+			vty_out(vty, "      Commit Start: \t\t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.last_start, buf,
+					sizeof(buf)));
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+			vty_out(vty, "        Config-Validate Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.validate_start, buf,
+					sizeof(buf)));
+#endif
+			vty_out(vty, "        Prep-Config Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.prep_cfg_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Txn-Create Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.txn_create_start,
+					buf, sizeof(buf)));
+			vty_out(vty,
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+				"        Send-Config Start: \t\t%s\n",
+#else
+				"        Send-Config-Validate Start: \t%s\n",
+#endif
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.send_cfg_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Apply-Config Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.apply_cfg_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Apply-Config End: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.apply_cfg_end, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Txn-Delete Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.txn_del_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "      Commit End: \t\t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.last_end, buf,
+					sizeof(buf)));
+		}
+	}
+}
+
+/*
+ * Print an adapter's set-config statistics to the vty.  Emits nothing
+ * when perf measurement is globally disabled (the inner perf_stats_en
+ * check is then redundant).
+ */
+static void
+mgmt_fe_adapter_setcfg_stats_write(struct vty *vty,
+				   struct mgmt_fe_client_adapter *adapter)
+{
+	char buf[100] = {0};
+
+	if (!mm->perf_stats_en)
+		return;
+
+	vty_out(vty, "    Num-Set-Cfg: \t\t\t%lu\n",
+		adapter->setcfg_stats.set_cfg_count);
+	if (mm->perf_stats_en && adapter->setcfg_stats.set_cfg_count > 0) {
+		vty_out(vty, "    Max-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.max_tm);
+		vty_out(vty, "    Min-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.min_tm);
+		vty_out(vty, "    Avg-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.avg_tm);
+		vty_out(vty, "    Last-Set-Cfg-Details:\n");
+		vty_out(vty, "      Set-Cfg Start: \t\t\t%s\n",
+			mgmt_realtime_to_string(&adapter->setcfg_stats.last_start,
+						buf, sizeof(buf)));
+		vty_out(vty, "      Set-Cfg End: \t\t\t%s\n",
+			mgmt_realtime_to_string(&adapter->setcfg_stats.last_end,
+						buf, sizeof(buf)));
+	}
+}
+
+/*
+ * Dump the state of every frontend adapter (and its sessions, datastore
+ * locks and message counters) to the vty.  With 'detail', per-adapter
+ * set-config and commit statistics are included.
+ *
+ * Fixes: 'locked' is now reset for every session — previously a single
+ * session holding a lock suppressed the "None" line for all subsequent
+ * sessions.  Also corrects the "Adpaters" typo in the heading.
+ */
+void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
+{
+	struct mgmt_fe_client_adapter *adapter;
+	struct mgmt_fe_session_ctx *session;
+	Mgmtd__DatastoreId ds_id;
+	bool locked;
+
+	vty_out(vty, "MGMTD Frontend Adapters\n");
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		vty_out(vty, "  Client: \t\t\t\t%s\n", adapter->name);
+		vty_out(vty, "    Conn-FD: \t\t\t\t%d\n", adapter->conn_fd);
+		if (detail) {
+			mgmt_fe_adapter_setcfg_stats_write(vty, adapter);
+			mgmt_fe_adapter_cmt_stats_write(vty, adapter);
+		}
+		vty_out(vty, "    Sessions\n");
+		FOREACH_SESSION_IN_LIST (adapter, session) {
+			/* Per-session: whether any datastore lock is held. */
+			locked = false;
+			vty_out(vty, "      Session: \t\t\t\t%p\n", session);
+			vty_out(vty, "        Client-Id: \t\t\t%llu\n",
+				(unsigned long long)session->client_id);
+			vty_out(vty, "        Session-Id: \t\t\t%llx\n",
+				(unsigned long long)session->session_id);
+			vty_out(vty, "        DS-Locks:\n");
+			FOREACH_MGMTD_DS_ID (ds_id) {
+				if (session->ds_write_locked[ds_id]
+				    || session->ds_read_locked[ds_id]) {
+					locked = true;
+					vty_out(vty,
+						"          %s\t\t\t%s, %s\n",
+						mgmt_ds_id2name(ds_id),
+						session->ds_write_locked[ds_id]
+							? "Write"
+							: "Read",
+						session->ds_locked_implict[ds_id]
+							? "Implicit"
+							: "Explicit");
+				}
+			}
+			if (!locked)
+				vty_out(vty, "          None\n");
+		}
+		vty_out(vty, "    Total-Sessions: \t\t\t%d\n",
+			(int)mgmt_fe_sessions_count(&adapter->fe_sessions));
+		vty_out(vty, "    Msg-Sent: \t\t\t\t%u\n", adapter->num_msg_tx);
+		vty_out(vty, "    Msg-Recvd: \t\t\t\t%u\n",
+			adapter->num_msg_rx);
+	}
+	vty_out(vty, "  Total: %d\n",
+		(int)mgmt_fe_adapters_count(&mgmt_fe_adapters));
+}
+
+/* Enable/disable global perf-stats collection; 'vty' is unused. */
+void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config)
+{
+	mm->perf_stats_en = config;
+}
+
+/*
+ * Zero the perf statistics of every adapter.
+ *
+ * NOTE(review): cmt_stats is per-adapter, yet it is memset once per
+ * session — redundant (and skipped entirely for adapters with no
+ * sessions).  Also note min_tm is seeded to ULONG_MAX at adapter
+ * creation but zeroed here, which may skew later minima — confirm.
+ */
+void mgmt_fe_adapter_reset_perf_stats(struct vty *vty)
+{
+	struct mgmt_fe_client_adapter *adapter;
+	struct mgmt_fe_session_ctx *session;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		memset(&adapter->setcfg_stats, 0, sizeof(adapter->setcfg_stats));
+		FOREACH_SESSION_IN_LIST (adapter, session) {
+			memset(&adapter->cmt_stats, 0, sizeof(adapter->cmt_stats));
+		}
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_ADAPTER_H_
+#define _FRR_MGMTD_FE_ADAPTER_H_
+
+struct mgmt_fe_client_adapter;
+struct mgmt_master;
+
+/*
+ * Per-adapter commit statistics: wall-clock checkpoints of the phases of
+ * the most recent commit, plus running duration/batch-size aggregates.
+ * Durations are in microseconds (printed as "uSecs" by the vty code).
+ */
+struct mgmt_commit_stats {
+	struct timeval last_start;	/* start of the most recent commit */
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+	struct timeval validate_start;
+#endif
+	struct timeval prep_cfg_start;
+	struct timeval txn_create_start;
+	struct timeval send_cfg_start;
+	struct timeval apply_cfg_start;
+	struct timeval apply_cfg_end;
+	struct timeval txn_del_start;
+	struct timeval last_end;	/* end of the most recent commit */
+	unsigned long last_exec_tm;	/* duration of last commit, uSec */
+	unsigned long max_tm;		/* longest commit seen, uSec */
+	unsigned long min_tm;		/* shortest commit seen, uSec
+					 * (seeded to ULONG_MAX) */
+	unsigned long last_batch_cnt;
+	unsigned long last_num_cfgdata_reqs;
+	unsigned long last_num_apply_reqs;
+	unsigned long max_batch_cnt;
+	unsigned long min_batch_cnt;
+	unsigned long commit_cnt;	/* total commits processed */
+};
+
+/*
+ * Per-adapter SET_CONFIG statistics; durations in microseconds.
+ */
+struct mgmt_setcfg_stats {
+	struct timeval last_start;	/* start of the most recent set-cfg */
+	struct timeval last_end;	/* end of the most recent set-cfg */
+	unsigned long last_exec_tm;	/* duration of last set-cfg, uSec */
+	unsigned long max_tm;		/* longest set-cfg seen, uSec */
+	unsigned long min_tm;		/* shortest set-cfg seen, uSec
+					 * (seeded to ULONG_MAX) */
+	unsigned long avg_tm;		/* running average, uSec */
+	unsigned long set_cfg_count;	/* total set-cfg requests */
+};
+
+PREDECL_LIST(mgmt_fe_sessions);
+
+PREDECL_LIST(mgmt_fe_adapters);
+
+/*
+ * State for one connected frontend client: its socket, scheduled I/O
+ * events, session list, buffered input/output streams, and statistics.
+ * Reference-counted via mgmt_fe_adapter_lock()/_unlock().
+ */
+struct mgmt_fe_client_adapter {
+	int conn_fd;			/* accepted client socket (-1 if closed) */
+	union sockunion conn_su;	/* peer address */
+	struct thread *conn_read_ev;	/* MGMTD_FE_CONN_READ */
+	struct thread *conn_write_ev;	/* MGMTD_FE_CONN_WRITE */
+	struct thread *conn_writes_on;	/* MGMTD_FE_CONN_WRITES_ON timer */
+	struct thread *proc_msg_ev;	/* MGMTD_FE_PROC_MSG timer */
+	uint32_t flags;			/* MGMTD_FE_ADAPTER_FLAGS_* */
+
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+
+	/* List of sessions created and being maintained for this client. */
+	struct mgmt_fe_sessions_head fe_sessions;
+
+	/* IO streams for read and write */
+	/* pthread_mutex_t ibuf_mtx; */
+	struct stream_fifo *ibuf_fifo;
+	/* pthread_mutex_t obuf_mtx; */
+	struct stream_fifo *obuf_fifo;
+
+	/* Private I/O buffers */
+	struct stream *ibuf_work;	/* in-progress read buffer */
+	struct stream *obuf_work;	/* in-progress write buffer */
+
+	int refcount;			/* freed when it drops to zero */
+	uint32_t num_msg_tx;		/* messages sent to this client */
+	uint32_t num_msg_rx;		/* messages received from client */
+	struct mgmt_commit_stats cmt_stats;
+	struct mgmt_setcfg_stats setcfg_stats;
+
+	struct mgmt_fe_adapters_item list_linkage;
+};
+
+#define MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
+
+DECLARE_LIST(mgmt_fe_adapters, struct mgmt_fe_client_adapter, list_linkage);
+
+/* Initialise frontend adapter module */
+extern int mgmt_fe_adapter_init(struct thread_master *tm,
+ struct mgmt_master *cm);
+
+/* Destroy frontend adapter module */
+extern void mgmt_fe_adapter_destroy(void);
+
+/* Acquire lock for frontend adapter */
+extern void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter);
+
+/* Remove lock from frontend adapter */
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
+
+/* Create frontend adapter */
+extern struct mgmt_fe_client_adapter *
+mgmt_fe_create_adapter(int conn_fd, union sockunion *su);
+
+/* Fetch frontend adapter given a name */
+extern struct mgmt_fe_client_adapter *
+mgmt_fe_get_adapter(const char *name);
+
+/*
+ * Send set-config reply to the frontend client.
+ *
+ * session
+ * Unique session identifier.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * req_id
+ * Config request ID.
+ *
+ * result
+ * Config request result (MGMT_*).
+ *
+ * error_if_any
+ * Buffer to store human-readable error message in case of error.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id,
+ enum mgmt_result result,
+ const char *error_if_any,
+ bool implcit_commit);
+
+/*
+ * Send commit-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_commit_cfg_reply(
+ uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, bool validate_only,
+ enum mgmt_result result, const char *error_if_any);
+
+/*
+ * Send get-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any);
+
+/*
+ * Send get-data reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_data_reply(
+ uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId ds_id,
+ uint64_t req_id, enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp, const char *error_if_any);
+
+/*
+ * Send data notify to the frontend client.
+ */
+extern int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
+ Mgmtd__YangData * data_resp[],
+ int num_data);
+
+/* Fetch frontend client session set-config stats */
+extern struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id);
+
+/* Fetch frontend client session commit stats */
+extern struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id);
+
+extern void mgmt_fe_adapter_status_write(struct vty *vty, bool detail);
+extern void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config);
+extern void mgmt_fe_adapter_reset_perf_stats(struct vty *vty);
+#endif /* _FRR_MGMTD_FE_ADAPTER_H_ */
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Server
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "network.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_SRVR_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_SRVR_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_SRVR_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_fe) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_FE_SRVR_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+static int mgmt_fe_listen_fd;
+static struct thread_master *mgmt_fe_listen_tm;
+static struct thread *mgmt_fe_listen_ev;
+static void mgmt_fe_server_register_event(enum mgmt_fe_event event);
+
+/*
+ * Accept callback for the frontend listen socket: accepts one client
+ * connection, makes it non-blocking/close-on-exec, and hands it to
+ * mgmt_fe_create_adapter().  Always re-arms the listen event first.
+ */
+static void mgmt_fe_conn_accept(struct thread *thread)
+{
+	int client_conn_fd;
+	union sockunion su;
+
+	if (mgmt_fe_listen_fd < 0)
+		return;
+
+	/* We continue hearing server listen socket. */
+	mgmt_fe_server_register_event(MGMTD_FE_SERVER);
+
+	memset(&su, 0, sizeof(union sockunion));
+
+	/* We can handle IPv4 or IPv6 socket. */
+	client_conn_fd = sockunion_accept(mgmt_fe_listen_fd, &su);
+	if (client_conn_fd < 0) {
+		MGMTD_FE_SRVR_ERR(
+			"Failed to accept MGMTD Frontend client connection : %s",
+			safe_strerror(errno));
+		return;
+	}
+	set_nonblocking(client_conn_fd);
+	set_cloexec(client_conn_fd);
+
+	MGMTD_FE_SRVR_DBG("Got a new MGMTD Frontend connection");
+
+	mgmt_fe_create_adapter(client_conn_fd, &su);
+}
+
+/* Arm the accept handler on the listen socket.  Only MGMTD_FE_SERVER is
+ * valid here; any other event is a programming error. */
+static void mgmt_fe_server_register_event(enum mgmt_fe_event event)
+{
+	if (event == MGMTD_FE_SERVER) {
+		thread_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
+				NULL, mgmt_fe_listen_fd,
+				&mgmt_fe_listen_ev);
+		assert(mgmt_fe_listen_ev);
+	} else {
+		assert(!"mgmt_fe_server_post_event() called incorrectly");
+	}
+}
+
+/*
+ * Create, bind and listen on the frontend UNIX-domain server socket
+ * (MGMTD_FE_SERVER_PATH) and arm the accept handler.  The daemon exits
+ * on any failure.  'hostname' is currently unused.
+ *
+ * Fixes: zero 'addr' before use (it was uninitialized stack memory);
+ * replace the comma-operator typo in the sun_family assignment with a
+ * statement; close the socket on failure only when it was actually
+ * opened (fd 0 is valid, -1 is not — 'if (sock)' got both wrong).
+ */
+static void mgmt_fe_server_start(const char *hostname)
+{
+	int ret;
+	int sock;
+	struct sockaddr_un addr;
+	mode_t old_mask;
+
+	/* Set umask so the socket file is created with tight permissions. */
+	old_mask = umask(0077);
+
+	sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+	if (sock < 0) {
+		MGMTD_FE_SRVR_ERR("Failed to create server socket: %s",
+				  safe_strerror(errno));
+		goto mgmt_fe_server_start_failed;
+	}
+
+	memset(&addr, 0, sizeof(addr));
+	addr.sun_family = AF_UNIX;
+	strlcpy(addr.sun_path, MGMTD_FE_SERVER_PATH, sizeof(addr.sun_path));
+	/* Remove any stale socket file left by a previous run. */
+	unlink(addr.sun_path);
+	ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0) {
+		MGMTD_FE_SRVR_ERR(
+			"Failed to bind server socket to '%s'. Err: %s",
+			addr.sun_path, safe_strerror(errno));
+		goto mgmt_fe_server_start_failed;
+	}
+
+	ret = listen(sock, MGMTD_FE_MAX_CONN);
+	if (ret < 0) {
+		MGMTD_FE_SRVR_ERR("Failed to listen on server socket: %s",
+				  safe_strerror(errno));
+		goto mgmt_fe_server_start_failed;
+	}
+
+	/* Restore umask */
+	umask(old_mask);
+
+	mgmt_fe_listen_fd = sock;
+	mgmt_fe_server_register_event(MGMTD_FE_SERVER);
+
+	MGMTD_FE_SRVR_DBG("Started MGMTD Frontend Server!");
+	return;
+
+mgmt_fe_server_start_failed:
+	if (sock >= 0)
+		close(sock);
+
+	mgmt_fe_listen_fd = -1;
+	exit(-1);
+}
+
+/*
+ * One-time initialization of the frontend server: record the thread
+ * master and start listening.  Idempotent; returns 0 either way.
+ */
+int mgmt_fe_server_init(struct thread_master *master)
+{
+	if (mgmt_fe_listen_tm) {
+		MGMTD_FE_SRVR_DBG("MGMTD Frontend Server already running!");
+		return 0;
+	}
+
+	mgmt_fe_listen_tm = master;
+
+	mgmt_fe_server_start("localhost");
+
+	return 0;
+}
+
+/*
+ * Shut down the frontend server: cancel the accept event, close the
+ * listen socket and clear module state.  Safe to call when the server
+ * was never started.
+ */
+void mgmt_fe_server_destroy(void)
+{
+	if (mgmt_fe_listen_tm) {
+		MGMTD_FE_SRVR_DBG("Closing MGMTD Frontend Server!");
+
+		if (mgmt_fe_listen_ev) {
+			/* THREAD_OFF also NULLs the pointer; the explicit
+			 * assignment below is belt-and-braces. */
+			THREAD_OFF(mgmt_fe_listen_ev);
+			mgmt_fe_listen_ev = NULL;
+		}
+
+		if (mgmt_fe_listen_fd >= 0) {
+			close(mgmt_fe_listen_fd);
+			mgmt_fe_listen_fd = -1;
+		}
+
+		mgmt_fe_listen_tm = NULL;
+	}
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Server
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_SERVER_H_
+#define _FRR_MGMTD_FE_SERVER_H_
+
+#define MGMTD_FE_MAX_CONN 32
+
+/* Initialise frontend server */
+extern int mgmt_fe_server_init(struct thread_master *master);
+
+/* Destroy frontend server */
+extern void mgmt_fe_server_destroy(void);
+
+#endif /* _FRR_MGMTD_FE_SERVER_H_ */
zlog_rotate();
}
-static struct frr_signal_t mgmt_signals[] = {
- {
- .signal = SIGHUP,
- .handler = &sighup,
- },
- {
- .signal = SIGUSR1,
- .handler = &sigusr1,
- },
- {
- .signal = SIGINT,
- .handler = &sigint,
- },
- {
- .signal = SIGTERM,
- .handler = &sigint,
- },
-};
-
-
/*
* Try to free up allocations we know about so that diagnostic tools such as
* valgrind are able to better illuminate leaks.
exit(status);
}
+static struct frr_signal_t mgmt_signals[] = {
+ {
+ .signal = SIGHUP,
+ .handler = &sighup,
+ },
+ {
+ .signal = SIGUSR1,
+ .handler = &sigusr1,
+ },
+ {
+ .signal = SIGINT,
+ .handler = &sigint,
+ },
+ {
+ .signal = SIGTERM,
+ .handler = &sigint,
+ },
+};
+
static int mgmt_vrf_new(struct vrf *vrf)
{
zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
DEFINE_MGROUP(MGMTD, "mgmt");
DEFINE_MTYPE(MGMTD, MGMTD, "MGMTD instance");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "MGMTD Frontend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "MGMTD Frontend Client Session");
DECLARE_MGROUP(MGMTD);
DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_FE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_SESSION);
#endif /* _FRR_MGMTD_MEMORY_H */
+++ /dev/null
-#! /bin/bash
-
-# mgmtd/mgmt_test_fe - temporary wrapper script for .libs/mgmt_test_fe
-# Generated by libtool (GNU libtool) 2.4.6 Debian-2.4.6-2
-#
-# The mgmtd/mgmt_test_fe program cannot be directly executed until all the libtool
-# libraries that it depends on are installed.
-#
-# This wrapper script should never be moved out of the build directory.
-# If it is, it will not operate correctly.
-
-# Sed substitution that helps us do robust quoting. It backslashifies
-# metacharacters that are still active within double-quoted strings.
-sed_quote_subst='s|\([`"$\\]\)|\\\1|g'
-
-# Be Bourne compatible
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
- emulate sh
- NULLCMD=:
- # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
- # is contrary to our usage. Disable this feature.
- alias -g '${1+"$@"}'='"$@"'
- setopt NO_GLOB_SUBST
-else
- case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
-fi
-BIN_SH=xpg4; export BIN_SH # for Tru64
-DUALCASE=1; export DUALCASE # for MKS sh
-
-# The HP-UX ksh and POSIX shell print the target directory to stdout
-# if CDPATH is set.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-relink_command=""
-
-# This environment variable determines our operation mode.
-if test "$libtool_install_magic" = "%%%MAGIC variable%%%"; then
- # install mode needs the following variables:
- generated_by_libtool_version='2.4.6'
- notinst_deplibs=' lib/libfrr.la'
-else
- # When we are sourced in execute mode, $file and $ECHO are already set.
- if test "$libtool_execute_magic" != "%%%MAGIC variable%%%"; then
- file="$0"
-
-# A function that is used when there is no print builtin or printf.
-func_fallback_echo ()
-{
- eval 'cat <<_LTECHO_EOF
-$1
-_LTECHO_EOF'
-}
- ECHO="printf %s\\n"
- fi
-
-# Very basic option parsing. These options are (a) specific to
-# the libtool wrapper, (b) are identical between the wrapper
-# /script/ and the wrapper /executable/ that is used only on
-# windows platforms, and (c) all begin with the string --lt-
-# (application programs are unlikely to have options that match
-# this pattern).
-#
-# There are only two supported options: --lt-debug and
-# --lt-dump-script. There is, deliberately, no --lt-help.
-#
-# The first argument to this parsing function should be the
-# script's ./libtool value, followed by no.
-lt_option_debug=
-func_parse_lt_options ()
-{
- lt_script_arg0=$0
- shift
- for lt_opt
- do
- case "$lt_opt" in
- --lt-debug) lt_option_debug=1 ;;
- --lt-dump-script)
- lt_dump_D=`$ECHO "X$lt_script_arg0" | sed -e 's/^X//' -e 's%/[^/]*$%%'`
- test "X$lt_dump_D" = "X$lt_script_arg0" && lt_dump_D=.
- lt_dump_F=`$ECHO "X$lt_script_arg0" | sed -e 's/^X//' -e 's%^.*/%%'`
- cat "$lt_dump_D/$lt_dump_F"
- exit 0
- ;;
- --lt-*)
- $ECHO "Unrecognized --lt- option: '$lt_opt'" 1>&2
- exit 1
- ;;
- esac
- done
-
- # Print the debug banner immediately:
- if test -n "$lt_option_debug"; then
- echo "mgmt_test_fe:mgmtd/mgmt_test_fe:$LINENO: libtool wrapper (GNU libtool) 2.4.6 Debian-2.4.6-2" 1>&2
- fi
-}
-
-# Used when --lt-debug. Prints its arguments to stdout
-# (redirection is the responsibility of the caller)
-func_lt_dump_args ()
-{
- lt_dump_args_N=1;
- for lt_arg
- do
- $ECHO "mgmt_test_fe:mgmtd/mgmt_test_fe:$LINENO: newargv[$lt_dump_args_N]: $lt_arg"
- lt_dump_args_N=`expr $lt_dump_args_N + 1`
- done
-}
-
-# Core function for launching the target application
-func_exec_program_core ()
-{
-
- if test -n "$lt_option_debug"; then
- $ECHO "mgmt_test_fe:mgmtd/mgmt_test_fe:$LINENO: newargv[0]: $progdir/$program" 1>&2
- func_lt_dump_args ${1+"$@"} 1>&2
- fi
- exec "$progdir/$program" ${1+"$@"}
-
- $ECHO "$0: cannot exec $program $*" 1>&2
- exit 1
-}
-
-# A function to encapsulate launching the target application
-# Strips options in the --lt-* namespace from $@ and
-# launches target application with the remaining arguments.
-func_exec_program ()
-{
- case " $* " in
- *\ --lt-*)
- for lt_wr_arg
- do
- case $lt_wr_arg in
- --lt-*) ;;
- *) set x "$@" "$lt_wr_arg"; shift;;
- esac
- shift
- done ;;
- esac
- func_exec_program_core ${1+"$@"}
-}
-
- # Parse options
- func_parse_lt_options "$0" ${1+"$@"}
-
- # Find the directory that this script lives in.
- thisdir=`$ECHO "$file" | sed 's%/[^/]*$%%'`
- test "x$thisdir" = "x$file" && thisdir=.
-
- # Follow symbolic links until we get to the real thisdir.
- file=`ls -ld "$file" | sed -n 's/.*-> //p'`
- while test -n "$file"; do
- destdir=`$ECHO "$file" | sed 's%/[^/]*$%%'`
-
- # If there was a directory component, then change thisdir.
- if test "x$destdir" != "x$file"; then
- case "$destdir" in
- [\\/]* | [A-Za-z]:[\\/]*) thisdir="$destdir" ;;
- *) thisdir="$thisdir/$destdir" ;;
- esac
- fi
-
- file=`$ECHO "$file" | sed 's%^.*/%%'`
- file=`ls -ld "$thisdir/$file" | sed -n 's/.*-> //p'`
- done
-
- # Usually 'no', except on cygwin/mingw when embedded into
- # the cwrapper.
- WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=no
- if test "$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR" = "yes"; then
- # special case for '.'
- if test "$thisdir" = "."; then
- thisdir=`pwd`
- fi
- # remove .libs from thisdir
- case "$thisdir" in
- *[\\/].libs ) thisdir=`$ECHO "$thisdir" | sed 's%[\\/][^\\/]*$%%'` ;;
- .libs ) thisdir=. ;;
- esac
- fi
-
- # Try to get the absolute directory name.
- absdir=`cd "$thisdir" && pwd`
- test -n "$absdir" && thisdir="$absdir"
-
- program='mgmt_test_fe'
- progdir="$thisdir/.libs"
-
-
- if test -f "$progdir/$program"; then
- # Add our own library path to LD_LIBRARY_PATH
- LD_LIBRARY_PATH="/root/upstream_p1/lib/.libs:$LD_LIBRARY_PATH"
-
- # Some systems cannot cope with colon-terminated LD_LIBRARY_PATH
- # The second colon is a workaround for a bug in BeOS R4 sed
- LD_LIBRARY_PATH=`$ECHO "$LD_LIBRARY_PATH" | sed 's/::*$//'`
-
- export LD_LIBRARY_PATH
-
- if test "$libtool_execute_magic" != "%%%MAGIC variable%%%"; then
- # Run the actual program with our arguments.
- func_exec_program ${1+"$@"}
- fi
- else
- # The program doesn't exist.
- $ECHO "$0: error: '$progdir/$program' does not exist" 1>&2
- $ECHO "This script is just a wrapper for $program." 1>&2
- $ECHO "See the libtool documentation for more information." 1>&2
- exit 1
- fi
-fi
#include "command.h"
#include "json.h"
#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
#include "mgmtd/mgmt_ds.h"
-#ifndef VTYSH_EXTRACT_PL
#include "mgmtd/mgmt_vty_clippy.c"
-#endif
+
+DEFPY(show_mgmt_fe_adapter, show_mgmt_fe_adapter_cmd,
+ "show mgmt frontend-adapter all [detail$detail]",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_FE_ADAPTER_STR
+ "Display all Frontend Adapters\n"
+ "Display more details\n")
+{
+ mgmt_fe_adapter_status_write(vty, !!detail);
+
+ return CMD_SUCCESS;
+}
DEFPY(show_mgmt_ds,
show_mgmt_ds_cmd,
return CMD_SUCCESS;
}
+DEFPY(mgmt_commit,
+ mgmt_commit_cmd,
+ "mgmt commit <check|apply|abort>$type",
+ MGMTD_STR
+ "Commit action\n"
+ "Validate the set of config commands\n"
+ "Validate and apply the set of config commands\n"
+ "Abort and drop the set of config commands recently added\n")
+{
+ bool validate_only = type[0] == 'c';
+ bool abort = type[1] == 'b';
+
+ if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
+ return CMD_WARNING_CONFIG_FAILED;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
+ "mgmt set-config WORD$path VALUE",
+ MGMTD_STR
+ "Set configuration data\n"
+ "XPath expression specifying the YANG data path\n"
+ "Value of the data to set\n")
+{
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = value;
+ vty->cfg_changes[0].operation = NB_OP_CREATE;
+ vty->num_cfg_changes = 1;
+
+ vty->no_implicit_commit = true;
+ vty_mgmt_send_config_data(vty);
+ vty->no_implicit_commit = false;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
+ "mgmt delete-config WORD$path",
+ MGMTD_STR
+ "Delete configuration data\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = NULL;
+ vty->cfg_changes[0].operation = NB_OP_DESTROY;
+ vty->num_cfg_changes = 1;
+
+ vty->no_implicit_commit = true;
+ vty_mgmt_send_config_data(vty);
+ vty->no_implicit_commit = false;
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
+ "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get configuration data from a specific configuration datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_config(vty, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
+ "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get data from a specific datastore\n"
+ "Candidate datastore\n"
+ "Operational datastore (default)\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_data(vty, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
DEFPY(show_mgmt_dump_data,
show_mgmt_dump_data_cmd,
- "show mgmt datastore-contents WORD$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
+ "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
SHOW_STR
MGMTD_STR
"Get Datastore contents from a specific datastore\n"
- "<candidate | running | operational>\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
"XPath expression specifying the YANG data path\n"
"XPath string\n"
"Dump the contents to a file\n"
"Full path of the file\n"
- "json|xml\n")
+ "json output\n"
+ "xml output\n")
{
- enum mgmt_datastore_id datastore = MGMTD_DS_CANDIDATE;
struct mgmt_ds_ctx *ds_ctx;
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
FILE *f = NULL;
- datastore = mgmt_ds_name2id(dsname);
-
- if (datastore == MGMTD_DS_NONE) {
- vty_out(vty,
- "DS Name %s does not matches any existing datastore\n",
- dsname);
- return CMD_SUCCESS;
- }
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
if (!ds_ctx) {
DEFPY(mgmt_load_config,
mgmt_load_config_cmd,
- "mgmt load-config file WORD$filepath <merge|replace>",
+ "mgmt load-config WORD$filepath <merge|replace>$type",
MGMTD_STR
"Load configuration onto Candidate Datastore\n"
- "Read the configuration from a file\n"
"Full path of the file\n"
"Merge configuration with contents of Candidate Datastore\n"
"Replace the existing contents of Candidate datastore\n")
{
- bool merge = false;
- int idx_merge = 4;
- int ret;
+ bool merge = type[0] == 'm' ? true : false;
struct mgmt_ds_ctx *ds_ctx;
+ int ret;
if (access(filepath, F_OK) == -1) {
vty_out(vty, "ERROR: File %s : %s\n", filepath,
return CMD_ERR_NO_MATCH;
}
- if (strncmp(argv[idx_merge]->arg, "merge", sizeof("merge")) == 0)
- merge = true;
- else if (strncmp(argv[idx_merge]->arg, "replace", sizeof("replace"))
- == 0)
- merge = false;
- else {
- vty_out(vty, "Chosen option: %s not valid\n",
- argv[idx_merge]->arg);
- return CMD_SUCCESS;
- }
-
ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
if (ret != 0)
vty_out(vty, "Error with parsing the file with error code %d\n",
DEFPY(mgmt_save_config,
mgmt_save_config_cmd,
- "mgmt save-config datastore WORD$dsname file WORD$filepath",
+ "mgmt save-config <candidate|running>$dsname WORD$filepath",
MGMTD_STR
"Save configuration from datastore\n"
- "Datastore keyword\n"
- "<candidate|running>\n"
- "Write the configuration to a file\n"
+ "Candidate datastore\n"
+ "Running datastore\n"
"Full path of the file\n")
{
+ Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
struct mgmt_ds_ctx *ds_ctx;
- enum mgmt_datastore_id datastore;
FILE *f;
- datastore = mgmt_ds_name2id(dsname);
-
- if (datastore == MGMTD_DS_NONE) {
- vty_out(vty,
- "DS Name %s does not matches any existing datastore\n",
- dsname);
- return CMD_SUCCESS;
- }
-
- if (datastore != MGMTD_DS_CANDIDATE && datastore != MGMTD_DS_RUNNING) {
- vty_out(vty, "DS Name %s is not a configuration datastore\n",
- dsname);
- return CMD_SUCCESS;
- }
-
ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
if (!ds_ctx) {
vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
{
install_node(&debug_node);
+ install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
install_element(VIEW_NODE, &show_mgmt_ds_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_commit_cmd);
+ install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
install_element(CONFIG_NODE, &mgmt_load_config_cmd);
install_element(CONFIG_NODE, &mgmt_save_config_cmd);
mgmtd_libmgmtd_a_SOURCES = \
mgmtd/mgmt.c \
mgmtd/mgmt_ds.c \
+ mgmtd/mgmt_fe_server.c \
+ mgmtd/mgmt_fe_adapter.c \
mgmtd/mgmt_memory.c \
mgmtd/mgmt_vty.c \
# end
noinst_HEADERS += \
mgmtd/mgmt.h \
mgmtd/mgmt_ds.h \
+ mgmtd/mgmt_fe_server.h \
+ mgmtd/mgmt_fe_adapter.h \
mgmtd/mgmt_memory.h \
# end