- FOR_ALL_INTERFACES
- FOR_ALL_INTERFACES_ADDRESSES
- JSON_FOREACH
+ - FOREACH_BCKND_TRXN_BATCH_IN_LIST
+ - FOREACH_BCKND_APPLY_BATCH_IN_LIST
+ - FOREACH_BCKND_TRXN_IN_LIST
+ - FOREACH_SESSN_IN_LIST
+ - FOREACH_MGMTD_BCKND_CLIENT_ID
# libyang
- LY_FOR_KEYS
- LY_LIST_FOR
--- /dev/null
+/*
+ * MGMTD Backend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmt_bcknd_client.h"
+#include "mgmt_pb.h"
+#include "network.h"
+#include "stream.h"
+#include "sockopt.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+/*
+ * With REDIRECT_DEBUG_TO_STDERR defined, debug and error output bypass
+ * zlog and go straight to stderr (handy when running outside the daemon).
+ */
+#define MGMTD_BCKND_CLNT_DBG(fmt, ...) \
+	fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BCKND_CLNT_ERR(fmt, ...) \
+	fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+/*
+ * Debug output is gated on the runtime mgmt_debug_bcknd_clnt flag.
+ * NOTE(review): the DBG macro emits via zlog_err(), i.e. debug text at
+ * error severity — zlog_debug() looks like the intended call; confirm.
+ */
+#define MGMTD_BCKND_CLNT_DBG(fmt, ...) \
+	do { \
+		if (mgmt_debug_bcknd_clnt) \
+			zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+	} while (0)
+#define MGMTD_BCKND_CLNT_ERR(fmt, ...) \
+	zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BCKND_BATCH,
+		    "MGMTD backend transaction batch data");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BCKND_TRXN, "MGMTD backend transaction data");
+
+/* Kind of request carried inside a batch of a backend transaction. */
+enum mgmt_bcknd_trxn_event {
+	MGMTD_BCKND_TRXN_PROC_SETCFG = 1,
+	MGMTD_BCKND_TRXN_PROC_GETCFG,
+	MGMTD_BCKND_TRXN_PROC_GETDATA
+};
+
+/* A bounded set of config changes received in one CFGDATA_CREATE_REQ. */
+struct mgmt_bcknd_set_cfg_req {
+	struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+	uint16_t num_cfg_changes;
+};
+
+/* A bounded set of xpaths for a data read request (not yet handled). */
+struct mgmt_bcknd_get_data_req {
+	char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+	uint16_t num_xpaths;
+};
+
+/*
+ * One request attached to a batch; 'event' selects which union member
+ * is valid.
+ */
+struct mgmt_bcknd_trxn_req {
+	enum mgmt_bcknd_trxn_event event;
+	union {
+		struct mgmt_bcknd_set_cfg_req set_cfg;
+		struct mgmt_bcknd_get_data_req get_data;
+	} req;
+};
+
+PREDECL_LIST(mgmt_bcknd_batch_list);
+/*
+ * One batch of config changes within a backend transaction. Batches live
+ * on the transaction's cfg_batches list and are moved to apply_cfgs once
+ * validated/prepared.
+ */
+struct mgmt_bcknd_batch_ctxt {
+	/* Batch-Id as assigned by MGMTD */
+	uint64_t batch_id;
+
+	struct mgmt_bcknd_trxn_req trxn_req;
+
+	uint32_t flags;
+
+	struct mgmt_bcknd_batch_list_item list_linkage;
+};
+/* Flags stored in mgmt_bcknd_batch_ctxt.flags. */
+#define MGMTD_BCKND_BATCH_FLAGS_CFG_PREPARED (1U << 0)
+/*
+ * NOTE(review): named "TRXN" but set on batch->flags in the cfg-apply
+ * path — presumably a batch flag with a misleading name; confirm and
+ * consider renaming to MGMTD_BCKND_BATCH_FLAGS_CFG_APPLIED.
+ */
+#define MGMTD_BCKND_TRXN_FLAGS_CFG_APPLIED (1U << 1)
+DECLARE_LIST(mgmt_bcknd_batch_list, struct mgmt_bcknd_batch_ctxt, list_linkage);
+
+struct mgmt_bcknd_client_ctxt;
+
+PREDECL_LIST(mgmt_bcknd_trxn_list);
+/*
+ * A configuration transaction as seen by the backend client. Owns the
+ * batches received from MGMTD and the northbound transaction used to
+ * prepare/apply them.
+ */
+struct mgmt_bcknd_trxn_ctxt {
+	/* Trxn-Id as assigned by MGMTD */
+	uint64_t trxn_id;
+	uint32_t flags;
+
+	/* Opaque per-transaction data owned by the client application. */
+	struct mgmt_bcknd_client_trxn_ctxt client_data;
+	struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+	/* List of batches belonging to this transaction */
+	struct mgmt_bcknd_batch_list_head cfg_batches;
+	struct mgmt_bcknd_batch_list_head apply_cfgs;
+
+	struct mgmt_bcknd_trxn_list_item list_linkage;
+
+	/* Northbound transaction handle; NULL until commit-prepare runs. */
+	struct nb_transaction *nb_trxn;
+	uint32_t nb_trxn_id;
+};
+/*
+ * Flag stored in mgmt_bcknd_trxn_ctxt.flags. Shares the bit value
+ * (1U << 1) with MGMTD_BCKND_TRXN_FLAGS_CFG_APPLIED, but the two are
+ * applied to different flag fields (trxn vs. batch).
+ */
+#define MGMTD_BCKND_TRXN_FLAGS_CFGPREP_FAILED (1U << 1)
+
+DECLARE_LIST(mgmt_bcknd_trxn_list, struct mgmt_bcknd_trxn_ctxt, list_linkage);
+
+/* Deletion-safe walk over a transaction's to-be-prepared batches. */
+#define FOREACH_BCKND_TRXN_BATCH_IN_LIST(trxn, batch) \
+	frr_each_safe(mgmt_bcknd_batch_list, &(trxn)->cfg_batches, (batch))
+
+/* Deletion-safe walk over a transaction's prepared (apply) batches. */
+#define FOREACH_BCKND_APPLY_BATCH_IN_LIST(trxn, batch) \
+	frr_each_safe(mgmt_bcknd_batch_list, &(trxn)->apply_cfgs, (batch))
+
+/* Singleton connection/state context for this backend client process. */
+struct mgmt_bcknd_client_ctxt {
+	int conn_fd; /* 0 doubles as "not connected" in this file */
+	struct thread_master *tm;
+	struct thread *conn_retry_tmr;
+	struct thread *conn_read_ev;
+	struct thread *conn_write_ev;
+	struct thread *conn_writes_on;
+	struct thread *msg_proc_ev;
+	uint32_t flags;
+	uint32_t num_msg_tx;
+	uint32_t num_msg_rx;
+
+	/* Inbound/outbound stream buffering for the MGMTD connection. */
+	struct stream_fifo *ibuf_fifo;
+	struct stream *ibuf_work;
+	struct stream_fifo *obuf_fifo;
+	struct stream *obuf_work;
+	uint8_t msg_buf[MGMTD_BCKND_MSG_MAX_LEN];
+
+	struct nb_config *candidate_config;
+	struct nb_config *running_config;
+
+	/* Counters and running averages (uSec) for instrumentation. */
+	unsigned long num_batch_find;
+	unsigned long avg_batch_find_tm;
+	unsigned long num_edit_nb_cfg;
+	unsigned long avg_edit_nb_cfg_tm;
+	unsigned long num_prep_nb_cfg;
+	unsigned long avg_prep_nb_cfg_tm;
+	unsigned long num_apply_nb_cfg;
+	unsigned long avg_apply_nb_cfg_tm;
+
+	struct mgmt_bcknd_trxn_list_head trxn_head;
+	struct mgmt_bcknd_client_params client_params;
+};
+
+#define MGMTD_BCKND_CLNT_FLAGS_WRITES_OFF (1U << 0)
+
+/* Deletion-safe walk over all transactions known to the client. */
+#define FOREACH_BCKND_TRXN_IN_LIST(clntctxt, trxn) \
+	frr_each_safe(mgmt_bcknd_trxn_list, &(clntctxt)->trxn_head, (trxn))
+
+static bool mgmt_debug_bcknd_clnt;
+
+static struct mgmt_bcknd_client_ctxt mgmt_bcknd_clntctxt = {0};
+
+/*
+ * Human-readable names of the backend clients, indexed by
+ * enum mgmt_bcknd_client_id. Sized by the number of client IDs; it was
+ * previously (and wrongly) dimensioned by MGMTD_CLIENT_NAME_MAX_LEN,
+ * which is a name-length limit, not a client count.
+ */
+const char *mgmt_bcknd_client_names[MGMTD_BCKND_CLIENT_ID_MAX + 1] = {
+	MGMTD_BCKND_CLIENT_STATICD, /* MGMTD_BCKND_CLIENT_ID_STATICD */
+	MGMTD_BCKND_CLIENT_BGPD,    /* MGMTD_BCKND_CLIENT_ID_BGPD */
+	"Unknown/Invalid",	    /* MGMTD_BCKND_CLIENT_ID_MAX */
+};
+
+/* Forward declarations */
+static void
+mgmt_bcknd_client_register_event(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ enum mgmt_bcknd_event event);
+static void
+mgmt_bcknd_client_schedule_conn_retry(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ unsigned long intvl_secs);
+static int mgmt_bcknd_client_send_msg(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ Mgmtd__BckndMessage *bcknd_msg);
+
+/*
+ * Tear down the connection to the MGMTD server: notify the client
+ * application, close the socket, and (optionally) arm the retry timer
+ * so the connection is re-established later.
+ */
+static void
+mgmt_bcknd_server_disconnect(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			     bool reconnect)
+{
+	/* Notify client through registered callback (if any) */
+	if (clnt_ctxt->client_params.client_connect_notify)
+		(void)(*clnt_ctxt->client_params
+				.client_connect_notify)(
+			(uintptr_t)clnt_ctxt,
+			clnt_ctxt->client_params.user_data, false);
+
+	/* conn_fd == 0 is used as "not connected" throughout this file. */
+	if (clnt_ctxt->conn_fd) {
+		close(clnt_ctxt->conn_fd);
+		clnt_ctxt->conn_fd = 0;
+	}
+
+	if (reconnect)
+		mgmt_bcknd_client_schedule_conn_retry(
+			clnt_ctxt,
+			clnt_ctxt->client_params.conn_retry_intvl_sec);
+}
+
+/* Look up a batch on the transaction's cfg_batches list by its id. */
+static struct mgmt_bcknd_batch_ctxt *
+mgmt_bcknd_find_batch_by_id(struct mgmt_bcknd_trxn_ctxt *trxn,
+			    uint64_t batch_id)
+{
+	struct mgmt_bcknd_batch_ctxt *itr = NULL;
+
+	FOREACH_BCKND_TRXN_BATCH_IN_LIST (trxn, itr) {
+		if (itr->batch_id != batch_id)
+			continue;
+		return itr;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find-or-create a batch context for the given id; a newly created batch
+ * is appended to the transaction's cfg_batches list.
+ */
+static struct mgmt_bcknd_batch_ctxt *
+mgmt_bcknd_batch_create(struct mgmt_bcknd_trxn_ctxt *trxn, uint64_t batch_id)
+{
+	struct mgmt_bcknd_batch_ctxt *batch;
+
+	/* Idempotent: reuse an existing batch with this id, if any. */
+	batch = mgmt_bcknd_find_batch_by_id(trxn, batch_id);
+	if (batch)
+		return batch;
+
+	batch = XCALLOC(MTYPE_MGMTD_BCKND_BATCH,
+			sizeof(struct mgmt_bcknd_batch_ctxt));
+	assert(batch);
+
+	batch->batch_id = batch_id;
+	mgmt_bcknd_batch_list_add_tail(&trxn->cfg_batches, batch);
+
+	MGMTD_BCKND_CLNT_DBG("Added new batch 0x%llx to transaction",
+			     (unsigned long long)batch_id);
+
+	return batch;
+}
+
+/*
+ * Unlink and free a batch; *batch is set to NULL on return. For SETCFG
+ * batches the strdup()ed change values are released as well.
+ */
+static void mgmt_bcknd_batch_delete(struct mgmt_bcknd_trxn_ctxt *trxn,
+				    struct mgmt_bcknd_batch_ctxt **batch)
+{
+	uint16_t indx;
+
+	/* Guard against both a NULL pointer and a NULL batch. */
+	if (!batch || !*batch)
+		return;
+
+	/*
+	 * NOTE(review): this always removes from cfg_batches even when the
+	 * caller iterated apply_cfgs; confirm the typesafe-list del is a
+	 * no-op (rather than corruption) for an item on the other list.
+	 */
+	mgmt_bcknd_batch_list_del(&trxn->cfg_batches, *batch);
+	if ((*batch)->trxn_req.event == MGMTD_BCKND_TRXN_PROC_SETCFG) {
+		for (indx = 0; indx < MGMTD_MAX_CFG_CHANGES_IN_BATCH; indx++) {
+			if ((*batch)->trxn_req.req.set_cfg.cfg_changes[indx]
+				    .value) {
+				free((char *)(*batch)
+					     ->trxn_req.req.set_cfg
+					     .cfg_changes[indx]
+					     .value);
+			}
+		}
+	}
+
+	XFREE(MTYPE_MGMTD_BCKND_BATCH, *batch);
+	*batch = NULL;
+}
+
+/* Release every batch still attached to the transaction (both lists). */
+static void mgmt_bcknd_cleanup_all_batches(struct mgmt_bcknd_trxn_ctxt *trxn)
+{
+	struct mgmt_bcknd_batch_ctxt *itr = NULL;
+
+	/* Batches still waiting to be prepared... */
+	FOREACH_BCKND_TRXN_BATCH_IN_LIST (trxn, itr) {
+		mgmt_bcknd_batch_delete(trxn, &itr);
+	}
+
+	/* ...and batches already moved to the apply list. */
+	FOREACH_BCKND_APPLY_BATCH_IN_LIST (trxn, itr) {
+		mgmt_bcknd_batch_delete(trxn, &itr);
+	}
+}
+
+/* Look up a transaction context by the id MGMTD assigned to it. */
+static struct mgmt_bcknd_trxn_ctxt *
+mgmt_bcknd_find_trxn_by_id(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			   uint64_t trxn_id)
+{
+	struct mgmt_bcknd_trxn_ctxt *itr = NULL;
+
+	FOREACH_BCKND_TRXN_IN_LIST (clnt_ctxt, itr) {
+		if (itr->trxn_id != trxn_id)
+			continue;
+		return itr;
+	}
+
+	return NULL;
+}
+
+/*
+ * Find-or-create a transaction context for the given id; a new context
+ * gets empty batch lists and is appended to the client's trxn_head list.
+ */
+static struct mgmt_bcknd_trxn_ctxt *
+mgmt_bcknd_trxn_create(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+		       uint64_t trxn_id)
+{
+	struct mgmt_bcknd_trxn_ctxt *trxn;
+
+	/* Idempotent: hand back the existing context when present. */
+	trxn = mgmt_bcknd_find_trxn_by_id(clnt_ctxt, trxn_id);
+	if (trxn)
+		return trxn;
+
+	trxn = XCALLOC(MTYPE_MGMTD_BCKND_TRXN,
+		       sizeof(struct mgmt_bcknd_trxn_ctxt));
+	assert(trxn);
+
+	trxn->trxn_id = trxn_id;
+	trxn->clnt_ctxt = clnt_ctxt;
+	mgmt_bcknd_batch_list_init(&trxn->cfg_batches);
+	mgmt_bcknd_batch_list_init(&trxn->apply_cfgs);
+	mgmt_bcknd_trxn_list_add_tail(&clnt_ctxt->trxn_head, trxn);
+
+	MGMTD_BCKND_CLNT_DBG("Added new transaction 0x%llx",
+			     (unsigned long long)trxn_id);
+
+	return trxn;
+}
+
+/*
+ * Unlink and free a transaction; *trxn is set to NULL on return. Notifies
+ * the client application, releases all batches, and aborts any pending
+ * northbound transaction.
+ */
+static void mgmt_bcknd_trxn_delete(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				   struct mgmt_bcknd_trxn_ctxt **trxn)
+{
+	char err_msg[] = "MGMT Transaction Delete";
+
+	/* Guard against both a NULL pointer and a NULL transaction. */
+	if (!trxn || !*trxn)
+		return;
+
+	/*
+	 * Remove the transaction from the list of transactions
+	 * so that future lookups with the same transaction id
+	 * does not return this one.
+	 */
+	mgmt_bcknd_trxn_list_del(&clnt_ctxt->trxn_head, *trxn);
+
+	/*
+	 * Time to delete the transaction which should also
+	 * take care of cleaning up all batches created via
+	 * CFGDATA_CREATE_REQs. But first notify the client
+	 * about the transaction delete.
+	 */
+	if (clnt_ctxt->client_params.trxn_notify)
+		(void)(*clnt_ctxt->client_params
+				.trxn_notify)(
+			(uintptr_t)clnt_ctxt,
+			clnt_ctxt->client_params.user_data,
+			&(*trxn)->client_data, false);
+
+	mgmt_bcknd_cleanup_all_batches(*trxn);
+	if ((*trxn)->nb_trxn)
+		nb_candidate_commit_abort((*trxn)->nb_trxn, err_msg,
+					  sizeof(err_msg));
+	XFREE(MTYPE_MGMTD_BCKND_TRXN, *trxn);
+
+	*trxn = NULL;
+}
+
+/* Tear down every transaction known to this client context. */
+static void
+mgmt_bcknd_cleanup_all_trxns(struct mgmt_bcknd_client_ctxt *clnt_ctxt)
+{
+	struct mgmt_bcknd_trxn_ctxt *itr = NULL;
+
+	FOREACH_BCKND_TRXN_IN_LIST (clnt_ctxt, itr) {
+		mgmt_bcknd_trxn_delete(clnt_ctxt, &itr);
+	}
+}
+
+/* Acknowledge a TRXN_REQ (create or delete) back to MGMTD. */
+static int mgmt_bcknd_send_trxn_reply(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				      uint64_t trxn_id, bool create,
+				      bool success)
+{
+	Mgmtd__BckndTrxnReply trxn_reply;
+	Mgmtd__BckndMessage bcknd_msg;
+
+	/* Build the TRXN_REPLY payload. */
+	mgmtd__bcknd_trxn_reply__init(&trxn_reply);
+	trxn_reply.trxn_id = trxn_id;
+	trxn_reply.create = create;
+	trxn_reply.success = success;
+
+	/* Wrap it into the envelope message and send. */
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_TRXN_REPLY;
+	bcknd_msg.trxn_reply = &trxn_reply;
+
+	MGMTD_BCKND_CLNT_DBG(
+		"Sending TRXN_REPLY message to MGMTD for trxn 0x%llx",
+		(unsigned long long)trxn_id);
+
+	return mgmt_bcknd_client_send_msg(clnt_ctxt, &bcknd_msg);
+}
+
+/*
+ * Handle a TRXN_REQ from MGMTD: create or delete the local transaction
+ * context and acknowledge it with a TRXN_REPLY. Returns 0, or -1 when a
+ * create is requested for an id that already exists.
+ */
+static int mgmt_bcknd_process_trxn_req(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				       uint64_t trxn_id, bool create)
+{
+	struct mgmt_bcknd_trxn_ctxt *trxn;
+
+	trxn = mgmt_bcknd_find_trxn_by_id(clnt_ctxt, trxn_id);
+	if (create) {
+		if (trxn) {
+			/*
+			 * Transaction with same trxn-id already exists.
+			 * Should not happen under any circumstances.
+			 */
+			MGMTD_BCKND_CLNT_ERR(
+				"Transaction 0x%llx already exists!!!",
+				(unsigned long long)trxn_id);
+			mgmt_bcknd_send_trxn_reply(clnt_ctxt, trxn_id, create,
+						   false);
+			/*
+			 * Bail out here. Previously control fell through,
+			 * re-used the existing context and then sent a
+			 * second (success) reply for the same request.
+			 */
+			return -1;
+		}
+
+		MGMTD_BCKND_CLNT_DBG("Created new transaction 0x%llx",
+				     (unsigned long long)trxn_id);
+		trxn = mgmt_bcknd_trxn_create(clnt_ctxt, trxn_id);
+
+		/* Let the client application set up its per-trxn data. */
+		if (clnt_ctxt->client_params.trxn_notify)
+			(void)(*clnt_ctxt->client_params
+					.trxn_notify)(
+				(uintptr_t)clnt_ctxt,
+				clnt_ctxt->client_params.user_data,
+				&trxn->client_data, true);
+	} else {
+		if (!trxn) {
+			/*
+			 * Transaction with this trxn-id does not exist.
+			 * Return success anyway.
+			 */
+			MGMTD_BCKND_CLNT_DBG(
+				"Transaction to delete 0x%llx does NOT exists!!!",
+				(unsigned long long)trxn_id);
+		} else {
+			MGMTD_BCKND_CLNT_DBG("Delete transaction 0x%llx",
+					     (unsigned long long)trxn_id);
+			mgmt_bcknd_trxn_delete(clnt_ctxt, &trxn);
+		}
+	}
+
+	mgmt_bcknd_send_trxn_reply(clnt_ctxt, trxn_id, create, true);
+
+	return 0;
+}
+
+/* Acknowledge a CFGDATA_CREATE_REQ batch back to MGMTD. */
+static int
+mgmt_bcknd_send_cfgdata_create_reply(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				     uint64_t trxn_id, uint64_t batch_id,
+				     bool success, const char *error_if_any)
+{
+	Mgmtd__BckndCfgDataCreateReply cfgdata_reply;
+	Mgmtd__BckndMessage bcknd_msg;
+
+	/* Build the CFG_DATA_REPLY payload. */
+	mgmtd__bcknd_cfg_data_create_reply__init(&cfgdata_reply);
+	cfgdata_reply.trxn_id = (uint64_t)trxn_id;
+	cfgdata_reply.batch_id = (uint64_t)batch_id;
+	cfgdata_reply.success = success;
+	if (error_if_any)
+		cfgdata_reply.error_if_any = (char *)error_if_any;
+
+	/* Wrap it into the envelope message and send. */
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_CFG_DATA_REPLY;
+	bcknd_msg.cfg_data_reply = &cfgdata_reply;
+
+	MGMTD_BCKND_CLNT_DBG(
+		"Sending CFGDATA_CREATE_REPLY message to MGMTD for trxn 0x%llx batch 0x%llx",
+		(unsigned long long)trxn_id, (unsigned long long)batch_id);
+
+	return mgmt_bcknd_client_send_msg(clnt_ctxt, &bcknd_msg);
+}
+
+/*
+ * Abort the pending northbound transaction after a failed prepare.
+ * Returns 0 on abort, -1 if there is nothing to abort (no nb transaction
+ * or prepare did not fail).
+ */
+static int mgmt_bcknd_trxn_cfg_abort(struct mgmt_bcknd_trxn_ctxt *trxn)
+{
+	char errmsg[BUFSIZ] = {0};
+
+	assert(trxn && trxn->clnt_ctxt);
+	if (!trxn->nb_trxn
+	    || !CHECK_FLAG(trxn->flags, MGMTD_BCKND_TRXN_FLAGS_CFGPREP_FAILED))
+		return -1;
+
+	MGMTD_BCKND_CLNT_ERR("Aborting configurations for Trxn 0x%llx",
+			     (unsigned long long)trxn->trxn_id);
+	nb_candidate_commit_abort(trxn->nb_trxn, errmsg, sizeof(errmsg));
+	/* Use NULL (not 0) to clear the pointer for clarity. */
+	trxn->nb_trxn = NULL;
+
+	return 0;
+}
+
+/*
+ * Fold every pending SETCFG batch into the candidate config, then run
+ * northbound commit-prepare over all of them in one go and send one
+ * CFGDATA_CREATE_REPLY per batch. Returns -1 if editing the candidate
+ * config fails; otherwise 0 (a prepare failure is reported through the
+ * per-batch replies and the CFGPREP_FAILED flag, and triggers an abort).
+ */
+static int mgmt_bcknd_trxn_cfg_prepare(struct mgmt_bcknd_trxn_ctxt *trxn)
+{
+	struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+	struct mgmt_bcknd_trxn_req *trxn_req = NULL;
+	struct nb_context nb_ctxt = {0};
+	struct timeval edit_nb_cfg_start;
+	struct timeval edit_nb_cfg_end;
+	unsigned long edit_nb_cfg_tm;
+	struct timeval prep_nb_cfg_start;
+	struct timeval prep_nb_cfg_end;
+	unsigned long prep_nb_cfg_tm;
+	struct mgmt_bcknd_batch_ctxt *batch;
+	bool error;
+	char err_buf[BUFSIZ];
+	size_t num_processed;
+	bool debug_bcknd = mgmt_debug_bcknd_clnt;
+
+	assert(trxn && trxn->clnt_ctxt);
+	clnt_ctxt = trxn->clnt_ctxt;
+
+	/* Pass 1: edit each batch's changes into the candidate config. */
+	num_processed = 0;
+	FOREACH_BCKND_TRXN_BATCH_IN_LIST (trxn, batch) {
+		trxn_req = &batch->trxn_req;
+		error = false;
+		nb_ctxt.client = NB_CLIENT_CLI;
+		nb_ctxt.user = (void *)clnt_ctxt->client_params.user_data;
+
+		if (!trxn->nb_trxn) {
+			/*
+			 * This happens when the current backend client is only
+			 * interested in consuming the config items but is not
+			 * interested in validating it.
+			 */
+			error = false;
+			if (debug_bcknd)
+				gettimeofday(&edit_nb_cfg_start, NULL);
+			nb_candidate_edit_config_changes(
+				clnt_ctxt->candidate_config,
+				trxn_req->req.set_cfg.cfg_changes,
+				(size_t)trxn_req->req.set_cfg.num_cfg_changes,
+				NULL, NULL, 0, err_buf, sizeof(err_buf),
+				&error);
+			if (error) {
+				err_buf[sizeof(err_buf) - 1] = 0;
+				MGMTD_BCKND_CLNT_ERR(
+					"Failed to update configs for Trxn %llx Batch %llx to Candidate! Err: '%s'",
+					(unsigned long long)trxn->trxn_id,
+					(unsigned long long)batch->batch_id,
+					err_buf);
+				return -1;
+			}
+			/* Maintain a running average of edit durations. */
+			if (debug_bcknd) {
+				gettimeofday(&edit_nb_cfg_end, NULL);
+				edit_nb_cfg_tm = timeval_elapsed(
+					edit_nb_cfg_end, edit_nb_cfg_start);
+				clnt_ctxt->avg_edit_nb_cfg_tm =
+					((clnt_ctxt->avg_edit_nb_cfg_tm
+					  * clnt_ctxt->num_edit_nb_cfg)
+					 + edit_nb_cfg_tm)
+					/ (clnt_ctxt->num_edit_nb_cfg + 1);
+			}
+			clnt_ctxt->num_edit_nb_cfg++;
+		}
+
+		num_processed++;
+	}
+
+	/* Nothing pending: no replies owed, nothing to prepare. */
+	if (!num_processed)
+		return 0;
+
+	/*
+	 * Now prepare all the batches we have applied in one go.
+	 */
+	nb_ctxt.client = NB_CLIENT_CLI;
+	nb_ctxt.user = (void *)clnt_ctxt->client_params.user_data;
+	if (debug_bcknd)
+		gettimeofday(&prep_nb_cfg_start, NULL);
+	if (nb_candidate_commit_prepare(
+		    &nb_ctxt, clnt_ctxt->candidate_config, "MGMTD Backend Trxn",
+		    &trxn->nb_trxn, true, true, err_buf, sizeof(err_buf) - 1)
+	    != NB_OK) {
+		err_buf[sizeof(err_buf) - 1] = 0;
+		MGMTD_BCKND_CLNT_ERR(
+			"Failed to prepare configs for Trxn %llx, %u Batches! Err: '%s'",
+			(unsigned long long)trxn->trxn_id,
+			(uint32_t)num_processed, err_buf);
+		error = true;
+		SET_FLAG(trxn->flags, MGMTD_BCKND_TRXN_FLAGS_CFGPREP_FAILED);
+	}
+
+	/* NOTE(review): this "successfully" debug line is emitted even when
+	 * the prepare above failed — looks unintentional; confirm. */
+	MGMTD_BCKND_CLNT_DBG(
+		"Prepared configs for Trxn %llx, %u Batches! successfully!",
+		(unsigned long long)trxn->trxn_id, (uint32_t)num_processed);
+	if (debug_bcknd) {
+		gettimeofday(&prep_nb_cfg_end, NULL);
+		prep_nb_cfg_tm =
+			timeval_elapsed(prep_nb_cfg_end, prep_nb_cfg_start);
+		clnt_ctxt->avg_prep_nb_cfg_tm =
+			((clnt_ctxt->avg_prep_nb_cfg_tm
+			  * clnt_ctxt->num_prep_nb_cfg)
+			 + prep_nb_cfg_tm)
+			/ (clnt_ctxt->num_prep_nb_cfg + 1);
+	}
+	clnt_ctxt->num_prep_nb_cfg++;
+
+	/* Pass 2: one reply per batch; successful batches are marked
+	 * prepared (and, with local validations, moved to the apply list). */
+	FOREACH_BCKND_TRXN_BATCH_IN_LIST (trxn, batch) {
+		mgmt_bcknd_send_cfgdata_create_reply(
+			clnt_ctxt, trxn->trxn_id, batch->batch_id,
+			error ? false : true, error ? err_buf : NULL);
+		if (!error) {
+			SET_FLAG(batch->flags,
+				 MGMTD_BCKND_BATCH_FLAGS_CFG_PREPARED);
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+			mgmt_bcknd_batch_list_del(&trxn->cfg_batches, batch);
+			mgmt_bcknd_batch_list_add_tail(&trxn->apply_cfgs,
+						       batch);
+#endif /* MGMTD_LOCAL_VALIDATIONS_ENABLED */
+		}
+	}
+
+	if (debug_bcknd)
+		MGMTD_BCKND_CLNT_DBG(
+			"Avg-nb-edit-duration %lu uSec, nb-prep-duration %lu (avg: %lu) uSec, batch size %u",
+			clnt_ctxt->avg_edit_nb_cfg_tm, prep_nb_cfg_tm,
+			clnt_ctxt->avg_prep_nb_cfg_tm, (uint32_t)num_processed);
+
+	if (error)
+		mgmt_bcknd_trxn_cfg_abort(trxn);
+
+	return 0;
+}
+
+/*
+ * Record the changes of one CFGDATA_CREATE_REQ into a (possibly new)
+ * batch as a SETCFG request, converting each Mgmtd__YangCfgDataReq into
+ * an nb_cfg_change. Change values are strdup()ed here and freed in
+ * mgmt_bcknd_batch_delete(). Returns 0 on success, -1 on error.
+ */
+static int
+mgmt_bcknd_update_setcfg_in_batch(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				  struct mgmt_bcknd_trxn_ctxt *trxn,
+				  uint64_t batch_id,
+				  Mgmtd__YangCfgDataReq * cfg_req[],
+				  int num_req)
+{
+	struct mgmt_bcknd_batch_ctxt *batch = NULL;
+	struct mgmt_bcknd_trxn_req *trxn_req = NULL;
+	int index;
+	struct nb_cfg_change *cfg_chg;
+
+	/*
+	 * cfg_changes[] is a fixed-size array and num_req comes from the
+	 * wire — bounds-check it before writing, or an oversized request
+	 * overflows the batch context.
+	 */
+	if (num_req < 0 || num_req > (int)MGMTD_MAX_CFG_CHANGES_IN_BATCH) {
+		MGMTD_BCKND_CLNT_ERR(
+			"Invalid number of config changes %d (max %d)",
+			num_req, (int)MGMTD_MAX_CFG_CHANGES_IN_BATCH);
+		return -1;
+	}
+
+	batch = mgmt_bcknd_batch_create(trxn, batch_id);
+	if (!batch) {
+		MGMTD_BCKND_CLNT_ERR("Batch create failed!");
+		return -1;
+	}
+
+	trxn_req = &batch->trxn_req;
+	trxn_req->event = MGMTD_BCKND_TRXN_PROC_SETCFG;
+	MGMTD_BCKND_CLNT_DBG(
+		"Created Set-Config request for batch 0x%llx, trxn id 0x%llx, cfg-items:%d",
+		(unsigned long long)batch_id, (unsigned long long)trxn->trxn_id,
+		num_req);
+
+	trxn_req->req.set_cfg.num_cfg_changes = num_req;
+	for (index = 0; index < num_req; index++) {
+		cfg_chg = &trxn_req->req.set_cfg.cfg_changes[index];
+
+		/* Map the protobuf request type onto a northbound op. */
+		if (cfg_req[index]->req_type
+		    == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
+			cfg_chg->operation = NB_OP_DESTROY;
+		else
+			cfg_chg->operation = NB_OP_CREATE;
+
+		strlcpy(cfg_chg->xpath, cfg_req[index]->data->xpath,
+			sizeof(cfg_chg->xpath));
+		cfg_chg->value = (cfg_req[index]->data->value
+				  && cfg_req[index]
+					     ->data->value
+					     ->encoded_str_val
+				  ? strdup(cfg_req[index]
+						   ->data->value
+						   ->encoded_str_val)
+				  : NULL);
+		/* Container nodes carry a placeholder value; drop it. */
+		if (cfg_chg->value
+		    && !strncmp(cfg_chg->value, MGMTD_BCKND_CONTAINER_NODE_VAL,
+				strlen(MGMTD_BCKND_CONTAINER_NODE_VAL))) {
+			free((char *)cfg_chg->value);
+			cfg_chg->value = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a CFG_DATA_REQ: stash the changes into the right batch of the
+ * right transaction, and when the last chunk arrives kick off prepare.
+ */
+static int
+mgmt_bcknd_process_cfgdata_req(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			       uint64_t trxn_id, uint64_t batch_id,
+			       Mgmtd__YangCfgDataReq * cfg_req[], int num_req,
+			       bool end_of_data)
+{
+	struct mgmt_bcknd_trxn_ctxt *trxn;
+
+	trxn = mgmt_bcknd_find_trxn_by_id(clnt_ctxt, trxn_id);
+	if (!trxn) {
+		/* Unknown transaction: report failure for this batch. */
+		MGMTD_BCKND_CLNT_ERR(
+			"Invalid trxn-id 0x%llx provided from MGMTD server",
+			(unsigned long long)trxn_id);
+		mgmt_bcknd_send_cfgdata_create_reply(
+			clnt_ctxt, trxn_id, batch_id, false,
+			"Transaction context not created yet");
+		return 0;
+	}
+
+	mgmt_bcknd_update_setcfg_in_batch(clnt_ctxt, trxn, batch_id, cfg_req,
+					  num_req);
+
+	if (end_of_data) {
+		MGMTD_BCKND_CLNT_DBG("Triggering CFG_PREPARE_REQ processing");
+		mgmt_bcknd_trxn_cfg_prepare(trxn);
+	}
+
+	return 0;
+}
+
+/*
+ * Send a CFG_VALIDATE_REPLY for a set of batches. Guards the debug log
+ * against an empty/NULL batch_ids array (mirroring
+ * mgmt_bcknd_send_apply_reply); previously it read batch_ids[0] and
+ * batch_ids[num_batch_ids - 1] unconditionally.
+ */
+static int
+mgmt_bcknd_send_validate_reply(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			       uint64_t trxn_id, uint64_t batch_ids[],
+			       size_t num_batch_ids, bool success,
+			       const char *error_if_any)
+{
+	Mgmtd__BckndMessage bcknd_msg;
+	Mgmtd__BckndCfgDataValidateReply validate_reply;
+
+	mgmtd__bcknd_cfg_data_validate_reply__init(&validate_reply);
+	validate_reply.success = success;
+	validate_reply.trxn_id = trxn_id;
+	validate_reply.batch_ids = (uint64_t *)batch_ids;
+	validate_reply.n_batch_ids = num_batch_ids;
+
+	if (error_if_any)
+		validate_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case =
+		MGMTD__BCKND_MESSAGE__MESSAGE_CFG_VALIDATE_REPLY;
+	bcknd_msg.cfg_validate_reply = &validate_reply;
+
+	MGMTD_BCKND_CLNT_DBG(
+		"Sending CFG_VALIDATE_REPLY message to MGMTD for trxn 0x%llx %d batches [0x%llx - 0x%llx]",
+		(unsigned long long)trxn_id, (int)num_batch_ids,
+		batch_ids && num_batch_ids ?
+			(unsigned long long)batch_ids[0] : 0,
+		batch_ids && num_batch_ids ?
+			(unsigned long long)batch_ids[num_batch_ids - 1] : 0);
+
+	return mgmt_bcknd_client_send_msg(clnt_ctxt, &bcknd_msg);
+}
+
+/*
+ * Handle a CFG_VALIDATE_REQ: edit every referenced batch's changes into
+ * the candidate config, move the batches to the apply list, then run a
+ * validate-only commit-prepare. Replies with CFG_VALIDATE_REPLY on both
+ * success and failure. Returns 0 on success, -1 on any failure.
+ */
+static int
+mgmt_bcknd_process_cfg_validate(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				uint64_t trxn_id, uint64_t batch_ids[],
+				size_t num_batch_ids)
+{
+	int ret = 0;
+	size_t indx;
+	struct mgmt_bcknd_trxn_ctxt *trxn;
+	struct mgmt_bcknd_trxn_req *trxn_req = NULL;
+	struct mgmt_bcknd_batch_ctxt *batch;
+	bool error;
+	char err_buf[1024];
+	struct nb_context nb_ctxt = {0};
+
+	trxn = mgmt_bcknd_find_trxn_by_id(clnt_ctxt, trxn_id);
+	if (!trxn) {
+		mgmt_bcknd_send_validate_reply(clnt_ctxt, trxn_id, batch_ids,
+					       num_batch_ids, false,
+					       "Transaction not created yet!");
+		return -1;
+	}
+
+	for (indx = 0; indx < num_batch_ids; indx++) {
+		batch = mgmt_bcknd_find_batch_by_id(trxn, batch_ids[indx]);
+		if (!batch) {
+			mgmt_bcknd_send_validate_reply(
+				clnt_ctxt, trxn_id, batch_ids, num_batch_ids,
+				false, "Batch context not created!");
+			return -1;
+		}
+
+		/* Only SETCFG batches carry config data to validate. */
+		if (batch->trxn_req.event != MGMTD_BCKND_TRXN_PROC_SETCFG) {
+			snprintf(err_buf, sizeof(err_buf),
+				 "Batch-id 0x%llx not a Config Data Batch!",
+				 (unsigned long long)batch_ids[indx]);
+			mgmt_bcknd_send_validate_reply(clnt_ctxt, trxn_id,
+						       batch_ids, num_batch_ids,
+						       false, err_buf);
+			return -1;
+		}
+
+		trxn_req = &batch->trxn_req;
+		error = false;
+		nb_candidate_edit_config_changes(
+			clnt_ctxt->candidate_config,
+			trxn_req->req.set_cfg.cfg_changes,
+			(size_t)trxn_req->req.set_cfg.num_cfg_changes, NULL,
+			NULL, 0, err_buf, sizeof(err_buf), &error);
+		if (error) {
+			err_buf[sizeof(err_buf) - 1] = 0;
+			MGMTD_BCKND_CLNT_ERR(
+				"Failed to apply configs for Trxn %llx Batch %llx to Candidate! Err: '%s'",
+				(unsigned long long)trxn_id,
+				(unsigned long long)batch_ids[indx], err_buf);
+			mgmt_bcknd_send_validate_reply(
+				clnt_ctxt, trxn_id, batch_ids, num_batch_ids,
+				false,
+				"Failed to update Candidate Db on backend!");
+			return -1;
+		}
+
+		/* Move the batch to APPLY list */
+		mgmt_bcknd_batch_list_del(&trxn->cfg_batches, batch);
+		mgmt_bcknd_batch_list_add_tail(&trxn->apply_cfgs,
+					       batch);
+	}
+
+	nb_ctxt.client = NB_CLIENT_CLI;
+	nb_ctxt.user = (void *)clnt_ctxt->client_params.user_data;
+	if (nb_candidate_commit_prepare(&nb_ctxt, clnt_ctxt->candidate_config,
+					"MGMTD Trxn", &trxn->nb_trxn, false,
+					false, err_buf, sizeof(err_buf) - 1)
+	    != NB_OK) {
+		err_buf[sizeof(err_buf) - 1] = 0;
+		/*
+		 * Log the batch count, not batch_ids[indx]: at this point
+		 * indx == num_batch_ids, so the old message read one element
+		 * past the end of the batch_ids array.
+		 */
+		MGMTD_BCKND_CLNT_ERR(
+			"Failed to validate configs for Trxn %llx, %d Batches! Err: '%s'",
+			(unsigned long long)trxn_id, (int)num_batch_ids,
+			err_buf);
+		mgmt_bcknd_send_validate_reply(
+			clnt_ctxt, trxn_id, batch_ids, num_batch_ids, false,
+			"Failed to validate Config on backend!");
+		return -1;
+	}
+
+	if (ret == 0) {
+		mgmt_bcknd_send_validate_reply(clnt_ctxt, trxn_id, batch_ids,
+					       num_batch_ids, true, NULL);
+	}
+
+	return ret;
+}
+
+/*
+ * Send a CFG_APPLY_REPLY for a set of applied batches. batch_ids may be
+ * NULL/empty on failure paths; the debug log guards for that.
+ */
+static int mgmt_bcknd_send_apply_reply(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+				       uint64_t trxn_id, uint64_t batch_ids[],
+				       size_t num_batch_ids, bool success,
+				       const char *error_if_any)
+{
+	Mgmtd__BckndMessage bcknd_msg;
+	Mgmtd__BckndCfgDataApplyReply apply_reply;
+
+	mgmtd__bcknd_cfg_data_apply_reply__init(&apply_reply);
+	apply_reply.success = success;
+	apply_reply.trxn_id = trxn_id;
+	apply_reply.batch_ids = (uint64_t *)batch_ids;
+	apply_reply.n_batch_ids = num_batch_ids;
+
+	if (error_if_any)
+		apply_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_CFG_APPLY_REPLY;
+	bcknd_msg.cfg_apply_reply = &apply_reply;
+
+	MGMTD_BCKND_CLNT_DBG(
+		"Sending CFG_APPLY_REPLY message to MGMTD for trxn 0x%llx, %d batches [0x%llx - 0x%llx]",
+		(unsigned long long)trxn_id, (int)num_batch_ids,
+		success && num_batch_ids ?
+			(unsigned long long)batch_ids[0] : 0,
+		success && num_batch_ids ?
+			(unsigned long long)batch_ids[num_batch_ids - 1] : 0);
+
+	return mgmt_bcknd_client_send_msg(clnt_ctxt, &bcknd_msg);
+}
+
+/*
+ * Commit-apply the prepared northbound transaction, then stream
+ * CFG_APPLY_REPLYs back to MGMTD in chunks of
+ * MGMTD_BCKND_MAX_BATCH_IDS_IN_REQ batch ids. Applied batches are moved
+ * back to the cfg_batches list and freed later at TRXN_DELETE time.
+ */
+static int mgmt_bcknd_trxn_proc_cfgapply(struct mgmt_bcknd_trxn_ctxt *trxn)
+{
+	struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+	struct timeval apply_nb_cfg_start;
+	struct timeval apply_nb_cfg_end;
+	unsigned long apply_nb_cfg_tm;
+	struct mgmt_bcknd_batch_ctxt *batch;
+	char err_buf[BUFSIZ];
+	size_t num_processed;
+	/* static scratch buffer — assumes single-threaded use; confirm. */
+	static uint64_t batch_ids[MGMTD_BCKND_MAX_BATCH_IDS_IN_REQ];
+	bool debug_bcknd = mgmt_debug_bcknd_clnt;
+
+	assert(trxn && trxn->clnt_ctxt);
+	clnt_ctxt = trxn->clnt_ctxt;
+
+	assert(trxn->nb_trxn);
+	num_processed = 0;
+
+	/*
+	 * Now apply all the batches we have applied in one go.
+	 */
+	if (debug_bcknd)
+		gettimeofday(&apply_nb_cfg_start, NULL);
+	/* Return value deliberately ignored; errors land in err_buf. */
+	(void)nb_candidate_commit_apply(trxn->nb_trxn, true, &trxn->nb_trxn_id,
+					err_buf, sizeof(err_buf) - 1);
+	if (debug_bcknd) {
+		gettimeofday(&apply_nb_cfg_end, NULL);
+		apply_nb_cfg_tm =
+			timeval_elapsed(apply_nb_cfg_end, apply_nb_cfg_start);
+		clnt_ctxt->avg_apply_nb_cfg_tm =
+			((clnt_ctxt->avg_apply_nb_cfg_tm
+			  * clnt_ctxt->num_apply_nb_cfg)
+			 + apply_nb_cfg_tm)
+			/ (clnt_ctxt->num_apply_nb_cfg + 1);
+	}
+	clnt_ctxt->num_apply_nb_cfg++;
+	trxn->nb_trxn = NULL;
+
+	/*
+	 * Send back CFG_APPLY_REPLY for all batches applied.
+	 */
+	FOREACH_BCKND_APPLY_BATCH_IN_LIST (trxn, batch) {
+		/*
+		 * No need to delete the batch yet. Will be deleted during
+		 * transaction cleanup on receiving TRXN_DELETE_REQ.
+		 */
+		SET_FLAG(batch->flags, MGMTD_BCKND_TRXN_FLAGS_CFG_APPLIED);
+		mgmt_bcknd_batch_list_del(&trxn->apply_cfgs, batch);
+		mgmt_bcknd_batch_list_add_tail(&trxn->cfg_batches, batch);
+
+		batch_ids[num_processed] = batch->batch_id;
+		num_processed++;
+		/* Flush a full chunk of ids and start a new one. */
+		if (num_processed == MGMTD_BCKND_MAX_BATCH_IDS_IN_REQ) {
+			mgmt_bcknd_send_apply_reply(clnt_ctxt, trxn->trxn_id,
+						    batch_ids, num_processed,
+						    true, NULL);
+			num_processed = 0;
+		}
+	}
+
+	/* Final (possibly empty) chunk. */
+	mgmt_bcknd_send_apply_reply(clnt_ctxt, trxn->trxn_id, batch_ids,
+				    num_processed, true, NULL);
+
+	if (debug_bcknd)
+		MGMTD_BCKND_CLNT_DBG("Nb-apply-duration %lu (avg: %lu) uSec",
+				     apply_nb_cfg_tm,
+				     clnt_ctxt->avg_apply_nb_cfg_tm);
+
+	return 0;
+}
+
+/* Handle a CFG_APPLY_REQ: locate the transaction and apply it. */
+static int
+mgmt_bcknd_process_cfg_apply(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			     uint64_t trxn_id)
+{
+	struct mgmt_bcknd_trxn_ctxt *trxn =
+		mgmt_bcknd_find_trxn_by_id(clnt_ctxt, trxn_id);
+
+	/* Reject apply requests for transactions we never created. */
+	if (!trxn) {
+		mgmt_bcknd_send_apply_reply(clnt_ctxt, trxn_id, NULL, 0, false,
+					    "Transaction not created yet!");
+		return -1;
+	}
+
+	MGMTD_BCKND_CLNT_DBG("Trigger CFG_APPLY_REQ processing");
+	mgmt_bcknd_trxn_proc_cfgapply(trxn);
+
+	return 0;
+}
+
+/*
+ * Dispatch a single decoded protobuf message from MGMTD to the matching
+ * request handler. Reply-type messages and not-yet-supported requests
+ * are silently ignored. Always returns 0.
+ */
+static int
+mgmt_bcknd_client_handle_msg(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			     Mgmtd__BckndMessage *bcknd_msg)
+{
+	switch (bcknd_msg->message_case) {
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SUBSCR_REPLY:
+		MGMTD_BCKND_CLNT_DBG("Subscribe Reply Msg from mgmt, status %u",
+				     bcknd_msg->subscr_reply->success);
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_TRXN_REQ:
+		mgmt_bcknd_process_trxn_req(clnt_ctxt,
+					    bcknd_msg->trxn_req->trxn_id,
+					    bcknd_msg->trxn_req->create);
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_DATA_REQ:
+		mgmt_bcknd_process_cfgdata_req(
+			clnt_ctxt, bcknd_msg->cfg_data_req->trxn_id,
+			bcknd_msg->cfg_data_req->batch_id,
+			bcknd_msg->cfg_data_req->data_req,
+			bcknd_msg->cfg_data_req->n_data_req,
+			bcknd_msg->cfg_data_req->end_of_data);
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_VALIDATE_REQ:
+		mgmt_bcknd_process_cfg_validate(
+			clnt_ctxt,
+			(uint64_t)bcknd_msg->cfg_validate_req->trxn_id,
+			(uint64_t *)bcknd_msg->cfg_validate_req->batch_ids,
+			bcknd_msg->cfg_validate_req->n_batch_ids);
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+		mgmt_bcknd_process_cfg_apply(
+			clnt_ctxt, (uint64_t)bcknd_msg->cfg_apply_req->trxn_id);
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_GET_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SUBSCR_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_CMD_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from Backend
+	 * clients to MGMTd only and/or need not be handled here.
+	 */
+	case MGMTD__BCKND_MESSAGE__MESSAGE_GET_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_TRXN_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_VALIDATE_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_CMD_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_NOTIFY_DATA:
+	case MGMTD__BCKND_MESSAGE__MESSAGE__NOT_SET:
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Walk a buffer of framed messages received from MGMTD, unpack each
+ * protobuf payload and dispatch it. Returns the number of messages
+ * successfully decoded and handled.
+ */
+static int
+mgmt_bcknd_client_process_msg(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+			      uint8_t *msg_buf, int bytes_read)
+{
+	Mgmtd__BckndMessage *bcknd_msg;
+	struct mgmt_bcknd_msg *msg;
+	uint16_t bytes_left;
+	uint16_t processed = 0;
+
+	MGMTD_BCKND_CLNT_DBG(
+		"Got message of %d bytes from MGMTD Backend Server",
+		bytes_read);
+
+	bytes_left = bytes_read;
+	for (; bytes_left > MGMTD_BCKND_MSG_HDR_LEN;
+	     bytes_left -= msg->hdr.len, msg_buf += msg->hdr.len) {
+		msg = (struct mgmt_bcknd_msg *)msg_buf;
+		if (msg->hdr.marker != MGMTD_BCKND_MSG_MARKER) {
+			MGMTD_BCKND_CLNT_DBG(
+				"Marker not found in message from MGMTD '%s'",
+				clnt_ctxt->client_params.name);
+			break;
+		}
+
+		/*
+		 * A header length smaller than the header itself would make
+		 * the loop-step expressions above walk backwards — or spin
+		 * forever on len == 0. Treat it as corruption and stop.
+		 */
+		if (msg->hdr.len < MGMTD_BCKND_MSG_HDR_LEN) {
+			MGMTD_BCKND_CLNT_DBG(
+				"Invalid message length %u from MGMTD '%s'",
+				msg->hdr.len, clnt_ctxt->client_params.name);
+			break;
+		}
+
+		if (bytes_left < msg->hdr.len) {
+			MGMTD_BCKND_CLNT_DBG(
+				"Incomplete message of %d bytes (expected: %u) from MGMTD '%s'",
+				bytes_left, msg->hdr.len,
+				clnt_ctxt->client_params.name);
+			break;
+		}
+
+		bcknd_msg = mgmtd__bcknd_message__unpack(
+			NULL, (size_t)(msg->hdr.len - MGMTD_BCKND_MSG_HDR_LEN),
+			msg->payload);
+		if (!bcknd_msg) {
+			MGMTD_BCKND_CLNT_DBG(
+				"Failed to decode %d bytes from MGMTD '%s'",
+				msg->hdr.len, clnt_ctxt->client_params.name);
+			continue;
+		}
+
+		(void)mgmt_bcknd_client_handle_msg(clnt_ctxt, bcknd_msg);
+		mgmtd__bcknd_message__free_unpacked(bcknd_msg, NULL);
+		processed++;
+		clnt_ctxt->num_msg_rx++;
+	}
+
+	return processed;
+}
+
+/*
+ * Event handler: drain up to MGMTD_BCKND_MAX_NUM_MSG_PROC messages from
+ * the inbound FIFO and reschedule itself if more remain (keeps the event
+ * loop responsive under load).
+ */
+static void mgmt_bcknd_client_proc_msgbufs(struct thread *thread)
+{
+	struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+	struct stream *work;
+	int processed = 0;
+
+	clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)THREAD_ARG(thread);
+	assert(clnt_ctxt);
+
+	/* Connection went away since scheduling: nothing to do. */
+	if (clnt_ctxt->conn_fd == 0)
+		return;
+
+	for (; processed < MGMTD_BCKND_MAX_NUM_MSG_PROC;) {
+		work = stream_fifo_pop_safe(clnt_ctxt->ibuf_fifo);
+		if (!work)
+			break;
+
+		processed += mgmt_bcknd_client_process_msg(
+			clnt_ctxt, STREAM_DATA(work), stream_get_endp(work));
+
+		if (work != clnt_ctxt->ibuf_work) {
+			/* Free it up */
+			stream_free(work);
+		} else {
+			/* Reset stream buffer for next read */
+			stream_reset(work);
+		}
+	}
+
+	/*
+	 * If we have more to process, reschedule for processing it.
+	 */
+	if (stream_fifo_head(clnt_ctxt->ibuf_fifo))
+		mgmt_bcknd_client_register_event(clnt_ctxt,
+						 MGMTD_BCKND_PROC_MSG);
+}
+
+/*
+ * Read-ready callback: pull as many bytes as possible from the server
+ * socket into the working input stream, determine how many *complete*
+ * messages were received, queue them for deferred processing, and carry
+ * over any trailing partial message into a fresh working stream.
+ */
+static void mgmt_bcknd_client_read(struct thread *thread)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+ int bytes_read, msg_cnt;
+ size_t total_bytes, bytes_left;
+ struct mgmt_bcknd_msg_hdr *msg_hdr;
+ bool incomplete = false;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)THREAD_ARG(thread);
+ assert(clnt_ctxt && clnt_ctxt->conn_fd);
+
+ /* Phase 1: read from the socket until it would block or the
+ * working stream has no room left for at least a header.
+ */
+ total_bytes = 0;
+ bytes_left = STREAM_SIZE(clnt_ctxt->ibuf_work)
+ - stream_get_endp(clnt_ctxt->ibuf_work);
+ for (; bytes_left > MGMTD_BCKND_MSG_HDR_LEN;) {
+ bytes_read = stream_read_try(clnt_ctxt->ibuf_work,
+ clnt_ctxt->conn_fd, bytes_left);
+ MGMTD_BCKND_CLNT_DBG(
+ "Got %d bytes of message from MGMTD Backend server",
+ bytes_read);
+ if (bytes_read <= 0) {
+ if (bytes_read == -1
+ && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+ /* Nothing more to read now; re-arm read event */
+ mgmt_bcknd_client_register_event(
+ clnt_ctxt, MGMTD_BCKND_CONN_READ);
+ return;
+ }
+
+ if (!bytes_read) {
+ /* Looks like connection closed */
+ MGMTD_BCKND_CLNT_ERR(
+ "Got error (%d) while reading from MGMTD Backend server. Err: '%s'",
+ bytes_read, safe_strerror(errno));
+ mgmt_bcknd_server_disconnect(clnt_ctxt, true);
+ return;
+ }
+ /* NOTE(review): a negative return with an errno other
+ * than EAGAIN/EWOULDBLOCK falls through here and we
+ * keep whatever was read so far — confirm this is the
+ * intended handling rather than a disconnect.
+ */
+ break;
+ }
+
+ total_bytes += bytes_read;
+ bytes_left -= bytes_read;
+ }
+
+ /*
+ * Check if we would have read incomplete messages or not.
+ */
+ stream_set_getp(clnt_ctxt->ibuf_work, 0);
+ total_bytes = 0;
+ msg_cnt = 0;
+ bytes_left = stream_get_endp(clnt_ctxt->ibuf_work);
+ for (; bytes_left > MGMTD_BCKND_MSG_HDR_LEN;) {
+ msg_hdr = (struct mgmt_bcknd_msg_hdr
+ *)(STREAM_DATA(clnt_ctxt->ibuf_work)
+ + total_bytes);
+ if (msg_hdr->marker != MGMTD_BCKND_MSG_MARKER) {
+ /* Corrupted buffer. Force disconnect?? */
+ MGMTD_BCKND_CLNT_ERR(
+ "Received corrupted buffer from MGMTD backend server.");
+ mgmt_bcknd_server_disconnect(clnt_ctxt, true);
+ return;
+ }
+ if (msg_hdr->len > bytes_left)
+ break;
+
+ total_bytes += msg_hdr->len;
+ bytes_left -= msg_hdr->len;
+ msg_cnt++;
+ }
+
+ if (bytes_left > 0)
+ incomplete = true;
+
+ /*
+ * We would have read one or several messages.
+ * Schedule processing them now.
+ */
+ msg_hdr =
+ (struct mgmt_bcknd_msg_hdr *)(STREAM_DATA(clnt_ctxt->ibuf_work)
+ + total_bytes);
+ stream_set_endp(clnt_ctxt->ibuf_work, total_bytes);
+ stream_fifo_push(clnt_ctxt->ibuf_fifo, clnt_ctxt->ibuf_work);
+ clnt_ctxt->ibuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+ if (incomplete) {
+ /* Copy the trailing partial message into the new stream */
+ stream_put(clnt_ctxt->ibuf_work, msg_hdr, bytes_left);
+ stream_set_endp(clnt_ctxt->ibuf_work, bytes_left);
+ }
+
+ if (msg_cnt)
+ mgmt_bcknd_client_register_event(clnt_ctxt,
+ MGMTD_BCKND_PROC_MSG);
+
+ mgmt_bcknd_client_register_event(clnt_ctxt, MGMTD_BCKND_CONN_READ);
+}
+
+/*
+ * Schedule a socket write, unless writes are currently paused
+ * (flow control via MGMTD_BCKND_CLNT_FLAGS_WRITES_OFF).
+ */
+static inline void
+mgmt_bcknd_client_sched_msg_write(struct mgmt_bcknd_client_ctxt *clnt_ctxt)
+{
+ if (!CHECK_FLAG(clnt_ctxt->flags, MGMTD_BCKND_CLNT_FLAGS_WRITES_OFF))
+ mgmt_bcknd_client_register_event(clnt_ctxt,
+ MGMTD_BCKND_CONN_WRITE);
+}
+
+/*
+ * Resume writing: clear the writes-off flag and, if any output is
+ * pending (current work stream or queued FIFO buffers), schedule
+ * a socket write.
+ */
+static inline void
+mgmt_bcknd_client_writes_on(struct mgmt_bcknd_client_ctxt *clnt_ctxt)
+{
+ MGMTD_BCKND_CLNT_DBG("Resume writing msgs");
+ UNSET_FLAG(clnt_ctxt->flags, MGMTD_BCKND_CLNT_FLAGS_WRITES_OFF);
+ if (clnt_ctxt->obuf_work
+ || stream_fifo_count_safe(clnt_ctxt->obuf_fifo))
+ mgmt_bcknd_client_sched_msg_write(clnt_ctxt);
+}
+
+/* Pause writing: queued messages stay buffered until writes resume. */
+static inline void
+mgmt_bcknd_client_writes_off(struct mgmt_bcknd_client_ctxt *clnt_ctxt)
+{
+ SET_FLAG(clnt_ctxt->flags, MGMTD_BCKND_CLNT_FLAGS_WRITES_OFF);
+ MGMTD_BCKND_CLNT_DBG("Paused writing msgs");
+}
+
+/*
+ * Pack 'bcknd_msg' (protobuf) behind a native message header, append
+ * it to the current output work stream and schedule a socket write.
+ *
+ * Returns 0 on success, -1 if not connected or the packed message
+ * would exceed MGMTD_BCKND_MSG_MAX_LEN.
+ */
+static int mgmt_bcknd_client_send_msg(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ Mgmtd__BckndMessage *bcknd_msg)
+{
+ size_t msg_size;
+ uint8_t *msg_buf = clnt_ctxt->msg_buf;
+ struct mgmt_bcknd_msg *msg;
+
+ /* conn_fd == 0 is the 'not connected' sentinel */
+ if (clnt_ctxt->conn_fd == 0)
+ return -1;
+
+ msg_size = mgmtd__bcknd_message__get_packed_size(bcknd_msg);
+ msg_size += MGMTD_BCKND_MSG_HDR_LEN;
+ if (msg_size > MGMTD_BCKND_MSG_MAX_LEN) {
+ MGMTD_BCKND_CLNT_ERR(
+ "Message size %d more than max size'%d. Not sending!'",
+ (int)msg_size, (int)MGMTD_BCKND_MSG_MAX_LEN);
+ return -1;
+ }
+
+ msg = (struct mgmt_bcknd_msg *)msg_buf;
+ msg->hdr.marker = MGMTD_BCKND_MSG_MARKER;
+ msg->hdr.len = (uint16_t)msg_size;
+ mgmtd__bcknd_message__pack(bcknd_msg, msg->payload);
+
+ /* Allocate the output work stream lazily */
+ if (!clnt_ctxt->obuf_work)
+ clnt_ctxt->obuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+ /* If the current stream cannot hold this message, queue it on
+ * the FIFO and start a fresh one.
+ */
+ if (STREAM_WRITEABLE(clnt_ctxt->obuf_work) < msg_size) {
+ stream_fifo_push(clnt_ctxt->obuf_fifo, clnt_ctxt->obuf_work);
+ clnt_ctxt->obuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+ }
+ stream_write(clnt_ctxt->obuf_work, (void *)msg_buf, msg_size);
+
+ mgmt_bcknd_client_sched_msg_write(clnt_ctxt);
+ clnt_ctxt->num_msg_tx++;
+ return 0;
+}
+
+/*
+ * Write-ready callback: flush up to MGMTD_BCKND_MAX_NUM_MSG_WRITE
+ * buffered streams from the output FIFO to the server socket.
+ *
+ * On EAGAIN/EWOULDBLOCK or a partial write the remainder is retried
+ * via a re-armed write event; on a hard error the connection is torn
+ * down. If the batch limit is hit with messages still pending, writes
+ * are paused and resumed after a short delay (flow control).
+ */
+static void mgmt_bcknd_client_write(struct thread *thread)
+{
+ int bytes_written = 0;
+ int processed = 0;
+ int msg_size = 0;
+ struct stream *s = NULL;
+ struct stream *flushed = NULL;
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)THREAD_ARG(thread);
+ assert(clnt_ctxt && clnt_ctxt->conn_fd);
+
+ /* Ensure pushing any pending write buffer to FIFO */
+ if (clnt_ctxt->obuf_work) {
+ stream_fifo_push(clnt_ctxt->obuf_fifo, clnt_ctxt->obuf_work);
+ clnt_ctxt->obuf_work = NULL;
+ }
+
+ for (s = stream_fifo_head(clnt_ctxt->obuf_fifo);
+ s && processed < MGMTD_BCKND_MAX_NUM_MSG_WRITE;
+ s = stream_fifo_head(clnt_ctxt->obuf_fifo)) {
+ msg_size = (int)STREAM_READABLE(s);
+ bytes_written = stream_flush(s, clnt_ctxt->conn_fd);
+ if (bytes_written == -1
+ && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+ /* Socket not writable right now; retry later */
+ mgmt_bcknd_client_register_event(
+ clnt_ctxt, MGMTD_BCKND_CONN_WRITE);
+ return;
+ } else if (bytes_written != msg_size) {
+ MGMTD_BCKND_CLNT_ERR(
+ "Could not write all %d bytes (wrote: %d) to MGMTD Backend client socket. Err: '%s'",
+ msg_size, bytes_written, safe_strerror(errno));
+ if (bytes_written > 0) {
+ /* Partial write: drop the written bytes and
+ * retry the remainder later.
+ */
+ stream_forward_getp(s, (size_t)bytes_written);
+ stream_pulldown(s);
+ mgmt_bcknd_client_register_event(
+ clnt_ctxt, MGMTD_BCKND_CONN_WRITE);
+ return;
+ }
+ /* Hard write error: tear the connection down */
+ mgmt_bcknd_server_disconnect(clnt_ctxt, true);
+ return;
+ }
+
+ /* Whole stream flushed; release it */
+ flushed = stream_fifo_pop(clnt_ctxt->obuf_fifo);
+ stream_free(flushed);
+ MGMTD_BCKND_CLNT_DBG(
+ "Wrote %d bytes of message to MGMTD Backend client socket.",
+ bytes_written);
+ processed++;
+ }
+
+ if (s) {
+ /* Batch limit hit with more pending; pause and resume later */
+ mgmt_bcknd_client_writes_off(clnt_ctxt);
+ mgmt_bcknd_client_register_event(clnt_ctxt,
+ MGMTD_BCKND_CONN_WRITES_ON);
+ }
+}
+
+/* Timer callback that re-enables writes after a flow-control pause. */
+static void mgmt_bcknd_client_resume_writes(struct thread *thread)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)THREAD_ARG(thread);
+ assert(clnt_ctxt && clnt_ctxt->conn_fd);
+
+ mgmt_bcknd_client_writes_on(clnt_ctxt);
+}
+
+/*
+ * Build and send a SUBSCRIBE_REQ message for the given xpaths.
+ * 'subscr_xpaths' selects subscribe (true) vs unsubscribe (false).
+ * Returns the result of mgmt_bcknd_client_send_msg() (0/-1).
+ */
+static int mgmt_bcknd_send_subscr_req(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ bool subscr_xpaths,
+ uint16_t num_reg_xpaths,
+ char **reg_xpaths)
+{
+ Mgmtd__BckndMessage bcknd_msg;
+ Mgmtd__BckndSubscribeReq subscr_req;
+
+ mgmtd__bcknd_subscribe_req__init(&subscr_req);
+ subscr_req.client_name = clnt_ctxt->client_params.name;
+ subscr_req.n_xpath_reg = num_reg_xpaths;
+ if (num_reg_xpaths)
+ subscr_req.xpath_reg = reg_xpaths;
+ else
+ subscr_req.xpath_reg = NULL;
+ subscr_req.subscribe_xpaths = subscr_xpaths;
+
+ mgmtd__bcknd_message__init(&bcknd_msg);
+ bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_SUBSCR_REQ;
+ bcknd_msg.subscr_req = &subscr_req;
+
+ return mgmt_bcknd_client_send_msg(clnt_ctxt, &bcknd_msg);
+}
+
+/*
+ * Create a unix-domain socket and connect to the MGMTD backend server
+ * at MGMTD_BCKND_SERVER_PATH. On success the socket is made
+ * non-blocking, read events are registered, the client's connect
+ * callback (if any) is invoked and the initial SUBSCRIBE_REQ is sent.
+ *
+ * Returns 0 on success, -1 on failure (after triggering the
+ * disconnect/retry path).
+ */
+static int mgmt_bcknd_server_connect(struct mgmt_bcknd_client_ctxt *clnt_ctxt)
+{
+ int ret, sock, len;
+ struct sockaddr_un addr;
+
+ MGMTD_BCKND_CLNT_DBG("Trying to connect to MGMTD Backend server at %s",
+ MGMTD_BCKND_SERVER_PATH);
+
+ assert(!clnt_ctxt->conn_fd);
+
+ sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (sock < 0) {
+ MGMTD_BCKND_CLNT_ERR("Failed to create socket");
+ goto mgmt_bcknd_server_connect_failed;
+ }
+
+ MGMTD_BCKND_CLNT_DBG(
+ "Created MGMTD Backend server socket successfully!");
+
+ memset(&addr, 0, sizeof(struct sockaddr_un));
+ addr.sun_family = AF_UNIX;
+ strlcpy(addr.sun_path, MGMTD_BCKND_SERVER_PATH, sizeof(addr.sun_path));
+#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
+ len = addr.sun_len = SUN_LEN(&addr);
+#else
+ len = sizeof(addr.sun_family) + strlen(addr.sun_path);
+#endif /* HAVE_STRUCT_SOCKADDR_UN_SUN_LEN */
+
+ ret = connect(sock, (struct sockaddr *)&addr, len);
+ if (ret < 0) {
+ MGMTD_BCKND_CLNT_ERR(
+ "Failed to connect to MGMTD Backend Server at %s. Err: %s",
+ addr.sun_path, safe_strerror(errno));
+ close(sock);
+ sock = -1;
+ goto mgmt_bcknd_server_connect_failed;
+ }
+
+ MGMTD_BCKND_CLNT_DBG(
+ "Connected to MGMTD Backend Server at %s successfully!",
+ addr.sun_path);
+ clnt_ctxt->conn_fd = sock;
+
+ /* Make client socket non-blocking. */
+ set_nonblocking(sock);
+ setsockopt_so_sendbuf(clnt_ctxt->conn_fd,
+ MGMTD_SOCKET_BCKND_SEND_BUF_SIZE);
+ setsockopt_so_recvbuf(clnt_ctxt->conn_fd,
+ MGMTD_SOCKET_BCKND_RECV_BUF_SIZE);
+
+ mgmt_bcknd_client_register_event(clnt_ctxt, MGMTD_BCKND_CONN_READ);
+
+ /* Notify client through registered callback (if any) */
+ if (clnt_ctxt->client_params.client_connect_notify)
+ (void)(*clnt_ctxt->client_params
+ .client_connect_notify)(
+ (uintptr_t)clnt_ctxt,
+ clnt_ctxt->client_params.user_data, true);
+
+ /* Send SUBSCRIBE_REQ message */
+ if (mgmt_bcknd_send_subscr_req(clnt_ctxt, false, 0, NULL) != 0)
+ goto mgmt_bcknd_server_connect_failed;
+
+ return 0;
+
+mgmt_bcknd_server_connect_failed:
+ /*
+ * Close the socket only if it was actually created and has not
+ * already been handed over to clnt_ctxt->conn_fd (the disconnect
+ * call below owns that one). Checking 'sock >= 0' (rather than
+ * plain 'sock') avoids calling close(-1) after a socket()/connect()
+ * failure and handles fd 0 correctly.
+ */
+ if (sock >= 0 && sock != clnt_ctxt->conn_fd)
+ close(sock);
+
+ mgmt_bcknd_server_disconnect(clnt_ctxt, true);
+ return -1;
+}
+
+/* Connection-retry timer callback: attempt a (re)connect to the server. */
+static void mgmt_bcknd_client_conn_timeout(struct thread *thread)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)THREAD_ARG(thread);
+ assert(clnt_ctxt);
+
+ mgmt_bcknd_server_connect(clnt_ctxt);
+}
+
+/*
+ * Register the thread/event callback corresponding to 'event' on the
+ * client context. Only connection-level events are valid here; the
+ * remaining enum values trip the assert in the default arm.
+ */
+static void
+mgmt_bcknd_client_register_event(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ enum mgmt_bcknd_event event)
+{
+ struct timeval tv = {0};
+
+ switch (event) {
+ case MGMTD_BCKND_CONN_READ:
+ thread_add_read(clnt_ctxt->tm, mgmt_bcknd_client_read,
+ clnt_ctxt, clnt_ctxt->conn_fd,
+ &clnt_ctxt->conn_read_ev);
+ assert(clnt_ctxt->conn_read_ev);
+ break;
+ case MGMTD_BCKND_CONN_WRITE:
+ thread_add_write(clnt_ctxt->tm, mgmt_bcknd_client_write,
+ clnt_ctxt, clnt_ctxt->conn_fd,
+ &clnt_ctxt->conn_write_ev);
+ assert(clnt_ctxt->conn_write_ev);
+ break;
+ case MGMTD_BCKND_PROC_MSG:
+ /* Short delay so several reads can batch into one pass */
+ tv.tv_usec = MGMTD_BCKND_MSG_PROC_DELAY_USEC;
+ thread_add_timer_tv(clnt_ctxt->tm,
+ mgmt_bcknd_client_proc_msgbufs, clnt_ctxt,
+ &tv, &clnt_ctxt->msg_proc_ev);
+ assert(clnt_ctxt->msg_proc_ev);
+ break;
+ case MGMTD_BCKND_CONN_WRITES_ON:
+ thread_add_timer_msec(
+ clnt_ctxt->tm, mgmt_bcknd_client_resume_writes,
+ clnt_ctxt, MGMTD_BCKND_MSG_WRITE_DELAY_MSEC,
+ &clnt_ctxt->conn_writes_on);
+ assert(clnt_ctxt->conn_writes_on);
+ break;
+ case MGMTD_BCKND_SERVER:
+ case MGMTD_BCKND_CONN_INIT:
+ case MGMTD_BCKND_SCHED_CFG_PREPARE:
+ case MGMTD_BCKND_RESCHED_CFG_PREPARE:
+ case MGMTD_BCKND_SCHED_CFG_APPLY:
+ case MGMTD_BCKND_RESCHED_CFG_APPLY:
+ assert(!"mgmt_bcknd_client_post_event() called incorrectly");
+ break;
+ }
+}
+
+/* Arm the connection-retry timer to fire after 'intvl_secs' seconds. */
+static void
+mgmt_bcknd_client_schedule_conn_retry(struct mgmt_bcknd_client_ctxt *clnt_ctxt,
+ unsigned long intvl_secs)
+{
+ MGMTD_BCKND_CLNT_DBG(
+ "Scheduling MGMTD Backend server connection retry after %lu seconds",
+ intvl_secs);
+ thread_add_timer(clnt_ctxt->tm, mgmt_bcknd_client_conn_timeout,
+ (void *)clnt_ctxt, intvl_secs,
+ &clnt_ctxt->conn_retry_tmr);
+}
+
+extern struct nb_config *running_config;
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ */
+/*
+ * Initialize library and try connecting with MGMTD.
+ *
+ * Copies 'params' into the (single, static) client context, sets up
+ * the I/O stream FIFOs and kicks off the first connection attempt.
+ * Must be called after frr_init() (running_config must exist) and
+ * at most once (asserts on re-init). Returns the library handle.
+ */
+uintptr_t mgmt_bcknd_client_lib_init(struct mgmt_bcknd_client_params *params,
+ struct thread_master *master_thread)
+{
+ assert(master_thread && params && strlen(params->name)
+ && !mgmt_bcknd_clntctxt.tm);
+
+ mgmt_bcknd_clntctxt.tm = master_thread;
+
+ if (!running_config)
+ assert(!"MGMTD Bcknd Client lib_init() after frr_init() only!");
+ mgmt_bcknd_clntctxt.running_config = running_config;
+ mgmt_bcknd_clntctxt.candidate_config = nb_config_new(NULL);
+
+ memcpy(&mgmt_bcknd_clntctxt.client_params, params,
+ sizeof(mgmt_bcknd_clntctxt.client_params));
+ if (!mgmt_bcknd_clntctxt.client_params.conn_retry_intvl_sec)
+ mgmt_bcknd_clntctxt.client_params.conn_retry_intvl_sec =
+ MGMTD_BCKND_DEFAULT_CONN_RETRY_INTVL_SEC;
+
+ assert(!mgmt_bcknd_clntctxt.ibuf_fifo && !mgmt_bcknd_clntctxt.ibuf_work
+ && !mgmt_bcknd_clntctxt.obuf_fifo
+ && !mgmt_bcknd_clntctxt.obuf_work);
+
+ mgmt_bcknd_trxn_list_init(&mgmt_bcknd_clntctxt.trxn_head);
+ mgmt_bcknd_clntctxt.ibuf_fifo = stream_fifo_new();
+ mgmt_bcknd_clntctxt.ibuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+ mgmt_bcknd_clntctxt.obuf_fifo = stream_fifo_new();
+ /* obuf_work is allocated lazily by the send path */
+ mgmt_bcknd_clntctxt.obuf_work = NULL;
+
+ /* Start trying to connect to MGMTD backend server immediately */
+ mgmt_bcknd_client_schedule_conn_retry(&mgmt_bcknd_clntctxt, 1);
+
+ MGMTD_BCKND_CLNT_DBG("Initialized client '%s'", params->name);
+
+ return (uintptr_t)&mgmt_bcknd_clntctxt;
+}
+
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ */
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ * Returns MGMTD_SUCCESS, MGMTD_INVALID_PARAM on a NULL handle, or
+ * MGMTD_INTERNAL_ERROR if the SUBSCRIBE_REQ could not be sent.
+ */
+enum mgmt_result mgmt_bcknd_subscribe_yang_data(uintptr_t lib_hndl,
+ char *reg_yang_xpaths[],
+ int num_reg_xpaths)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)lib_hndl;
+ if (!clnt_ctxt)
+ return MGMTD_INVALID_PARAM;
+
+ if (mgmt_bcknd_send_subscr_req(clnt_ctxt, true, num_reg_xpaths,
+ reg_yang_xpaths)
+ != 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Unsubscribe with MGMTD for one or more YANG subtree(s).
+ */
+/*
+ * Unsubscribe with MGMTD for one or more YANG subtree(s).
+ * Mirror of mgmt_bcknd_subscribe_yang_data() with subscribe=false.
+ * NOTE(review): this checks '< 0' where the subscribe variant checks
+ * '!= 0' — equivalent for the current 0/-1 return values, but worth
+ * making consistent.
+ */
+enum mgmt_result mgmt_bcknd_unsubscribe_yang_data(uintptr_t lib_hndl,
+ char *reg_yang_xpaths[],
+ int num_reg_xpaths)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)lib_hndl;
+ if (!clnt_ctxt)
+ return MGMTD_INVALID_PARAM;
+
+
+ if (mgmt_bcknd_send_subscr_req(clnt_ctxt, false, num_reg_xpaths,
+ reg_yang_xpaths)
+ < 0)
+ return MGMTD_INTERNAL_ERROR;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ */
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ * NOTE(review): currently a stub — 'data_elems'/'num_elems' are
+ * unused and nothing is sent; only the handle is validated.
+ */
+enum mgmt_result mgmt_bcknd_send_yang_notify(uintptr_t lib_hndl,
+ Mgmtd__YangData * data_elems[],
+ int num_elems)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)lib_hndl;
+ if (!clnt_ctxt)
+ return MGMTD_INVALID_PARAM;
+
+ return MGMTD_SUCCESS;
+}
+
+/*
+ * Destroy library and cleanup everything.
+ */
+/*
+ * Destroy library and cleanup everything: disconnect, free the I/O
+ * streams/FIFOs, cancel all pending events and release transactions.
+ * NOTE(review): mixes 'clnt_ctxt' with the global mgmt_bcknd_clntctxt;
+ * they are the same object today (single static context), but the
+ * inconsistency is worth cleaning up.
+ */
+void mgmt_bcknd_client_lib_destroy(uintptr_t lib_hndl)
+{
+ struct mgmt_bcknd_client_ctxt *clnt_ctxt;
+
+ clnt_ctxt = (struct mgmt_bcknd_client_ctxt *)lib_hndl;
+ assert(clnt_ctxt);
+
+ MGMTD_BCKND_CLNT_DBG("Destroying MGMTD Backend Client '%s'",
+ clnt_ctxt->client_params.name);
+
+ mgmt_bcknd_server_disconnect(clnt_ctxt, false);
+
+ assert(mgmt_bcknd_clntctxt.ibuf_fifo && mgmt_bcknd_clntctxt.obuf_fifo);
+
+ stream_fifo_free(mgmt_bcknd_clntctxt.ibuf_fifo);
+ if (mgmt_bcknd_clntctxt.ibuf_work)
+ stream_free(mgmt_bcknd_clntctxt.ibuf_work);
+ stream_fifo_free(mgmt_bcknd_clntctxt.obuf_fifo);
+ if (mgmt_bcknd_clntctxt.obuf_work)
+ stream_free(mgmt_bcknd_clntctxt.obuf_work);
+
+ THREAD_OFF(clnt_ctxt->conn_retry_tmr);
+ THREAD_OFF(clnt_ctxt->conn_read_ev);
+ THREAD_OFF(clnt_ctxt->conn_write_ev);
+ THREAD_OFF(clnt_ctxt->conn_writes_on);
+ THREAD_OFF(clnt_ctxt->msg_proc_ev);
+ mgmt_bcknd_cleanup_all_trxns(clnt_ctxt);
+ mgmt_bcknd_trxn_list_fini(&clnt_ctxt->trxn_head);
+}
--- /dev/null
+/*
+ * MGMTD Backend Client Library api interfaces
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRR_MGMTD_BCKND_CLIENT_H_
+#define _FRR_MGMTD_BCKND_CLIENT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mgmtd/mgmt_defines.h"
+
+/***************************************************************
+ * Macros
+ ***************************************************************/
+
+#define MGMTD_BCKND_CLIENT_ERROR_STRING_MAX_LEN 32
+
+#define MGMTD_BCKND_DEFAULT_CONN_RETRY_INTVL_SEC 5
+
+#define MGMTD_BCKND_MSG_PROC_DELAY_USEC 10
+#define MGMTD_BCKND_MAX_NUM_MSG_PROC 500
+
+#define MGMTD_BCKND_MSG_WRITE_DELAY_MSEC 1
+#define MGMTD_BCKND_MAX_NUM_MSG_WRITE 1000
+
+#define GMGD_BCKND_MAX_NUM_REQ_ITEMS 64
+
+#define MGMTD_BCKND_MSG_MAX_LEN 16384
+
+#define MGMTD_SOCKET_BCKND_SEND_BUF_SIZE 65535
+#define MGMTD_SOCKET_BCKND_RECV_BUF_SIZE MGMTD_SOCKET_BCKND_SEND_BUF_SIZE
+
+/*
+ * Only about 80% of MGMTD_BCKND_MSG_MAX_LEN is usable for config
+ * data, since the Google protobuf encoding adds overhead to each
+ * sent message.
+ */
+#define MGMTD_BCKND_CFGDATA_PACKING_EFFICIENCY 0.8
+#define MGMTD_BCKND_CFGDATA_MAX_MSG_LEN \
+ (MGMTD_BCKND_MSG_MAX_LEN * MGMTD_BCKND_CFGDATA_PACKING_EFFICIENCY)
+
+#define MGMTD_BCKND_MAX_BATCH_IDS_IN_REQ \
+ (MGMTD_BCKND_MSG_MAX_LEN - 128) / sizeof(uint64_t)
+
+/*
+ * List of name identifiers for all backend clients to
+ * supply while calling mgmt_bcknd_client_lib_init().
+ */
+#define MGMTD_BCKND_CLIENT_BGPD "bgpd"
+#define MGMTD_BCKND_CLIENT_STATICD "staticd"
+
+
+#define MGMTD_BCKND_CONTAINER_NODE_VAL "<<container>>"
+
+/***************************************************************
+ * Data-structures
+ ***************************************************************/
+
+/* Identifiers for all known backend client daemons. */
+enum mgmt_bcknd_client_id {
+ MGMTD_BCKND_CLIENT_ID_MIN = 0,
+ MGMTD_BCKND_CLIENT_ID_STATICD = MGMTD_BCKND_CLIENT_ID_MIN,
+ MGMTD_BCKND_CLIENT_ID_BGPD,
+ MGMTD_BCKND_CLIENT_ID_MAX
+};
+
+/* Iterate 'id' over all valid backend client IDs (excludes _MAX). */
+#define FOREACH_MGMTD_BCKND_CLIENT_ID(id) \
+ for ((id) = MGMTD_BCKND_CLIENT_ID_MIN; \
+ (id) < MGMTD_BCKND_CLIENT_ID_MAX; (id)++)
+
+#define MGMTD_BCKND_MAX_CLIENTS_PER_XPATH_REG 32
+
+/* Native wire header preceding every protobuf payload on the socket. */
+struct mgmt_bcknd_msg_hdr {
+ uint16_t marker;
+ uint16_t len; /* Includes header */
+};
+#define MGMTD_BCKND_MSG_HDR_LEN sizeof(struct mgmt_bcknd_msg_hdr)
+#define MGMTD_BCKND_MSG_MARKER 0xfeed
+
+/* A full wire message: header followed by the packed protobuf bytes. */
+struct mgmt_bcknd_msg {
+ struct mgmt_bcknd_msg_hdr hdr;
+ uint8_t payload[];
+};
+
+/* Per-transaction context handed to the client's callbacks. */
+struct mgmt_bcknd_client_trxn_ctxt {
+ uintptr_t *user_ctx;
+};
+
+/*
+ * All the client-specific information this library needs to
+ * initialize itself, setup connection with MGMTD BackEnd interface
+ * and carry on all required procedures appropriately.
+ *
+ * Backend clients need to initialise an instance of this structure
+ * with appropriate data and pass it while calling the API
+ * to initialize the library (See mgmt_bcknd_client_lib_init for
+ * more details).
+ */
+struct mgmt_bcknd_client_params {
+ char name[MGMTD_CLIENT_NAME_MAX_LEN];
+ uintptr_t user_data; /* opaque; passed back in every callback */
+ unsigned long conn_retry_intvl_sec; /* 0 => library default */
+
+ /* Invoked on connect (connected=true) and disconnect (false). */
+ void (*client_connect_notify)(uintptr_t lib_hndl,
+ uintptr_t usr_data,
+ bool connected);
+
+ /* Result of a prior (un)subscribe request, one entry per xpath. */
+ void (*client_subscribe_notify)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct nb_yang_xpath **xpath,
+ enum mgmt_result subscribe_result[], int num_paths);
+
+ /* Transaction created (destroyed=false) or torn down (true). */
+ void (*trxn_notify)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_bcknd_client_trxn_ctxt *trxn_ctxt, bool destroyed);
+
+ /* Validate one config change; on failure fill 'error_if_any'. */
+ enum mgmt_result (*data_validate)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_bcknd_client_trxn_ctxt *trxn_ctxt,
+ struct nb_yang_xpath *xpath, struct nb_yang_value *data,
+ bool delete, char *error_if_any);
+
+ /* Apply one validated config change ('delete' => removal). */
+ enum mgmt_result (*data_apply)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_bcknd_client_trxn_ctxt *trxn_ctxt,
+ struct nb_yang_xpath *xpath, struct nb_yang_value *data,
+ bool delete);
+
+ /* Fetch a single operational data element for 'xpath'. */
+ enum mgmt_result (*get_data_elem)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_bcknd_client_trxn_ctxt *trxn_ctxt,
+ struct nb_yang_xpath *xpath, struct nb_yang_xpath_elem *elem);
+
+ /* Fetch a batch of data under 'xpath'; 'next_key' supports paging. */
+ enum mgmt_result (*get_data)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_bcknd_client_trxn_ctxt *trxn_ctxt,
+ struct nb_yang_xpath *xpath, bool keys_only,
+ struct nb_yang_xpath_elem **elems, int *num_elems,
+ int *next_key);
+
+ /* Fetch the next batch of data following a prior get_data call. */
+ enum mgmt_result (*get_next_data)(
+ uintptr_t lib_hndl, uintptr_t usr_data,
+ struct mgmt_bcknd_client_trxn_ctxt *trxn_ctxt,
+ struct nb_yang_xpath *xpath, bool keys_only,
+ struct nb_yang_xpath_elem **elems, int *num_elems);
+};
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_bcknd_client_names[MGMTD_CLIENT_NAME_MAX_LEN];
+
+/*
+ * Map a client ID to its name; out-of-range IDs are clamped to _MAX.
+ * NOTE(review): the check is '>' (not '>='), so id == _MAX indexes
+ * mgmt_bcknd_client_names[_MAX] — confirm the array carries a
+ * sentinel entry at that slot (its declared dimension above is
+ * MGMTD_CLIENT_NAME_MAX_LEN, which looks unrelated to the ID count).
+ */
+static inline const char *mgmt_bknd_client_id2name(enum mgmt_bcknd_client_id id)
+{
+ if (id > MGMTD_BCKND_CLIENT_ID_MAX)
+ id = MGMTD_BCKND_CLIENT_ID_MAX;
+ return mgmt_bcknd_client_names[id];
+}
+
+/*
+ * Map a client name back to its ID by scanning the name table.
+ * Returns MGMTD_BCKND_CLIENT_ID_MAX when the name is unknown.
+ */
+static inline enum mgmt_bcknd_client_id
+mgmt_bknd_client_name2id(const char *name)
+{
+ enum mgmt_bcknd_client_id id;
+
+ FOREACH_MGMTD_BCKND_CLIENT_ID (id) {
+ if (!strncmp(mgmt_bcknd_client_names[id], name,
+ MGMTD_CLIENT_NAME_MAX_LEN))
+ return id;
+ }
+
+ return MGMTD_BCKND_CLIENT_ID_MAX;
+}
+
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ *
+ * params
+ * Backend client parameters.
+ *
+ * master_thread
+ * Thread master.
+ *
+ * Returns:
+ * Backend client lib handler (nothing but address of mgmt_bcknd_clntctxt)
+ */
+extern uintptr_t
+mgmt_bcknd_client_lib_init(struct mgmt_bcknd_client_params *params,
+ struct thread_master *master_thread);
+
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * reg_yang_xpaths
+ * Yang xpath(s) that needs to be subscribed to.
+ *
+ * num_xpaths
+ * Number of xpaths
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_bcknd_subscribe_yang_data(uintptr_t lib_hndl,
+ char **reg_yang_xpaths,
+ int num_xpaths);
+
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * data_elems
+ * Yang data elements from data tree.
+ *
+ * num_elems
+ * Number of data elements.
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_bcknd_send_yang_notify(uintptr_t lib_hndl, Mgmtd__YangData **data_elems,
+ int num_elems);
+
+/*
+ * Un-subscribe with MGMTD for one or more YANG subtree(s).
+ *
+ * lib_hndl
+ * Client library handler.
+ *
+ * reg_yang_xpaths
+ * Yang xpath(s) that needs to be un-subscribed from.
+ *
+ * num_reg_xpaths
+ * Number of subscribed xpaths
+ *
+ * Returns:
+ * MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+enum mgmt_result mgmt_bcknd_unsubscribe_yang_data(uintptr_t lib_hndl,
+ char **reg_yang_xpaths,
+ int num_reg_xpaths);
+
+/*
+ * Destroy library and cleanup everything.
+ */
+extern void mgmt_bcknd_client_lib_destroy(uintptr_t lib_hndl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMTD_BCKND_CLIENT_H_ */
config->dnode = yang_dnode_new(ly_native_ctx, true);
config->version = 0;
+ RB_INIT(nb_config_cbs, &config->cfg_chgs);
+
return config;
}
{
if (config->dnode)
yang_dnode_free(config->dnode);
+ nb_config_diff_del_changes(&config->cfg_chgs);
XFREE(MTYPE_NB_CONFIG, config);
}
dup->dnode = yang_dnode_dup(config->dnode);
dup->version = config->version;
+ RB_INIT(nb_config_cbs, &dup->cfg_chgs);
+
return dup;
}
RB_INSERT(nb_config_cbs, changes, &change->cb);
}
-static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
+void nb_config_diff_del_changes(struct nb_config_cbs *changes)
{
while (!RB_EMPTY(nb_config_cbs, changes)) {
struct nb_config_change *change;
* configurations. Given a new subtree, calculate all new YANG data nodes,
* excluding default leafs and leaf-lists. This is a recursive function.
*/
-static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
- struct nb_config_cbs *changes)
+void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
+ struct nb_config_cbs *changes)
{
enum nb_operation operation;
struct lyd_node *child;
}
#endif
-/* Calculate the delta between two different configurations. */
-static void nb_config_diff(const struct nb_config *config1,
- const struct nb_config *config2,
- struct nb_config_cbs *changes)
+/*
+ * Calculate the delta between two different configurations.
+ *
+ * NOTE: 'config1' is the reference DB, while 'config2' is
+ * the DB being compared against 'config1'. Typically 'config1'
+ * should be the Running DB and 'config2' is the Candidate DB.
+ */
+void nb_config_diff(const struct nb_config *config1,
+ const struct nb_config *config2,
+ struct nb_config_cbs *changes)
{
struct lyd_node *diff = NULL;
const struct lyd_node *root, *dnode;
return NB_OK;
}
+/*
+ * Record the config changes implied by a single edit into the
+ * candidate's cfg_chgs tree, by walking the affected subtree and
+ * translating libyang diff ops ('c'reate/'d'elete/'r'eplace) into
+ * northbound change entries.
+ */
+static void nb_update_candidate_changes(struct nb_config *candidate,
+ struct nb_cfg_change *change,
+ uint32_t *seq)
+{
+ enum nb_operation oper = change->operation;
+ char *xpath = change->xpath;
+ struct lyd_node *root = NULL;
+ struct lyd_node *dnode;
+ struct nb_config_cbs *cfg_chgs = &candidate->cfg_chgs;
+ int op;
+
+ /* For create/modify walk the candidate subtree; for destroy the
+ * node only exists in the running config.
+ */
+ switch (oper) {
+ case NB_OP_CREATE:
+ case NB_OP_MODIFY:
+ root = yang_dnode_get(candidate->dnode, xpath);
+ break;
+ case NB_OP_DESTROY:
+ root = yang_dnode_get(running_config->dnode, xpath);
+ /* code */
+ break;
+ default:
+ break;
+ }
+
+ if (!root)
+ return;
+
+ LYD_TREE_DFS_BEGIN (root, dnode) {
+ op = nb_lyd_diff_get_op(dnode);
+ switch (op) {
+ case 'c':
+ /* whole subtree created; no need to descend further */
+ nb_config_diff_created(dnode, seq, cfg_chgs);
+ LYD_TREE_DFS_continue = 1;
+ break;
+ case 'd':
+ nb_config_diff_deleted(dnode, seq, cfg_chgs);
+ LYD_TREE_DFS_continue = 1;
+ break;
+ case 'r':
+ nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq,
+ dnode);
+ break;
+ default:
+ break;
+ }
+ LYD_TREE_DFS_END(root, dnode);
+ }
+}
+
+/*
+ * Reject operations that are invalid for the target node: list key
+ * leafs cannot be modified or destroyed independently of their list
+ * entry. All other combinations are allowed.
+ */
+static bool nb_is_operation_allowed(struct nb_node *nb_node,
+ struct nb_cfg_change *change)
+{
+ enum nb_operation oper = change->operation;
+
+ if (lysc_is_key(nb_node->snode)) {
+ if (oper == NB_OP_MODIFY || oper == NB_OP_DESTROY)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Apply an array of config changes to the candidate configuration.
+ * Resolves relative xpaths against 'xpath_base'/'curr_xpath', edits
+ * the candidate via nb_candidate_edit() and records the resulting
+ * diffs via nb_update_candidate_changes(). On any failure *error is
+ * set and a summary is written into err_buf (err_bufsize bytes).
+ */
+void nb_candidate_edit_config_changes(
+ struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
+ size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
+ int xpath_index, char *err_buf, int err_bufsize, bool *error)
+{
+ uint32_t seq = 0;
+
+ if (error)
+ *error = false;
+
+ if (xpath_base == NULL)
+ xpath_base = "";
+
+ /* Edit candidate configuration. */
+ for (size_t i = 0; i < num_cfg_changes; i++) {
+ struct nb_cfg_change *change = &cfg_changes[i];
+ struct nb_node *nb_node;
+ char xpath[XPATH_MAXLEN];
+ struct yang_data *data;
+ int ret;
+
+ /* Handle relative XPaths. */
+ memset(xpath, 0, sizeof(xpath));
+ if (xpath_index > 0
+ && (xpath_base[0] == '.' || change->xpath[0] == '.'))
+ strlcpy(xpath, curr_xpath, sizeof(xpath));
+ if (xpath_base[0]) {
+ if (xpath_base[0] == '.')
+ strlcat(xpath, xpath_base + 1, sizeof(xpath));
+ else
+ strlcat(xpath, xpath_base, sizeof(xpath));
+ }
+ /* An absolute change xpath replaces whatever was built so
+ * far (strlcpy, not strlcat, in the else arm below).
+ */
+ if (change->xpath[0] == '.')
+ strlcat(xpath, change->xpath + 1, sizeof(xpath));
+ else
+ strlcpy(xpath, change->xpath, sizeof(xpath));
+
+ /* Find the northbound node associated to the data path. */
+ nb_node = nb_node_find(xpath);
+ if (!nb_node) {
+ flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
+ "%s: unknown data path: %s", __func__, xpath);
+ if (error)
+ *error = true;
+ continue;
+ }
+ /* Find if the node to be edited is not a key node */
+ if (!nb_is_operation_allowed(nb_node, change)) {
+ zlog_err(" Xpath %s points to key node", xpath);
+ if (error)
+ *error = true;
+ break;
+ }
+
+ /* If the value is not set, get the default if it exists. */
+ if (change->value == NULL)
+ change->value = yang_snode_get_default(nb_node->snode);
+ data = yang_data_new(xpath, change->value);
+
+ /*
+ * Ignore "not found" errors when editing the candidate
+ * configuration.
+ */
+ ret = nb_candidate_edit(candidate_config, nb_node,
+ change->operation, xpath, NULL, data);
+ yang_data_free(data);
+ if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
+ flog_warn(
+ EC_LIB_NB_CANDIDATE_EDIT_ERROR,
+ "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
+ __func__, nb_operation_name(change->operation),
+ xpath);
+ if (error)
+ *error = true;
+ continue;
+ }
+ nb_update_candidate_changes(candidate_config, change, &seq);
+ }
+
+ if (error && *error) {
+ char buf[BUFSIZ];
+
+ /*
+ * Failure to edit the candidate configuration should never
+ * happen in practice, unless there's a bug in the code. When
+ * that happens, log the error but otherwise ignore it.
+ */
+ snprintf(err_buf, err_bufsize,
+ "%% Failed to edit configuration.\n\n%s",
+ yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
+ }
+}
+
bool nb_candidate_needs_update(const struct nb_config *candidate)
{
if (candidate->version < running_config->version)
* WARNING: lyd_validate() can change the configuration as part of the
* validation process.
*/
-static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
- size_t errmsg_len)
+int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
+ size_t errmsg_len)
{
if (lyd_validate_all(&candidate->dnode, ly_native_ctx,
LYD_VALIDATE_NO_STATE, NULL)
}
/* Perform code-level validation using the northbound callbacks. */
-static int nb_candidate_validate_code(struct nb_context *context,
- struct nb_config *candidate,
- struct nb_config_cbs *changes,
- char *errmsg, size_t errmsg_len)
+int nb_candidate_validate_code(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes, char *errmsg,
+ size_t errmsg_len)
{
struct nb_config_cb *cb;
struct lyd_node *root, *child;
return NB_OK;
}
+/*
+ * YANG-validate the candidate and, on success, compute the diff of
+ * candidate vs running into 'changes' (caller must free via
+ * nb_config_diff_del_changes). 'context' is currently unused but kept
+ * for interface symmetry with the other validate entry points.
+ *
+ * Returns NB_OK, or NB_ERR_VALIDATION on a validation failure.
+ */
+int nb_candidate_diff_and_validate_yang(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes,
+ char *errmsg, size_t errmsg_len)
+{
+ /*
+ * Pass the real buffer length; the previous sizeof(errmsg_len)
+ * truncated the usable error buffer to sizeof(size_t) bytes.
+ */
+ if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
+ != NB_OK)
+ return NB_ERR_VALIDATION;
+
+ RB_INIT(nb_config_cbs, changes);
+ nb_config_diff(running_config, candidate, changes);
+
+ return NB_OK;
+}
+
int nb_candidate_validate(struct nb_context *context,
struct nb_config *candidate, char *errmsg,
size_t errmsg_len)
struct nb_config_cbs changes;
int ret;
- if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len) != NB_OK)
- return NB_ERR_VALIDATION;
+ ret = nb_candidate_diff_and_validate_yang(context, candidate, &changes,
+ errmsg, errmsg_len);
+ if (ret != NB_OK)
+ return ret;
- RB_INIT(nb_config_cbs, &changes);
- nb_config_diff(running_config, candidate, &changes);
ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
errmsg_len);
nb_config_diff_del_changes(&changes);
struct nb_config *candidate,
const char *comment,
struct nb_transaction **transaction,
+ bool skip_validate, bool ignore_zero_change,
char *errmsg, size_t errmsg_len)
{
struct nb_config_cbs changes;
- if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
- != NB_OK) {
+ if (!skip_validate
+ && nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
+ != NB_OK) {
flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
"%s: failed to validate candidate configuration",
__func__);
RB_INIT(nb_config_cbs, &changes);
nb_config_diff(running_config, candidate, &changes);
- if (RB_EMPTY(nb_config_cbs, &changes)) {
+ if (!ignore_zero_change && RB_EMPTY(nb_config_cbs, &changes)) {
snprintf(
errmsg, errmsg_len,
"No changes to apply were found during preparation phase");
return NB_ERR_NO_CHANGES;
}
- if (nb_candidate_validate_code(context, candidate, &changes, errmsg,
- errmsg_len)
- != NB_OK) {
+ if (!skip_validate
+ && nb_candidate_validate_code(context, candidate, &changes, errmsg,
+ errmsg_len)
+ != NB_OK) {
flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
"%s: failed to validate candidate configuration",
__func__);
return NB_ERR_VALIDATION;
}
- *transaction = nb_transaction_new(context, candidate, &changes, comment,
- errmsg, errmsg_len);
+ /*
+ * Re-use an existing transaction if provided. Else allocate a new one.
+ */
+ if (!*transaction)
+ *transaction = nb_transaction_new(context, candidate, &changes,
+ comment, errmsg, errmsg_len);
if (*transaction == NULL) {
flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
"%s: failed to create transaction: %s", __func__,
int ret;
ret = nb_candidate_commit_prepare(context, candidate, comment,
- &transaction, errmsg, errmsg_len);
+ &transaction, false, false, errmsg,
+ errmsg_len);
/*
* Apply the changes if the preparation phase succeeded. Otherwise abort
* the transaction.
struct vty;
struct debug;
+/* Identifier of one node (tag) along a YANG XPath: namespace + node id. */
+struct nb_yang_xpath_tag {
+	uint32_t ns;
+	uint32_t id;
+};
+
+/* A single YANG data value together with its libyang type and flags. */
+struct nb_yang_value {
+	struct lyd_value value;
+	LY_DATA_TYPE value_type;
+	uint8_t value_flags;
+};
+
+/* One XPath element: the node tag plus its (key) value. */
+struct nb_yang_xpath_elem {
+	struct nb_yang_xpath_tag tag;
+	struct nb_yang_value val;
+};
+
+#define NB_MAX_NUM_KEYS UINT8_MAX
+#define NB_MAX_NUM_XPATH_TAGS UINT8_MAX
+
+/*
+ * Fully-expanded binary form of a YANG XPath.
+ *
+ * NOTE(review): with both bounds at UINT8_MAX this struct embeds up to
+ * 255*255 = 65025 'nb_yang_xpath_elem' entries — far too large for stack
+ * allocation; heap-allocate instances. Verify the bounds are intentional.
+ */
+struct nb_yang_xpath {
+	uint8_t length;
+	struct {
+		uint8_t num_keys;
+		struct nb_yang_xpath_elem keys[NB_MAX_NUM_KEYS];
+	} tags[NB_MAX_NUM_XPATH_TAGS];
+};
+
+
+/*
+ * Return a pointer to key '__indx2' of tag '__indx1' inside '__xpath', or
+ * NULL when either index is out of range.
+ *
+ * Fix: 'struct nb_yang_xpath' declares the tag count as 'length'; the
+ * previous reference to a nonexistent 'num_tags' member could never
+ * compile. Arguments are now parenthesized against operator-precedence
+ * surprises. NOTE: '__xpath' is evaluated multiple times — pass a
+ * side-effect-free expression.
+ */
+#define NB_YANG_XPATH_KEY(__xpath, __indx1, __indx2)                           \
+	(((__xpath)->length > (__indx1))                                       \
+		 && ((__xpath)->tags[__indx1].num_keys > (__indx2))            \
+		 ? &(__xpath)->tags[__indx1].keys[__indx2]                     \
+		 : NULL)
+
/* Northbound events. */
enum nb_event {
/*
NB_CLIENT_SYSREPO,
NB_CLIENT_GRPC,
NB_CLIENT_PCEP,
+ NB_CLIENT_MGMTD_SERVER,
+ NB_CLIENT_MGMTD_BCKND,
};
/* Northbound context. */
#endif
};
-/* Northbound configuration. */
-struct nb_config {
- struct lyd_node *dnode;
- uint32_t version;
-};
-
/* Northbound configuration callback. */
struct nb_config_cb {
RB_ENTRY(nb_config_cb) entry;
struct nb_config_cbs changes;
};
+/* Northbound configuration. */
+struct nb_config {
+ struct lyd_node *dnode;
+ uint32_t version;
+ struct nb_config_cbs cfg_chgs;
+};
+
/* Callback function used by nb_oper_data_iterate(). */
typedef int (*nb_oper_data_cb)(const struct lysc_node *snode,
struct yang_translator *translator,
const struct yang_data *previous,
const struct yang_data *data);
+extern void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
+ struct nb_config_cbs *changes);
+
/*
* Check if a candidate configuration is outdated and needs to be updated.
*
*/
extern bool nb_candidate_needs_update(const struct nb_config *candidate);
+extern void nb_candidate_edit_config_changes(
+ struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
+ size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
+ int xpath_index, char *err_buf, int err_bufsize, bool *error);
+
+extern void nb_config_diff_del_changes(struct nb_config_cbs *changes);
+
+extern int nb_candidate_diff_and_validate_yang(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes,
+ char *errmsg, size_t errmsg_len);
+
+extern void nb_config_diff(const struct nb_config *reference,
+ const struct nb_config *incremental,
+ struct nb_config_cbs *changes);
+
+extern int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
+ size_t errmsg_len);
+
+extern int nb_candidate_validate_code(struct nb_context *context,
+ struct nb_config *candidate,
+ struct nb_config_cbs *changes,
+ char *errmsg, size_t errmsg_len);
+
/*
* Update a candidate configuration by rebasing the changes on top of the latest
* running configuration. Resolve conflicts automatically by giving preference
struct nb_config *candidate,
const char *comment,
struct nb_transaction **transaction,
- char *errmsg, size_t errmsg_len);
+ bool skip_validate,
+ bool ignore_zero_change, char *errmsg,
+ size_t errmsg_len);
/*
* Abort a previously created configuration transaction, releasing all resources
bool clear_pending)
{
bool error = false;
-
- if (xpath_base == NULL)
- xpath_base = "";
+ char buf[BUFSIZ];
VTY_CHECK_XPATH;
- /* Edit candidate configuration. */
- for (size_t i = 0; i < vty->num_cfg_changes; i++) {
- struct nb_cfg_change *change = &vty->cfg_changes[i];
- struct nb_node *nb_node;
- char xpath[XPATH_MAXLEN];
- struct yang_data *data;
- int ret;
-
- /* Handle relative XPaths. */
- memset(xpath, 0, sizeof(xpath));
- if (vty->xpath_index > 0
- && (xpath_base[0] == '.' || change->xpath[0] == '.'))
- strlcpy(xpath, VTY_CURR_XPATH, sizeof(xpath));
- if (xpath_base[0]) {
- if (xpath_base[0] == '.')
- strlcat(xpath, xpath_base + 1, sizeof(xpath));
- else
- strlcat(xpath, xpath_base, sizeof(xpath));
- }
- if (change->xpath[0] == '.')
- strlcat(xpath, change->xpath + 1, sizeof(xpath));
- else
- strlcpy(xpath, change->xpath, sizeof(xpath));
-
- /* Find the northbound node associated to the data path. */
- nb_node = nb_node_find(xpath);
- if (!nb_node) {
- flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
- "%s: unknown data path: %s", __func__, xpath);
- error = true;
- continue;
- }
-
- /* If the value is not set, get the default if it exists. */
- if (change->value == NULL)
- change->value = yang_snode_get_default(nb_node->snode);
- data = yang_data_new(xpath, change->value);
-
- /*
- * Ignore "not found" errors when editing the candidate
- * configuration.
- */
- ret = nb_candidate_edit(vty->candidate_config, nb_node,
- change->operation, xpath, NULL, data);
- yang_data_free(data);
- if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
- flog_warn(
- EC_LIB_NB_CANDIDATE_EDIT_ERROR,
- "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
- __func__, nb_operation_name(change->operation),
- xpath);
- error = true;
- continue;
- }
- }
-
+ nb_candidate_edit_config_changes(
+ vty->candidate_config, vty->cfg_changes, vty->num_cfg_changes,
+ xpath_base, VTY_CURR_XPATH, vty->xpath_index, buf, sizeof(buf),
+ &error);
if (error) {
- char buf[BUFSIZ];
-
/*
* Failure to edit the candidate configuration should never
* happen in practice, unless there's a bug in the code. When
* that happens, log the error but otherwise ignore it.
*/
- vty_out(vty, "%% Failed to edit configuration.\n\n");
- vty_out(vty, "%s",
- yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
+ vty_out(vty, "%s", buf);
}
/*
transaction = NULL;
context.client = NB_CLIENT_CONFD;
ret = nb_candidate_commit_prepare(&context, candidate, NULL,
- &transaction, errmsg, sizeof(errmsg));
+ &transaction, false, false, errmsg,
+ sizeof(errmsg));
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES) {
enum confd_errcode errcode;
grpc_debug("`-> Performing PREPARE");
ret = nb_candidate_commit_prepare(
&context, candidate->config, comment.c_str(),
- &candidate->transaction, errmsg, sizeof(errmsg));
+ &candidate->transaction, false, false, errmsg,
+ sizeof(errmsg));
break;
case frr::CommitRequest::ABORT:
grpc_debug("`-> Performing ABORT");
* required to apply them.
*/
ret = nb_candidate_commit_prepare(&context, candidate, NULL,
- &transaction, errmsg, sizeof(errmsg));
+ &transaction, false, false, errmsg,
+ sizeof(errmsg));
if (ret != NB_OK && ret != NB_ERR_NO_CHANGES)
flog_warn(
EC_LIB_LIBSYSREPO,
lib/northbound.c \
lib/northbound_cli.c \
lib/northbound_db.c \
+ lib/mgmt_bcknd_client.c \
lib/mgmt_frntnd_client.c \
lib/ntop.c \
lib/openbsd-tree.c \
lib/buffer.h \
lib/checksum.h \
lib/mlag.h \
+ lib/mgmt_bcknd_client.h \
lib/mgmt_frntnd_client.h \
lib/mgmt.pb-c.h \
lib/mgmt_pb.h \
#include <zebra.h>
#include "mgmtd/mgmt.h"
#include "mgmtd/mgmt_vty.h"
+#include "mgmtd/mgmt_bcknd_server.h"
+#include "mgmtd/mgmt_bcknd_adapter.h"
#include "mgmtd/mgmt_frntnd_server.h"
#include "mgmtd/mgmt_frntnd_adapter.h"
#include "mgmtd/mgmt_db.h"
/* Initialize databases */
mgmt_db_init(mm);
+ /* Initialize the MGMTD Backend Adapter Module */
+ mgmt_bcknd_adapter_init(mm->master);
+
/* Initialize the MGMTD Frontend Adapter Module */
mgmt_frntnd_adapter_init(mm->master, mm);
+ /* Start the MGMTD Backend Server for clients to connect */
+ mgmt_bcknd_server_init(mm->master);
+
/* Start the MGMTD Frontend Server for clients to connect */
mgmt_frntnd_server_init(mm->master);
{
mgmt_frntnd_server_destroy();
mgmt_frntnd_adapter_destroy();
+ mgmt_bcknd_server_destroy();
+ mgmt_bcknd_adapter_destroy();
mgmt_db_destroy();
}
--- /dev/null
+/*
+ * MGMTD Backend Client Connection Adapter
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "thread.h"
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmt_bcknd_client.h"
+#include "mgmtd/mgmt_bcknd_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BCKND_ADPTR_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BCKND_ADPTR_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BCKND_ADPTR_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_bcknd) \
+ zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_BCKND_ADPTR_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define FOREACH_ADPTR_IN_LIST(adptr) \
+ frr_each_safe(mgmt_bcknd_adptr_list, &mgmt_bcknd_adptrs, (adptr))
+
+/*
+ * Static mapping of YANG XPath regular expressions and
+ * the corresponding interested backend clients.
+ * NOTE: Thiis is a static mapping defined by all MGMTD
+ * backend client modules (for now, till we develop a
+ * more dynamic way of creating and updating this map).
+ * A running map is created by MGMTD in run-time to
+ * handle real-time mapping of YANG xpaths to one or
+ * more interested backend client adapters.
+ *
+ * Please see xpath_map_reg[] in lib/mgmt_bcknd_client.c
+ * for the actual map
+ */
+struct mgmt_bcknd_xpath_map_reg {
+	const char *xpath_regexp; /* Longest matching regular expression */
+	uint8_t num_clients;	  /* Number of clients */
+
+	const char *bcknd_clients
+		[MGMTD_BCKND_MAX_CLIENTS_PER_XPATH_REG]; /* List of clients */
+};
+
+/* Run-time map entry: xpath regexp -> per-client subscription bitmap. */
+struct mgmt_bcknd_xpath_regexp_map {
+	const char *xpath_regexp;
+	struct mgmt_bcknd_client_subscr_info bcknd_subscrs;
+};
+
+/* Context passed around when collecting config changes for one adapter. */
+struct mgmt_bcknd_get_adptr_config_params {
+	struct mgmt_bcknd_client_adapter *adptr;
+	struct nb_config_cbs *cfg_chgs;
+	uint32_t seq; /* running sequence number for the collected changes */
+};
+
+/*
+ * Static mapping of YANG XPath regular expressions and
+ * the corresponding interested backend clients.
+ * NOTE: Thiis is a static mapping defined by all MGMTD
+ * backend client modules (for now, till we develop a
+ * more dynamic way of creating and updating this map).
+ * A running map is created by MGMTD in run-time to
+ * handle real-time mapping of YANG xpaths to one or
+ * more interested backend client adapters.
+ */
+static const struct mgmt_bcknd_xpath_map_reg xpath_static_map_reg[] = {
+ {.xpath_regexp = "/frr-vrf:lib/*",
+ .num_clients = 2,
+ .bcknd_clients = {MGMTD_BCKND_CLIENT_STATICD,
+ MGMTD_BCKND_CLIENT_BGPD}
+ },
+ {.xpath_regexp = "/frr-interface:lib/*",
+ .num_clients = 2,
+ .bcknd_clients = {MGMTD_BCKND_CLIENT_STATICD,
+ MGMTD_BCKND_CLIENT_BGPD}
+ },
+ {.xpath_regexp =
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/*",
+ .num_clients = 1,
+ .bcknd_clients = {MGMTD_BCKND_CLIENT_STATICD}
+ },
+ {.xpath_regexp =
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-bgp:bgp'][name='bgp'][vrf='default']/frr-bgp:bgp/*",
+ .num_clients = 1,
+ .bcknd_clients = {MGMTD_BCKND_CLIENT_BGPD}
+ }
+};
+
+#define MGMTD_BCKND_MAX_NUM_XPATH_MAP 256
+static struct mgmt_bcknd_xpath_regexp_map
+ mgmt_xpath_map[MGMTD_BCKND_MAX_NUM_XPATH_MAP];
+static int mgmt_num_xpath_maps;
+
+static struct thread_master *mgmt_bcknd_adptr_tm;
+
+static struct mgmt_bcknd_adptr_list_head mgmt_bcknd_adptrs;
+
+static struct mgmt_bcknd_client_adapter
+ *mgmt_bcknd_adptrs_by_id[MGMTD_BCKND_CLIENT_ID_MAX];
+
+/* Forward declarations */
+static void
+mgmt_bcknd_adptr_register_event(struct mgmt_bcknd_client_adapter *adptr,
+ enum mgmt_bcknd_event event);
+
+/*
+ * Look up the backend adapter owning connection fd 'conn_fd'.
+ * Returns NULL if no adapter is using that fd.
+ */
+static struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_find_adapter_by_fd(int conn_fd)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	FOREACH_ADPTR_IN_LIST (adptr) {
+		if (adptr->conn_fd == conn_fd)
+			return adptr;
+	}
+
+	return NULL;
+}
+
+/*
+ * Look up a backend adapter by client name.
+ * Comparison is bounded by sizeof(adptr->name), so only the first
+ * sizeof(adptr->name) bytes of 'name' are significant.
+ * Returns NULL if no adapter matches.
+ */
+static struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_find_adapter_by_name(const char *name)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	FOREACH_ADPTR_IN_LIST (adptr) {
+		if (!strncmp(adptr->name, name, sizeof(adptr->name)))
+			return adptr;
+	}
+
+	return NULL;
+}
+
+/*
+ * Drop the module's reference on every registered adapter at shutdown.
+ * FOREACH_ADPTR_IN_LIST is the _safe iterator, so it is OK if the
+ * unlock ends up freeing the current node — presumably it does when the
+ * last reference goes away (TODO: confirm against adapter lock/unlock).
+ */
+static void
+mgmt_bcknd_cleanup_adapters(void)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	FOREACH_ADPTR_IN_LIST (adptr)
+		mgmt_bcknd_adapter_unlock(&adptr);
+}
+
+/*
+ * Build the run-time XPath-regexp -> backend-client subscription map
+ * (mgmt_xpath_map) from the static registration table
+ * xpath_static_map_reg[]. Called once at adapter-module init.
+ */
+static void mgmt_bcknd_xpath_map_init(void)
+{
+	int indx, num_xpath_maps;
+	uint16_t indx1;
+	enum mgmt_bcknd_client_id id;
+
+	MGMTD_BCKND_ADPTR_DBG("Init XPath Maps");
+
+	num_xpath_maps = (int)array_size(xpath_static_map_reg);
+	for (indx = 0; indx < num_xpath_maps; indx++) {
+		MGMTD_BCKND_ADPTR_DBG(" - XPATH: '%s'",
+				      xpath_static_map_reg[indx].xpath_regexp);
+		mgmt_xpath_map[indx].xpath_regexp =
+			xpath_static_map_reg[indx].xpath_regexp;
+		for (indx1 = 0; indx1 < xpath_static_map_reg[indx].num_clients;
+		     indx1++) {
+			id = mgmt_bknd_client_name2id(
+				xpath_static_map_reg[indx]
+					.bcknd_clients[indx1]);
+			MGMTD_BCKND_ADPTR_DBG(
+				" -- Client: '%s' --> Id: %u",
+				xpath_static_map_reg[indx].bcknd_clients[indx1],
+				id);
+			/* Silently skip names that resolve to no valid id. */
+			if (id < MGMTD_BCKND_CLIENT_ID_MAX) {
+				mgmt_xpath_map[indx]
+					.bcknd_subscrs.xpath_subscr[id]
+					.validate_config = 1;
+				mgmt_xpath_map[indx]
+					.bcknd_subscrs.xpath_subscr[id]
+					.notify_config = 1;
+				mgmt_xpath_map[indx]
+					.bcknd_subscrs.xpath_subscr[id]
+					.own_oper_data = 1;
+			}
+		}
+	}
+
+	mgmt_num_xpath_maps = indx;
+	/* mgmt_num_xpath_maps is a signed int: use %d, not %u. */
+	MGMTD_BCKND_ADPTR_DBG("Total XPath Maps: %d", mgmt_num_xpath_maps);
+}
+
+/*
+ * Evaluate how well 'xpath' matches the registration pattern
+ * 'xpath_regexp', where a '*' directly after a '/' or '\'' delimiter acts
+ * as a wildcard up to the next occurrence of that delimiter.
+ *
+ * Returns the number of fully-matched, delimiter-separated tokens
+ * (0 == no match); callers can pick the map entry with the largest
+ * return value as the longest match.
+ *
+ * NOTE(review): when a '*' occurs at index 0 of either string, the
+ * look-behind reads index -1 (out of bounds). The registration table's
+ * patterns never start with '*', but external xpaths should be checked —
+ * TODO confirm callers can never pass a leading-'*' xpath.
+ */
+static int mgmt_bcknd_eval_regexp_match(const char *xpath_regexp,
+					const char *xpath)
+{
+	int match_len = 0, re_indx = 0, xp_indx = 0;
+	int rexp_len, xpath_len;
+	bool match = true, re_wild = false, xp_wild = false;
+	bool delim = false, enter_wild_match = false;
+	char wild_delim = 0;
+
+	rexp_len = strlen(xpath_regexp);
+	xpath_len = strlen(xpath);
+
+	/*
+	 * Remove the trailing wildcard from the regexp and Xpath.
+	 */
+	if (rexp_len && xpath_regexp[rexp_len-1] == '*')
+		rexp_len--;
+	if (xpath_len && xpath[xpath_len-1] == '*')
+		xpath_len--;
+
+	if (!rexp_len || !xpath_len)
+		return 0;
+
+	for (re_indx = 0, xp_indx = 0;
+	     match && re_indx < rexp_len && xp_indx < xpath_len;) {
+		match = (xpath_regexp[re_indx] == xpath[xp_indx]);
+
+		/*
+		 * Check if we need to enter wildcard matching.
+		 */
+		if (!enter_wild_match && !match &&
+			(xpath_regexp[re_indx] == '*'
+			 || xpath[xp_indx] == '*')) {
+			/*
+			 * Found wildcard
+			 */
+			enter_wild_match =
+				(xpath_regexp[re_indx-1] == '/'
+				 || xpath_regexp[re_indx-1] == '\''
+				 || xpath[xp_indx-1] == '/'
+				 || xpath[xp_indx-1] == '\'');
+			if (enter_wild_match) {
+				if (xpath_regexp[re_indx] == '*') {
+					/*
+					 * Begin RE wildcard match.
+					 */
+					re_wild = true;
+					wild_delim = xpath_regexp[re_indx-1];
+				} else if (xpath[xp_indx] == '*') {
+					/*
+					 * Begin XP wildcard match.
+					 */
+					xp_wild = true;
+					wild_delim = xpath[xp_indx-1];
+				}
+			}
+		}
+
+		/*
+		 * Check if we need to exit wildcard matching.
+		 */
+		if (enter_wild_match) {
+			if (re_wild && xpath[xp_indx] == wild_delim) {
+				/*
+				 * End RE wildcard matching.
+				 */
+				re_wild = false;
+				if (re_indx < rexp_len-1)
+					re_indx++;
+				enter_wild_match = false;
+			} else if (xp_wild
+				   && xpath_regexp[re_indx] == wild_delim) {
+				/*
+				 * End XP wildcard matching.
+				 */
+				xp_wild = false;
+				if (xp_indx < xpath_len-1)
+					xp_indx++;
+				enter_wild_match = false;
+			}
+		}
+
+		/* While inside a wildcard run, any character matches. */
+		match = (xp_wild || re_wild
+			 || xpath_regexp[re_indx] == xpath[xp_indx]);
+
+		/*
+		 * Check if we found a delimiter in both the Xpaths
+		 */
+		if ((xpath_regexp[re_indx] == '/'
+		     && xpath[xp_indx] == '/')
+		    || (xpath_regexp[re_indx] == ']'
+			&& xpath[xp_indx] == ']')
+		    || (xpath_regexp[re_indx] == '['
+			&& xpath[xp_indx] == '[')) {
+			/*
+			 * Increment the match count if we have a
+			 * new delimiter.
+			 */
+			if (match && re_indx && xp_indx && !delim)
+				match_len++;
+			delim = true;
+		} else {
+			delim = false;
+		}
+
+		/*
+		 * Proceed to the next character in the RE/XP string as
+		 * necessary.
+		 */
+		if (!re_wild)
+			re_indx++;
+		if (!xp_wild)
+			xp_indx++;
+	}
+
+	/*
+	 * If we finished matching and the last token was a full match
+	 * increment the match count appropriately.
+	 */
+	if (match && !delim &&
+		(xpath_regexp[re_indx] == '/'
+		 || xpath_regexp[re_indx] == ']'))
+		match_len++;
+
+	return match_len;
+}
+
+/*
+ * Tear down one adapter connection: close the socket, clear the by-id
+ * slot, remove it from the adapter list and drop the list's reference
+ * (which may free 'adptr' — callers must not use the pointer afterwards).
+ *
+ * NOTE(review): fd 0 is used as the "not connected" sentinel; a genuine
+ * fd 0 would be mishandled — consider -1 as the sentinel instead.
+ */
+static void
+mgmt_bcknd_adapter_disconnect(struct mgmt_bcknd_client_adapter *adptr)
+{
+	if (adptr->conn_fd) {
+		close(adptr->conn_fd);
+		adptr->conn_fd = 0;
+	}
+
+	/*
+	 * TODO: Notify about client disconnect for appropriate cleanup
+	 * mgmt_trxn_notify_bcknd_adapter_conn(adptr, false);
+	 */
+
+	if (adptr->id < MGMTD_BCKND_CLIENT_ID_MAX) {
+		mgmt_bcknd_adptrs_by_id[adptr->id] = NULL;
+		adptr->id = MGMTD_BCKND_CLIENT_ID_MAX;
+	}
+
+	mgmt_bcknd_adptr_list_del(&mgmt_bcknd_adptrs, adptr);
+
+	mgmt_bcknd_adapter_unlock(&adptr);
+}
+
+/*
+ * A client that reconnects shows up as a second adapter with the same
+ * name. Scan the list and disconnect any other adapter carrying
+ * 'adptr's name (the zombie from the previous connection). The _safe
+ * iterator makes deleting the current node during iteration legal.
+ */
+static void
+mgmt_bcknd_adapter_cleanup_old_conn(struct mgmt_bcknd_client_adapter *adptr)
+{
+	struct mgmt_bcknd_client_adapter *old;
+
+	FOREACH_ADPTR_IN_LIST (old) {
+		if (old != adptr
+		    && !strncmp(adptr->name, old->name, sizeof(adptr->name))) {
+			/*
+			 * We have a Zombie lingering around
+			 */
+			MGMTD_BCKND_ADPTR_DBG(
+				"Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+				adptr->name, adptr->conn_fd, old->conn_fd);
+			mgmt_bcknd_adapter_disconnect(old);
+		}
+	}
+}
+
+/*
+ * Dispatch one decoded protobuf message received from a backend client.
+ * SUBSCR_REQ is handled inline (adapter naming/registration); the various
+ * *_REPLY messages are logged and will be forwarded to the transaction
+ * module once it exists (see TODOs). Always returns 0.
+ */
+static int
+mgmt_bcknd_adapter_handle_msg(struct mgmt_bcknd_client_adapter *adptr,
+			      Mgmtd__BckndMessage *bcknd_msg)
+{
+	switch (bcknd_msg->message_case) {
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SUBSCR_REQ:
+		MGMTD_BCKND_ADPTR_DBG(
+			"Got Subscribe Req Msg from '%s' to %sregister %u xpaths",
+			bcknd_msg->subscr_req->client_name,
+			!bcknd_msg->subscr_req->subscribe_xpaths
+					&& bcknd_msg->subscr_req->n_xpath_reg
+				? "de"
+				: "",
+			(uint32_t)bcknd_msg->subscr_req->n_xpath_reg);
+
+		if (strlen(bcknd_msg->subscr_req->client_name)) {
+			strlcpy(adptr->name, bcknd_msg->subscr_req->client_name,
+				sizeof(adptr->name));
+			adptr->id = mgmt_bknd_client_name2id(adptr->name);
+			if (adptr->id >= MGMTD_BCKND_CLIENT_ID_MAX) {
+				MGMTD_BCKND_ADPTR_ERR(
+					"Unable to resolve adapter '%s' to a valid ID. Disconnecting!",
+					adptr->name);
+				mgmt_bcknd_adapter_disconnect(adptr);
+				/*
+				 * Fix: stop here. The disconnect may have
+				 * freed 'adptr', and indexing
+				 * mgmt_bcknd_adptrs_by_id[] with an id ==
+				 * MGMTD_BCKND_CLIENT_ID_MAX would write past
+				 * the end of the array.
+				 */
+				break;
+			}
+			mgmt_bcknd_adptrs_by_id[adptr->id] = adptr;
+			mgmt_bcknd_adapter_cleanup_old_conn(adptr);
+		}
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_TRXN_REPLY:
+		MGMTD_BCKND_ADPTR_DBG(
+			"Got %s TRXN_REPLY Msg for Trxn-Id 0x%llx from '%s' with '%s'",
+			bcknd_msg->trxn_reply->create ? "Create" : "Delete",
+			(unsigned long long)bcknd_msg->trxn_reply->trxn_id,
+			adptr->name,
+			bcknd_msg->trxn_reply->success ? "success" : "failure");
+		/*
+		 * TODO: Forward the TRXN_REPLY to trxn module.
+		 * mgmt_trxn_notify_bcknd_trxn_reply(
+		 *	bcknd_msg->trxn_reply->trxn_id,
+		 *	bcknd_msg->trxn_reply->create,
+		 *	bcknd_msg->trxn_reply->success, adptr);
+		 */
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+		MGMTD_BCKND_ADPTR_DBG(
+			"Got CFGDATA_REPLY Msg from '%s' for Trxn-Id 0x%llx Batch-Id 0x%llx with Err:'%s'",
+			adptr->name,
+			(unsigned long long)bcknd_msg->cfg_data_reply->trxn_id,
+			(unsigned long long)bcknd_msg->cfg_data_reply->batch_id,
+			bcknd_msg->cfg_data_reply->error_if_any
+				? bcknd_msg->cfg_data_reply->error_if_any
+				: "None");
+		/*
+		 * TODO: Forward the CGFData-create reply to trxn module.
+		 * mgmt_trxn_notify_bcknd_cfgdata_reply(
+		 *	bcknd_msg->cfg_data_reply->trxn_id,
+		 *	bcknd_msg->cfg_data_reply->batch_id,
+		 *	bcknd_msg->cfg_data_reply->success,
+		 *	bcknd_msg->cfg_data_reply->error_if_any, adptr);
+		 */
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_VALIDATE_REPLY:
+		MGMTD_BCKND_ADPTR_DBG(
+			"Got %s CFG_VALIDATE_REPLY Msg from '%s' for Trxn-Id 0x%llx for %d batches (Id 0x%llx-0x%llx), Err:'%s'",
+			bcknd_msg->cfg_validate_reply->success ? "successful"
+							       : "failed",
+			adptr->name,
+			(unsigned long long)
+				bcknd_msg->cfg_validate_reply->trxn_id,
+			(int)bcknd_msg->cfg_validate_reply->n_batch_ids,
+			(unsigned long long)
+				bcknd_msg->cfg_validate_reply->batch_ids[0],
+			(unsigned long long)bcknd_msg->cfg_validate_reply
+				->batch_ids[bcknd_msg->cfg_validate_reply
+						    ->n_batch_ids
+					    - 1],
+			bcknd_msg->cfg_validate_reply->error_if_any
+				? bcknd_msg->cfg_validate_reply->error_if_any
+				: "None");
+		/*
+		 * TODO: Forward the CGFData-validate reply to trxn module.
+		 * mgmt_trxn_notify_bcknd_cfg_validate_reply(
+		 *	bcknd_msg->cfg_validate_reply->trxn_id,
+		 *	bcknd_msg->cfg_validate_reply->success,
+		 *	(uint64_t *)bcknd_msg->cfg_validate_reply->batch_ids,
+		 *	bcknd_msg->cfg_validate_reply->n_batch_ids,
+		 *	bcknd_msg->cfg_validate_reply->error_if_any, adptr);
+		 */
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+		MGMTD_BCKND_ADPTR_DBG(
+			"Got %s CFG_APPLY_REPLY Msg from '%s' for Trxn-Id 0x%llx for %d batches (Id 0x%llx-0x%llx), Err:'%s'",
+			bcknd_msg->cfg_apply_reply->success ? "successful"
+							    : "failed",
+			adptr->name,
+			(unsigned long long)
+				bcknd_msg->cfg_apply_reply->trxn_id,
+			(int)bcknd_msg->cfg_apply_reply->n_batch_ids,
+			(unsigned long long)
+				bcknd_msg->cfg_apply_reply->batch_ids[0],
+			(unsigned long long)bcknd_msg->cfg_apply_reply
+				->batch_ids[bcknd_msg->cfg_apply_reply
+						    ->n_batch_ids
+					    - 1],
+			bcknd_msg->cfg_apply_reply->error_if_any
+				? bcknd_msg->cfg_apply_reply->error_if_any
+				: "None");
+		/* TODO: Forward the CGFData-apply reply to trxn module.
+		 * mgmt_trxn_notify_bcknd_cfg_apply_reply(
+		 *	bcknd_msg->cfg_apply_reply->trxn_id,
+		 *	bcknd_msg->cfg_apply_reply->success,
+		 *	(uint64_t *)bcknd_msg->cfg_apply_reply->batch_ids,
+		 *	bcknd_msg->cfg_apply_reply->n_batch_ids,
+		 *	bcknd_msg->cfg_apply_reply->error_if_any, adptr);
+		 */
+		break;
+	case MGMTD__BCKND_MESSAGE__MESSAGE_GET_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_CMD_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_NOTIFY_DATA:
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from MGMTD to
+	 * Backend clients only and/or need not be handled on MGMTd.
+	 */
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SUBSCR_REPLY:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_GET_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_TRXN_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_DATA_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_VALIDATE_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_CFG_CMD_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+	case MGMTD__BCKND_MESSAGE__MESSAGE__NOT_SET:
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/* Schedule a socket-write event unless writes are currently paused. */
+static inline void
+mgmt_bcknd_adapter_sched_msg_write(struct mgmt_bcknd_client_adapter *adptr)
+{
+	if (!CHECK_FLAG(adptr->flags, MGMTD_BCKND_ADPTR_FLAGS_WRITES_OFF))
+		mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_CONN_WRITE);
+}
+
+/*
+ * Resume writes for this adapter and, if output is already queued
+ * (partial work buffer or fifo contents), reschedule the write event.
+ */
+static inline void
+mgmt_bcknd_adapter_writes_on(struct mgmt_bcknd_client_adapter *adptr)
+{
+	MGMTD_BCKND_ADPTR_DBG("Resume writing msgs for '%s'", adptr->name);
+	UNSET_FLAG(adptr->flags, MGMTD_BCKND_ADPTR_FLAGS_WRITES_OFF);
+	if (adptr->obuf_work || stream_fifo_count_safe(adptr->obuf_fifo))
+		mgmt_bcknd_adapter_sched_msg_write(adptr);
+}
+
+/* Pause writes; queued messages stay buffered until writes_on(). */
+static inline void
+mgmt_bcknd_adapter_writes_off(struct mgmt_bcknd_client_adapter *adptr)
+{
+	SET_FLAG(adptr->flags, MGMTD_BCKND_ADPTR_FLAGS_WRITES_OFF);
+	MGMTD_BCKND_ADPTR_DBG("Pause writing msgs for '%s'", adptr->name);
+}
+
+/*
+ * Frame and enqueue one protobuf message for the backend client:
+ * [marker | 16-bit length | packed protobuf payload] is written into the
+ * current output stream (spilled to the output fifo when full), then a
+ * write event is scheduled.
+ *
+ * Returns 0 on success, -1 when there is no connection or the packed
+ * message would exceed MGMTD_BCKND_MSG_MAX_LEN.
+ */
+static int mgmt_bcknd_adapter_send_msg(struct mgmt_bcknd_client_adapter *adptr,
+				       Mgmtd__BckndMessage *bcknd_msg)
+{
+	size_t msg_size;
+	uint8_t *msg_buf = adptr->msg_buf;
+	struct mgmt_bcknd_msg *msg;
+
+	if (adptr->conn_fd == 0)
+		return -1;
+
+	msg_size = mgmtd__bcknd_message__get_packed_size(bcknd_msg);
+	msg_size += MGMTD_BCKND_MSG_HDR_LEN;
+	if (msg_size > MGMTD_BCKND_MSG_MAX_LEN) {
+		MGMTD_BCKND_ADPTR_ERR(
+			"Message size %d more than max size'%d. Not sending!'",
+			(int)msg_size, (int)MGMTD_BCKND_MSG_MAX_LEN);
+		return -1;
+	}
+
+	msg = (struct mgmt_bcknd_msg *)msg_buf;
+	msg->hdr.marker = MGMTD_BCKND_MSG_MARKER;
+	msg->hdr.len = (uint16_t)msg_size;
+	mgmtd__bcknd_message__pack(bcknd_msg, msg->payload);
+
+	/* Lazily (re)allocate the work stream; spill to fifo when full. */
+	if (!adptr->obuf_work)
+		adptr->obuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+	if (STREAM_WRITEABLE(adptr->obuf_work) < msg_size) {
+		stream_fifo_push(adptr->obuf_fifo, adptr->obuf_work);
+		adptr->obuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+	}
+	stream_write(adptr->obuf_work, (void *)msg_buf, msg_size);
+
+	mgmt_bcknd_adapter_sched_msg_write(adptr);
+	adptr->num_msg_tx++;
+	return 0;
+}
+
+/*
+ * Send a TRXN_REQ to the backend client asking it to create
+ * (create == true) or delete (create == false) transaction 'trxn_id'.
+ * Returns the result of mgmt_bcknd_adapter_send_msg() (0 / -1).
+ */
+static int mgmt_bcknd_send_trxn_req(struct mgmt_bcknd_client_adapter *adptr,
+				    uint64_t trxn_id, bool create)
+{
+	Mgmtd__BckndMessage bcknd_msg;
+	Mgmtd__BckndTrxnReq trxn_req;
+
+	mgmtd__bcknd_trxn_req__init(&trxn_req);
+	trxn_req.create = create;
+	trxn_req.trxn_id = trxn_id;
+
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_TRXN_REQ;
+	bcknd_msg.trxn_req = &trxn_req;
+
+	MGMTD_BCKND_ADPTR_DBG(
+		"Sending TRXN_REQ message to Backend client '%s' for Trxn-Id %llx",
+		adptr->name, (unsigned long long)trxn_id);
+
+	return mgmt_bcknd_adapter_send_msg(adptr, &bcknd_msg);
+}
+
+/*
+ * Send one CFGDATA_CREATE_REQ carrying 'num_reqs' config-data requests
+ * for batch 'batch_id' of transaction 'trxn_id'. 'end_of_data' marks the
+ * last batch of the transaction. The caller keeps ownership of
+ * 'cfgdata_reqs'; it is only borrowed for packing.
+ */
+static int
+mgmt_bcknd_send_cfgdata_create_req(struct mgmt_bcknd_client_adapter *adptr,
+				   uint64_t trxn_id, uint64_t batch_id,
+				   Mgmtd__YangCfgDataReq **cfgdata_reqs,
+				   size_t num_reqs, bool end_of_data)
+{
+	Mgmtd__BckndMessage bcknd_msg;
+	Mgmtd__BckndCfgDataCreateReq cfgdata_req;
+
+	mgmtd__bcknd_cfg_data_create_req__init(&cfgdata_req);
+	cfgdata_req.batch_id = batch_id;
+	cfgdata_req.trxn_id = trxn_id;
+	cfgdata_req.data_req = cfgdata_reqs;
+	cfgdata_req.n_data_req = num_reqs;
+	cfgdata_req.end_of_data = end_of_data;
+
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_CFG_DATA_REQ;
+	bcknd_msg.cfg_data_req = &cfgdata_req;
+
+	MGMTD_BCKND_ADPTR_DBG(
+		"Sending CFGDATA_CREATE_REQ message to Backend client '%s' for Trxn-Id %llx, Batch-Id: %llx",
+		adptr->name, (unsigned long long)trxn_id,
+		(unsigned long long)batch_id);
+
+	return mgmt_bcknd_adapter_send_msg(adptr, &bcknd_msg);
+}
+
+/*
+ * Send a CFG_VALIDATE_REQ for the given batches of transaction 'trxn_id'.
+ * NOTE(review): the debug log reads batch_ids[0] and
+ * batch_ids[num_batch_ids - 1]; callers must never pass
+ * num_batch_ids == 0 — TODO confirm all call sites guarantee this.
+ */
+static int
+mgmt_bcknd_send_cfgvalidate_req(struct mgmt_bcknd_client_adapter *adptr,
+				uint64_t trxn_id, uint64_t batch_ids[],
+				size_t num_batch_ids)
+{
+	Mgmtd__BckndMessage bcknd_msg;
+	Mgmtd__BckndCfgDataValidateReq vldt_req;
+
+	mgmtd__bcknd_cfg_data_validate_req__init(&vldt_req);
+	vldt_req.trxn_id = trxn_id;
+	vldt_req.batch_ids = (uint64_t *)batch_ids;
+	vldt_req.n_batch_ids = num_batch_ids;
+
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_CFG_VALIDATE_REQ;
+	bcknd_msg.cfg_validate_req = &vldt_req;
+
+	MGMTD_BCKND_ADPTR_DBG(
+		"Sending CFG_VALIDATE_REQ message to Backend client '%s' for Trxn-Id %llx, #Batches: %d [0x%llx - 0x%llx]",
+		adptr->name, (unsigned long long)trxn_id, (int)num_batch_ids,
+		(unsigned long long)batch_ids[0],
+		(unsigned long long)batch_ids[num_batch_ids - 1]);
+
+	return mgmt_bcknd_adapter_send_msg(adptr, &bcknd_msg);
+}
+
+/*
+ * Send a CFG_APPLY_REQ asking the backend client to apply all previously
+ * delivered (and validated) config batches of transaction 'trxn_id'.
+ */
+static int mgmt_bcknd_send_cfgapply_req(struct mgmt_bcknd_client_adapter *adptr,
+					uint64_t trxn_id)
+{
+	Mgmtd__BckndMessage bcknd_msg;
+	Mgmtd__BckndCfgDataApplyReq apply_req;
+
+	mgmtd__bcknd_cfg_data_apply_req__init(&apply_req);
+	apply_req.trxn_id = trxn_id;
+
+	mgmtd__bcknd_message__init(&bcknd_msg);
+	bcknd_msg.message_case = MGMTD__BCKND_MESSAGE__MESSAGE_CFG_APPLY_REQ;
+	bcknd_msg.cfg_apply_req = &apply_req;
+
+	MGMTD_BCKND_ADPTR_DBG(
+		"Sending CFG_APPLY_REQ message to Backend client '%s' for Trxn-Id 0x%llx",
+		adptr->name, (unsigned long long)trxn_id);
+
+	return mgmt_bcknd_adapter_send_msg(adptr, &bcknd_msg);
+}
+
+/*
+ * Walk 'bytes_read' bytes of 'msg_buf' and handle every complete framed
+ * message ([marker | len | protobuf payload]) found in it. Stops at a
+ * bad marker or a truncated message. Returns the number of messages
+ * successfully unpacked and dispatched.
+ *
+ * NOTE(review): the loop advances by msg->hdr.len each iteration; a
+ * marker-valid message claiming len < MGMTD_BCKND_MSG_HDR_LEN would stall
+ * the loop. The read path validates framing beforehand — TODO confirm it
+ * rejects such lengths.
+ */
+static uint16_t
+mgmt_bcknd_adapter_process_msg(struct mgmt_bcknd_client_adapter *adptr,
+			       uint8_t *msg_buf, uint16_t bytes_read)
+{
+	Mgmtd__BckndMessage *bcknd_msg;
+	struct mgmt_bcknd_msg *msg;
+	uint16_t bytes_left;
+	uint16_t processed = 0;
+
+	bytes_left = bytes_read;
+	for (; bytes_left > MGMTD_BCKND_MSG_HDR_LEN;
+	     bytes_left -= msg->hdr.len, msg_buf += msg->hdr.len) {
+		msg = (struct mgmt_bcknd_msg *)msg_buf;
+		if (msg->hdr.marker != MGMTD_BCKND_MSG_MARKER) {
+			MGMTD_BCKND_ADPTR_DBG(
+				"Marker not found in message from MGMTD Backend adapter '%s'",
+				adptr->name);
+			break;
+		}
+
+		if (bytes_left < msg->hdr.len) {
+			MGMTD_BCKND_ADPTR_DBG(
+				"Incomplete message of %d bytes (epxected: %u) from MGMTD Backend adapter '%s'",
+				bytes_left, msg->hdr.len, adptr->name);
+			break;
+		}
+
+		bcknd_msg = mgmtd__bcknd_message__unpack(
+			NULL, (size_t)(msg->hdr.len - MGMTD_BCKND_MSG_HDR_LEN),
+			msg->payload);
+		if (!bcknd_msg) {
+			/* Undecodable payload: skip this message, keep going. */
+			MGMTD_BCKND_ADPTR_DBG(
+				"Failed to decode %d bytes from MGMTD Backend adapter '%s'",
+				msg->hdr.len, adptr->name);
+			continue;
+		}
+
+		(void)mgmt_bcknd_adapter_handle_msg(adptr, bcknd_msg);
+		mgmtd__bcknd_message__free_unpacked(bcknd_msg, NULL);
+		processed++;
+		adptr->num_msg_rx++;
+	}
+
+	return processed;
+}
+
+/*
+ * Thread/event callback: drain the adapter's input fifo, processing up to
+ * MGMTD_BCKND_MAX_NUM_MSG_PROC messages per invocation, then reschedule
+ * itself if buffers remain. Bails out silently if the connection is gone.
+ */
+static void mgmt_bcknd_adapter_proc_msgbufs(struct thread *thread)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+	struct stream *work;
+	int processed = 0;
+
+	adptr = (struct mgmt_bcknd_client_adapter *)THREAD_ARG(thread);
+	assert(adptr);
+
+	if (adptr->conn_fd == 0)
+		return;
+
+	for (; processed < MGMTD_BCKND_MAX_NUM_MSG_PROC;) {
+		work = stream_fifo_pop_safe(adptr->ibuf_fifo);
+		if (!work)
+			break;
+
+		processed += mgmt_bcknd_adapter_process_msg(
+			adptr, STREAM_DATA(work), stream_get_endp(work));
+
+		if (work != adptr->ibuf_work) {
+			/* Free it up */
+			stream_free(work);
+		} else {
+			/* Reset stream buffer for next read */
+			stream_reset(work);
+		}
+	}
+
+	/*
+	 * If we have more to process, reschedule for processing it.
+	 */
+	if (stream_fifo_head(adptr->ibuf_fifo))
+		mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_PROC_MSG);
+}
+
+/*
+ * Read handler for the adapter connection: pull available bytes into
+ * ibuf_work, scan for complete messages (validated by header marker
+ * and length), push the complete portion onto ibuf_fifo for deferred
+ * processing, and carry any trailing partial message over into a
+ * fresh ibuf_work stream. Always re-arms the read event.
+ */
+static void mgmt_bcknd_adapter_read(struct thread *thread)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+	int bytes_read, msg_cnt;
+	size_t total_bytes, bytes_left;
+	struct mgmt_bcknd_msg_hdr *msg_hdr;
+	bool incomplete = false;
+
+	adptr = (struct mgmt_bcknd_client_adapter *)THREAD_ARG(thread);
+	assert(adptr && adptr->conn_fd);
+
+	total_bytes = 0;
+	bytes_left = STREAM_SIZE(adptr->ibuf_work)
+		     - stream_get_endp(adptr->ibuf_work);
+	for (; bytes_left > MGMTD_BCKND_MSG_HDR_LEN;) {
+		bytes_read = stream_read_try(adptr->ibuf_work, adptr->conn_fd,
+					     bytes_left);
+		MGMTD_BCKND_ADPTR_DBG(
+			"Got %d bytes of message from MGMTD Backend adapter '%s'",
+			bytes_read, adptr->name);
+		if (bytes_read <= 0) {
+			/* Would block: retry on the next read event. */
+			if (bytes_read == -1
+			    && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+				mgmt_bcknd_adptr_register_event(
+					adptr, MGMTD_BCKND_CONN_READ);
+				return;
+			}
+
+			if (!bytes_read) {
+				/* Looks like connection closed */
+				MGMTD_BCKND_ADPTR_ERR(
+					"Got error (%d) while reading from MGMTD Backend adapter '%s'. Err: '%s'",
+					bytes_read, adptr->name,
+					safe_strerror(errno));
+				mgmt_bcknd_adapter_disconnect(adptr);
+				return;
+			}
+			break;
+		}
+
+		total_bytes += bytes_read;
+		bytes_left -= bytes_read;
+	}
+
+	/*
+	 * Check if we would have read incomplete messages or not.
+	 */
+	stream_set_getp(adptr->ibuf_work, 0);
+	total_bytes = 0;
+	msg_cnt = 0;
+	bytes_left = stream_get_endp(adptr->ibuf_work);
+	for (; bytes_left > MGMTD_BCKND_MSG_HDR_LEN;) {
+		msg_hdr =
+			(struct mgmt_bcknd_msg_hdr *)(STREAM_DATA(
+							      adptr->ibuf_work)
+						      + total_bytes);
+		if (msg_hdr->marker != MGMTD_BCKND_MSG_MARKER) {
+			/* Corrupted buffer. Force disconnect?? */
+			MGMTD_BCKND_ADPTR_ERR(
+				"Received corrupted buffer from MGMTD Backend client.");
+			mgmt_bcknd_adapter_disconnect(adptr);
+			return;
+		}
+		if (msg_hdr->len > bytes_left)
+			break;
+
+		total_bytes += msg_hdr->len;
+		bytes_left -= msg_hdr->len;
+		msg_cnt++;
+	}
+
+	/* Anything left after the last complete message is a partial one. */
+	if (bytes_left > 0)
+		incomplete = true;
+
+	/*
+	 * We would have read one or several messages.
+	 * Schedule processing them now.
+	 */
+	msg_hdr = (struct mgmt_bcknd_msg_hdr *)(STREAM_DATA(adptr->ibuf_work)
+						+ total_bytes);
+	stream_set_endp(adptr->ibuf_work, total_bytes);
+	stream_fifo_push(adptr->ibuf_fifo, adptr->ibuf_work);
+	adptr->ibuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+	if (incomplete) {
+		/* Copy the partial trailing message into the new buffer. */
+		stream_put(adptr->ibuf_work, msg_hdr, bytes_left);
+		stream_set_endp(adptr->ibuf_work, bytes_left);
+	}
+
+	if (msg_cnt)
+		mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_PROC_MSG);
+
+	mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_CONN_READ);
+}
+
+/*
+ * Write handler: flush up to MGMTD_BCKND_MAX_NUM_MSG_WRITE buffered
+ * messages from obuf_fifo to the client socket. A short write keeps
+ * the remainder at the head of the FIFO and retries on the next write
+ * event; exhausting the budget pauses writes and resumes via timer.
+ */
+static void mgmt_bcknd_adapter_write(struct thread *thread)
+{
+	int bytes_written = 0;
+	int processed = 0;
+	int msg_size = 0;
+	struct stream *s = NULL;
+	struct stream *free = NULL;
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	adptr = (struct mgmt_bcknd_client_adapter *)THREAD_ARG(thread);
+	assert(adptr && adptr->conn_fd);
+
+	/* Ensure pushing any pending write buffer to FIFO */
+	if (adptr->obuf_work) {
+		stream_fifo_push(adptr->obuf_fifo, adptr->obuf_work);
+		adptr->obuf_work = NULL;
+	}
+
+	for (s = stream_fifo_head(adptr->obuf_fifo);
+	     s && processed < MGMTD_BCKND_MAX_NUM_MSG_WRITE;
+	     s = stream_fifo_head(adptr->obuf_fifo)) {
+		/* msg_size = (int)stream_get_size(s); */
+		msg_size = (int)STREAM_READABLE(s);
+		bytes_written = stream_flush(s, adptr->conn_fd);
+		if (bytes_written == -1
+		    && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+			/* Would block: leave message queued, retry later. */
+			mgmt_bcknd_adptr_register_event(adptr,
+							MGMTD_BCKND_CONN_WRITE);
+			return;
+		} else if (bytes_written != msg_size) {
+			MGMTD_BCKND_ADPTR_ERR(
+				"Could not write all %d bytes (wrote: %d) to MGMTD Backend client socket. Err: '%s'",
+				msg_size, bytes_written, safe_strerror(errno));
+			if (bytes_written > 0) {
+				/* Partial write: drop written bytes, keep rest
+				 * at FIFO head and retry on next event. */
+				stream_forward_getp(s, (size_t)bytes_written);
+				stream_pulldown(s);
+				mgmt_bcknd_adptr_register_event(
+					adptr, MGMTD_BCKND_CONN_WRITE);
+				return;
+			}
+			mgmt_bcknd_adapter_disconnect(adptr);
+			return;
+		}
+
+		free = stream_fifo_pop(adptr->obuf_fifo);
+		stream_free(free);
+		MGMTD_BCKND_ADPTR_DBG(
+			"Wrote %d bytes of message to MGMTD Backend client socket.'",
+			bytes_written);
+		processed++;
+	}
+
+	if (s) {
+		/* Budget exhausted with data pending: pause writes and
+		 * resume after a short delay. */
+		mgmt_bcknd_adapter_writes_off(adptr);
+		mgmt_bcknd_adptr_register_event(adptr,
+						MGMTD_BCKND_CONN_WRITES_ON);
+	}
+}
+
+/* Timer handler: re-enable write events on the adapter connection. */
+static void mgmt_bcknd_adapter_resume_writes(struct thread *thread)
+{
+	struct mgmt_bcknd_client_adapter *adptr =
+		(struct mgmt_bcknd_client_adapter *)THREAD_ARG(thread);
+
+	assert(adptr && adptr->conn_fd);
+
+	mgmt_bcknd_adapter_writes_on(adptr);
+}
+
+/*
+ * Per-node iteration callback used while walking a database: if the
+ * adapter in 'ctxt' is subscribed to this xpath, record the node as a
+ * "created" config change in the adapter's pending change list.
+ *
+ * NOTE(review): 'db_ctxt' and 'nb_node' are unused here — presumably
+ * required by the mgmt_db_iter_data() callback signature; confirm.
+ */
+static void mgmt_bcknd_iter_and_get_cfg(struct mgmt_db_ctxt *db_ctxt,
+					char *xpath, struct lyd_node *node,
+					struct nb_node *nb_node, void *ctxt)
+{
+	struct mgmt_bcknd_client_subscr_info subscr_info;
+	struct mgmt_bcknd_get_adptr_config_params *parms;
+	struct mgmt_bcknd_client_adapter *adptr;
+	struct nb_config_cbs *root;
+	uint32_t *seq;
+
+	if (mgmt_bcknd_get_subscr_info_for_xpath(xpath, &subscr_info) != 0) {
+		MGMTD_BCKND_ADPTR_ERR(
+			"ERROR: Failed to get subscriber for '%s'", xpath);
+		return;
+	}
+
+	parms = (struct mgmt_bcknd_get_adptr_config_params *)ctxt;
+
+	adptr = parms->adptr;
+	/* Skip nodes this backend client is not subscribed to. */
+	if (!subscr_info.xpath_subscr[adptr->id].subscribed)
+		return;
+
+	root = parms->cfg_chgs;
+	seq = &parms->seq;
+	nb_config_diff_created(node, seq, root);
+}
+
+/*
+ * Connection-init timer handler for a newly connected backend client.
+ * Currently a placeholder: the initial config download/sync steps are
+ * still TODO (see comments below).
+ *
+ * Fix: the function is declared void but returned a value
+ * ('return 0;'), which is a constraint violation; the bogus return
+ * (and the 'return 0' inside the commented-out sketch) are removed.
+ */
+static void mgmt_bcknd_adapter_conn_init(struct thread *thread)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	adptr = (struct mgmt_bcknd_client_adapter *)THREAD_ARG(thread);
+	assert(adptr && adptr->conn_fd);
+
+	/*
+	 * TODO: Check first if the current session can run a CONFIG
+	 * transaction or not. Reschedule if a CONFIG transaction
+	 * from another session is already in progress.
+	 if (mgmt_config_trxn_in_progress() != MGMTD_SESSION_ID_NONE) {
+		mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_CONN_INIT);
+		return;
+	 }
+	 */
+
+	/*
+	 * TODO: Notify TRXN module to create a CONFIG transaction and
+	 * download the CONFIGs identified for this new client.
+	 * If the TRXN module fails to initiate the CONFIG transaction
+	 * disconnect from the client forcing a reconnect later.
+	 * That should also take care of destroying the adapter.
+	 *
+	 if (mgmt_trxn_notify_bcknd_adapter_conn(adptr, true) != 0) {
+		mgmt_bcknd_adapter_disconnect(adptr);
+		adptr = NULL;
+	 }
+	 */
+}
+
+/*
+ * Arm the thread/timer event corresponding to 'event' on this adapter.
+ * Only per-connection events are valid here; server/scheduling events
+ * are handled elsewhere and trigger an assertion.
+ */
+static void
+mgmt_bcknd_adptr_register_event(struct mgmt_bcknd_client_adapter *adptr,
+				enum mgmt_bcknd_event event)
+{
+	struct timeval tv = {0};
+
+	switch (event) {
+	case MGMTD_BCKND_CONN_INIT:
+		thread_add_timer_msec(mgmt_bcknd_adptr_tm,
+				      mgmt_bcknd_adapter_conn_init, adptr,
+				      MGMTD_BCKND_CONN_INIT_DELAY_MSEC,
+				      &adptr->conn_init_ev);
+		assert(adptr->conn_init_ev);
+		break;
+	case MGMTD_BCKND_CONN_READ:
+		thread_add_read(mgmt_bcknd_adptr_tm, mgmt_bcknd_adapter_read,
+				adptr, adptr->conn_fd, &adptr->conn_read_ev);
+		assert(adptr->conn_read_ev);
+		break;
+	case MGMTD_BCKND_CONN_WRITE:
+		thread_add_write(mgmt_bcknd_adptr_tm, mgmt_bcknd_adapter_write,
+				 adptr, adptr->conn_fd, &adptr->conn_write_ev);
+		assert(adptr->conn_write_ev);
+		break;
+	case MGMTD_BCKND_PROC_MSG:
+		/* Small delay to batch up several incoming messages. */
+		tv.tv_usec = MGMTD_BCKND_MSG_PROC_DELAY_USEC;
+		thread_add_timer_tv(mgmt_bcknd_adptr_tm,
+				    mgmt_bcknd_adapter_proc_msgbufs, adptr, &tv,
+				    &adptr->proc_msg_ev);
+		assert(adptr->proc_msg_ev);
+		break;
+	case MGMTD_BCKND_CONN_WRITES_ON:
+		thread_add_timer_msec(mgmt_bcknd_adptr_tm,
+				      mgmt_bcknd_adapter_resume_writes, adptr,
+				      MGMTD_BCKND_MSG_WRITE_DELAY_MSEC,
+				      &adptr->conn_writes_on);
+		assert(adptr->conn_writes_on);
+		break;
+	case MGMTD_BCKND_SERVER:
+	case MGMTD_BCKND_SCHED_CFG_PREPARE:
+	case MGMTD_BCKND_RESCHED_CFG_PREPARE:
+	case MGMTD_BCKND_SCHED_CFG_APPLY:
+	case MGMTD_BCKND_RESCHED_CFG_APPLY:
+		assert(!"mgmt_bcknd_adptr_post_event() called incorrectly");
+		break;
+	}
+}
+
+/* Take a reference on the adapter; released via ..._unlock(). */
+void mgmt_bcknd_adapter_lock(struct mgmt_bcknd_client_adapter *adptr)
+{
+	adptr->refcount += 1;
+}
+
+/*
+ * Drop a reference on the adapter. On the last reference, remove it
+ * from the global adapter list, release its I/O buffers, cancel any
+ * pending events and free the adapter. Always NULLs the caller's
+ * pointer so it cannot be reused after a potential free.
+ */
+extern void mgmt_bcknd_adapter_unlock(struct mgmt_bcknd_client_adapter **adptr)
+{
+	assert(*adptr && (*adptr)->refcount);
+
+	(*adptr)->refcount--;
+	if (!(*adptr)->refcount) {
+		mgmt_bcknd_adptr_list_del(&mgmt_bcknd_adptrs, *adptr);
+
+		/* Release buffered input/output streams. */
+		stream_fifo_free((*adptr)->ibuf_fifo);
+		stream_free((*adptr)->ibuf_work);
+		stream_fifo_free((*adptr)->obuf_fifo);
+		stream_free((*adptr)->obuf_work);
+
+		/* Cancel any events still pending for this adapter. */
+		THREAD_OFF((*adptr)->conn_init_ev);
+		THREAD_OFF((*adptr)->conn_read_ev);
+		THREAD_OFF((*adptr)->conn_write_ev);
+		THREAD_OFF((*adptr)->conn_writes_on);
+		THREAD_OFF((*adptr)->proc_msg_ev);
+		XFREE(MTYPE_MGMTD_BCKND_ADPATER, *adptr);
+	}
+
+	*adptr = NULL;
+}
+
+/*
+ * One-time initialization of the backend adapter module; repeated
+ * calls are no-ops. Always returns 0.
+ */
+int mgmt_bcknd_adapter_init(struct thread_master *tm)
+{
+	/* Already initialized? */
+	if (mgmt_bcknd_adptr_tm)
+		return 0;
+
+	mgmt_bcknd_adptr_tm = tm;
+	memset(mgmt_xpath_map, 0, sizeof(mgmt_xpath_map));
+	mgmt_num_xpath_maps = 0;
+	memset(mgmt_bcknd_adptrs_by_id, 0, sizeof(mgmt_bcknd_adptrs_by_id));
+	mgmt_bcknd_adptr_list_init(&mgmt_bcknd_adptrs);
+	mgmt_bcknd_xpath_map_init();
+
+	return 0;
+}
+
+/* Tear down the backend adapter module: disconnect all adapters. */
+void mgmt_bcknd_adapter_destroy(void)
+{
+	mgmt_bcknd_cleanup_adapters();
+}
+
+/*
+ * Find or create the adapter for an accepted backend connection.
+ * A new adapter starts with a placeholder name ("Unknown-FD-<fd>")
+ * until the client identifies itself. In both cases the socket is
+ * made non-blocking, its buffer sizes tuned, and a config resync
+ * (CONN_INIT) is scheduled.
+ */
+struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_create_adapter(int conn_fd, union sockunion *from)
+{
+	struct mgmt_bcknd_client_adapter *adptr = NULL;
+
+	adptr = mgmt_bcknd_find_adapter_by_fd(conn_fd);
+	if (!adptr) {
+		adptr = XCALLOC(MTYPE_MGMTD_BCKND_ADPATER,
+				sizeof(struct mgmt_bcknd_client_adapter));
+		assert(adptr);
+
+		adptr->conn_fd = conn_fd;
+		/* Client ID is unknown until the client registers. */
+		adptr->id = MGMTD_BCKND_CLIENT_ID_MAX;
+		memcpy(&adptr->conn_su, from, sizeof(adptr->conn_su));
+		snprintf(adptr->name, sizeof(adptr->name), "Unknown-FD-%d",
+			 adptr->conn_fd);
+		adptr->ibuf_fifo = stream_fifo_new();
+		adptr->ibuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN);
+		adptr->obuf_fifo = stream_fifo_new();
+		/* adptr->obuf_work = stream_new(MGMTD_BCKND_MSG_MAX_LEN); */
+		adptr->obuf_work = NULL;
+		mgmt_bcknd_adapter_lock(adptr);
+
+		mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_CONN_READ);
+		mgmt_bcknd_adptr_list_add_tail(&mgmt_bcknd_adptrs, adptr);
+
+		RB_INIT(nb_config_cbs, &adptr->cfg_chgs);
+
+		MGMTD_BCKND_ADPTR_DBG("Added new MGMTD Backend adapter '%s'",
+				      adptr->name);
+	}
+
+	/* Make client socket non-blocking. */
+	set_nonblocking(adptr->conn_fd);
+	setsockopt_so_sendbuf(adptr->conn_fd, MGMTD_SOCKET_BCKND_SEND_BUF_SIZE);
+	setsockopt_so_recvbuf(adptr->conn_fd, MGMTD_SOCKET_BCKND_RECV_BUF_SIZE);
+
+	/* Trigger resync of config with the new adapter */
+	mgmt_bcknd_adptr_register_event(adptr, MGMTD_BCKND_CONN_INIT);
+
+	return adptr;
+}
+
+/* Look up the adapter registered for 'id'; NULL if none/out of range. */
+struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_get_adapter_by_id(enum mgmt_bcknd_client_id id)
+{
+	if (id >= MGMTD_BCKND_CLIENT_ID_MAX)
+		return NULL;
+
+	return mgmt_bcknd_adptrs_by_id[id];
+}
+
+/* Look up an adapter by client name; NULL if not found. */
+struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_get_adapter_by_name(const char *name)
+{
+	return mgmt_bcknd_find_adapter_by_name(name);
+}
+
+/*
+ * Collect the config items (from 'db_ctxt') that this backend client
+ * is subscribed to, caching them on the adapter. The cached list is
+ * built only once (while adptr->cfg_chgs is empty); a pointer to it
+ * is returned via 'cfg_chgs'. Always returns 0.
+ */
+int mgmt_bcknd_get_adapter_config(struct mgmt_bcknd_client_adapter *adptr,
+				  struct mgmt_db_ctxt *db_ctxt,
+				  struct nb_config_cbs **cfg_chgs)
+{
+	char base_xpath[] = "/";
+	struct mgmt_bcknd_get_adptr_config_params parms;
+
+	assert(cfg_chgs);
+
+	if (RB_EMPTY(nb_config_cbs, &adptr->cfg_chgs)) {
+		parms.adptr = adptr;
+		parms.cfg_chgs = &adptr->cfg_chgs;
+		parms.seq = 0;
+
+		/* Walk the whole DB; the callback filters by subscription. */
+		mgmt_db_iter_data(db_ctxt, base_xpath,
+				  mgmt_bcknd_iter_and_get_cfg, (void *)&parms,
+				  false);
+	}
+
+	*cfg_chgs = &adptr->cfg_chgs;
+	return 0;
+}
+
+/* Ask the backend client to create transaction 'trxn_id'. */
+int mgmt_bcknd_create_trxn(struct mgmt_bcknd_client_adapter *adptr,
+			   uint64_t trxn_id)
+{
+	return mgmt_bcknd_send_trxn_req(adptr, trxn_id, true);
+}
+
+/* Ask the backend client to destroy transaction 'trxn_id'. */
+int mgmt_bcknd_destroy_trxn(struct mgmt_bcknd_client_adapter *adptr,
+			    uint64_t trxn_id)
+{
+	return mgmt_bcknd_send_trxn_req(adptr, trxn_id, false);
+}
+
+/*
+ * Send one batch of config-data create requests to the backend
+ * client; 'end_of_data' marks the last batch of the transaction.
+ */
+int mgmt_bcknd_send_cfg_data_create_req(struct mgmt_bcknd_client_adapter *adptr,
+					uint64_t trxn_id, uint64_t batch_id,
+					struct mgmt_bcknd_cfgreq *cfg_req,
+					bool end_of_data)
+{
+	return mgmt_bcknd_send_cfgdata_create_req(
+		adptr, trxn_id, batch_id, cfg_req->cfgdata_reqs,
+		cfg_req->num_reqs, end_of_data);
+}
+
+/* Send a config VALIDATE request for the given batches. */
+extern int
+mgmt_bcknd_send_cfg_validate_req(struct mgmt_bcknd_client_adapter *adptr,
+				 uint64_t trxn_id, uint64_t batch_ids[],
+				 size_t num_batch_ids)
+{
+	return mgmt_bcknd_send_cfgvalidate_req(adptr, trxn_id, batch_ids,
+					       num_batch_ids);
+}
+
+/* Send a config APPLY request for transaction 'trxn_id'. */
+extern int
+mgmt_bcknd_send_cfg_apply_req(struct mgmt_bcknd_client_adapter *adptr,
+			      uint64_t trxn_id)
+{
+	return mgmt_bcknd_send_cfgapply_req(adptr, trxn_id);
+}
+
+/*
+ * This function maps a YANG data Xpath to one or more
+ * Backend Clients that should be contacted for various purposes.
+ */
+/*
+ * Fill 'subscr_info' with the per-client subscription flags for
+ * 'xpath', by regex-matching it against the registered xpath maps and
+ * merging the flags of the best-matching (longest-match) entries.
+ * The root xpaths "/" and "/ *" match every map. Returns 0 on
+ * success, -1 if 'subscr_info' is NULL.
+ *
+ * Fixes: mis-encoded '&reg_maps' (was the '®' glyph, a broken
+ * '&reg;' entity) and the "Cient" typo in the debug message.
+ */
+int mgmt_bcknd_get_subscr_info_for_xpath(
+	const char *xpath, struct mgmt_bcknd_client_subscr_info *subscr_info)
+{
+	int indx, match, max_match = 0, num_reg;
+	enum mgmt_bcknd_client_id id;
+	struct mgmt_bcknd_client_subscr_info
+		*reg_maps[array_size(mgmt_xpath_map)] = {0};
+	bool root_xp = false;
+
+	if (!subscr_info)
+		return -1;
+
+	num_reg = 0;
+	memset(subscr_info, 0, sizeof(*subscr_info));
+
+	if (strlen(xpath) <= 2 && xpath[0] == '/'
+	    && (!xpath[1] || xpath[1] == '*')) {
+		root_xp = true;
+	}
+
+	MGMTD_BCKND_ADPTR_DBG("XPATH: %s", xpath);
+	for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
+		/*
+		 * For Xpaths: '/' and '/ *' all xpath maps should match
+		 * the given xpath.
+		 */
+		if (!root_xp) {
+			match = mgmt_bcknd_eval_regexp_match(
+				mgmt_xpath_map[indx].xpath_regexp, xpath);
+
+			if (!match || match < max_match)
+				continue;
+
+			/* A longer match supersedes all shorter ones. */
+			if (match > max_match) {
+				num_reg = 0;
+				max_match = match;
+			}
+		}
+
+		reg_maps[num_reg] = &mgmt_xpath_map[indx].bcknd_subscrs;
+		num_reg++;
+	}
+
+	/* Merge subscription flags from all best-matching maps. */
+	for (indx = 0; indx < num_reg; indx++) {
+		FOREACH_MGMTD_BCKND_CLIENT_ID (id) {
+			if (reg_maps[indx]->xpath_subscr[id].subscribed) {
+				MGMTD_BCKND_ADPTR_DBG(
+					"Client: %s",
+					mgmt_bknd_client_id2name(id));
+				memcpy(&subscr_info->xpath_subscr[id],
+				       &reg_maps[indx]->xpath_subscr[id],
+				       sizeof(subscr_info->xpath_subscr[id]));
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Dump per-adapter status (name, fd, id, refcount, counters) to vty. */
+void mgmt_bcknd_adapter_status_write(struct vty *vty)
+{
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	vty_out(vty, "MGMTD Backend Adapters\n");
+
+	FOREACH_ADPTR_IN_LIST (adptr) {
+		vty_out(vty, "  Client: \t\t\t%s\n", adptr->name);
+		vty_out(vty, "    Conn-FD: \t\t\t%d\n", adptr->conn_fd);
+		vty_out(vty, "    Client-Id: \t\t\t%d\n", adptr->id);
+		vty_out(vty, "    Ref-Count: \t\t\t%u\n", adptr->refcount);
+		vty_out(vty, "    Msg-Sent: \t\t\t%u\n", adptr->num_msg_tx);
+		vty_out(vty, "    Msg-Recvd: \t\t\t%u\n", adptr->num_msg_rx);
+	}
+	vty_out(vty, "  Total: %d\n",
+		(int)mgmt_bcknd_adptr_list_count(&mgmt_bcknd_adptrs));
+}
+
+/*
+ * Dump the xpath registry to vty: for every registered xpath regexp,
+ * list each subscribed client with its subscription flags and, if
+ * connected, its adapter pointer.
+ */
+void mgmt_bcknd_xpath_register_write(struct vty *vty)
+{
+	int indx;
+	enum mgmt_bcknd_client_id id;
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	vty_out(vty, "MGMTD Backend XPath Registry\n");
+
+	for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
+		vty_out(vty, " - XPATH: '%s'\n",
+			mgmt_xpath_map[indx].xpath_regexp);
+		FOREACH_MGMTD_BCKND_CLIENT_ID (id) {
+			if (mgmt_xpath_map[indx]
+				    .bcknd_subscrs.xpath_subscr[id]
+				    .subscribed) {
+				vty_out(vty,
+					"   -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
+					mgmt_bknd_client_id2name(id),
+					mgmt_xpath_map[indx]
+							.bcknd_subscrs
+							.xpath_subscr[id]
+							.validate_config
+						? "T"
+						: "F",
+					mgmt_xpath_map[indx]
+							.bcknd_subscrs
+							.xpath_subscr[id]
+							.notify_config
+						? "T"
+						: "F",
+					mgmt_xpath_map[indx]
+							.bcknd_subscrs
+							.xpath_subscr[id]
+							.own_oper_data
+						? "T"
+						: "F");
+				adptr = mgmt_bcknd_get_adapter_by_id(id);
+				if (adptr) {
+					vty_out(vty, "     -- Adapter: %p\n",
+						adptr);
+				}
+			}
+		}
+	}
+
+	vty_out(vty, "Total XPath Registries: %u\n", mgmt_num_xpath_maps);
+}
+
+/*
+ * Dump to vty which backend clients are subscribed to 'xpath' and
+ * with which flags (validate/notify/own-oper-data).
+ */
+void mgmt_bcknd_xpath_subscr_info_write(struct vty *vty, const char *xpath)
+{
+	struct mgmt_bcknd_client_subscr_info subscr;
+	enum mgmt_bcknd_client_id id;
+	struct mgmt_bcknd_client_adapter *adptr;
+
+	if (mgmt_bcknd_get_subscr_info_for_xpath(xpath, &subscr) != 0) {
+		vty_out(vty, "ERROR: Failed to get subscriber for '%s'\n",
+			xpath);
+		return;
+	}
+
+	vty_out(vty, "XPath: '%s'\n", xpath);
+	FOREACH_MGMTD_BCKND_CLIENT_ID (id) {
+		if (subscr.xpath_subscr[id].subscribed) {
+			vty_out(vty,
+				"  -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
+				mgmt_bknd_client_id2name(id),
+				subscr.xpath_subscr[id].validate_config ? "T"
+									: "F",
+				subscr.xpath_subscr[id].notify_config ? "T"
+								      : "F",
+				subscr.xpath_subscr[id].own_oper_data ? "T"
+								      : "F");
+			adptr = mgmt_bcknd_get_adapter_by_id(id);
+			if (adptr)
+				vty_out(vty, "    -- Adapter: %p\n", adptr);
+		}
+	}
+}
--- /dev/null
+/*
+ * MGMTD Backend Client Connection Adapter
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRR_MGMTD_BCKND_ADAPTER_H_
+#define _FRR_MGMTD_BCKND_ADAPTER_H_
+
+#include "mgmtd/mgmt_defines.h"
+#include "mgmt_bcknd_client.h"
+#include "mgmtd/mgmt_db.h"
+
+#define MGMTD_BCKND_CONN_INIT_DELAY_MSEC 50
+
+#define MGMTD_FIND_ADAPTER_BY_INDEX(adapter_index) \
+ mgmt_adaptr_ref[adapter_index]
+
+/* List of adapter clients of MGMTD */
+#define MGMTD_BCKND_CLIENT_INDEX_STATICD (1 << MGMTD_BCKND_CLIENT_ID_STATICD)
+#define MGMTD_BCKND_CLIENT_INDEX_BGPD (1 << MGMTD_BCKND_CLIENT_ID_BGPD)
+
+enum mgmt_bcknd_req_type {
+ MGMTD_BCKND_REQ_NONE = 0,
+ MGMTD_BCKND_REQ_CFG_VALIDATE,
+ MGMTD_BCKND_REQ_CFG_APPLY,
+ MGMTD_BCKND_REQ_DATA_GET_ELEM,
+ MGMTD_BCKND_REQ_DATA_GET_NEXT
+};
+
+struct mgmt_bcknd_cfgreq {
+ Mgmtd__YangCfgDataReq **cfgdata_reqs;
+ size_t num_reqs;
+};
+
+struct mgmt_bcknd_datareq {
+ Mgmtd__YangGetDataReq **getdata_reqs;
+ size_t num_reqs;
+};
+
+PREDECL_LIST(mgmt_bcknd_adptr_list);
+PREDECL_LIST(mgmt_trxn_badptr_list);
+
+/* Per-connection state for one backend client of MGMTD. */
+struct mgmt_bcknd_client_adapter {
+	/* Client identity; MGMTD_BCKND_CLIENT_ID_MAX until registered. */
+	enum mgmt_bcknd_client_id id;
+	int conn_fd;
+	union sockunion conn_su;
+	/* Event handles for connection init, read/write and msg processing. */
+	struct thread *conn_init_ev;
+	struct thread *conn_read_ev;
+	struct thread *conn_write_ev;
+	struct thread *conn_writes_on;
+	struct thread *proc_msg_ev;
+	uint32_t flags;
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+	/* Xpaths this client registered for. */
+	uint8_t num_xpath_reg;
+	char xpath_reg[MGMTD_MAX_NUM_XPATH_REG][MGMTD_MAX_XPATH_LEN];
+
+	/* IO streams for read and write */
+	/* pthread_mutex_t ibuf_mtx; */
+	struct stream_fifo *ibuf_fifo;
+	/* pthread_mutex_t obuf_mtx; */
+	struct stream_fifo *obuf_fifo;
+
+	/* Private I/O buffers */
+	struct stream *ibuf_work;
+	struct stream *obuf_work;
+	uint8_t msg_buf[MGMTD_BCKND_MSG_MAX_LEN];
+
+	/* Buffer of data waiting to be written to client. */
+	/* struct buffer *wb; */
+
+	/* Reference count; adapter freed when it drops to zero. */
+	int refcount;
+	uint32_t num_msg_tx;
+	uint32_t num_msg_rx;
+
+	/*
+	 * List of config items that should be sent to the
+	 * backend during re/connect. This is temporarily
+	 * created and then freed-up as soon as the initial
+	 * config items has been applied onto the backend.
+	 */
+	struct nb_config_cbs cfg_chgs;
+
+	struct mgmt_bcknd_adptr_list_item list_linkage;
+	struct mgmt_trxn_badptr_list_item trxn_list_linkage;
+};
+
+#define MGMTD_BCKND_ADPTR_FLAGS_WRITES_OFF (1U << 0)
+#define MGMTD_BCKND_ADPTR_FLAGS_CFG_SYNCED (1U << 1)
+
+DECLARE_LIST(mgmt_bcknd_adptr_list, struct mgmt_bcknd_client_adapter,
+ list_linkage);
+DECLARE_LIST(mgmt_trxn_badptr_list, struct mgmt_bcknd_client_adapter,
+ trxn_list_linkage);
+
+/*
+ * Per-client subscription flags for one xpath. 'subscribed' aliases
+ * the bitfield struct, so any non-zero value means the client is
+ * subscribed for at least one purpose.
+ */
+union mgmt_bcknd_xpath_subscr_info {
+	uint8_t subscribed;
+	struct {
+		uint8_t validate_config : 1;
+		uint8_t notify_config : 1;
+		uint8_t own_oper_data : 1;
+	};
+};
+
+struct mgmt_bcknd_client_subscr_info {
+ union mgmt_bcknd_xpath_subscr_info
+ xpath_subscr[MGMTD_BCKND_CLIENT_ID_MAX];
+};
+
+/* Initialise backend adapter module. */
+extern int mgmt_bcknd_adapter_init(struct thread_master *tm);
+
+/* Destroy the backend adapter module. */
+extern void mgmt_bcknd_adapter_destroy(void);
+
+/* Acquire lock for backend adapter. */
+extern void mgmt_bcknd_adapter_lock(struct mgmt_bcknd_client_adapter *adptr);
+
+/* Remove lock from backend adapter. */
+extern void mgmt_bcknd_adapter_unlock(struct mgmt_bcknd_client_adapter **adptr);
+
+/* Create backend adapter. */
+extern struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_create_adapter(int conn_fd, union sockunion *su);
+
+/* Fetch backend adapter given an adapter name. */
+extern struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_get_adapter_by_name(const char *name);
+
+/* Fetch backend adapter given an client ID. */
+extern struct mgmt_bcknd_client_adapter *
+mgmt_bcknd_get_adapter_by_id(enum mgmt_bcknd_client_id id);
+
+/* Fetch backend adapter config. */
+extern int
+mgmt_bcknd_get_adapter_config(struct mgmt_bcknd_client_adapter *adptr,
+ struct mgmt_db_ctxt *db_ctxt,
+ struct nb_config_cbs **cfg_chgs);
+
+/* Create a transaction. */
+extern int mgmt_bcknd_create_trxn(struct mgmt_bcknd_client_adapter *adptr,
+ uint64_t trxn_id);
+
+/* Destroy a transaction. */
+extern int mgmt_bcknd_destroy_trxn(struct mgmt_bcknd_client_adapter *adptr,
+ uint64_t trxn_id);
+
+/*
+ * Send config data create request to backend client.
+ *
+ * adaptr
+ * Backend adapter information.
+ *
+ * trxn_id
+ * Unique transaction identifier.
+ *
+ * batch_id
+ * Request batch ID.
+ *
+ * cfg_req
+ * Config data request.
+ *
+ * end_of_data
+ * TRUE if the data from last batch, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_bcknd_send_cfg_data_create_req(
+ struct mgmt_bcknd_client_adapter *adptr, uint64_t trxn_id,
+ uint64_t batch_id, struct mgmt_bcknd_cfgreq *cfg_req, bool end_of_data);
+
+/*
+ * Send config validate request to backend client.
+ *
+ * adaptr
+ * Backend adapter information.
+ *
+ * trxn_id
+ * Unique transaction identifier.
+ *
+ * batch_ids
+ * List of request batch IDs.
+ *
+ * num_batch_ids
+ * Number of batch ids.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int
+mgmt_bcknd_send_cfg_validate_req(struct mgmt_bcknd_client_adapter *adptr,
+ uint64_t trxn_id, uint64_t batch_ids[],
+ size_t num_batch_ids);
+
+/*
+ * Send config apply request to backend client.
+ *
+ * adaptr
+ * Backend adapter information.
+ *
+ * trxn_id
+ * Unique transaction identifier.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int
+mgmt_bcknd_send_cfg_apply_req(struct mgmt_bcknd_client_adapter *adptr,
+ uint64_t trxn_id);
+
+/*
+ * Dump backend adapter status to vty.
+ */
+extern void mgmt_bcknd_adapter_status_write(struct vty *vty);
+
+/*
+ * Dump xpath registry for each backend client to vty.
+ */
+extern void mgmt_bcknd_xpath_register_write(struct vty *vty);
+
+/*
+ * Maps a YANG data Xpath to one or more
+ * backend clients that should be contacted for various purposes.
+ */
+extern int mgmt_bcknd_get_subscr_info_for_xpath(
+ const char *xpath, struct mgmt_bcknd_client_subscr_info *subscr_info);
+
+/*
+ * Dump backend client information for a given xpath to vty.
+ */
+extern void mgmt_bcknd_xpath_subscr_info_write(struct vty *vty,
+ const char *xpath);
+
+#endif /* _FRR_MGMTD_BCKND_ADAPTER_H_ */
--- /dev/null
+/*
+ * MGMTD Backend Server
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "network.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_bcknd_server.h"
+#include "mgmtd/mgmt_bcknd_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BCKND_SRVR_DBG(fmt, ...) \
+ fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BCKND_SRVR_ERR(fmt, ...) \
+ fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BCKND_SRVR_DBG(fmt, ...) \
+ do { \
+ if (mgmt_debug_bcknd) \
+ zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+#define MGMTD_BCKND_SRVR_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+static int mgmt_bcknd_listen_fd;
+static struct thread_master *mgmt_bcknd_listen_tm;
+static struct thread *mgmt_bcknd_listen_ev;
+static void mgmt_bcknd_server_register_event(enum mgmt_bcknd_event event);
+
+/*
+ * Accept handler for the backend server socket: accepts one pending
+ * connection, makes it non-blocking/close-on-exec and hands it to the
+ * adapter layer. Always re-arms itself for the next connection.
+ */
+static void mgmt_bcknd_conn_accept(struct thread *thread)
+{
+	int client_conn_fd;
+	union sockunion su;
+
+	if (!mgmt_bcknd_listen_fd)
+		return;
+
+	/* We continue hearing server listen socket. */
+	mgmt_bcknd_server_register_event(MGMTD_BCKND_SERVER);
+
+	memset(&su, 0, sizeof(union sockunion));
+
+	/* We can handle IPv4 or IPv6 socket. */
+	client_conn_fd = sockunion_accept(mgmt_bcknd_listen_fd, &su);
+	if (client_conn_fd < 0) {
+		MGMTD_BCKND_SRVR_ERR(
+			"Failed to accept MGMTD Backend client connection : %s",
+			safe_strerror(errno));
+		return;
+	}
+	set_nonblocking(client_conn_fd);
+	set_cloexec(client_conn_fd);
+
+	MGMTD_BCKND_SRVR_DBG("Got a new MGMTD Backend connection");
+
+	mgmt_bcknd_create_adapter(client_conn_fd, &su);
+}
+
+/*
+ * Arm the server's accept event; only MGMTD_BCKND_SERVER is valid
+ * here, any other event asserts.
+ */
+static void mgmt_bcknd_server_register_event(enum mgmt_bcknd_event event)
+{
+	if (event == MGMTD_BCKND_SERVER) {
+		thread_add_read(mgmt_bcknd_listen_tm, mgmt_bcknd_conn_accept,
+				NULL, mgmt_bcknd_listen_fd,
+				&mgmt_bcknd_listen_ev);
+		assert(mgmt_bcknd_listen_ev);
+	} else {
+		assert(!"mgmt_bcknd_server_post_event() called incorrectly");
+	}
+}
+
+/*
+ * Create, bind and listen on the backend UNIX-domain server socket
+ * and register for incoming connections. The process exits on
+ * failure, as mgmtd cannot operate without its backend server.
+ *
+ * Fixes: 'addr.sun_family = AF_UNIX,' used a comma operator instead
+ * of a statement terminator; 'addr' was passed to bind() with
+ * uninitialized padding (now zeroed); the failure path closed fd -1
+ * when socket() itself failed ('if (sock)' -> 'if (sock >= 0)').
+ */
+static void mgmt_bcknd_server_start(const char *hostname)
+{
+	int ret;
+	int sock;
+	struct sockaddr_un addr;
+	mode_t old_mask;
+
+	/* Set umask */
+	old_mask = umask(0077);
+
+	sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+	if (sock < 0) {
+		MGMTD_BCKND_SRVR_ERR("Failed to create server socket: %s",
+				     safe_strerror(errno));
+		goto mgmt_bcknd_server_start_failed;
+	}
+
+	memset(&addr, 0, sizeof(addr));
+	addr.sun_family = AF_UNIX;
+	strlcpy(addr.sun_path, MGMTD_BCKND_SERVER_PATH, sizeof(addr.sun_path));
+	unlink(addr.sun_path);
+	ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+	if (ret < 0) {
+		MGMTD_BCKND_SRVR_ERR(
+			"Failed to bind server socket to '%s'. Err: %s",
+			addr.sun_path, safe_strerror(errno));
+		goto mgmt_bcknd_server_start_failed;
+	}
+
+	ret = listen(sock, MGMTD_BCKND_MAX_CONN);
+	if (ret < 0) {
+		MGMTD_BCKND_SRVR_ERR("Failed to listen on server socket: %s",
+				     safe_strerror(errno));
+		goto mgmt_bcknd_server_start_failed;
+	}
+
+	/* Restore umask */
+	umask(old_mask);
+
+	mgmt_bcknd_listen_fd = sock;
+	mgmt_bcknd_server_register_event(MGMTD_BCKND_SERVER);
+
+	MGMTD_BCKND_SRVR_DBG("Started MGMTD Backend Server!");
+	return;
+
+mgmt_bcknd_server_start_failed:
+	if (sock >= 0)
+		close(sock);
+
+	mgmt_bcknd_listen_fd = 0;
+	exit(-1);
+}
+
+/*
+ * Initialise the backend server once; repeated calls are no-ops.
+ * Always returns 0.
+ */
+int mgmt_bcknd_server_init(struct thread_master *master)
+{
+	/* Already started earlier? */
+	if (mgmt_bcknd_listen_tm) {
+		MGMTD_BCKND_SRVR_DBG("MGMTD Backend Server already running!");
+		return 0;
+	}
+
+	mgmt_bcknd_listen_tm = master;
+	mgmt_bcknd_server_start("localhost");
+	return 0;
+}
+
+/*
+ * Shut down the backend server: cancel the accept event, close the
+ * listen socket and forget the thread master. Safe to call when the
+ * server was never started.
+ */
+void mgmt_bcknd_server_destroy(void)
+{
+	if (mgmt_bcknd_listen_tm) {
+		MGMTD_BCKND_SRVR_DBG("Closing MGMTD Backend Server!");
+
+		if (mgmt_bcknd_listen_ev) {
+			THREAD_OFF(mgmt_bcknd_listen_ev);
+			mgmt_bcknd_listen_ev = NULL;
+		}
+
+		if (mgmt_bcknd_listen_fd) {
+			close(mgmt_bcknd_listen_fd);
+			mgmt_bcknd_listen_fd = 0;
+		}
+
+		mgmt_bcknd_listen_tm = NULL;
+	}
+}
--- /dev/null
+/*
+ * MGMTD Backend Server
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRR_MGMTD_BCKND_SERVER_H_
+#define _FRR_MGMTD_BCKND_SERVER_H_
+
+#define MGMTD_BCKND_MAX_CONN 32
+
+/* Initialise backend server */
+extern int mgmt_bcknd_server_init(struct thread_master *master);
+
+/* Destroy backend server */
+extern void mgmt_bcknd_server_destroy(void);
+
+#endif /* _FRR_MGMTD_BCKND_SERVER_H_ */
#ifndef _FRR_MGMTD_DEFINES_H
#define _FRR_MGMTD_DEFINES_H
-#include "lib/mgmt_pb.h"
-
#define MGMTD_CLIENT_NAME_MAX_LEN 32
#define MGMTD_MAX_XPATH_LEN XPATH_MAXLEN
#define MGMTD_MAX_YANG_VALUE_LEN YANG_VALUE_MAXLEN
+#define MGMTD_MAX_NUM_XPATH_REG 128
+
+#define MGMTD_MAX_NUM_DATA_REQ_IN_BATCH 32
+
+#define MGMTD_MAX_CFG_CHANGES_IN_BATCH \
+ ((10 * MGMTD_BCKND_MSG_MAX_LEN) \
+ / (MGMTD_MAX_XPATH_LEN + MGMTD_MAX_YANG_VALUE_LEN))
+
enum mgmt_result {
MGMTD_SUCCESS = 0,
MGMTD_INVALID_PARAM,
MGMTD_FRNTND_PROC_MSG
};
+/* Backend event types used to schedule server/adapter work. */
+enum mgmt_bcknd_event {
+	MGMTD_BCKND_SERVER = 1,
+	MGMTD_BCKND_CONN_INIT,
+	MGMTD_BCKND_CONN_READ,
+	MGMTD_BCKND_CONN_WRITE,
+	MGMTD_BCKND_CONN_WRITES_ON,
+	MGMTD_BCKND_PROC_MSG,
+	MGMTD_BCKND_SCHED_CFG_PREPARE,
+	MGMTD_BCKND_RESCHED_CFG_PREPARE,
+	MGMTD_BCKND_SCHED_CFG_APPLY,
+	MGMTD_BCKND_RESCHED_CFG_APPLY,
+};
+
#define MGMTD_TRXN_ID_NONE 0
#endif /* _FRR_MGMTD_DEFINES_H */
DEFINE_MGROUP(MGMTD, "mgmt");
DEFINE_MTYPE(MGMTD, MGMTD, "MGMTD instance");
+DEFINE_MTYPE(MGMTD, MGMTD_BCKND_ADPATER, "MGMTD backend adapter");
DEFINE_MTYPE(MGMTD, MGMTD_FRNTND_ADPATER, "MGMTD Frontend adapter");
DEFINE_MTYPE(MGMTD, MGMTD_FRNTND_SESSN, "MGMTD Frontend Client Session");
DECLARE_MGROUP(MGMTD);
DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_BCKND_ADPATER);
DECLARE_MTYPE(MGMTD_FRNTND_ADPATER);
DECLARE_MTYPE(MGMTD_FRNTND_SESSN);
#endif /* _FRR_MGMTD_MEMORY_H */
#include "json.h"
#include "mgmtd/mgmt.h"
#include "mgmtd/mgmt_vty.h"
+#include "mgmtd/mgmt_bcknd_server.h"
+#include "mgmtd/mgmt_bcknd_adapter.h"
#include "mgmtd/mgmt_frntnd_server.h"
#include "mgmtd/mgmt_frntnd_adapter.h"
#include "mgmtd/mgmt_db.h"
#include "mgmtd/mgmt_vty_clippy.c"
#endif
+/* "show mgmt backend-adapter all": dump all backend adapter status. */
+DEFPY(show_mgmt_bcknd_adapter,
+      show_mgmt_bcknd_adapter_cmd,
+      "show mgmt backend-adapter all",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_BCKND_ADPTR_STR
+      "Display all Backend Adapters\n")
+{
+	mgmt_bcknd_adapter_status_write(vty);
+
+	return CMD_SUCCESS;
+}
+
+/* "show mgmt backend-yang-xpath-registry": dump the xpath registry. */
+DEFPY(show_mgmt_bcknd_xpath_reg,
+      show_mgmt_bcknd_xpath_reg_cmd,
+      "show mgmt backend-yang-xpath-registry",
+      SHOW_STR
+      MGMTD_STR
+      "Backend Adapter YANG Xpath Registry\n")
+{
+	mgmt_bcknd_xpath_register_write(vty);
+
+	return CMD_SUCCESS;
+}
+
DEFPY(show_mgmt_frntnd_adapter,
show_mgmt_frntnd_adapter_cmd,
"show mgmt frontend-adapter all",
return CMD_SUCCESS;
}
+/* "show mgmt yang-xpath-subscription WORD": subscribers of one xpath. */
+DEFPY(show_mgmt_map_xpath,
+      show_mgmt_map_xpath_cmd,
+      "show mgmt yang-xpath-subscription WORD$path",
+      SHOW_STR
+      MGMTD_STR
+      "Get YANG Backend Subscription\n"
+      "XPath expression specifying the YANG data path\n")
+{
+	mgmt_bcknd_xpath_subscr_info_write(vty, path);
+	return CMD_SUCCESS;
+}
+
DEFPY(mgmt_load_config,
mgmt_load_config_cmd,
"mgmt load-config file WORD$filepath <merge|replace>",
{
install_node(&debug_node);
+ install_element(VIEW_NODE, &show_mgmt_bcknd_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_bcknd_xpath_reg_cmd);
install_element(VIEW_NODE, &show_mgmt_frntnd_adapter_cmd);
install_element(VIEW_NODE, &show_mgmt_frntnd_adapter_detail_cmd);
install_element(VIEW_NODE, &show_mgmt_db_all_cmd);
install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
install_element(CONFIG_NODE, &mgmt_commit_apply_cmd);
install_element(CONFIG_NODE, &mgmt_commit_abort_cmd);
const char *value);
extern int mgmt_apply_vty_nb_commands(struct vty *vty,
const char *xpath_base_fmt, ...);
-extern int mgmt_hndl_bknd_cmd(const struct cmd_element *cmd, struct vty *vty,
- int argc, struct cmd_token *argv[]);
static inline LYD_FORMAT mgmt_str2format(const char *format_str)
{
mgmtd/mgmt_memory.c \
mgmtd/mgmt_db.c \
mgmtd/mgmt_vty.c \
+ mgmtd/mgmt_bcknd_server.c \
+ mgmtd/mgmt_bcknd_adapter.c \
mgmtd/mgmt_frntnd_server.c \
mgmtd/mgmt_frntnd_adapter.c \
# end
noinst_HEADERS += \
mgmtd/mgmt.h \
mgmtd/mgmt_memory.h \
+ mgmtd/mgmt_bcknd_adapter.h \
+ mgmtd/mgmt_bcknd_server.h \
mgmtd/mgmt_db.h \
mgmtd/mgmt_vty.h \
mgmtd/mgmt_frntnd_adapter.h \