summaryrefslogtreecommitdiff
path: root/zebra
diff options
context:
space:
mode:
Diffstat (limited to 'zebra')
-rw-r--r--zebra/dplane_fpm_nl.c29
-rw-r--r--zebra/fpm_listener.c141
-rw-r--r--zebra/rib.h7
-rw-r--r--zebra/zebra_dplane.c8
-rw-r--r--zebra/zebra_dplane.h3
-rw-r--r--zebra/zebra_rib.c134
-rw-r--r--zebra/zebra_vty.c15
7 files changed, 329 insertions, 8 deletions
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index 9f26852d1f..116a697de9 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -587,6 +587,10 @@ static void fpm_read(struct event *t)
struct zebra_dplane_ctx *ctx;
size_t available_bytes;
size_t hdr_available_bytes;
+ struct dplane_ctx_list_head batch_list;
+
+ /* Initialize the batch list */
+ dplane_ctx_q_init(&batch_list);
/* Let's ignore the input at the moment. */
rv = stream_read_try(fnc->ibuf, fnc->socket,
@@ -627,7 +631,7 @@ static void fpm_read(struct event *t)
while (available_bytes) {
if (available_bytes < (ssize_t)FPM_MSG_HDR_LEN) {
stream_pulldown(fnc->ibuf);
- return;
+ goto send_batch;
}
fpm.version = stream_getc(fnc->ibuf);
@@ -642,7 +646,7 @@ static void fpm_read(struct event *t)
__func__, fpm.version, fpm.msg_type);
FPM_RECONNECT(fnc);
- return;
+ goto send_batch;
}
/*
@@ -654,7 +658,7 @@ static void fpm_read(struct event *t)
"%s: Received message length: %u that does not even fill the FPM header",
__func__, fpm.msg_len);
FPM_RECONNECT(fnc);
- return;
+ goto send_batch;
}
/*
@@ -665,7 +669,7 @@ static void fpm_read(struct event *t)
if (fpm.msg_len > available_bytes) {
stream_rewind_getp(fnc->ibuf, FPM_MSG_HDR_LEN);
stream_pulldown(fnc->ibuf);
- return;
+ goto send_batch;
}
available_bytes -= FPM_MSG_HDR_LEN;
@@ -715,8 +719,9 @@ static void fpm_read(struct event *t)
break;
}
- /* Parse the route data into a dplane ctx, then
- * enqueue it to zebra for processing.
+ /*
+ * Parse the route data into a dplane ctx, add to ctx list
+ * and enqueue the batch of ctx to zebra for processing
*/
ctx = dplane_ctx_alloc();
dplane_ctx_route_init(ctx, DPLANE_OP_ROUTE_NOTIFY, NULL,
@@ -735,7 +740,8 @@ static void fpm_read(struct event *t)
* tableid to 0 in order for this to work.
*/
dplane_ctx_set_vrf(ctx, VRF_UNKNOWN);
- dplane_provider_enqueue_to_zebra(ctx);
+ /* Add to the list for batching */
+ dplane_ctx_enqueue_tail(&batch_list, ctx);
} else {
/*
* Let's continue to read other messages
@@ -755,6 +761,15 @@ static void fpm_read(struct event *t)
}
stream_reset(fnc->ibuf);
+
+send_batch:
+ /* Send all contexts to zebra in a single batch if we have any */
+ if (dplane_ctx_queue_count(&batch_list) > 0) {
+ if (IS_ZEBRA_DEBUG_FPM)
+ zlog_debug("%s: Sending batch of %u contexts to zebra", __func__,
+ dplane_ctx_queue_count(&batch_list));
+ dplane_provider_enqueue_ctx_list_to_zebra(&batch_list);
+ }
}
static void fpm_write(struct event *t)
diff --git a/zebra/fpm_listener.c b/zebra/fpm_listener.c
index 7ae9601ef4..73e9dc2482 100644
--- a/zebra/fpm_listener.c
+++ b/zebra/fpm_listener.c
@@ -19,6 +19,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
+#include <signal.h>
#ifdef GNU_LINUX
#include <stdint.h>
@@ -42,6 +43,35 @@
XREF_SETUP();
+PREDECL_RBTREE_UNIQ(fpm_route);
+
+/* Route structure to store in RB tree */
+struct fpm_route {
+ struct prefix prefix;
+ uint32_t table_id;
+ uint32_t nhg_id;
+ struct fpm_route_item rb_item;
+};
+
+/* Route key comparison: orders by table ID, then prefix (nhg_id is deliberately not part of the key) */
+static int fpm_route_cmp(const struct fpm_route *a, const struct fpm_route *b)
+{
+ int ret;
+
+ /* First compare table IDs */
+ if (a->table_id < b->table_id)
+ return -1;
+ if (a->table_id > b->table_id)
+ return 1;
+
+ /* Then compare prefixes */
+ ret = prefix_cmp(&a->prefix, &b->prefix);
+ return ret;
+}
+
+/* RB tree for storing routes */
+DECLARE_RBTREE_UNIQ(fpm_route, struct fpm_route, rb_item, fpm_route_cmp);
+
struct glob {
int server_sock;
int sock;
@@ -49,6 +79,8 @@ struct glob {
bool reflect_fail_all;
bool dump_hex;
FILE *output_file;
+ const char *dump_file;
+ struct fpm_route_head route_tree;
};
struct glob glob_space;
@@ -758,6 +790,64 @@ static void fpm_listener_hexdump(const void *mem, size_t len)
}
/*
+ * handle_route_update
+ * Handles adding or removing a route from the route tree
+ */
+static void handle_route_update(struct netlink_msg_ctx *ctx, bool is_add)
+{
+ struct fpm_route *route;
+ struct fpm_route *existing;
+ struct fpm_route lookup = { 0 };
+
+ if (!ctx->dest || !ctx->rtmsg)
+ return;
+
+ /* Set up lookup key */
+ lookup.prefix.family = ctx->rtmsg->rtm_family;
+ lookup.prefix.prefixlen = ctx->rtmsg->rtm_dst_len;
+ memcpy(&lookup.prefix.u.prefix, RTA_DATA(ctx->dest),
+ (ctx->rtmsg->rtm_family == AF_INET) ? 4 : 16);
+ lookup.table_id = ctx->rtmsg->rtm_table;
+ lookup.nhg_id = ctx->nhgid ? *ctx->nhgid : 0;
+ /* Look up existing route */
+ existing = fpm_route_find(&glob->route_tree, &lookup);
+
+ if (is_add) {
+ if (existing) {
+ /* Route exists, update it */
+ existing->prefix = lookup.prefix;
+ existing->table_id = lookup.table_id;
+ existing->nhg_id = lookup.nhg_id;
+ } else {
+ /* Create new route structure */
+ route = calloc(1, sizeof(struct fpm_route));
+ if (!route) {
+ fprintf(stderr, "Failed to allocate route structure\n");
+ return;
+ }
+
+ /* Copy prefix information */
+ route->prefix = lookup.prefix;
+ route->table_id = lookup.table_id;
+ route->nhg_id = lookup.nhg_id;
+
+ /* Add route to tree */
+ if (fpm_route_add(&glob->route_tree, route)) {
+ fprintf(stderr, "Failed to add route to tree\n");
+ free(route);
+ }
+ }
+ } else {
+ /* Remove route from tree */
+ if (existing) {
+ existing = fpm_route_del(&glob->route_tree, existing);
+ if (existing)
+ free(existing);
+ }
+ }
+}
+
+/*
* parse_netlink_msg
*/
static void parse_netlink_msg(char *buf, size_t buf_len, fpm_msg_hdr_t *fpm)
@@ -789,6 +879,7 @@ static void parse_netlink_msg(char *buf, size_t buf_len, fpm_msg_hdr_t *fpm)
}
print_netlink_msg_ctx(ctx);
+ handle_route_update(ctx, hdr->nlmsg_type == RTM_NEWROUTE);
if (glob->reflect && hdr->nlmsg_type == RTM_NEWROUTE &&
ctx->rtmsg->rtm_protocol > RTPROT_STATIC) {
@@ -854,17 +945,62 @@ static void fpm_serve(void)
}
}
+/* SIGUSR1 handler: dumps the route tree. NOTE(review): fopen/fprintf/fflush are not async-signal-safe and the tree may be mid-update — consider setting a flag here and dumping from the main loop */
+static void sigusr1_handler(int signum)
+{
+ struct fpm_route *route;
+ char buf[PREFIX_STRLEN];
+ FILE *out = glob->output_file;
+ FILE *dump_fp = NULL;
+
+ if (glob->dump_file) {
+ dump_fp = fopen(glob->dump_file, "w");
+ if (dump_fp) {
+ out = dump_fp;
+ setbuf(dump_fp, NULL);
+ } else
+ out = glob->output_file;
+ }
+
+ fprintf(out, "\n=== Route Tree Dump ===\n");
+ fprintf(out, "Timestamp: %s\n", get_timestamp());
+ fprintf(out, "Total routes: %zu\n", fpm_route_count(&glob->route_tree));
+ fprintf(out, "Routes:\n");
+
+ frr_each (fpm_route, &glob->route_tree, route) {
+ prefix2str(&route->prefix, buf, sizeof(buf));
+ fprintf(out, " Table %u, NHG %u: %s\n", route->table_id, route->nhg_id, buf);
+ }
+ fprintf(out, "=====================\n\n");
+ fflush(out);
+
+ if (dump_fp)
+ fclose(dump_fp);
+}
+
int main(int argc, char **argv)
{
pid_t daemon;
int r;
bool fork_daemon = false;
const char *output_file = NULL;
+ struct sigaction sa;
memset(glob, 0, sizeof(*glob));
glob->output_file = stdout;
+ fpm_route_init(&glob->route_tree);
+
+ /* Set up signal handler for SIGUSR1 */
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = sigusr1_handler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART;
+ if (sigaction(SIGUSR1, &sa, NULL) < 0) {
+ fprintf(stderr, "Failed to set up SIGUSR1 handler: %s\n", strerror(errno));
+ exit(1);
+ }
- while ((r = getopt(argc, argv, "rfdvo:")) != -1) {
+ while ((r = getopt(argc, argv, "rfdvo:z:")) != -1) {
switch (r) {
case 'r':
glob->reflect = true;
@@ -881,6 +1017,9 @@ int main(int argc, char **argv)
case 'o':
output_file = optarg;
break;
+ case 'z':
+ glob->dump_file = optarg;
+ break;
}
}
diff --git a/zebra/rib.h b/zebra/rib.h
index 652f6208f4..fa6ce4447b 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -192,6 +192,12 @@ struct route_entry {
struct meta_queue {
struct list *subq[MQ_SIZE];
uint32_t size; /* sum of lengths of all subqueues */
+ _Atomic uint32_t max_subq[MQ_SIZE]; /* Max size of individual sub queue */
+ _Atomic uint32_t max_metaq; /* Max size of the MetaQ */
+ _Atomic uint32_t total_subq[MQ_SIZE]; /* Total subq events */
+ _Atomic uint32_t total_metaq; /* Total MetaQ events */
+ _Atomic uint32_t re_subq[MQ_SIZE]; /* current RE count sub queue */
+ _Atomic uint32_t max_re_subq[MQ_SIZE]; /* Max RE in sub queue */
};
/*
@@ -474,6 +480,7 @@ extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
bool rt_delete);
extern void rib_update_handle_vrf_all(enum rib_update_event event, int rtype);
+int zebra_show_metaq_counter(struct vty *vty, bool uj);
/*
* rib_find_rn_from_ctx
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 4344a8d79a..a6d43daa93 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -6590,6 +6590,14 @@ int dplane_provider_work_ready(void)
}
/*
+ * Enqueue a context list to zebra main.
+ */
+void dplane_provider_enqueue_ctx_list_to_zebra(struct dplane_ctx_list_head *batch_list)
+{
+ (zdplane_info.dg_results_cb)(batch_list);
+}
+
+/*
* Enqueue a context directly to zebra main.
*/
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index cabc70c232..1c03a29534 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -1236,6 +1236,9 @@ void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
/* Enqueue a context directly to zebra main. */
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx);
+/* Enqueue a context list to zebra main. */
+void dplane_provider_enqueue_ctx_list_to_zebra(struct dplane_ctx_list_head *batch_list);
+
/* Enable collection of extra info about interfaces in route updates;
* this allows a provider/plugin to see some extra info in route update
* context objects.
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index e32b004ae9..c7dc5e5d07 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -30,6 +30,7 @@
#include "printfrr.h"
#include "frrscript.h"
#include "frrdistance.h"
+#include "lib/termtable.h"
#include "zebra/zebra_router.h"
#include "zebra/connected.h"
@@ -273,6 +274,63 @@ static const char *subqueue2str(enum meta_queue_indexes index)
return "Unknown";
}
+/* Handler for 'show zebra metaq' */
+int zebra_show_metaq_counter(struct vty *vty, bool uj)
+{
+ struct meta_queue *mq = zrouter.mq;
+ struct ttable *tt = NULL;
+ char *table = NULL;
+ json_object *json = NULL;
+ json_object *json_table = NULL;
+
+ if (!mq)
+ return CMD_WARNING;
+
+ /* Create a table for subqueue details */
+ tt = ttable_new(&ttable_styles[TTSTYLE_ASCII]);
+ ttable_add_row(tt, "SubQ|Current|Max Size|Total");
+
+ /* Add rows for each subqueue */
+ for (uint8_t i = 0; i < MQ_SIZE; i++) {
+ ttable_add_row(tt, "%s|%u|%u|%u", subqueue2str(i), mq->subq[i]->count,
+ mq->max_subq[i], mq->total_subq[i]);
+ }
+
+ /* For a better formatting between the content and separator */
+ tt->style.cell.rpad = 2;
+ tt->style.cell.lpad = 1;
+ ttable_restyle(tt);
+
+ if (uj) {
+ json = json_object_new_object();
+ /* Add MetaQ summary to the JSON object */
+ json_object_int_add(json, "currentSize", mq->size);
+ json_object_int_add(json, "maxSize", mq->max_metaq);
+ json_object_int_add(json, "total", mq->total_metaq);
+
+ /* Convert the table to JSON and add it to the main JSON object */
+	/* Column legend for ttable_json: s = string, d = decimal integer */
+ json_table = ttable_json(tt, "sddd");
+ json_object_object_add(json, "subqueues", json_table);
+ vty_json(vty, json);
+ } else {
+ vty_out(vty, "MetaQ Summary\n");
+ vty_out(vty, "Current Size\t: %u\n", mq->size);
+ vty_out(vty, "Max Size\t: %u\n", mq->max_metaq);
+ vty_out(vty, "Total\t\t: %u\n", mq->total_metaq);
+
+ /* Dump the table */
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP_TTABLE, table);
+ }
+
+ /* Clean up the table */
+ ttable_del(tt);
+
+ return CMD_SUCCESS;
+}
+
printfrr_ext_autoreg_p("ZN", printfrr_zebra_node);
static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea,
const void *ptr)
@@ -3257,6 +3315,7 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
struct route_node *rn = NULL;
struct route_entry *re = NULL, *curr_re = NULL;
uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+ uint64_t curr, high;
rn = (struct route_node *)data;
@@ -3300,6 +3359,15 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
listnode_add(mq->subq[qindex], rn);
route_lock_node(rn);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[qindex]);
+ high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s mq size %u", (void *)rn,
@@ -3310,8 +3378,21 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
{
+ uint64_t curr, high;
+
listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_LABEL], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_EARLY_LABEL]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], curr,
+ memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
+
return 0;
}
@@ -3320,6 +3401,7 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
struct nhg_ctx *ctx = NULL;
uint8_t qindex = META_QUEUE_NHG;
struct wq_nhg_wrapper *w;
+ uint64_t curr, high;
ctx = (struct nhg_ctx *)data;
@@ -3333,6 +3415,15 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
listnode_add(mq->subq[qindex], w);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[qindex]);
+ high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("NHG Context id=%u queued into sub-queue %s mq size %u", ctx->id,
@@ -3347,6 +3438,7 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
struct nhg_hash_entry *nhe = NULL;
uint8_t qindex = META_QUEUE_NHG;
struct wq_nhg_wrapper *w;
+ uint64_t curr, high;
nhe = (struct nhg_hash_entry *)data;
@@ -3361,6 +3453,15 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
listnode_add(mq->subq[qindex], w);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[qindex]);
+ high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("NHG id=%u queued into sub-queue %s mq size %u", nhe->id,
@@ -3381,8 +3482,19 @@ static int rib_meta_queue_nhg_del(struct meta_queue *mq, void *data)
static int rib_meta_queue_evpn_add(struct meta_queue *mq, void *data)
{
+ uint64_t curr, high;
+
listnode_add(mq->subq[META_QUEUE_EVPN], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EVPN], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_EVPN]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EVPN], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_EVPN], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
return 0;
}
@@ -4227,8 +4339,19 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
{
+ uint64_t curr, high;
+
listnode_add(mq->subq[META_QUEUE_GR_RUN], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_GR_RUN], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_GR_RUN]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_GR_RUN], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_GR_RUN], curr, memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("Graceful Run adding mq size %u", zrouter.mq->size);
@@ -4239,9 +4362,20 @@ static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
{
struct zebra_early_route *ere = data;
+ uint64_t curr, high;
listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
mq->size++;
+ atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+ atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_ROUTE], 1, memory_order_relaxed);
+ curr = listcount(mq->subq[META_QUEUE_EARLY_ROUTE]);
+ high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], memory_order_relaxed);
+ if (curr > high)
+ atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], curr,
+ memory_order_relaxed);
+ high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+ if (mq->size > high)
+ atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 15bc2c20d2..9e4db11989 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -4047,6 +4047,20 @@ DEFUN (zebra_show_routing_tables_summary,
return CMD_SUCCESS;
}
+/* Display Zebra MetaQ counters */
+DEFUN (show_zebra_metaq_counters,
+ show_zebra_metaq_counters_cmd,
+ "show zebra metaq [json]",
+ SHOW_STR
+ ZEBRA_STR
+ "Zebra MetaQ counters\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+
+ return zebra_show_metaq_counter(vty, uj);
+}
+
/* IPForwarding configuration write function. */
static int config_write_forwarding(struct vty *vty)
{
@@ -4336,6 +4350,7 @@ void zebra_vty_init(void)
install_element(VIEW_NODE, &show_dataplane_providers_cmd);
install_element(CONFIG_NODE, &zebra_dplane_queue_limit_cmd);
install_element(CONFIG_NODE, &no_zebra_dplane_queue_limit_cmd);
+ install_element(VIEW_NODE, &show_zebra_metaq_counters_cmd);
#ifdef HAVE_NETLINK
install_element(CONFIG_NODE, &zebra_kernel_netlink_batch_tx_buf_cmd);