| author    | Mark Stapp <mjs.ietf@gmail.com>          | 2025-04-16 09:16:40 -0400 |
|-----------|------------------------------------------|---------------------------|
| committer | GitHub <noreply@github.com>              | 2025-04-16 09:16:40 -0400 |
| commit    | 1ca756f315949b883584411bfe135f3b93f2b047 |                           |
| tree      | e96754eec860526be9bdd5d262a0646ee1859817 |                           |
| parent    | 21a32b010b821b22550d56a6218afdeb536271f8 |                           |
| parent    | 751ae766486e4f03ebfa623767e0aef043261170 |                           |
Merge pull request #18497 from krishna-samy/show-metaq-counters
zebra: show command to display metaq info
Diffstat (limited to 'zebra')
| Mode       | File              | Lines changed |
|------------|-------------------|---------------|
| -rw-r--r-- | zebra/rib.h       | 7             |
| -rw-r--r-- | zebra/zebra_rib.c | 134           |
| -rw-r--r-- | zebra/zebra_vty.c | 15            |
3 files changed, 156 insertions, 0 deletions
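
For orientation, the `vty_out()`, ttable, and json calls added in the diff below imply output along these lines. The counter values, table rendering, and sub-queue rows (named by `subqueue2str()`) are illustrative, not captured from a running zebra:

```
router# show zebra metaq
MetaQ Summary
Current Size    : 0
Max Size        : 1210
Total           : 14524

 SubQ          Current   Max Size   Total
 -----------------------------------------
 ...one row per sub-queue, named by subqueue2str()...

router# show zebra metaq json
{
  "currentSize": 0,
  "maxSize": 1210,
  "total": 14524,
  "subqueues": [ ... ]
}
```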
```diff
diff --git a/zebra/rib.h b/zebra/rib.h
index 652f6208f4..fa6ce4447b 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -192,6 +192,12 @@ struct route_entry {
 struct meta_queue {
 	struct list *subq[MQ_SIZE];
 	uint32_t size; /* sum of lengths of all subqueues */
+	_Atomic uint32_t max_subq[MQ_SIZE];    /* Max size of individual sub queue */
+	_Atomic uint32_t max_metaq;            /* Max size of the MetaQ */
+	_Atomic uint32_t total_subq[MQ_SIZE];  /* Total subq events */
+	_Atomic uint32_t total_metaq;          /* Total MetaQ events */
+	_Atomic uint32_t re_subq[MQ_SIZE];     /* current RE count sub queue */
+	_Atomic uint32_t max_re_subq[MQ_SIZE]; /* Max RE in sub queue */
 };
 
 /*
@@ -474,6 +480,7 @@
 extern void zebra_rib_evaluate_rn_nexthops(struct route_node *rn, uint32_t seq,
 					   bool rt_delete);
 
 extern void rib_update_handle_vrf_all(enum rib_update_event event, int rtype);
+int zebra_show_metaq_counter(struct vty *vty, bool uj);
 
 /*
  * rib_find_rn_from_ctx
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index e32b004ae9..c7dc5e5d07 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -30,6 +30,7 @@
 #include "printfrr.h"
 #include "frrscript.h"
 #include "frrdistance.h"
+#include "lib/termtable.h"
 
 #include "zebra/zebra_router.h"
 #include "zebra/connected.h"
@@ -273,6 +274,63 @@ static const char *subqueue2str(enum meta_queue_indexes index)
 	return "Unknown";
 }
 
+/* Handler for 'show zebra metaq' */
+int zebra_show_metaq_counter(struct vty *vty, bool uj)
+{
+	struct meta_queue *mq = zrouter.mq;
+	struct ttable *tt = NULL;
+	char *table = NULL;
+	json_object *json = NULL;
+	json_object *json_table = NULL;
+
+	if (!mq)
+		return CMD_WARNING;
+
+	/* Create a table for subqueue details */
+	tt = ttable_new(&ttable_styles[TTSTYLE_ASCII]);
+	ttable_add_row(tt, "SubQ|Current|Max Size|Total");
+
+	/* Add rows for each subqueue */
+	for (uint8_t i = 0; i < MQ_SIZE; i++) {
+		ttable_add_row(tt, "%s|%u|%u|%u", subqueue2str(i), mq->subq[i]->count,
+			       mq->max_subq[i], mq->total_subq[i]);
+	}
+
+	/* For better formatting between the content and separator */
+	tt->style.cell.rpad = 2;
+	tt->style.cell.lpad = 1;
+	ttable_restyle(tt);
+
+	if (uj) {
+		json = json_object_new_object();
+		/* Add MetaQ summary to the JSON object */
+		json_object_int_add(json, "currentSize", mq->size);
+		json_object_int_add(json, "maxSize", mq->max_metaq);
+		json_object_int_add(json, "total", mq->total_metaq);
+
+		/* Convert the table to JSON and add it to the main JSON object */
+		/* s = string column, d = integer column */
+		json_table = ttable_json(tt, "sddd");
+		json_object_object_add(json, "subqueues", json_table);
+		vty_json(vty, json);
+	} else {
+		vty_out(vty, "MetaQ Summary\n");
+		vty_out(vty, "Current Size\t: %u\n", mq->size);
+		vty_out(vty, "Max Size\t: %u\n", mq->max_metaq);
+		vty_out(vty, "Total\t\t: %u\n", mq->total_metaq);
+
+		/* Dump the table */
+		table = ttable_dump(tt, "\n");
+		vty_out(vty, "%s\n", table);
+		XFREE(MTYPE_TMP_TTABLE, table);
+	}
+
+	/* Clean up the table */
+	ttable_del(tt);
+
+	return CMD_SUCCESS;
+}
+
 printfrr_ext_autoreg_p("ZN", printfrr_zebra_node);
 static ssize_t printfrr_zebra_node(struct fbuf *buf, struct printfrr_eargs *ea,
 				   const void *ptr)
@@ -3257,6 +3315,7 @@ static int rib_meta_queue_add(struct meta_queue *mq, void *data)
 	struct route_node *rn = NULL;
 	struct route_entry *re = NULL, *curr_re = NULL;
 	uint8_t qindex = MQ_SIZE, curr_qindex = MQ_SIZE;
+	uint64_t curr, high;
 
 	rn = (struct route_node *)data;
 
@@ -3300,6 +3359,15 @@
 	listnode_add(mq->subq[qindex], rn);
 	route_lock_node(rn);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[qindex]);
+	high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
 	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
 		rnode_debug(rn, re->vrf_id, "queued rn %p into sub-queue %s mq size %u", (void *)rn,
@@ -3310,8 +3378,21 @@
 
 static int early_label_meta_queue_add(struct meta_queue *mq, void *data)
 {
+	uint64_t curr, high;
+
 	listnode_add(mq->subq[META_QUEUE_EARLY_LABEL], data);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_LABEL], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[META_QUEUE_EARLY_LABEL]);
+	high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_LABEL], curr,
+				      memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
+
 	return 0;
 }
 
@@ -3320,6 +3401,7 @@ static int rib_meta_queue_nhg_ctx_add(struct meta_queue *mq, void *data)
 	struct nhg_ctx *ctx = NULL;
 	uint8_t qindex = META_QUEUE_NHG;
 	struct wq_nhg_wrapper *w;
+	uint64_t curr, high;
 
 	ctx = (struct nhg_ctx *)data;
 
@@ -3333,6 +3415,15 @@
 	listnode_add(mq->subq[qindex], w);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[qindex]);
+	high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
 	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
 		zlog_debug("NHG Context id=%u queued into sub-queue %s mq size %u", ctx->id,
@@ -3347,6 +3438,7 @@ static int rib_meta_queue_nhg_process(struct meta_queue *mq, void *data,
 	struct nhg_hash_entry *nhe = NULL;
 	uint8_t qindex = META_QUEUE_NHG;
 	struct wq_nhg_wrapper *w;
+	uint64_t curr, high;
 
 	nhe = (struct nhg_hash_entry *)data;
 
@@ -3361,6 +3453,15 @@
 	listnode_add(mq->subq[qindex], w);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[qindex], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[qindex]);
+	high = atomic_load_explicit(&mq->max_subq[qindex], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[qindex], curr, memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
 	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
 		zlog_debug("NHG id=%u queued into sub-queue %s mq size %u", nhe->id,
@@ -3381,8 +3482,19 @@ static int rib_meta_queue_nhg_del(struct meta_queue *mq, void *data)
 
 static int rib_meta_queue_evpn_add(struct meta_queue *mq, void *data)
 {
+	uint64_t curr, high;
+
 	listnode_add(mq->subq[META_QUEUE_EVPN], data);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EVPN], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[META_QUEUE_EVPN]);
+	high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EVPN], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[META_QUEUE_EVPN], curr, memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
 	return 0;
 }
 
@@ -4227,8 +4339,19 @@ void _route_entry_dump(const char *func, union prefixconstptr pp,
 
 static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
 {
+	uint64_t curr, high;
+
 	listnode_add(mq->subq[META_QUEUE_GR_RUN], data);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_GR_RUN], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[META_QUEUE_GR_RUN]);
+	high = atomic_load_explicit(&mq->max_subq[META_QUEUE_GR_RUN], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[META_QUEUE_GR_RUN], curr, memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
 	if (IS_ZEBRA_DEBUG_RIB_DETAILED)
 		zlog_debug("Graceful Run adding mq size %u", zrouter.mq->size);
@@ -4239,9 +4362,20 @@ static int rib_meta_queue_gr_run_add(struct meta_queue *mq, void *data)
 static int rib_meta_queue_early_route_add(struct meta_queue *mq, void *data)
 {
 	struct zebra_early_route *ere = data;
+	uint64_t curr, high;
 
 	listnode_add(mq->subq[META_QUEUE_EARLY_ROUTE], data);
 	mq->size++;
+	atomic_fetch_add_explicit(&mq->total_metaq, 1, memory_order_relaxed);
+	atomic_fetch_add_explicit(&mq->total_subq[META_QUEUE_EARLY_ROUTE], 1, memory_order_relaxed);
+	curr = listcount(mq->subq[META_QUEUE_EARLY_ROUTE]);
+	high = atomic_load_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], memory_order_relaxed);
+	if (curr > high)
+		atomic_store_explicit(&mq->max_subq[META_QUEUE_EARLY_ROUTE], curr,
+				      memory_order_relaxed);
+	high = atomic_load_explicit(&mq->max_metaq, memory_order_relaxed);
+	if (mq->size > high)
+		atomic_store_explicit(&mq->max_metaq, mq->size, memory_order_relaxed);
 
 	if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
 		struct vrf *vrf = vrf_lookup_by_id(ere->re->vrf_id);
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 15bc2c20d2..9e4db11989 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -4047,6 +4047,20 @@ DEFUN (zebra_show_routing_tables_summary,
 	return CMD_SUCCESS;
 }
 
+/* Display Zebra MetaQ counters */
+DEFUN (show_zebra_metaq_counters,
+       show_zebra_metaq_counters_cmd,
+       "show zebra metaq [json]",
+       SHOW_STR
+       ZEBRA_STR
+       "Zebra MetaQ counters\n"
+       JSON_STR)
+{
+	bool uj = use_json(argc, argv);
+
+	return zebra_show_metaq_counter(vty, uj);
+}
+
 /* IPForwarding configuration write function. */
 static int config_write_forwarding(struct vty *vty)
 {
@@ -4336,6 +4350,7 @@ void zebra_vty_init(void)
 	install_element(VIEW_NODE, &show_dataplane_providers_cmd);
 	install_element(CONFIG_NODE, &zebra_dplane_queue_limit_cmd);
 	install_element(CONFIG_NODE, &no_zebra_dplane_queue_limit_cmd);
+	install_element(VIEW_NODE, &show_zebra_metaq_counters_cmd);
 
 #ifdef HAVE_NETLINK
 	install_element(CONFIG_NODE, &zebra_kernel_netlink_batch_tx_buf_cmd);
```
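
Every enqueue path above repeats the same relaxed-atomic high-water-mark idiom: bump the totals, then lift the recorded maximum if the current depth exceeds it. A minimal standalone sketch of that pattern, in plain C11 atomics; the struct and function names here are illustrative, not FRR's:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Best-effort queue statistics, mirroring the fields added to
 * struct meta_queue: a running event total and the deepest queue
 * depth ever observed. */
struct hwm_counter {
	_Atomic uint32_t total; /* events ever enqueued */
	_Atomic uint32_t max;   /* high-water mark of queue depth */
};

static void hwm_note_enqueue(struct hwm_counter *c, uint32_t curr_depth)
{
	/* memory_order_relaxed suffices: these are debug counters,
	 * not synchronization points. */
	atomic_fetch_add_explicit(&c->total, 1, memory_order_relaxed);

	uint32_t high = atomic_load_explicit(&c->max, memory_order_relaxed);
	if (curr_depth > high)
		atomic_store_explicit(&c->max, curr_depth, memory_order_relaxed);
}

int main(void)
{
	struct hwm_counter c = { 0, 0 };

	for (uint32_t depth = 1; depth <= 5; depth++)
		hwm_note_enqueue(&c, depth);
	hwm_note_enqueue(&c, 3); /* shallower again; max stays 5 */

	/* Prints: total=6 max=5 */
	printf("total=%u max=%u\n",
	       atomic_load_explicit(&c.total, memory_order_relaxed),
	       atomic_load_explicit(&c.max, memory_order_relaxed));
	return 0;
}
```

Note that the load/compare/store pair is not atomic as a whole, so two racing writers could leave the maximum slightly low; a compare-exchange loop would close that window, but for best-effort statistics updated from zebra's event loop the cheaper form used in the diff appears to be a deliberate trade.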
