author	Donald Sharp <sharpd@nvidia.com>	2025-03-24 14:11:35 -0400
committer	Donald Sharp <sharpd@nvidia.com>	2025-03-25 09:10:46 -0400
commit	937a9fb3e923beb1cf0a795daddb178cb1fe0ec4 (patch)
tree	47313603c293309310f970122c06cbc12237b7f7 /zebra/zserv.c
parent	12bf042c688fedf82637fab9ff77aa1eab271160 (diff)
zebra: Limit reading packets when MetaQ is full
Currently Zebra just reads packets off the zapi wire and stacks them up for later processing. When there is significant churn in the network, zebra's memory usage can grow without bound because nothing ties the amount read to the size of the MetaQ. This growth shows up as a large number of nexthops in the system. Reducing the number of packets serviced, so that the MetaQ stays limited to the configured packets-to-process budget, alleviates this problem.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
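The mechanism is a simple budget clamp: before reading more packets off the wire, the configured packets_to_process budget is reduced by however many items already sit in the MetaQ, bottoming out at zero. A minimal standalone sketch of that arithmetic (the name clamp_packet_budget is illustrative, not an FRR identifier):

	#include <stdint.h>

	/* Return how many new zapi packets may be read on this pass: the
	 * configured budget minus what is already queued in the MetaQ,
	 * clamped at zero so a full queue stops the reader entirely. */
	static uint32_t clamp_packet_budget(uint32_t packets_to_process,
					    uint32_t meta_queue_size)
	{
		if (meta_queue_size < packets_to_process)
			return packets_to_process - meta_queue_size;

		return 0;
	}

When the queue is already at or above the budget, the result is zero, so the read loop in the patched function below pulls nothing and the backlog gets a chance to drain before more work is accepted.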
Diffstat (limited to 'zebra/zserv.c')
-rw-r--r--	zebra/zserv.c	|	6 ++++++
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/zebra/zserv.c b/zebra/zserv.c
index d477cd051f..aab1bd0062 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -530,6 +530,12 @@ static void zserv_process_messages(struct event *thread)
 	struct stream_fifo *cache = stream_fifo_new();
 	uint32_t p2p = zrouter.packets_to_process;
 	bool need_resched = false;
+	uint32_t meta_queue_size = zebra_rib_meta_queue_size();
+
+	if (meta_queue_size < p2p)
+		p2p = p2p - meta_queue_size;
+	else
+		p2p = 0;
 
 	frr_with_mutex (&client->ibuf_mtx) {
 		uint32_t i;
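To see why this bounds the backlog, consider a toy simulation of successive scheduler passes in which the wire offers packets faster than the MetaQ drains. All numbers and names here are made up for illustration; this is not FRR code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint32_t packets_to_process = 1000; /* per-pass read budget */
		const uint32_t drain = 400;    /* MetaQ items handled per pass */
		uint32_t metaq = 0;            /* current MetaQ depth */
		uint32_t pending = 10000;      /* packets waiting on the wire */

		for (int pass = 1; pending > 0 || metaq > 0; pass++) {
			/* The clamp from the patch: shrink the budget
			 * by the existing backlog. */
			uint32_t p2p = metaq < packets_to_process
					       ? packets_to_process - metaq
					       : 0;
			uint32_t read = pending < p2p ? pending : p2p;

			pending -= read;
			metaq += read;
			printf("pass %d: read %u, metaq depth %u\n",
			       pass, read, metaq);

			metaq -= metaq < drain ? metaq : drain;
		}

		return 0;
	}

In this sketch the MetaQ depth never exceeds packets_to_process (1000 here), whereas without the clamp each pass would read a full budget regardless of backlog and the queue would climb toward the entire 10000-packet burst.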