author    Donald Sharp <sharpd@nvidia.com>    2025-03-21 07:48:50 -0400
committer Donald Sharp <sharpd@nvidia.com>    2025-03-25 09:10:46 -0400
commit    12bf042c688fedf82637fab9ff77aa1eab271160 (patch)
tree      c843f30f8d18fa4c03591c755b0805ba9a32a6af /bgpd/bgp_fsm.c
parent    f3790640d32908065dac91d84a687a98001dca25 (diff)
bgpd: Modify bgp to handle packet events in a FIFO
The current behavior of BGP is to have an event per connection. On startup with a high number of neighbors, this means 2 * (number of peers) events are being processed. Even once BGP has selected the connection, this still comes down to 512 events. This number of events swamps the event system and delays any other work from being done in BGP at all, because the 512 events always take precedence over everything else. The other main events are the handling of the metaQ (1 event), update group events (1 per update group), and the zebra batching event. These are being swamped.

Modify the BGP code to have a FIFO of connections. As new data comes in to read, place the connection on the end of the FIFO. Have bgp_process_packet handle up to 100 packets spread across the individual peers, where each peer/connection is limited to the original quanta. During testing I noticed that withdrawal events at very large scale were taking up to 40 seconds to process, so I added a check for yielding to further limit the number of packets being processed.

This change also allows BGP to be interactive again on scale setups during initial convergence. Prior to this change, any vtysh command entered would be delayed by tens of seconds in my setup while BGP was doing other work.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
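For orientation, a minimal sketch of how such a single draining event could be structured is shown below. It is not the actual FRR implementation: the FIFO, mutex, and event names (bm->connection_fifo, bm->peer_connection_mtx, bm->e_process_packet) are taken from the diff that follows, while process_quanta_for(), connection_has_pending_input(), peer_connection_fifo_pop(), and peer_connection_fifo_count() are illustrative assumptions standing in for the real helpers in bgpd/bgp_packet.c.

/*
 * Sketch only: one shared event drains the connection FIFO instead of
 * each connection scheduling its own t_process_packet event.
 */
static void bgp_process_packet_sketch(struct event *event)
{
	struct peer_connection *connection;
	unsigned int total = 0;

	while (total < 100) { /* overall per-run cap from the message above */
		/* Pop the next connection that has data waiting. */
		frr_with_mutex (&bm->peer_connection_mtx) {
			connection = peer_connection_fifo_pop(&bm->connection_fifo);
		}
		if (!connection)
			break;

		/* Each peer/connection is still limited to its original
		 * quanta of packets per pass. */
		total += process_quanta_for(connection);

		/* Requeue at the tail if input remains, so every peer gets
		 * serviced before any peer gets a second turn. */
		if (connection_has_pending_input(connection)) {
			frr_with_mutex (&bm->peer_connection_mtx) {
				peer_connection_fifo_add_tail(&bm->connection_fifo, connection);
			}
		}

		/* Yield check: large withdrawal bursts were observed to run
		 * for tens of seconds, so give other events a turn. */
		if (event_should_yield(event))
			break;
	}

	/* Reschedule ourselves if connections are still waiting. */
	frr_with_mutex (&bm->peer_connection_mtx) {
		if (peer_connection_fifo_count(&bm->connection_fifo))
			event_add_event(bm->master, bgp_process_packet_sketch,
					NULL, 0, &bm->e_process_packet);
	}
}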
Diffstat (limited to 'bgpd/bgp_fsm.c')
-rw-r--r--  bgpd/bgp_fsm.c | 15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 478f8c9136..2c1cbee6d0 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -184,7 +184,11 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
EVENT_OFF(keeper->t_delayopen);
EVENT_OFF(keeper->t_connect_check_r);
EVENT_OFF(keeper->t_connect_check_w);
- EVENT_OFF(keeper->t_process_packet);
+
+ frr_with_mutex (&bm->peer_connection_mtx) {
+ if (peer_connection_fifo_member(&bm->connection_fifo, keeper))
+ peer_connection_fifo_del(&bm->connection_fifo, keeper);
+ }
/*
* At this point in time, it is possible that there are packets pending
@@ -305,8 +309,13 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
bgp_reads_on(keeper);
bgp_writes_on(keeper);
- event_add_event(bm->master, bgp_process_packet, keeper, 0,
- &keeper->t_process_packet);
+
+ frr_with_mutex (&bm->peer_connection_mtx) {
+ if (!peer_connection_fifo_member(&bm->connection_fifo, keeper)) {
+ peer_connection_fifo_add_tail(&bm->connection_fifo, keeper);
+ }
+ }
+ event_add_event(bm->master, bgp_process_packet, NULL, 0, &bm->e_process_packet);
return (peer);
}
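To complete the picture, below is a hedged sketch of the enqueue side, which lives in the read path (bgpd/bgp_io.c and bgpd/bgp_packet.c) rather than in this file. The function name on_connection_readable_sketch() is hypothetical; the FIFO helpers, mutex, and the shared bm->e_process_packet event come straight from the hunks above.

/*
 * Sketch only: once new packets have been parsed onto a connection's
 * input queue, put that connection on the FIFO and poke the single
 * shared processing event. The membership check keeps a busy
 * connection from being queued twice.
 */
static void on_connection_readable_sketch(struct peer_connection *connection)
{
	frr_with_mutex (&bm->peer_connection_mtx) {
		if (!peer_connection_fifo_member(&bm->connection_fifo, connection))
			peer_connection_fifo_add_tail(&bm->connection_fifo, connection);
	}

	/* Passing NULL as the event argument is deliberate: the handler
	 * finds its work on the FIFO, not in the argument. This replaces
	 * the old per-connection t_process_packet event. */
	event_add_event(bm->master, bgp_process_packet, NULL, 0,
			&bm->e_process_packet);
}

Because the pending work lives on the mutex-protected FIFO and the event argument is NULL, redundant calls to event_add_event() are harmless: if bm->e_process_packet is already scheduled, the call is a no-op, so producers never need to coordinate beyond the mutex.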