bgp->main_zebra_update_hold = 1;
bgp->main_peers_update_hold = 1;
- /* Resume the queue processing. This should trigger the event that would
- take
- care of processing any work that was queued during the read-only
- mode. */
- work_queue_unplug(bm->process_main_queue);
+ /*
+ * Resume the queue processing. This should trigger the event that would
+ * take care of processing any work that was queued during the read-only
+ * mode.
+ */
+ work_queue_unplug(bgp->process_queue);
}
/**
struct peer *peer;
- /* Stop the processing of queued work. Enqueue shall continue */
- work_queue_plug(bm->process_main_queue);
+ /* Stop the processing of queued work. Enqueuing shall continue */
+ work_queue_plug(bgp->process_queue);
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer))
peer->update_delay_over = 0;
XFREE(MTYPE_BGP_PROCESS_QUEUE, pqnode);
}
-void bgp_process_queue_init(void)
+void bgp_process_queue_init(struct bgp *bgp)
{
- if (!bm->process_main_queue)
- bm->process_main_queue =
- work_queue_new(bm->master, "process_main_queue");
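+ /* Create the per-instance queue once, named for this BGP instance */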
+ if (!bgp->process_queue) {
+ char name[BUFSIZ];
- bm->process_main_queue->spec.workfunc = &bgp_process_wq;
- bm->process_main_queue->spec.del_item_data = &bgp_processq_del;
- bm->process_main_queue->spec.max_retries = 0;
- bm->process_main_queue->spec.hold = 50;
+ snprintf(name, BUFSIZ, "process_queue %s", bgp->name_pretty);
+ bgp->process_queue = work_queue_new(bm->master, name);
+ }
+
+ bgp->process_queue->spec.workfunc = &bgp_process_wq;
+ bgp->process_queue->spec.del_item_data = &bgp_processq_del;
+ bgp->process_queue->spec.max_retries = 0;
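+ /* Hold queued work for 50ms before the queue's first run */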
+ bgp->process_queue->spec.hold = 50;
- /* Use a higher yield value of 50ms for main queue processing */
- bm->process_main_queue->spec.yield = 50 * 1000L;
+ /* Use a higher yield value of 50ms for queue processing */
+ bgp->process_queue->spec.yield = 50 * 1000L;
}
static struct bgp_process_queue *bgp_processq_alloc(struct bgp *bgp)
void bgp_process(struct bgp *bgp, struct bgp_dest *dest, afi_t afi, safi_t safi)
{
#define ARBITRARY_PROCESS_QLEN 10000
- struct work_queue *wq = bm->process_main_queue;
+ struct work_queue *wq = bgp->process_queue;
struct bgp_process_queue *pqnode;
int pqnode_reuse = 0;
{
struct bgp_process_queue *pqnode;
- if (bm->process_main_queue == NULL)
+ if (bgp->process_queue == NULL)
return;
pqnode = bgp_processq_alloc(bgp);
SET_FLAG(pqnode->flags, BGP_PROCESS_QUEUE_EOIU_MARKER);
- work_queue_add(bm->process_main_queue, pqnode);
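+ /* The marker node carries no route data; it only signals end-of-initial-update */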
+ work_queue_add(bgp->process_queue, pqnode);
}
static int bgp_maximum_prefix_restart_timer(struct thread *thread)
struct bgp_table *table)
{
struct bgp_dest *dest;
- int force = bm->process_main_queue ? 0 : 1;
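+ /* No queue to defer work to; clear routes immediately instead of enqueueing */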
+ int force = peer->bgp->process_queue ? 0 : 1;
if (!table)
table = peer->bgp->rib[afi][safi];
/* Prototypes. */
extern void bgp_rib_remove(struct bgp_dest *dest, struct bgp_path_info *pi,
struct peer *peer, afi_t afi, safi_t safi);
-extern void bgp_process_queue_init(void);
+extern void bgp_process_queue_init(struct bgp *bgp);
extern void bgp_route_init(void);
extern void bgp_route_finish(void);
extern void bgp_cleanup_routes(struct bgp *);
}
bgp_lock(bgp);
+
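+ /* Set up this instance's route processing work queue */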
+ bgp_process_queue_init(bgp);
bgp->heuristic_coalesce = true;
bgp->inst_type = inst_type;
bgp->vrf_id = (inst_type == BGP_INSTANCE_TYPE_DEFAULT) ? VRF_DEFAULT
bgp_set_evpn(bgp_get_default());
}
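+ /* Release the per-instance work queue along with the instance */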
+ if (bgp->process_queue)
+ work_queue_free_and_null(&bgp->process_queue);
+
thread_master_free_unused(bm->master);
bgp_unlock(bgp); /* initial reference */
bm->terminating = false;
bm->socket_buffer = buffer_size;
- bgp_process_queue_init();
-
bgp_mac_init();
/* init the rd id space.
assign 0th index in the bitfield,
bgp_notify_send(peer, BGP_NOTIFY_CEASE,
BGP_NOTIFY_CEASE_PEER_UNCONFIG);
- if (bm->process_main_queue)
- work_queue_free_and_null(&bm->process_main_queue);
-
if (bm->t_rmap_update)
BGP_TIMER_OFF(bm->t_rmap_update);
/* BGP thread master. */
struct thread_master *master;
- /* work queues */
- struct work_queue *process_main_queue;
-
/* Listening sockets */
struct list *listen_sockets;
/* Weighted ECMP related config. */
enum bgp_link_bw_handling lb_handling;
+ /* Process Queue for handling routes */
+ struct work_queue *process_queue;
+
QOBJ_FIELDS
};
DECLARE_QOBJ_TYPE(bgp)