summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/Makefile.am5
-rw-r--r--bgpd/bgp_debug.c54
-rw-r--r--bgpd/bgp_debug.h3
-rw-r--r--bgpd/bgp_labelpool.c592
-rw-r--r--bgpd/bgp_labelpool.h51
-rw-r--r--bgpd/bgp_zebra.c40
-rw-r--r--bgpd/bgpd.c7
-rw-r--r--bgpd/bgpd.h4
-rw-r--r--lib/zclient.c40
-rw-r--r--lib/zclient.h7
10 files changed, 800 insertions, 3 deletions
diff --git a/bgpd/Makefile.am b/bgpd/Makefile.am
index 61d46dfcb9..a2880b7b94 100644
--- a/bgpd/Makefile.am
+++ b/bgpd/Makefile.am
@@ -87,7 +87,7 @@ libbgp_a_SOURCES = \
bgp_encap_tlv.c $(BGP_VNC_RFAPI_SRC) bgp_attr_evpn.c \
bgp_evpn.c bgp_evpn_vty.c bgp_vpn.c bgp_label.c bgp_rd.c \
bgp_keepalives.c bgp_io.c bgp_flowspec.c bgp_flowspec_util.c \
- bgp_flowspec_vty.c
+ bgp_flowspec_vty.c bgp_labelpool.c
noinst_HEADERS = \
bgp_memory.h \
@@ -100,7 +100,8 @@ noinst_HEADERS = \
bgp_updgrp.h bgp_bfd.h bgp_encap_tlv.h bgp_encap_types.h \
$(BGP_VNC_RFAPI_HD) bgp_attr_evpn.h bgp_evpn.h bgp_evpn_vty.h \
bgp_vpn.h bgp_label.h bgp_rd.h bgp_evpn_private.h bgp_keepalives.h \
- bgp_io.h bgp_flowspec.h bgp_flowspec_private.h bgp_flowspec_util.h
+ bgp_io.h bgp_flowspec.h bgp_flowspec_private.h bgp_flowspec_util.h \
+ bgp_labelpool.h
bgpd_SOURCES = bgp_main.c
bgpd_LDADD = libbgp.a $(BGP_VNC_RFP_LIB) ../lib/libfrr.la @LIBCAP@ @LIBM@
diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c
index ae4ff5d67e..29ac5f520d 100644
--- a/bgpd/bgp_debug.c
+++ b/bgpd/bgp_debug.c
@@ -58,6 +58,7 @@ unsigned long conf_bgp_debug_nht;
unsigned long conf_bgp_debug_update_groups;
unsigned long conf_bgp_debug_vpn;
unsigned long conf_bgp_debug_flowspec;
+unsigned long conf_bgp_debug_labelpool;
unsigned long term_bgp_debug_as4;
unsigned long term_bgp_debug_neighbor_events;
@@ -73,6 +74,7 @@ unsigned long term_bgp_debug_nht;
unsigned long term_bgp_debug_update_groups;
unsigned long term_bgp_debug_vpn;
unsigned long term_bgp_debug_flowspec;
+unsigned long term_bgp_debug_labelpool;
struct list *bgp_debug_neighbor_events_peers = NULL;
struct list *bgp_debug_keepalive_peers = NULL;
@@ -1655,6 +1657,44 @@ DEFUN (no_debug_bgp_vpn,
return CMD_SUCCESS;
}
+DEFUN (debug_bgp_labelpool,
+ debug_bgp_labelpool_cmd,
+ "debug bgp labelpool",
+ DEBUG_STR
+ BGP_STR
+ "label pool\n")
+{
+ if (vty->node == CONFIG_NODE)
+ DEBUG_ON(labelpool, LABELPOOL);
+ else
+ TERM_DEBUG_ON(labelpool, LABELPOOL);
+
+ if (vty->node != CONFIG_NODE)
+ vty_out(vty, "enabled debug bgp labelpool\n");
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_bgp_labelpool,
+ no_debug_bgp_labelpool_cmd,
+ "no debug bgp labelpool",
+ NO_STR
+ DEBUG_STR
+ BGP_STR
+ "label pool\n")
+{
+ if (vty->node == CONFIG_NODE)
+ DEBUG_OFF(labelpool, LABELPOOL);
+ else
+ TERM_DEBUG_OFF(labelpool, LABELPOOL);
+
+
+ if (vty->node != CONFIG_NODE)
+ vty_out(vty, "disabled debug bgp labelpool\n");
+
+ return CMD_SUCCESS;
+}
+
DEFUN (no_debug_bgp,
no_debug_bgp_cmd,
"no debug bgp",
@@ -1692,6 +1732,7 @@ DEFUN (no_debug_bgp,
TERM_DEBUG_OFF(vpn, VPN_LEAK_RMAP_EVENT);
TERM_DEBUG_OFF(vpn, VPN_LEAK_LABEL);
TERM_DEBUG_OFF(flowspec, FLOWSPEC);
+ TERM_DEBUG_OFF(labelpool, LABELPOOL);
vty_out(vty, "All possible debugging has been turned off\n");
return CMD_SUCCESS;
@@ -1764,6 +1805,8 @@ DEFUN_NOSH (show_debugging_bgp,
vty_out(vty, " BGP vpn label event debugging is on\n");
if (BGP_DEBUG(flowspec, FLOWSPEC))
vty_out(vty, " BGP flowspec debugging is on\n");
+ if (BGP_DEBUG(labelpool, LABELPOOL))
+ vty_out(vty, " BGP labelpool debugging is on\n");
vty_out(vty, "\n");
return CMD_SUCCESS;
@@ -1819,6 +1862,8 @@ int bgp_debug_count(void)
ret++;
if (BGP_DEBUG(flowspec, FLOWSPEC))
ret++;
+ if (BGP_DEBUG(labelpool, LABELPOOL))
+ ret++;
return ret;
}
@@ -1916,6 +1961,10 @@ static int bgp_config_write_debug(struct vty *vty)
vty_out(vty, "debug bgp flowspec\n");
write++;
}
+ if (CONF_BGP_DEBUG(labelpool, LABELPOOL)) {
+ vty_out(vty, "debug bgp labelpool\n");
+ write++;
+ }
return write;
}
@@ -2015,6 +2064,11 @@ void bgp_debug_init(void)
install_element(CONFIG_NODE, &debug_bgp_vpn_cmd);
install_element(ENABLE_NODE, &no_debug_bgp_vpn_cmd);
install_element(CONFIG_NODE, &no_debug_bgp_vpn_cmd);
+
+ install_element(ENABLE_NODE, &debug_bgp_labelpool_cmd);
+ install_element(CONFIG_NODE, &debug_bgp_labelpool_cmd);
+ install_element(ENABLE_NODE, &no_debug_bgp_labelpool_cmd);
+ install_element(CONFIG_NODE, &no_debug_bgp_labelpool_cmd);
}
/* Return true if this prefix is on the per_prefix_list of prefixes to debug
diff --git a/bgpd/bgp_debug.h b/bgpd/bgp_debug.h
index a0b179e213..ad476ee918 100644
--- a/bgpd/bgp_debug.h
+++ b/bgpd/bgp_debug.h
@@ -74,6 +74,7 @@ extern unsigned long conf_bgp_debug_nht;
extern unsigned long conf_bgp_debug_update_groups;
extern unsigned long conf_bgp_debug_vpn;
extern unsigned long conf_bgp_debug_flowspec;
+extern unsigned long conf_bgp_debug_labelpool;
extern unsigned long term_bgp_debug_as4;
extern unsigned long term_bgp_debug_neighbor_events;
@@ -87,6 +88,7 @@ extern unsigned long term_bgp_debug_nht;
extern unsigned long term_bgp_debug_update_groups;
extern unsigned long term_bgp_debug_vpn;
extern unsigned long term_bgp_debug_flowspec;
+extern unsigned long term_bgp_debug_labelpool;
extern struct list *bgp_debug_neighbor_events_peers;
extern struct list *bgp_debug_keepalive_peers;
@@ -120,6 +122,7 @@ struct bgp_debug_filter {
#define BGP_DEBUG_VPN_LEAK_RMAP_EVENT 0x04
#define BGP_DEBUG_VPN_LEAK_LABEL 0x08
#define BGP_DEBUG_FLOWSPEC 0x01
+#define BGP_DEBUG_LABELPOOL 0x01
#define BGP_DEBUG_PACKET_SEND 0x01
#define BGP_DEBUG_PACKET_SEND_DETAIL 0x02
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
new file mode 100644
index 0000000000..2c98cd9ef9
--- /dev/null
+++ b/bgpd/bgp_labelpool.c
@@ -0,0 +1,592 @@
+/*
+ * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
+ *
+ * Copyright (C) 2018 LabN Consulting, L.L.C.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "memory.h"
+#include "stream.h"
+#include "mpls.h"
+#include "vty.h"
+#include "fifo.h"
+#include "linklist.h"
+#include "skiplist.h"
+#include "workqueue.h"
+#include "zclient.h"
+
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_labelpool.h"
+#include "bgpd/bgp_debug.h"
+
+/*
+ * Definitions and external declarations.
+ */
+extern struct zclient *zclient;
+
+/*
+ * Remember where pool data are kept
+ */
+static struct labelpool *lp;
+
+/* request this many labels at a time from zebra */
+#define LP_CHUNK_SIZE 50
+
+DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
+DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO")
+DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
+DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")
+
+#define LABEL_FIFO_ADD(F, N) \
+ do { \
+ FIFO_ADD((F), (N)); \
+ (F)->count++; \
+ } while (0)
+
+#define LABEL_FIFO_DEL(F, N) \
+ do { \
+ FIFO_DEL((N)); \
+ (F)->count--; \
+ } while (0)
+
+#define LABEL_FIFO_INIT(F) \
+ do { \
+ FIFO_INIT((F)); \
+ (F)->count = 0; \
+ } while (0)
+
+#define LABEL_FIFO_COUNT(F) ((F)->count)
+
+#define LABEL_FIFO_EMPTY(F) FIFO_EMPTY(F)
+
+#define LABEL_FIFO_HEAD(F) ((F)->next == (F) ? NULL : (F)->next)
+
+struct lp_chunk {
+ uint32_t first;
+ uint32_t last;
+};
+
+/*
+ * label control block
+ */
+struct lp_lcb {
+ mpls_label_t label; /* MPLS_LABEL_NONE = not allocated */
+ int type;
+ void *labelid; /* unique ID */
+ /*
+ * callback for label allocation and loss
+ *
+ * allocated: false = lost
+ */
+ int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
+};
+
+/* XXX same first elements as "struct fifo" */
+struct lp_fifo {
+ struct lp_fifo *next;
+ struct lp_fifo *prev;
+
+ uint32_t count;
+ struct lp_lcb lcb;
+};
+
+struct lp_cbq_item {
+ int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
+ int type;
+ mpls_label_t label;
+ void *labelid;
+ bool allocated; /* false = lost */
+};
+
+static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
+{
+ struct lp_cbq_item *lcbq = data;
+ int rc;
+ int debug = BGP_DEBUG(labelpool, LABELPOOL);
+
+ if (debug)
+ zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
+ __func__, lcbq->labelid, lcbq->label, lcbq->allocated);
+
+ if (lcbq->label == MPLS_LABEL_NONE) {
+ /* shouldn't happen */
+ zlog_err("%s: error: label==MPLS_LABEL_NONE", __func__);
+ return WQ_SUCCESS;
+ }
+
+ rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);
+
+ if (lcbq->allocated && rc) {
+ /*
+ * Callback rejected allocation. This situation could arise
+ * if there was a label request followed by the requestor
+ * deciding it didn't need the assignment (e.g., config
+ * change) while the reply to the original request (with
+ * label) was in the work queue.
+ */
+ if (debug)
+ zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
+ __func__, lcbq->labelid, lcbq->label);
+
+ uintptr_t lbl = lcbq->label;
+ void *labelid;
+ struct lp_lcb *lcb;
+
+ /*
+ * If the rejected label was marked inuse by this labelid,
+ * release the label back to the pool.
+ *
+ * Further, if the rejected label was still assigned to
+ * this labelid in the LCB, delete the LCB.
+ */
+ if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
+ if (labelid == lcbq->labelid) {
+ if (!skiplist_search(lp->ledger, labelid,
+ (void **)&lcb)) {
+ if (lcbq->label == lcb->label)
+ skiplist_delete(lp->ledger,
+ labelid, NULL);
+ }
+ skiplist_delete(lp->inuse, (void *)lbl, NULL);
+ }
+ }
+ }
+
+ return WQ_SUCCESS;
+}
+
+static void lp_cbq_item_free(struct work_queue *wq, void *data)
+{
+ XFREE(MTYPE_BGP_LABEL_CBQ, data);
+}
+
+static void lp_lcb_free(void *goner)
+{
+ if (goner)
+ XFREE(MTYPE_BGP_LABEL_CB, goner);
+}
+
+static void lp_chunk_free(void *goner)
+{
+ if (goner)
+ XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
+}
+
+void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
+{
+ if (BGP_DEBUG(labelpool, LABELPOOL))
+ zlog_debug("%s: entry", __func__);
+
+ lp = pool; /* Set module pointer to pool data */
+
+ lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
+ lp->inuse = skiplist_new(0, NULL, NULL);
+ lp->chunks = list_new();
+ lp->chunks->del = lp_chunk_free;
+ lp->requests = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
+ LABEL_FIFO_INIT(lp->requests);
+ lp->callback_q = work_queue_new(master, "label callbacks");
+ if (!lp->callback_q) {
+ zlog_err("%s: Failed to allocate work queue", __func__);
+ exit(1);
+ }
+
+ lp->callback_q->spec.workfunc = lp_cbq_docallback;
+ lp->callback_q->spec.del_item_data = lp_cbq_item_free;
+ lp->callback_q->spec.max_retries = 0;
+}
+
+void bgp_lp_finish(void)
+{
+ struct lp_fifo *lf;
+
+ if (!lp)
+ return;
+
+ skiplist_free(lp->ledger);
+ lp->ledger = NULL;
+
+ skiplist_free(lp->inuse);
+ lp->inuse = NULL;
+
+ list_delete_and_null(&lp->chunks);
+
+ while ((lf = LABEL_FIFO_HEAD(lp->requests))) {
+
+ LABEL_FIFO_DEL(lp->requests, lf);
+ XFREE(MTYPE_BGP_LABEL_FIFO, lf);
+ }
+ XFREE(MTYPE_BGP_LABEL_FIFO, lp->requests);
+ lp->requests = NULL;
+
+ work_queue_free_and_null(&lp->callback_q);
+
+ lp = NULL;
+}
+
+static mpls_label_t get_label_from_pool(void *labelid)
+{
+ struct listnode *node;
+ struct lp_chunk *chunk;
+ int debug = BGP_DEBUG(labelpool, LABELPOOL);
+
+ /*
+ * Find a free label
+ * Linear search is not efficient but should be executed infrequently.
+ */
+ for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
+ uintptr_t lbl;
+
+ if (debug)
+ zlog_debug("%s: chunk first=%u last=%u",
+ __func__, chunk->first, chunk->last);
+
+ for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
+ /* labelid is key to all-request "ledger" list */
+ if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
+ /*
+ * Success
+ */
+ return lbl;
+ }
+ }
+ }
+ return MPLS_LABEL_NONE;
+}
+
+/*
+ * Success indicated by value of "label" field in returned LCB
+ */
+static struct lp_lcb *lcb_alloc(
+ int type,
+ void *labelid,
+ int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
+{
+ /*
+ * Set up label control block
+ */
+ struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
+ sizeof(struct lp_lcb));
+
+ new->label = get_label_from_pool(labelid);
+ new->type = type;
+ new->labelid = labelid;
+ new->cbfunc = cbfunc;
+
+ return new;
+}
+
+/*
+ * Callers who need labels must supply a type, labelid, and callback.
+ * The type is a value defined in bgp_labelpool.h (add types as needed).
+ * The callback is for asynchronous notification of label allocation.
+ * The labelid is passed as an argument to the callback. It should be unique
+ * to the requested label instance.
+ *
+ * If zebra is not connected, callbacks with labels will be delayed
+ * until connection is established. If zebra connection is lost after
+ * labels have been assigned, existing assignments via this labelpool
+ * module will continue until reconnection.
+ *
+ * When connection to zebra is reestablished, previous label assignments
+ * will be invalidated (via callbacks having the "allocated" parameter unset)
+ * and new labels will be automatically reassigned by this labelpool module
+ * (that is, a requestor does not need to call lp_get() again if it is
+ * notified via callback that its label has been lost: it will eventually
+ * get another callback with a new label assignment).
+ *
+ * Prior requests for a given labelid are detected so that requests and
+ * assignments are not duplicated.
+ */
+void bgp_lp_get(
+ int type,
+ void *labelid,
+ int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
+{
+ struct lp_lcb *lcb;
+ int requested = 0;
+ int debug = BGP_DEBUG(labelpool, LABELPOOL);
+
+ if (debug)
+ zlog_debug("%s: labelid=%p", __func__, labelid);
+
+ /*
+ * Have we seen this request before?
+ */
+ if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
+ requested = 1;
+ } else {
+ lcb = lcb_alloc(type, labelid, cbfunc);
+ if (debug)
+ zlog_debug("%s: inserting lcb=%p label=%u",
+ __func__, lcb, lcb->label);
+ int rc = skiplist_insert(lp->ledger, labelid, lcb);
+
+ if (rc) {
+ /* shouldn't happen */
+ zlog_err("%s: can't insert new LCB into ledger list",
+ __func__);
+ XFREE(MTYPE_BGP_LABEL_CB, lcb);
+ return;
+ }
+ }
+
+ if (lcb->label != MPLS_LABEL_NONE) {
+ /*
+ * Fast path: we filled the request from local pool (or
+ * this is a duplicate request that we filled already).
+ * Enqueue response work item with new label.
+ */
+ struct lp_cbq_item *q;
+
+ q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));
+
+ q->cbfunc = lcb->cbfunc;
+ q->type = lcb->type;
+ q->label = lcb->label;
+ q->labelid = lcb->labelid;
+ q->allocated = true;
+
+ work_queue_add(lp->callback_q, q);
+
+ return;
+ }
+
+ if (requested)
+ return;
+
+ if (debug)
+ zlog_debug("%s: slow path. lcb=%p label=%u",
+ __func__, lcb, lcb->label);
+
+ /*
+ * Slow path: we are out of labels in the local pool,
+ * so remember the request and also get another chunk from
+ * the label manager.
+ *
+ * We track number of outstanding label requests: don't
+ * need to get a chunk for each one.
+ */
+
+ struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
+ sizeof(struct lp_fifo));
+
+ lf->lcb = *lcb;
+ LABEL_FIFO_ADD(lp->requests, lf);
+
+ if (LABEL_FIFO_COUNT(lp->requests) > lp->pending_count) {
+ if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
+ lp->pending_count += LP_CHUNK_SIZE;
+ return;
+ }
+ }
+}
+
+void bgp_lp_release(
+ int type,
+ void *labelid,
+ mpls_label_t label)
+{
+ struct lp_lcb *lcb;
+
+ if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
+ if (label == lcb->label && type == lcb->type) {
+ uintptr_t lbl = label;
+
+ /* no longer in use */
+ skiplist_delete(lp->inuse, (void *)lbl, NULL);
+
+ /* no longer requested */
+ skiplist_delete(lp->ledger, labelid, NULL);
+ }
+ }
+}
+
+/*
+ * zebra response giving us a chunk of labels
+ */
+void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
+{
+ struct lp_chunk *chunk;
+ int debug = BGP_DEBUG(labelpool, LABELPOOL);
+ struct lp_fifo *lf;
+
+ if (last < first) {
+ zlog_err("%s: zebra label chunk invalid: first=%u, last=%u",
+ __func__, first, last);
+ return;
+ }
+
+ chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));
+
+ chunk->first = first;
+ chunk->last = last;
+
+ listnode_add(lp->chunks, chunk);
+
+ lp->pending_count -= (last - first + 1);
+
+ if (debug) {
+ zlog_debug("%s: %u pending requests", __func__,
+ LABEL_FIFO_COUNT(lp->requests));
+ }
+
+ while ((lf = LABEL_FIFO_HEAD(lp->requests))) {
+
+ struct lp_lcb *lcb;
+ void *labelid = lf->lcb.labelid;
+
+ if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
+ /* request no longer in effect */
+
+ if (debug) {
+ zlog_debug("%s: labelid %p: request no longer in effect",
+ __func__, labelid);
+ }
+ goto finishedrequest;
+ }
+
+ /* have LCB */
+ if (lcb->label != MPLS_LABEL_NONE) {
+ /* request already has a label */
+ if (debug) {
+ zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
+ __func__, labelid,
+ lcb->label, lcb->label, lcb);
+ }
+ goto finishedrequest;
+ }
+
+ lcb->label = get_label_from_pool(lcb->labelid);
+
+ if (lcb->label == MPLS_LABEL_NONE) {
+ /*
+ * Out of labels in local pool, await next chunk
+ */
+ if (debug) {
+ zlog_debug("%s: out of labels, await more",
+ __func__);
+ }
+ break;
+ }
+
+ /*
+ * we filled the request from local pool.
+ * Enqueue response work item with new label.
+ */
+ struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
+ sizeof(struct lp_cbq_item));
+
+ q->cbfunc = lcb->cbfunc;
+ q->type = lcb->type;
+ q->label = lcb->label;
+ q->labelid = lcb->labelid;
+ q->allocated = true;
+
+ if (debug)
+ zlog_debug("%s: assigning label %u to labelid %p",
+ __func__, q->label, q->labelid);
+
+ work_queue_add(lp->callback_q, q);
+
+finishedrequest:
+ LABEL_FIFO_DEL(lp->requests, lf);
+ XFREE(MTYPE_BGP_LABEL_FIFO, lf);
+ }
+}
+
+/*
+ * continue using allocated labels until zebra returns
+ */
+void bgp_lp_event_zebra_down(void)
+{
+ /* rats. */
+}
+
+/*
+ * Inform owners of previously-allocated labels that their labels
+ * are not valid. Request chunk from zebra large enough to satisfy
+ * previously-allocated labels plus any outstanding requests.
+ */
+void bgp_lp_event_zebra_up(void)
+{
+ int labels_needed;
+ int chunks_needed;
+ void *labelid;
+ struct lp_lcb *lcb;
+
+ /*
+ * Get label chunk allocation request dispatched to zebra
+ */
+ labels_needed = LABEL_FIFO_COUNT(lp->requests) +
+ skiplist_count(lp->inuse);
+
+ /* round up */
+ chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
+ labels_needed = chunks_needed * LP_CHUNK_SIZE;
+
+ zclient_send_get_label_chunk(zclient, 0, labels_needed);
+ lp->pending_count = labels_needed;
+
+ /*
+ * Invalidate current list of chunks
+ */
+ list_delete_all_node(lp->chunks);
+
+ /*
+ * Invalidate any existing labels and requeue them as requests
+ */
+ while (!skiplist_first(lp->inuse, NULL, &labelid)) {
+
+ /*
+ * Get LCB
+ */
+ if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
+
+ if (lcb->label != MPLS_LABEL_NONE) {
+ /*
+ * invalidate
+ */
+ struct lp_cbq_item *q;
+
+ q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
+ sizeof(struct lp_cbq_item));
+ q->cbfunc = lcb->cbfunc;
+ q->type = lcb->type;
+ q->label = lcb->label;
+ q->labelid = lcb->labelid;
+ q->allocated = false;
+ work_queue_add(lp->callback_q, q);
+
+ lcb->label = MPLS_LABEL_NONE;
+ }
+
+ /*
+ * request queue
+ */
+ struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
+ sizeof(struct lp_fifo));
+
+ lf->lcb = *lcb;
+ LABEL_FIFO_ADD(lp->requests, lf);
+ }
+
+ skiplist_delete_first(lp->inuse);
+ }
+}
diff --git a/bgpd/bgp_labelpool.h b/bgpd/bgp_labelpool.h
new file mode 100644
index 0000000000..fa35cde0e1
--- /dev/null
+++ b/bgpd/bgp_labelpool.h
@@ -0,0 +1,51 @@
+/*
+ * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
+ *
+ * Copyright (C) 2018 LabN Consulting, L.L.C.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRR_BGP_LABELPOOL_H
+#define _FRR_BGP_LABELPOOL_H
+
+#include <zebra.h>
+
+#include "mpls.h"
+
+/*
+ * Types used in bgp_lp_get for debug tracking; add more as needed
+ */
+#define LP_TYPE_VRF 0x00000001
+
+struct labelpool {
+ struct skiplist *ledger; /* all requests */
+ struct skiplist *inuse; /* individual labels */
+ struct list *chunks; /* granted by zebra */
+ struct lp_fifo *requests; /* blocked on zebra */
+ struct work_queue *callback_q;
+ uint32_t pending_count; /* requested from zebra */
+};
+
+extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool);
+extern void bgp_lp_finish(void);
+extern void bgp_lp_get(int type, void *labelid,
+ int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated));
+extern void bgp_lp_release(int type, void *labelid, mpls_label_t label);
+extern void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last);
+extern void bgp_lp_event_zebra_down(void);
+extern void bgp_lp_event_zebra_up(void);
+
+#endif /* _FRR_BGP_LABELPOOL_H */
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 023a866315..30ec0e96a6 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -55,6 +55,7 @@
#endif
#include "bgpd/bgp_evpn.h"
#include "bgpd/bgp_mplsvpn.h"
+#include "bgpd/bgp_labelpool.h"
/* All information about zebra. */
struct zclient *zclient = NULL;
@@ -1876,6 +1877,9 @@ static void bgp_zebra_connected(struct zclient *zclient)
/* Send the client registration */
bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER);
+ /* tell label pool that zebra is connected */
+ bgp_lp_event_zebra_up();
+
/* TODO - What if we have peers and networks configured, do we have to
* kick-start them?
*/
@@ -2042,6 +2046,41 @@ static void bgp_zebra_process_local_ip_prefix(int cmd, struct zclient *zclient,
}
}
+static void bgp_zebra_process_label_chunk(
+ int cmd,
+ struct zclient *zclient,
+ zebra_size_t length,
+ vrf_id_t vrf_id)
+{
+ struct stream *s = NULL;
+ uint8_t response_keep;
+ uint32_t first;
+ uint32_t last;
+
+ s = zclient->ibuf;
+ STREAM_GETC(s, response_keep);
+ STREAM_GETL(s, first);
+ STREAM_GETL(s, last);
+
+ if (first > last ||
+ first < MPLS_LABEL_UNRESERVED_MIN ||
+ last > MPLS_LABEL_UNRESERVED_MAX) {
+
+ zlog_err("%s: Invalid Label chunk: %u - %u",
+ __func__, first, last);
+ return;
+ }
+ if (BGP_DEBUG(zebra, ZEBRA)) {
+ zlog_debug("Label Chunk assign: %u - %u (%u) ",
+ first, last, response_keep);
+ }
+
+ bgp_lp_event_chunk(response_keep, first, last);
+
+stream_failure: /* for STREAM_GETX */
+ return;
+}
+
extern struct zebra_privs_t bgpd_privs;
void bgp_zebra_init(struct thread_master *master)
@@ -2076,6 +2115,7 @@ void bgp_zebra_init(struct thread_master *master)
zclient->local_l3vni_del = bgp_zebra_process_local_l3vni;
zclient->local_ip_prefix_add = bgp_zebra_process_local_ip_prefix;
zclient->local_ip_prefix_del = bgp_zebra_process_local_ip_prefix;
+ zclient->label_chunk = bgp_zebra_process_label_chunk;
}
void bgp_zebra_destroy(void)
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index c46111e1fb..7c17cc3dae 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -81,7 +81,7 @@
#include "bgpd/bgp_io.h"
#include "bgpd/bgp_ecommunity.h"
#include "bgpd/bgp_flowspec.h"
-
+#include "bgpd/bgp_labelpool.h"
DEFINE_MTYPE_STATIC(BGPD, PEER_TX_SHUTDOWN_MSG, "Peer shutdown message (TX)");
DEFINE_QOBJ_TYPE(bgp_master)
@@ -7536,6 +7536,9 @@ void bgp_master_init(struct thread_master *master)
/* Enable multiple instances by default. */
bgp_option_set(BGP_OPT_MULTIPLE_INSTANCE);
+ /* mpls label dynamic allocation pool */
+ bgp_lp_init(bm->master, &bm->labelpool);
+
QOBJ_REG(bm, bgp_master);
}
@@ -7714,4 +7717,6 @@ void bgp_terminate(void)
if (bm->t_rmap_update)
BGP_TIMER_OFF(bm->t_rmap_update);
+
+ bgp_lp_finish();
}
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 0c5f72662c..2f3a732acd 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -37,6 +37,7 @@
#include "bgp_memory.h"
#include "bitfield.h"
#include "vxlan.h"
+#include "bgp_labelpool.h"
#define BGP_MAX_HOSTNAME 64 /* Linux max, is larger than most other sys */
#define BGP_PEER_MAX_HASH_SIZE 16384
@@ -140,6 +141,9 @@ struct bgp_master {
/* Id space for automatic RD derivation for an EVI/VRF */
bitfield_t rd_idspace;
+ /* dynamic mpls label allocation pool */
+ struct labelpool labelpool;
+
QOBJ_FIELDS
};
DECLARE_QOBJ_TYPE(bgp_master)
diff --git a/lib/zclient.c b/lib/zclient.c
index d23f62dcd7..07029c1f5d 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -1991,6 +1991,40 @@ int lm_label_manager_connect(struct zclient *zclient)
return (int)result;
}
+/*
+ * Asynchronous label chunk request
+ *
+ * @param zclient Zclient used to connect to label manager (zebra)
+ * @param keep Avoid garbage collection
+ * @param chunk_size Amount of labels requested
+ * @result 0 on success, -1 otherwise
+ */
+int zclient_send_get_label_chunk(
+ struct zclient *zclient,
+ uint8_t keep,
+ uint32_t chunk_size)
+{
+ struct stream *s;
+
+ if (zclient_debug)
+ zlog_debug("Getting Label Chunk");
+
+ if (zclient->sock < 0)
+ return -1;
+
+ s = zclient->obuf;
+ stream_reset(s);
+
+ zclient_create_header(s, ZEBRA_GET_LABEL_CHUNK, VRF_DEFAULT);
+ stream_putc(s, keep);
+ stream_putl(s, chunk_size);
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ return zclient_send_message(zclient);
+}
+
/**
* Function to request a label chunk in a syncronous way
*
@@ -2604,6 +2638,12 @@ static int zclient_read(struct thread *thread)
if (zclient->rule_notify_owner)
(*zclient->rule_notify_owner)(command, zclient, length,
vrf_id);
+ break;
+ case ZEBRA_GET_LABEL_CHUNK:
+ if (zclient->label_chunk)
+ (*zclient->label_chunk)(command, zclient, length,
+ vrf_id);
+ break;
default:
break;
}
diff --git a/lib/zclient.h b/lib/zclient.h
index 9d3e5c3702..e85eac73fb 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -223,6 +223,8 @@ struct zclient {
uint16_t length, vrf_id_t vrf_id);
int (*rule_notify_owner)(int command, struct zclient *zclient,
uint16_t length, vrf_id_t vrf_id);
+ void (*label_chunk)(int command, struct zclient *zclient,
+ uint16_t length, vrf_id_t vrf_id);
};
/* Zebra API message flag. */
@@ -535,6 +537,11 @@ extern int zapi_ipv4_route(uint8_t, struct zclient *, struct prefix_ipv4 *,
extern struct interface *zebra_interface_link_params_read(struct stream *);
extern size_t zebra_interface_link_params_write(struct stream *,
struct interface *);
+extern int zclient_send_get_label_chunk(
+ struct zclient *zclient,
+ uint8_t keep,
+ uint32_t chunk_size);
+
extern int lm_label_manager_connect(struct zclient *zclient);
extern int lm_get_label_chunk(struct zclient *zclient, uint8_t keep,
uint32_t chunk_size, uint32_t *start,