summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bgpd/bgp_label.c194
-rw-r--r--bgpd/bgp_labelpool.c388
-rw-r--r--bgpd/bgp_labelpool.h2
-rw-r--r--bgpd/bgp_route.c12
-rw-r--r--bgpd/bgp_table.h1
-rw-r--r--bgpd/bgpd.c22
-rw-r--r--bgpd/subdir.am1
-rw-r--r--doc/user/bgp.rst26
-rw-r--r--doc/user/pim.rst7
-rw-r--r--doc/user/rpki.rst4
-rwxr-xr-x[-rw-r--r--]lib/sockunion.c17
-rwxr-xr-x[-rw-r--r--]lib/sockunion.h1
-rw-r--r--nhrpd/README.kernel1
-rwxr-xr-x[-rw-r--r--]nhrpd/nhrp_cache.c42
-rwxr-xr-x[-rw-r--r--]nhrpd/nhrp_interface.c15
-rwxr-xr-x[-rw-r--r--]nhrpd/nhrp_nhs.c26
-rwxr-xr-x[-rw-r--r--]nhrpd/nhrp_peer.c29
-rwxr-xr-x[-rw-r--r--]nhrpd/nhrp_route.c7
-rw-r--r--nhrpd/nhrp_shortcut.c19
-rwxr-xr-x[-rw-r--r--]nhrpd/nhrpd.h5
-rw-r--r--pathd/path_pcep_cli.c7
-rw-r--r--pathd/path_pcep_controller.h1
-rw-r--r--pathd/path_pcep_pcc.c1
-rw-r--r--pimd/pim_bsm.c6
-rw-r--r--pimd/pim_bsm.h3
-rw-r--r--pimd/pim_cmd.c147
-rw-r--r--pimd/pim_ifchannel.c28
-rw-r--r--pimd/pim_igmp.c4
-rw-r--r--pimd/pim_igmpv2.c31
-rw-r--r--pimd/pim_igmpv2.h2
-rw-r--r--pimd/pim_rp.c2
-rw-r--r--pimd/pim_rp.h1
-rw-r--r--tests/topotests/bgp_lu_topo1/R1/bgpd.conf21
-rw-r--r--tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json8
-rw-r--r--tests/topotests/bgp_lu_topo1/R1/zebra.conf6
-rw-r--r--tests/topotests/bgp_lu_topo1/R2/bgpd.conf23
-rw-r--r--tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json8
-rw-r--r--tests/topotests/bgp_lu_topo1/R2/zebra.conf11
-rw-r--r--tests/topotests/bgp_lu_topo1/R3/bgpd.conf523
-rw-r--r--tests/topotests/bgp_lu_topo1/R3/zebra.conf9
-rw-r--r--tests/topotests/bgp_lu_topo1/test_bgp_lu.py178
-rw-r--r--zebra/dplane_fpm_nl.c4
-rw-r--r--zebra/label_manager.c5
-rw-r--r--zebra/zebra_dplane.c11
-rw-r--r--zebra/zebra_vxlan.c77
45 files changed, 1779 insertions, 157 deletions
diff --git a/bgpd/bgp_label.c b/bgpd/bgp_label.c
index 4f440cd1f8..5a31bd0243 100644
--- a/bgpd/bgp_label.c
+++ b/bgpd/bgp_label.c
@@ -120,6 +120,65 @@ mpls_label_t bgp_adv_label(struct bgp_dest *dest, struct bgp_path_info *pi,
return dest->local_label;
}
+static void bgp_send_fec_register_label_msg(struct bgp_dest *dest, bool reg,
+ uint32_t label_index)
+{
+ struct stream *s;
+ int command;
+ const struct prefix *p;
+ uint16_t flags = 0;
+ size_t flags_pos = 0;
+ mpls_label_t *local_label = &(dest->local_label);
+ bool have_label_to_reg =
+ bgp_is_valid_label(local_label)
+ && label_pton(local_label) != MPLS_LABEL_IMPLICIT_NULL;
+
+ p = bgp_dest_get_prefix(dest);
+
+ /* Check socket. */
+ if (!zclient || zclient->sock < 0)
+ return;
+
+ if (BGP_DEBUG(labelpool, LABELPOOL))
+ zlog_debug("%s: FEC %sregister %pRN label_index=%u label=%u",
+ __func__, reg ? "" : "un", bgp_dest_to_rnode(dest),
+ label_index, label_pton(local_label));
+ /* If the route node has a local_label assigned or the
+ * path node has an MPLS SR label index allowing zebra to
+ * derive the label, proceed with registration. */
+ s = zclient->obuf;
+ stream_reset(s);
+ command = (reg) ? ZEBRA_FEC_REGISTER : ZEBRA_FEC_UNREGISTER;
+ zclient_create_header(s, command, VRF_DEFAULT);
+ flags_pos = stream_get_endp(s); /* save position of 'flags' */
+ stream_putw(s, flags); /* initial flags */
+ stream_putw(s, PREFIX_FAMILY(p));
+ stream_put_prefix(s, p);
+ if (reg) {
+ /* label index takes precedence over auto-assigned label. */
+ if (label_index != 0) {
+ flags |= ZEBRA_FEC_REGISTER_LABEL_INDEX;
+ stream_putl(s, label_index);
+ } else if (have_label_to_reg) {
+ flags |= ZEBRA_FEC_REGISTER_LABEL;
+ stream_putl(s, label_pton(local_label));
+ }
+ SET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL);
+ } else
+ UNSET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL);
+
+ /* Set length and flags */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ /*
+ * We only need to write new flags if this is a register
+ */
+ if (reg)
+ stream_putw_at(s, flags_pos, flags);
+
+ zclient_send_message(zclient);
+}
+
/**
* This is passed as the callback function to bgp_labelpool.c:bgp_lp_get()
* by bgp_reg_dereg_for_label() when a label needs to be obtained from
@@ -130,20 +189,21 @@ mpls_label_t bgp_adv_label(struct bgp_dest *dest, struct bgp_path_info *pi,
int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid,
bool allocated)
{
- struct bgp_path_info *pi;
struct bgp_dest *dest;
- pi = labelid;
- /* Is this path still valid? */
- if (!bgp_path_info_unlock(pi)) {
- if (BGP_DEBUG(labelpool, LABELPOOL))
- zlog_debug(
- "%s: bgp_path_info is no longer valid, ignoring",
- __func__);
+ dest = labelid;
+
+ /*
+ * if the route had been removed or the request has gone then reject
+ * the allocated label. The requesting code will have done what is
+ * required to allocate the correct label
+ */
+ if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) {
+ bgp_dest_unlock_node(dest);
return -1;
}
- dest = pi->net;
+ bgp_dest_unlock_node(dest);
if (BGP_DEBUG(labelpool, LABELPOOL))
zlog_debug("%s: FEC %pRN label=%u, allocated=%d", __func__,
@@ -151,47 +211,15 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid,
if (!allocated) {
/*
- * previously-allocated label is now invalid
+ * previously-allocated label is now invalid, set to implicit
+ * null until new label arrives
*/
- if (pi->attr->label_index == MPLS_INVALID_LABEL_INDEX
- && pi->attr->label != MPLS_LABEL_NONE
- && CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)) {
- bgp_unregister_for_label(dest);
+ if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)) {
+ UNSET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED);
label_ntop(MPLS_LABEL_IMPLICIT_NULL, 1,
&dest->local_label);
bgp_set_valid_label(&dest->local_label);
}
- return 0;
- }
-
- /*
- * label index is assigned, this should be handled by SR-related code,
- * so retry FEC registration and then reject label allocation for
- * it to be released to label pool
- */
- if (pi->attr->label_index != MPLS_INVALID_LABEL_INDEX) {
- flog_err(
- EC_BGP_LABEL,
- "%s: FEC %pRN Rejecting allocated label %u as Label Index is %u",
- __func__, bgp_dest_to_rnode(dest), new_label,
- pi->attr->label_index);
-
- bgp_register_for_label(pi->net, pi);
-
- return -1;
- }
-
- if (pi->attr->label != MPLS_INVALID_LABEL) {
- if (new_label == pi->attr->label) {
- /* already have same label, accept but do nothing */
- return 0;
- }
- /* Shouldn't happen: different label allocation */
- flog_err(EC_BGP_LABEL,
- "%s: %pRN had label %u but got new assignment %u",
- __func__, bgp_dest_to_rnode(dest), pi->attr->label,
- new_label);
- /* continue means use new one */
}
label_ntop(new_label, 1, &dest->local_label);
@@ -200,7 +228,7 @@ int bgp_reg_for_label_callback(mpls_label_t new_label, void *labelid,
/*
* Get back to registering the FEC
*/
- bgp_register_for_label(pi->net, pi);
+ bgp_send_fec_register_label_msg(dest, true, 0);
return 0;
}
@@ -209,20 +237,12 @@ void bgp_reg_dereg_for_label(struct bgp_dest *dest, struct bgp_path_info *pi,
bool reg)
{
bool with_label_index = false;
- struct stream *s;
const struct prefix *p;
- mpls_label_t *local_label;
- int command;
- uint16_t flags = 0;
- size_t flags_pos = 0;
+ bool have_label_to_reg =
+ bgp_is_valid_label(&dest->local_label)
+ && label_pton(&dest->local_label) != MPLS_LABEL_IMPLICIT_NULL;
p = bgp_dest_get_prefix(dest);
- local_label = &(dest->local_label);
- /* this prevents the loop when we're called by
- * bgp_reg_for_label_callback()
- */
- bool have_label_to_reg = bgp_is_valid_label(local_label)
- && label_pton(local_label) != MPLS_LABEL_IMPLICIT_NULL;
if (reg) {
assert(pi);
@@ -234,67 +254,37 @@ void bgp_reg_dereg_for_label(struct bgp_dest *dest, struct bgp_path_info *pi,
ATTR_FLAG_BIT(BGP_ATTR_PREFIX_SID))
&& pi->attr->label_index != BGP_INVALID_LABEL_INDEX) {
with_label_index = true;
+ UNSET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED);
} else {
/*
- * If no label index was provided -- assume any label
+ * If no label has been registered -- assume any label
* from label pool will do. This means that label index
* always takes precedence over auto-assigned labels.
*/
if (!have_label_to_reg) {
+ SET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED);
if (BGP_DEBUG(labelpool, LABELPOOL))
zlog_debug(
"%s: Requesting label from LP for %pFX",
__func__, p);
-
- /* bgp_reg_for_label_callback() will call back
- * __func__ when it gets a label from the pool.
- * This means we'll never register FECs without
- * valid labels.
+ /* bgp_reg_for_label_callback() will deal with
+ * fec registration when it gets a label from
+ * the pool. This means we'll never register
+ * FECs without valid labels.
*/
- bgp_lp_get(LP_TYPE_BGP_LU, pi,
- bgp_reg_for_label_callback);
+ bgp_lp_get(LP_TYPE_BGP_LU, dest,
+ bgp_reg_for_label_callback);
return;
}
}
+ } else {
+ UNSET_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED);
+ bgp_lp_release(LP_TYPE_BGP_LU, dest,
+ label_pton(&dest->local_label));
}
- /* Check socket. */
- if (!zclient || zclient->sock < 0)
- return;
-
- /* If the route node has a local_label assigned or the
- * path node has an MPLS SR label index allowing zebra to
- * derive the label, proceed with registration. */
- s = zclient->obuf;
- stream_reset(s);
- command = (reg) ? ZEBRA_FEC_REGISTER : ZEBRA_FEC_UNREGISTER;
- zclient_create_header(s, command, VRF_DEFAULT);
- flags_pos = stream_get_endp(s); /* save position of 'flags' */
- stream_putw(s, flags); /* initial flags */
- stream_putw(s, PREFIX_FAMILY(p));
- stream_put_prefix(s, p);
- if (reg) {
- if (have_label_to_reg) {
- flags |= ZEBRA_FEC_REGISTER_LABEL;
- stream_putl(s, label_pton(local_label));
- } else if (with_label_index) {
- flags |= ZEBRA_FEC_REGISTER_LABEL_INDEX;
- stream_putl(s, pi->attr->label_index);
- }
- SET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL);
- } else
- UNSET_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL);
-
- /* Set length and flags */
- stream_putw_at(s, 0, stream_get_endp(s));
-
- /*
- * We only need to write new flags if this is a register
- */
- if (reg)
- stream_putw_at(s, flags_pos, flags);
-
- zclient_send_message(zclient);
+ bgp_send_fec_register_label_msg(
+ dest, reg, with_label_index ? pi->attr->label_index : 0);
}
static int bgp_nlri_get_labels(struct peer *peer, uint8_t *pnt, uint8_t plen,
diff --git a/bgpd/bgp_labelpool.c b/bgpd/bgp_labelpool.c
index e8d8167c35..001340be35 100644
--- a/bgpd/bgp_labelpool.c
+++ b/bgpd/bgp_labelpool.c
@@ -182,18 +182,18 @@ void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
lp->callback_q->spec.max_retries = 0;
}
-/* check if a label callback was for a BGP LU path, and if so, unlock it */
+/* check if a label callback was for a BGP LU node, and if so, unlock it */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
if (lcb->type == LP_TYPE_BGP_LU)
- bgp_path_info_unlock(lcb->labelid);
+ bgp_dest_unlock_node(lcb->labelid);
}
-/* check if a label callback was for a BGP LU path, and if so, lock it */
+/* check if a label callback was for a BGP LU node, and if so, lock it */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
if (lcb->type == LP_TYPE_BGP_LU)
- bgp_path_info_lock(lcb->labelid);
+ bgp_dest_lock_node(lcb->labelid);
}
void bgp_lp_finish(void)
@@ -356,7 +356,7 @@ void bgp_lp_get(
q->labelid = lcb->labelid;
q->allocated = true;
- /* if this is a LU request, lock path info before queueing */
+ /* if this is a LU request, lock node before queueing */
check_bgp_lu_cb_lock(lcb);
work_queue_add(lp->callback_q, q);
@@ -384,7 +384,7 @@ void bgp_lp_get(
sizeof(struct lp_fifo));
lf->lcb = *lcb;
- /* if this is a LU request, lock path info before queueing */
+ /* if this is a LU request, lock node before queueing */
check_bgp_lu_cb_lock(lcb);
lp_fifo_add_tail(&lp->requests, lf);
@@ -394,7 +394,7 @@ void bgp_lp_get(
return;
if (zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE,
MPLS_LABEL_BASE_ANY)
- == ZCLIENT_SEND_FAILURE)
+ != ZCLIENT_SEND_FAILURE)
lp->pending_count += LP_CHUNK_SIZE;
}
}
@@ -461,6 +461,9 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
zlog_debug("%s: labelid %p: request no longer in effect",
__func__, labelid);
}
+ /* if this was a BGP_LU request, unlock node
+ */
+ check_bgp_lu_cb_unlock(lcb);
goto finishedrequest;
}
@@ -472,7 +475,7 @@ void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
__func__, labelid,
lcb->label, lcb->label, lcb);
}
- /* if this was a BGP_LU request, unlock path info node
+ /* if this was a BGP_LU request, unlock node
*/
check_bgp_lu_cb_unlock(lcb);
@@ -538,6 +541,7 @@ void bgp_lp_event_zebra_up(void)
struct lp_lcb *lcb;
int lm_init_ok;
+ lp->reconnect_count++;
/*
* Get label chunk allocation request dispatched to zebra
*/
@@ -607,3 +611,371 @@ void bgp_lp_event_zebra_up(void)
skiplist_delete_first(lp->inuse);
}
}
+
+DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
+ "show bgp labelpool summary [json]",
+ SHOW_STR BGP_STR
+ "BGP Labelpool information\n"
+ "BGP Labelpool summary\n" JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL;
+
+ if (!lp) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "No existing BGP labelpool\n");
+ return (CMD_WARNING);
+ }
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_int_add(json, "Ledger", skiplist_count(lp->ledger));
+ json_object_int_add(json, "InUse", skiplist_count(lp->inuse));
+ json_object_int_add(json, "Requests",
+ lp_fifo_count(&lp->requests));
+ json_object_int_add(json, "LabelChunks", listcount(lp->chunks));
+ json_object_int_add(json, "Pending", lp->pending_count);
+ json_object_int_add(json, "Reconnects", lp->reconnect_count);
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ } else {
+ vty_out(vty, "Labelpool Summary\n");
+ vty_out(vty, "-----------------\n");
+ vty_out(vty, "%-13s %d\n",
+ "Ledger:", skiplist_count(lp->ledger));
+ vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse));
+ vty_out(vty, "%-13s %zu\n",
+ "Requests:", lp_fifo_count(&lp->requests));
+ vty_out(vty, "%-13s %d\n",
+ "LabelChunks:", listcount(lp->chunks));
+ vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count);
+ vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
+ "show bgp labelpool ledger [json]",
+ SHOW_STR BGP_STR
+ "BGP Labelpool information\n"
+ "BGP Labelpool ledger\n" JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL, *json_elem = NULL;
+ struct lp_lcb *lcb = NULL;
+ struct bgp_dest *dest;
+ void *cursor = NULL;
+ const struct prefix *p;
+ int rc, count;
+
+ if (!lp) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "No existing BGP labelpool\n");
+ return (CMD_WARNING);
+ }
+
+ if (uj) {
+ count = skiplist_count(lp->ledger);
+ if (!count) {
+ vty_out(vty, "{}\n");
+ return CMD_SUCCESS;
+ }
+ json = json_object_new_array();
+ } else {
+ vty_out(vty, "Prefix Label\n");
+ vty_out(vty, "---------------------------\n");
+ }
+
+ for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
+ &cursor);
+ !rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
+ &cursor)) {
+ if (uj) {
+ json_elem = json_object_new_object();
+ json_object_array_add(json, json_elem);
+ }
+ switch (lcb->type) {
+ case LP_TYPE_BGP_LU:
+ if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
+ if (uj) {
+ json_object_string_add(
+ json_elem, "prefix", "INVALID");
+ json_object_int_add(json_elem, "label",
+ lcb->label);
+ } else
+ vty_out(vty, "%-18s %u\n",
+ "INVALID", lcb->label);
+ else {
+ char buf[PREFIX2STR_BUFFER];
+ p = bgp_dest_get_prefix(dest);
+ prefix2str(p, buf, sizeof(buf));
+ if (uj) {
+ json_object_string_add(json_elem,
+ "prefix", buf);
+ json_object_int_add(json_elem, "label",
+ lcb->label);
+ } else
+ vty_out(vty, "%-18s %u\n", buf,
+ lcb->label);
+ }
+ break;
+ case LP_TYPE_VRF:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "VRF");
+ json_object_int_add(json_elem, "label",
+ lcb->label);
+ } else
+ vty_out(vty, "%-18s %u\n", "VRF",
+ lcb->label);
+
+ break;
+ }
+ }
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
+ "show bgp labelpool inuse [json]",
+ SHOW_STR BGP_STR
+ "BGP Labelpool information\n"
+ "BGP Labelpool inuse\n" JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL, *json_elem = NULL;
+ struct bgp_dest *dest;
+ mpls_label_t label;
+ struct lp_lcb *lcb;
+ void *cursor = NULL;
+ const struct prefix *p;
+ int rc, count;
+
+ if (!lp) {
+ vty_out(vty, "No existing BGP labelpool\n");
+ return (CMD_WARNING);
+ }
+ if (!lp) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "No existing BGP labelpool\n");
+ return (CMD_WARNING);
+ }
+
+ if (uj) {
+ count = skiplist_count(lp->inuse);
+ if (!count) {
+ vty_out(vty, "{}\n");
+ return CMD_SUCCESS;
+ }
+ json = json_object_new_array();
+ } else {
+ vty_out(vty, "Prefix Label\n");
+ vty_out(vty, "---------------------------\n");
+ }
+ for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest,
+ &cursor);
+ !rc; rc = skiplist_next(lp->ledger, (void **)&label,
+ (void **)&dest, &cursor)) {
+ if (skiplist_search(lp->ledger, dest, (void **)&lcb))
+ continue;
+
+ if (uj) {
+ json_elem = json_object_new_object();
+ json_object_array_add(json, json_elem);
+ }
+
+ switch (lcb->type) {
+ case LP_TYPE_BGP_LU:
+ if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
+ if (uj) {
+ json_object_string_add(
+ json_elem, "prefix", "INVALID");
+ json_object_int_add(json_elem, "label",
+ label);
+ } else
+ vty_out(vty, "INVALID %u\n",
+ label);
+ else {
+ char buf[PREFIX2STR_BUFFER];
+ p = bgp_dest_get_prefix(dest);
+ prefix2str(p, buf, sizeof(buf));
+ if (uj) {
+ json_object_string_add(json_elem,
+ "prefix", buf);
+ json_object_int_add(json_elem, "label",
+ label);
+ } else
+ vty_out(vty, "%-18s %u\n", buf,
+ label);
+ }
+ break;
+ case LP_TYPE_VRF:
+ if (uj) {
+ json_object_string_add(json_elem, "prefix",
+ "VRF");
+ json_object_int_add(json_elem, "label", label);
+ } else
+ vty_out(vty, "%-18s %u\n", "VRF",
+ label);
+ break;
+ }
+ }
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
+ "show bgp labelpool requests [json]",
+ SHOW_STR BGP_STR
+ "BGP Labelpool information\n"
+ "BGP Labelpool requests\n" JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL, *json_elem = NULL;
+ struct bgp_dest *dest;
+ const struct prefix *p;
+ char buf[PREFIX2STR_BUFFER];
+ struct lp_fifo *item, *next;
+ int count;
+
+ if (!lp) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "No existing BGP labelpool\n");
+ return (CMD_WARNING);
+ }
+
+ if (uj) {
+ count = lp_fifo_count(&lp->requests);
+ if (!count) {
+ vty_out(vty, "{}\n");
+ return CMD_SUCCESS;
+ }
+ json = json_object_new_array();
+ } else {
+ vty_out(vty, "Prefix \n");
+ vty_out(vty, "----------------\n");
+ }
+
+ for (item = lp_fifo_first(&lp->requests); item; item = next) {
+ next = lp_fifo_next_safe(&lp->requests, item);
+ dest = item->lcb.labelid;
+ if (uj) {
+ json_elem = json_object_new_object();
+ json_object_array_add(json, json_elem);
+ }
+ switch (item->lcb.type) {
+ case LP_TYPE_BGP_LU:
+ if (!CHECK_FLAG(dest->flags,
+ BGP_NODE_LABEL_REQUESTED)) {
+ if (uj)
+ json_object_string_add(
+ json_elem, "prefix", "INVALID");
+ else
+ vty_out(vty, "INVALID\n");
+ } else {
+ p = bgp_dest_get_prefix(dest);
+ prefix2str(p, buf, sizeof(buf));
+ if (uj)
+ json_object_string_add(json_elem,
+ "prefix", buf);
+ else
+ vty_out(vty, "%-18s\n", buf);
+ }
+ break;
+ case LP_TYPE_VRF:
+ if (uj)
+ json_object_string_add(json_elem, "prefix",
+ "VRF");
+ else
+ vty_out(vty, "VRF\n");
+ break;
+ }
+ }
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
+ "show bgp labelpool chunks [json]",
+ SHOW_STR BGP_STR
+ "BGP Labelpool information\n"
+ "BGP Labelpool chunks\n" JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ json_object *json = NULL, *json_elem;
+ struct listnode *node;
+ struct lp_chunk *chunk;
+ int count;
+
+ if (!lp) {
+ if (uj)
+ vty_out(vty, "{}\n");
+ else
+ vty_out(vty, "No existing BGP labelpool\n");
+ return (CMD_WARNING);
+ }
+
+ if (uj) {
+ count = listcount(lp->chunks);
+ if (!count) {
+ vty_out(vty, "{}\n");
+ return CMD_SUCCESS;
+ }
+ json = json_object_new_array();
+ } else {
+ vty_out(vty, "First Last\n");
+ vty_out(vty, "--------------\n");
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
+ if (uj) {
+ json_elem = json_object_new_object();
+ json_object_array_add(json, json_elem);
+ json_object_int_add(json_elem, "first", chunk->first);
+ json_object_int_add(json_elem, "last", chunk->last);
+ } else
+ vty_out(vty, "%-10u %-10u\n", chunk->first,
+ chunk->last);
+ }
+ if (uj) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+ return CMD_SUCCESS;
+}
+
+void bgp_lp_vty_init(void)
+{
+ install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
+ install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd);
+ install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
+ install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
+ install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);
+}
diff --git a/bgpd/bgp_labelpool.h b/bgpd/bgp_labelpool.h
index eaa3fce20b..d9f64acfe4 100644
--- a/bgpd/bgp_labelpool.h
+++ b/bgpd/bgp_labelpool.h
@@ -40,6 +40,7 @@ struct labelpool {
struct lp_fifo_head requests; /* blocked on zebra */
struct work_queue *callback_q;
uint32_t pending_count; /* requested from zebra */
+ uint32_t reconnect_count; /* zebra reconnections */
};
extern void bgp_lp_init(struct thread_master *master, struct labelpool *pool);
@@ -50,5 +51,6 @@ extern void bgp_lp_release(int type, void *labelid, mpls_label_t label);
extern void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last);
extern void bgp_lp_event_zebra_down(void);
extern void bgp_lp_event_zebra_up(void);
+extern void bgp_lp_vty_init(void);
#endif /* _FRR_BGP_LABELPOOL_H */
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 7e1f7df533..0760bbd6a2 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2755,7 +2755,10 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
== BGP_ROUTE_REDISTRIBUTE) {
if (CHECK_FLAG(
dest->flags,
- BGP_NODE_REGISTERED_FOR_LABEL))
+ BGP_NODE_REGISTERED_FOR_LABEL)
+ || CHECK_FLAG(
+ dest->flags,
+ BGP_NODE_LABEL_REQUESTED))
bgp_unregister_for_label(dest);
label_ntop(MPLS_LABEL_IMPLICIT_NULL, 1,
&dest->local_label);
@@ -2765,10 +2768,13 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
new_select);
}
} else if (CHECK_FLAG(dest->flags,
- BGP_NODE_REGISTERED_FOR_LABEL)) {
+ BGP_NODE_REGISTERED_FOR_LABEL)
+ || CHECK_FLAG(dest->flags,
+ BGP_NODE_LABEL_REQUESTED)) {
bgp_unregister_for_label(dest);
}
- } else if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)) {
+ } else if (CHECK_FLAG(dest->flags, BGP_NODE_REGISTERED_FOR_LABEL)
+ || CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED)) {
bgp_unregister_for_label(dest);
}
diff --git a/bgpd/bgp_table.h b/bgpd/bgp_table.h
index 738d41ee6d..68b460149c 100644
--- a/bgpd/bgp_table.h
+++ b/bgpd/bgp_table.h
@@ -104,6 +104,7 @@ struct bgp_node {
#define BGP_NODE_SELECT_DEFER (1 << 4)
#define BGP_NODE_FIB_INSTALL_PENDING (1 << 5)
#define BGP_NODE_FIB_INSTALLED (1 << 6)
+#define BGP_NODE_LABEL_REQUESTED (1 << 7)
struct bgp_addpath_node_data tx_addpath;
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 2149b14585..026b57193e 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -5039,20 +5039,20 @@ int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi)
continue;
/* Remove flag and configuration on peer-group member. */
- UNSET_FLAG(peer->af_flags[afi][safi],
+ UNSET_FLAG(member->af_flags[afi][safi],
PEER_FLAG_DEFAULT_ORIGINATE);
- if (peer->default_rmap[afi][safi].name)
+ if (member->default_rmap[afi][safi].name)
XFREE(MTYPE_ROUTE_MAP_NAME,
- peer->default_rmap[afi][safi].name);
- route_map_counter_decrement(peer->default_rmap[afi][safi].map);
- peer->default_rmap[afi][safi].name = NULL;
- peer->default_rmap[afi][safi].map = NULL;
+ member->default_rmap[afi][safi].name);
+ route_map_counter_decrement(member->default_rmap[afi][safi].map);
+ member->default_rmap[afi][safi].name = NULL;
+ member->default_rmap[afi][safi].map = NULL;
/* Update peer route announcements. */
- if (peer->status == Established && peer->afc_nego[afi][safi]) {
- update_group_adjust_peer(peer_af_find(peer, afi, safi));
- bgp_default_originate(peer, afi, safi, 1);
- bgp_announce_route(peer, afi, safi);
+ if (member->status == Established && member->afc_nego[afi][safi]) {
+ update_group_adjust_peer(peer_af_find(member, afi, safi));
+ bgp_default_originate(member, afi, safi, 1);
+ bgp_announce_route(member, afi, safi);
}
}
@@ -7620,6 +7620,8 @@ void bgp_init(unsigned short instance)
/* BFD init */
bgp_bfd_init();
+ bgp_lp_vty_init();
+
cmd_variable_handler_register(bgp_viewvrf_var_handlers);
}
diff --git a/bgpd/subdir.am b/bgpd/subdir.am
index ea60b921d1..ac84f4b9e4 100644
--- a/bgpd/subdir.am
+++ b/bgpd/subdir.am
@@ -18,6 +18,7 @@ vtysh_scan += \
bgpd/bgp_evpn_mh.c \
bgpd/bgp_evpn_vty.c \
bgpd/bgp_filter.c \
+ bgpd/bgp_labelpool.c \
bgpd/bgp_mplsvpn.c \
bgpd/bgp_nexthop.c \
bgpd/bgp_route.c \
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 7549cec3ea..7bef7d19c8 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -3384,6 +3384,32 @@ attribute.
If ``json`` option is specified, output is displayed in JSON format.
+.. index:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json]
+.. clicmd:: show bgp labelpool <chunks|inuse|ledger|requests|summary> [json]
+
+ These commands display information about the BGP labelpool used for
+ the association of MPLS labels with routes for L3VPN and Labeled Unicast
+
+ If ``chunks`` option is specified, output shows the current list of label
+ chunks granted to BGP by Zebra, indicating the start and end label in
+ each chunk
+
+ If ``inuse`` option is specified, output shows the current inuse list of
+ label to prefix mappings
+
+ If ``ledger`` option is specified, output shows ledger list of all
+ label requests made per prefix
+
+ If ``requests`` option is specified, output shows current list of label
+ requests which have not yet been fulfilled by the labelpool
+
+ If ``summary`` option is specified, output is a summary of the counts for
+ the chunks, inuse, ledger and requests list along with the count of
+ outstanding chunk requests to Zebra and the number of zebra reconnects
+ that have happened
+
+ If ``json`` option is specified, output is displayed in JSON format.
+
.. _bgp-display-routes-by-lcommunity:
Displaying Routes by Large Community Attribute
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index bacf8637ae..05297a0609 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -726,6 +726,13 @@ Clear commands reset various variables.
Rescan PIM OIL (output interface list).
+.. index:: clear ip pim [vrf NAME] bsr-data
+.. clicmd:: clear ip pim [vrf NAME] bsr-data
+
+ This command will clear the BSM scope data struct. This command also
+ removes the next hop tracking for the bsr and resets the upstreams
+ for the dynamically learnt RPs.
+
PIM EVPN configuration
======================
To use PIM in the underlay for overlay BUM forwarding associate a multicast
diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst
index 2c0e5876fa..451df1aa4e 100644
--- a/doc/user/rpki.rst
+++ b/doc/user/rpki.rst
@@ -271,5 +271,5 @@ RPKI Configuration Example
route-map rpki permit 40
!
-.. [Securing-BGP] Geoff Huston, Randy Bush: Securing BGP, In: The Internet Protocol Journal, Volume 14, No. 2, 2011. <http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-2/142_bgp.html>
-.. [Resource-Certification] Geoff Huston: Resource Certification, In: The Internet Protocol Journal, Volume 12, No.1, 2009. <http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_12-1/121_resource.html>
+.. [Securing-BGP] Geoff Huston, Randy Bush: Securing BGP, In: The Internet Protocol Journal, Volume 14, No. 2, 2011. <https://www.cisco.com/c/dam/en_us/about/ac123/ac147/archived_issues/ipj_14-2/ipj_14-2.pdf>
+.. [Resource-Certification] Geoff Huston: Resource Certification, In: The Internet Protocol Journal, Volume 12, No.1, 2009. <https://www.cisco.com/c/dam/en_us/about/ac123/ac147/archived_issues/ipj_12-1/ipj_12-1.pdf>
diff --git a/lib/sockunion.c b/lib/sockunion.c
index c999845659..1dbf77efa4 100644..100755
--- a/lib/sockunion.c
+++ b/lib/sockunion.c
@@ -708,3 +708,20 @@ static ssize_t printfrr_psu(char *buf, size_t bsz, const char *fmt,
fb.pos[0] = '\0';
return consumed;
}
+
+int sockunion_is_null(const union sockunion *su)
+{
+ unsigned char null_s6_addr[16] = {0};
+
+ switch (sockunion_family(su)) {
+ case AF_UNSPEC:
+ return 1;
+ case AF_INET:
+ return (su->sin.sin_addr.s_addr == 0);
+ case AF_INET6:
+ return !memcmp(su->sin6.sin6_addr.s6_addr, null_s6_addr,
+ sizeof(null_s6_addr));
+ default:
+ return 0;
+ }
+}
diff --git a/lib/sockunion.h b/lib/sockunion.h
index 72f12b77ca..5e80ba1090 100644..100755
--- a/lib/sockunion.h
+++ b/lib/sockunion.h
@@ -102,6 +102,7 @@ extern union sockunion *sockunion_getpeername(int);
extern union sockunion *sockunion_dup(const union sockunion *);
extern void sockunion_free(union sockunion *);
extern void sockunion_init(union sockunion *);
+extern int sockunion_is_null(const union sockunion *su);
#ifdef _FRR_ATTRIBUTE_PRINTFRR
#pragma FRR printfrr_ext "%pSU" (union sockunion *)
diff --git a/nhrpd/README.kernel b/nhrpd/README.kernel
index 5831316f1f..067ff9838c 100644
--- a/nhrpd/README.kernel
+++ b/nhrpd/README.kernel
@@ -32,6 +32,7 @@ This list tries to collect them to one source of information:
commit "ipv4: introduce ip_dst_mtu_maybe_forward and protect forwarding path against pmtu spoofing"
Workaround:
Set sysctl net.ipv4.ip_forward_use_pmtu=1
+ See: https://marc.info/?t=143636239500003&r=1&w=2 for details
(Should fix kernel to have this by default on for tunnel devices)
- subtle path mtu mishandling issues
diff --git a/nhrpd/nhrp_cache.c b/nhrpd/nhrp_cache.c
index 1c8fee8b07..0b5a0427e6 100644..100755
--- a/nhrpd/nhrp_cache.c
+++ b/nhrpd/nhrp_cache.c
@@ -69,12 +69,13 @@ static void nhrp_cache_free(struct nhrp_cache *c)
{
struct nhrp_interface *nifp = c->ifp->info;
- zassert(c->cur.type == NHRP_CACHE_INVALID && c->cur.peer == NULL);
- zassert(c->new.type == NHRP_CACHE_INVALID && c->new.peer == NULL);
+ debugf(NHRP_DEBUG_COMMON, "Deleting cache entry");
nhrp_cache_counts[c->cur.type]--;
notifier_call(&c->notifier_list, NOTIFY_CACHE_DELETE);
zassert(!notifier_active(&c->notifier_list));
hash_release(nifp->cache_hash, c);
+ THREAD_OFF(c->t_timeout);
+ THREAD_OFF(c->t_auth);
XFREE(MTYPE_NHRP_CACHE, c);
}
@@ -140,6 +141,41 @@ struct nhrp_cache_config *nhrp_cache_config_get(struct interface *ifp,
create ? nhrp_cache_config_alloc : NULL);
}
+static void do_nhrp_cache_free(struct hash_bucket *hb,
+ void *arg __attribute__((__unused__)))
+{
+ struct nhrp_cache *c = hb->data;
+
+ nhrp_cache_free(c);
+}
+
+static void do_nhrp_cache_config_free(struct hash_bucket *hb,
+ void *arg __attribute__((__unused__)))
+{
+ struct nhrp_cache_config *cc = hb->data;
+
+ nhrp_cache_config_free(cc);
+}
+
+void nhrp_cache_interface_del(struct interface *ifp)
+{
+ struct nhrp_interface *nifp = ifp->info;
+
+ debugf(NHRP_DEBUG_COMMON, "Cleaning up undeleted cache entries (%lu)",
+ nifp->cache_hash ? nifp->cache_hash->count : 0);
+
+ if (nifp->cache_hash) {
+ hash_iterate(nifp->cache_hash, do_nhrp_cache_free, NULL);
+ hash_free(nifp->cache_hash);
+ }
+
+ if (nifp->cache_config_hash) {
+ hash_iterate(nifp->cache_config_hash, do_nhrp_cache_config_free,
+ NULL);
+ hash_free(nifp->cache_config_hash);
+ }
+}
+
struct nhrp_cache *nhrp_cache_get(struct interface *ifp,
union sockunion *remote_addr, int create)
{
@@ -164,6 +200,7 @@ struct nhrp_cache *nhrp_cache_get(struct interface *ifp,
static int nhrp_cache_do_free(struct thread *t)
{
struct nhrp_cache *c = THREAD_ARG(t);
+
c->t_timeout = NULL;
nhrp_cache_free(c);
return 0;
@@ -172,6 +209,7 @@ static int nhrp_cache_do_free(struct thread *t)
static int nhrp_cache_do_timeout(struct thread *t)
{
struct nhrp_cache *c = THREAD_ARG(t);
+
c->t_timeout = NULL;
if (c->cur.type != NHRP_CACHE_INVALID)
nhrp_cache_update_binding(c, c->cur.type, -1, NULL, 0, NULL);
diff --git a/nhrpd/nhrp_interface.c b/nhrpd/nhrp_interface.c
index 7768383e6b..269499cc59 100644..100755
--- a/nhrpd/nhrp_interface.c
+++ b/nhrpd/nhrp_interface.c
@@ -49,6 +49,21 @@ static int nhrp_if_new_hook(struct interface *ifp)
static int nhrp_if_delete_hook(struct interface *ifp)
{
+ struct nhrp_interface *nifp = ifp->info;
+
+ debugf(NHRP_DEBUG_IF, "Deleted interface (%s)", ifp->name);
+
+ nhrp_cache_interface_del(ifp);
+ nhrp_nhs_interface_del(ifp);
+ nhrp_peer_interface_del(ifp);
+
+ if (nifp->ipsec_profile)
+ free(nifp->ipsec_profile);
+ if (nifp->ipsec_fallback_profile)
+ free(nifp->ipsec_fallback_profile);
+ if (nifp->source)
+ free(nifp->source);
+
XFREE(MTYPE_NHRP_IF, ifp->info);
return 0;
}
diff --git a/nhrpd/nhrp_nhs.c b/nhrpd/nhrp_nhs.c
index 085cab347f..540708f1ae 100644..100755
--- a/nhrpd/nhrp_nhs.c
+++ b/nhrpd/nhrp_nhs.c
@@ -35,6 +35,7 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg)
union sockunion cie_nbma, cie_proto, *proto;
char buf[64];
int ok = 0, holdtime;
+ unsigned short mtu = 0;
nhrp_reqid_free(&nhrp_packet_reqid, &r->reqid);
@@ -57,6 +58,8 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg)
|| (cie->code == NHRP_CODE_ADMINISTRATIVELY_PROHIBITED
&& nhs->hub)))
ok = 0;
+ mtu = ntohs(cie->mtu);
+ debugf(NHRP_DEBUG_COMMON, "NHS: CIE MTU: %d", mtu);
}
if (!ok)
@@ -96,7 +99,7 @@ static void nhrp_reg_reply(struct nhrp_reqid *reqid, void *arg)
c = nhrp_cache_get(ifp, &p->dst_proto, 1);
if (c)
nhrp_cache_update_binding(c, NHRP_CACHE_NHS, holdtime,
- nhrp_peer_ref(r->peer), 0, NULL);
+ nhrp_peer_ref(r->peer), mtu, NULL);
}
static int nhrp_reg_timeout(struct thread *t)
@@ -197,7 +200,8 @@ static int nhrp_reg_send_req(struct thread *t)
/* FIXME: push CIE for each local protocol address */
cie = nhrp_cie_push(zb, NHRP_CODE_SUCCESS, NULL, NULL);
- cie->prefix_length = 0xff;
+ /* RFC2332 5.2.1 if unique is set then prefix length must be 0xff */
+ cie->prefix_length = (if_ad->flags & NHRP_IFF_REG_NO_UNIQUE) ? 8 * sockunion_get_addrlen(dst_proto) : 0xff;
cie->holding_time = htons(if_ad->holdtime);
cie->mtu = htons(if_ad->mtu);
@@ -378,6 +382,24 @@ int nhrp_nhs_free(struct nhrp_nhs *nhs)
return 0;
}
+void nhrp_nhs_interface_del(struct interface *ifp)
+{
+ struct nhrp_interface *nifp = ifp->info;
+ struct nhrp_nhs *nhs, *tmp;
+ afi_t afi;
+
+ for (afi = 0; afi < AFI_MAX; afi++) {
+ debugf(NHRP_DEBUG_COMMON, "Cleaning up nhs entries (%d)",
+ !list_empty(&nifp->afi[afi].nhslist_head));
+
+ list_for_each_entry_safe(nhs, tmp, &nifp->afi[afi].nhslist_head,
+ nhslist_entry)
+ {
+ nhrp_nhs_free(nhs);
+ }
+ }
+}
+
void nhrp_nhs_terminate(void)
{
struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
diff --git a/nhrpd/nhrp_peer.c b/nhrpd/nhrp_peer.c
index 2dc019ce65..9aaa9dec1e 100644..100755
--- a/nhrpd/nhrp_peer.c
+++ b/nhrpd/nhrp_peer.c
@@ -38,11 +38,17 @@ static void nhrp_packet_debug(struct zbuf *zb, const char *dir);
static void nhrp_peer_check_delete(struct nhrp_peer *p)
{
+ char buf[2][256];
struct nhrp_interface *nifp = p->ifp->info;
if (p->ref || notifier_active(&p->notifier_list))
return;
+ debugf(NHRP_DEBUG_COMMON, "Deleting peer ref:%d remote:%s local:%s",
+ p->ref,
+ sockunion2str(&p->vc->remote.nbma, buf[0], sizeof(buf[0])),
+ sockunion2str(&p->vc->local.nbma, buf[1], sizeof(buf[1])));
+
THREAD_OFF(p->t_fallback);
hash_release(nifp->peer_hash, p);
nhrp_interface_notify_del(p->ifp, &p->ifp_notifier);
@@ -185,6 +191,27 @@ static void *nhrp_peer_create(void *data)
return p;
}
+static void do_peer_hash_free(struct hash_bucket *hb,
+ void *arg __attribute__((__unused__)))
+{
+ struct nhrp_peer *p = hb->data;
+ nhrp_peer_check_delete(p);
+}
+
+void nhrp_peer_interface_del(struct interface *ifp)
+{
+ struct nhrp_interface *nifp = ifp->info;
+
+ debugf(NHRP_DEBUG_COMMON, "Cleaning up undeleted peer entries (%lu)",
+ nifp->peer_hash ? nifp->peer_hash->count : 0);
+
+ if (nifp->peer_hash) {
+ hash_iterate(nifp->peer_hash, do_peer_hash_free, NULL);
+ assert(nifp->peer_hash->count == 0);
+ hash_free(nifp->peer_hash);
+ }
+}
+
struct nhrp_peer *nhrp_peer_get(struct interface *ifp,
const union sockunion *remote_nbma)
{
@@ -271,6 +298,8 @@ int nhrp_peer_check(struct nhrp_peer *p, int establish)
return 0;
if (sockunion_family(&vc->local.nbma) == AF_UNSPEC)
return 0;
+ if (vc->ipsec)
+ return 1;
p->prio = establish > 1;
p->requested = 1;
diff --git a/nhrpd/nhrp_route.c b/nhrpd/nhrp_route.c
index e7d35b90ff..ce2b1fe2ff 100644..100755
--- a/nhrpd/nhrp_route.c
+++ b/nhrpd/nhrp_route.c
@@ -56,7 +56,7 @@ static void nhrp_route_update_put(struct route_node *rn)
struct route_info *ri = rn->info;
if (!ri->ifp && !ri->nhrp_ifp
- && sockunion_family(&ri->via) == AF_UNSPEC) {
+ && sockunion_is_null(&ri->via)) {
XFREE(MTYPE_NHRP_ROUTE, rn->info);
route_unlock_node(rn);
}
@@ -70,8 +70,7 @@ static void nhrp_route_update_zebra(const struct prefix *p,
struct route_node *rn;
struct route_info *ri;
- rn = nhrp_route_update_get(
- p, (sockunion_family(nexthop) != AF_UNSPEC) || ifp);
+ rn = nhrp_route_update_get(p, !sockunion_is_null(nexthop) || ifp);
if (rn) {
ri = rn->info;
ri->via = *nexthop;
@@ -225,7 +224,7 @@ int nhrp_route_read(ZAPI_CALLBACK_ARGS)
sockunion2str(&nexthop_addr, buf, sizeof(buf)),
ifp ? ifp->name : "(none)");
- nhrp_route_update_zebra(&api.prefix, &nexthop_addr, ifp);
+ nhrp_route_update_zebra(&api.prefix, &nexthop_addr, added ? ifp : NULL);
nhrp_shortcut_prefix_change(&api.prefix, !added);
return 0;
diff --git a/nhrpd/nhrp_shortcut.c b/nhrpd/nhrp_shortcut.c
index 2359cfa4ac..6ad0c9ea03 100644
--- a/nhrpd/nhrp_shortcut.c
+++ b/nhrpd/nhrp_shortcut.c
@@ -62,7 +62,7 @@ static void nhrp_shortcut_cache_notify(struct notifier_block *n,
s->p, s->cache->ifp->name);
nhrp_route_announce(1, s->type, s->p, s->cache->ifp,
- NULL, 0);
+ &s->cache->remote_addr, 0);
s->route_installed = 1;
}
break;
@@ -207,6 +207,7 @@ static void nhrp_shortcut_recv_resolution_rep(struct nhrp_reqid *reqid,
struct nhrp_extension_header *ext;
struct nhrp_cie_header *cie;
struct nhrp_cache *c = NULL;
+ struct nhrp_cache *c_dst_proto = NULL;
union sockunion *proto, cie_proto, *nbma, cie_nbma, nat_nbma;
struct prefix prefix, route_prefix;
struct zbuf extpl;
@@ -304,6 +305,22 @@ static void nhrp_shortcut_recv_resolution_rep(struct nhrp_reqid *reqid,
debugf(NHRP_DEBUG_COMMON,
"Shortcut: no cache for nbma %s", buf[2]);
}
+
+ /* Update cache binding for dst_proto as well */
+ if (proto != &pp->dst_proto) {
+ c_dst_proto = nhrp_cache_get(pp->ifp, &pp->dst_proto, 1);
+ if (c_dst_proto) {
+ debugf(NHRP_DEBUG_COMMON,
+ "Shortcut: cache found, update binding");
+ nhrp_cache_update_binding(c_dst_proto, NHRP_CACHE_DYNAMIC,
+ holding_time,
+ nhrp_peer_get(pp->ifp, nbma),
+ htons(cie->mtu), nbma);
+ } else {
+ debugf(NHRP_DEBUG_COMMON,
+ "Shortcut: no cache for nbma %s", buf[2]);
+ }
+ }
}
/* Update shortcut entry for subnet to protocol gw binding */
diff --git a/nhrpd/nhrpd.h b/nhrpd/nhrpd.h
index 80a365a3c3..a36d0c445d 100644..100755
--- a/nhrpd/nhrpd.h
+++ b/nhrpd/nhrpd.h
@@ -124,7 +124,7 @@ enum nhrp_notify_type {
struct nhrp_vc {
struct notifier_list notifier_list;
- uint8_t ipsec;
+ uint32_t ipsec;
uint8_t updating;
uint8_t abort_migration;
@@ -343,6 +343,7 @@ void nhrp_nhs_foreach(struct interface *ifp, afi_t afi,
void (*cb)(struct nhrp_nhs *, struct nhrp_registration *,
void *),
void *ctx);
+void nhrp_nhs_interface_del(struct interface *ifp);
void nhrp_route_update_nhrp(const struct prefix *p, struct interface *ifp);
void nhrp_route_announce(int add, enum nhrp_cache_type type,
@@ -366,6 +367,7 @@ void nhrp_shortcut_foreach(afi_t afi,
void nhrp_shortcut_purge(struct nhrp_shortcut *s, int force);
void nhrp_shortcut_prefix_change(const struct prefix *p, int deleted);
+void nhrp_cache_interface_del(struct interface *ifp);
void nhrp_cache_config_free(struct nhrp_cache_config *c);
struct nhrp_cache_config *nhrp_cache_config_get(struct interface *ifp,
union sockunion *remote_addr,
@@ -446,6 +448,7 @@ struct nhrp_reqid *nhrp_reqid_lookup(struct nhrp_reqid_pool *, uint32_t reqid);
int nhrp_packet_init(void);
+void nhrp_peer_interface_del(struct interface *ifp);
struct nhrp_peer *nhrp_peer_get(struct interface *ifp,
const union sockunion *remote_nbma);
struct nhrp_peer *nhrp_peer_ref(struct nhrp_peer *p);
diff --git a/pathd/path_pcep_cli.c b/pathd/path_pcep_cli.c
index 1e37304096..add3391f22 100644
--- a/pathd/path_pcep_cli.c
+++ b/pathd/path_pcep_cli.c
@@ -1156,14 +1156,17 @@ static void print_pcep_session(struct vty *vty, struct pce_opts *pce_opts,
}
if (pcc_info->is_best_multi_pce) {
- vty_out(vty, " MultiPCE precedence %d, best candidate\n",
+ vty_out(vty, " Precedence %d, best candidate\n",
((pcc_info->precedence > 0) ? pcc_info->precedence
: DEFAULT_PCE_PRECEDENCE));
} else {
- vty_out(vty, " MultiPCE precedence %d\n",
+ vty_out(vty, " Precedence %d\n",
((pcc_info->precedence > 0) ? pcc_info->precedence
: DEFAULT_PCE_PRECEDENCE));
}
+ vty_out(vty, " Confidence %s\n",
+ ((pcc_info->previous_best) ? "low"
+ : "normal"));
/* PCEPlib pcep session values, get a thread safe copy of the counters
*/
diff --git a/pathd/path_pcep_controller.h b/pathd/path_pcep_controller.h
index 8f25ccc1eb..f6eaa0ca2a 100644
--- a/pathd/path_pcep_controller.h
+++ b/pathd/path_pcep_controller.h
@@ -99,6 +99,7 @@ struct pcep_pcc_info {
uint32_t next_reqid;
uint32_t next_plspid;
bool is_best_multi_pce;
+ bool previous_best;
uint8_t precedence;
};
diff --git a/pathd/path_pcep_pcc.c b/pathd/path_pcep_pcc.c
index 6bb5ce4bd1..c1f60edd22 100644
--- a/pathd/path_pcep_pcc.c
+++ b/pathd/path_pcep_pcc.c
@@ -1101,6 +1101,7 @@ void pcep_pcc_copy_pcc_info(struct pcc_state **pcc,
pcc_info->status = pcc_state->status;
pcc_info->pcc_id = pcc_state->id;
pcc_info->is_best_multi_pce = pcc_state->is_best;
+ pcc_info->previous_best = pcc_state->previous_best;
pcc_info->precedence =
pcc_state->pce_opts ? pcc_state->pce_opts->precedence : 0;
memcpy(&pcc_info->pcc_addr, &pcc_state->pcc_addr_tr,
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
index 1acfece895..e873af5759 100644
--- a/pimd/pim_bsm.c
+++ b/pimd/pim_bsm.c
@@ -63,7 +63,7 @@ void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
}
}
-static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
+void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
{
if (bsgrp_node->bsrp_list)
list_delete(&bsgrp_node->bsrp_list);
@@ -72,7 +72,7 @@ static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
}
-static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
+void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
{
struct route_node *rn;
@@ -222,7 +222,7 @@ static int pim_on_bs_timer(struct thread *t)
return 0;
}
-static void pim_bs_timer_stop(struct bsm_scope *scope)
+void pim_bs_timer_stop(struct bsm_scope *scope)
{
if (PIM_DEBUG_BSM)
zlog_debug("%s : BS timer being stopped of sz: %d", __func__,
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
index 0758c94f19..2829c1e05a 100644
--- a/pimd/pim_bsm.h
+++ b/pimd/pim_bsm.h
@@ -195,4 +195,7 @@ int pim_bsm_process(struct interface *ifp,
bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
struct prefix *grp);
+void pim_bs_timer_stop(struct bsm_scope *scope);
+void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node);
+void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp);
#endif
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 8e7b13cc17..ff85151839 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -4001,6 +4001,152 @@ DEFUN (clear_ip_pim_oil,
return CMD_SUCCESS;
}
+static void clear_pim_bsr_db(struct pim_instance *pim)
+{
+ struct route_node *rn;
+ struct route_node *rpnode;
+ struct bsgrp_node *bsgrp;
+ struct prefix nht_p;
+ struct prefix g_all;
+ struct rp_info *rp_all;
+ struct pim_upstream *up;
+ struct rp_info *rp_info;
+ bool is_bsr_tracking = true;
+
+ /* Remove next hop tracking for the bsr */
+ nht_p.family = AF_INET;
+ nht_p.prefixlen = IPV4_MAX_BITLEN;
+ nht_p.u.prefix4 = pim->global_scope.current_bsr;
+ if (PIM_DEBUG_BSM) {
+ zlog_debug("%s: Deregister BSR addr %pFX with Zebra NHT",
+ __func__, &nht_p);
+ }
+ pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL, is_bsr_tracking);
+
+ /* Reset scope zone data */
+ pim->global_scope.accept_nofwd_bsm = false;
+ pim->global_scope.state = ACCEPT_ANY;
+ pim->global_scope.current_bsr.s_addr = INADDR_ANY;
+ pim->global_scope.current_bsr_prio = 0;
+ pim->global_scope.current_bsr_first_ts = 0;
+ pim->global_scope.current_bsr_last_ts = 0;
+ pim->global_scope.bsm_frag_tag = 0;
+ list_delete_all_node(pim->global_scope.bsm_list);
+
+ pim_bs_timer_stop(&pim->global_scope);
+
+ for (rn = route_top(pim->global_scope.bsrp_table); rn;
+ rn = route_next(rn)) {
+ bsgrp = rn->info;
+ if (!bsgrp)
+ continue;
+
+ rpnode = route_node_lookup(pim->rp_table, &bsgrp->group);
+
+ if (!rpnode) {
+ pim_free_bsgrp_node(bsgrp->scope->bsrp_table,
+ &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ continue;
+ }
+
+ rp_info = (struct rp_info *)rpnode->info;
+
+ if ((!rp_info) || (rp_info->rp_src != RP_SRC_BSR)) {
+ pim_free_bsgrp_node(bsgrp->scope->bsrp_table,
+ &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ continue;
+ }
+
+ /* Deregister addr with Zebra NHT */
+ nht_p.family = AF_INET;
+ nht_p.prefixlen = IPV4_MAX_BITLEN;
+ nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+
+ if (PIM_DEBUG_PIM_NHT_RP) {
+ zlog_debug("%s: Deregister RP addr %pFX with Zebra ",
+ __func__, &nht_p);
+ }
+
+ pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
+
+ if (!str2prefix("224.0.0.0/4", &g_all))
+ return;
+
+ rp_all = pim_rp_find_match_group(pim, &g_all);
+
+ if (rp_all == rp_info) {
+ rp_all->rp.rpf_addr.family = AF_INET;
+ rp_all->rp.rpf_addr.u.prefix4.s_addr = INADDR_NONE;
+ rp_all->i_am_rp = 0;
+ } else {
+ /* Delete the rp_info from rp-list */
+ listnode_delete(pim->rp_list, rp_info);
+
+ /* Delete the rp node from rp_table */
+ rpnode->info = NULL;
+ route_unlock_node(rpnode);
+ route_unlock_node(rpnode);
+ }
+
+ XFREE(MTYPE_PIM_RP, rp_info);
+
+ pim_free_bsgrp_node(bsgrp->scope->bsrp_table, &bsgrp->group);
+ pim_free_bsgrp_data(bsgrp);
+ }
+ pim_rp_refresh_group_to_rp_mapping(pim);
+
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ /* Find the upstream (*, G) whose upstream address is same as
+ * the RP
+ */
+ if (up->sg.src.s_addr != INADDR_ANY)
+ continue;
+
+ struct prefix grp;
+ struct rp_info *trp_info;
+
+ grp.family = AF_INET;
+ grp.prefixlen = IPV4_MAX_BITLEN;
+ grp.u.prefix4 = up->sg.grp;
+
+ trp_info = pim_rp_find_match_group(pim, &grp);
+
+ /* RP not found for the group grp */
+ if (pim_rpf_addr_is_inaddr_none(&trp_info->rp)) {
+ pim_upstream_rpf_clear(pim, up);
+ pim_rp_set_upstream_addr(pim, &up->upstream_addr,
+ up->sg.src, up->sg.grp);
+ } else {
+ /* RP found for the group grp */
+ pim_upstream_update(pim, up);
+ }
+ }
+}
+
+
+DEFUN (clear_ip_pim_bsr_db,
+ clear_ip_pim_bsr_db_cmd,
+ "clear ip pim [vrf NAME] bsr-data",
+ CLEAR_STR
+ IP_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset pim bsr data\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ clear_pim_bsr_db(vrf->info);
+
+ return CMD_SUCCESS;
+}
+
DEFUN (show_ip_igmp_interface,
show_ip_igmp_interface_cmd,
"show ip igmp [vrf NAME] interface [detail|WORD] [json]",
@@ -11396,6 +11542,7 @@ void pim_cmd_init(void)
install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_bsr_db_cmd);
install_element(ENABLE_NODE, &show_debugging_pim_cmd);
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
index e7ff434f4b..fc0f514a49 100644
--- a/pimd/pim_ifchannel.c
+++ b/pimd/pim_ifchannel.c
@@ -550,8 +550,21 @@ struct pim_ifchannel *pim_ifchannel_add(struct interface *ifp,
struct pim_upstream *up;
ch = pim_ifchannel_find(ifp, sg);
- if (ch)
+ if (ch) {
+ if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_PIM)
+ PIM_IF_FLAG_SET_PROTO_PIM(ch->flags);
+
+ if (up_flags == PIM_UPSTREAM_FLAG_MASK_SRC_IGMP)
+ PIM_IF_FLAG_SET_PROTO_IGMP(ch->flags);
+
+ if (ch->upstream)
+ ch->upstream->flags |= up_flags;
+ else if (PIM_DEBUG_EVENTS)
+ zlog_debug("%s:%s No Upstream found", __func__,
+ pim_str_sg_dump(sg));
+
return ch;
+ }
pim_ifp = ifp->info;
@@ -642,6 +655,12 @@ static void ifjoin_to_noinfo(struct pim_ifchannel *ch, bool ch_del)
{
pim_forward_stop(ch, !ch_del);
pim_ifchannel_ifjoin_switch(__func__, ch, PIM_IFJOIN_NOINFO);
+
+ if (ch->upstream)
+ PIM_UPSTREAM_FLAG_UNSET_SRC_PIM(ch->upstream->flags);
+
+ PIM_IF_FLAG_UNSET_PROTO_PIM(ch->flags);
+
if (ch_del)
delete_on_noinfo(ch);
}
@@ -1272,6 +1291,13 @@ void pim_ifchannel_local_membership_del(struct interface *ifp,
* parent' delete_no_info */
}
}
+
+ /* Resettng the IGMP flags here */
+ if (orig->upstream)
+ PIM_UPSTREAM_FLAG_UNSET_SRC_IGMP(orig->upstream->flags);
+
+ PIM_IF_FLAG_UNSET_PROTO_IGMP(orig->flags);
+
delete_on_noinfo(orig);
}
diff --git a/pimd/pim_igmp.c b/pimd/pim_igmp.c
index 9924e335b0..73e42e9d83 100644
--- a/pimd/pim_igmp.c
+++ b/pimd/pim_igmp.c
@@ -558,8 +558,8 @@ int pim_igmp_packet(struct igmp_sock *igmp, char *buf, size_t len)
igmp_msg, igmp_msg_len);
case PIM_IGMP_V2_LEAVE_GROUP:
- return igmp_v2_recv_leave(igmp, ip_hdr->ip_src, from_str,
- igmp_msg, igmp_msg_len);
+ return igmp_v2_recv_leave(igmp, ip_hdr, from_str, igmp_msg,
+ igmp_msg_len);
case PIM_IGMP_MTRACE_RESPONSE:
return igmp_mtrace_recv_response(igmp, ip_hdr, ip_hdr->ip_src,
diff --git a/pimd/pim_igmpv2.c b/pimd/pim_igmpv2.c
index d836c66cbb..7f3c7a0f8c 100644
--- a/pimd/pim_igmpv2.c
+++ b/pimd/pim_igmpv2.c
@@ -158,12 +158,13 @@ int igmp_v2_recv_report(struct igmp_sock *igmp, struct in_addr from,
return 0;
}
-int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from,
+int igmp_v2_recv_leave(struct igmp_sock *igmp, struct ip *ip_hdr,
const char *from_str, char *igmp_msg, int igmp_msg_len)
{
struct interface *ifp = igmp->interface;
struct in_addr group_addr;
char group_str[INET_ADDRSTRLEN];
+ struct in_addr from = ip_hdr->ip_src;
on_trace(__func__, igmp->interface, from);
@@ -184,8 +185,6 @@ int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from,
return -1;
}
- /* Collecting IGMP Rx stats */
- igmp->rx_stats.leave_v2++;
memcpy(&group_addr, igmp_msg + 4, sizeof(struct in_addr));
@@ -195,6 +194,32 @@ int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from,
zlog_debug("Recv IGMPv2 LEAVE from %s on %s for %s", from_str,
ifp->name, group_str);
}
+ /*
+ * As per RFC 2236, section 9:
+ Message Type Destination Group
+ ------------ -----------------
+ General Query ALL-SYSTEMS (224.0.0.1)
+ Group-Specific Query The group being queried
+ Membership Report The group being reported
+ Leave Message ALL-ROUTERS (224.0.0.2)
+
+ Note: in older (i.e., non-standard and now obsolete) versions of
+ IGMPv2, hosts send Leave Messages to the group being left. A
+ router SHOULD accept Leave Messages addressed to the group being
+ left in the interests of backwards compatibility with such hosts.
+ In all cases, however, hosts MUST send to the ALL-ROUTERS address
+ to be compliant with this specification.
+ */
+ if ((ntohl(ip_hdr->ip_dst.s_addr) != INADDR_ALLRTRS_GROUP)
+ && (ip_hdr->ip_dst.s_addr != group_addr.s_addr)) {
+ if (PIM_DEBUG_IGMP_EVENTS)
+ zlog_debug(
+ "IGMPv2 Leave message is ignored since received on address other than ALL-ROUTERS or Group-address");
+ return -1;
+ }
+
+ /* Collecting IGMP Rx stats */
+ igmp->rx_stats.leave_v2++;
/*
* RFC 3376
diff --git a/pimd/pim_igmpv2.h b/pimd/pim_igmpv2.h
index f0a6fdc5fb..29591ff16c 100644
--- a/pimd/pim_igmpv2.h
+++ b/pimd/pim_igmpv2.h
@@ -29,7 +29,7 @@ void igmp_v2_send_query(struct igmp_group *group, int fd, const char *ifname,
int igmp_v2_recv_report(struct igmp_sock *igmp, struct in_addr from,
const char *from_str, char *igmp_msg, int igmp_msg_len);
-int igmp_v2_recv_leave(struct igmp_sock *igmp, struct in_addr from,
+int igmp_v2_recv_leave(struct igmp_sock *igmp, struct ip *ip_hdr,
const char *from_str, char *igmp_msg, int igmp_msg_len);
#endif /* PIM_IGMPV2_H */
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index fa5d6f37bf..301a27001f 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -271,7 +271,7 @@ struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
*
* This is a placeholder function for now.
*/
-static void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
+void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim)
{
pim_msdp_i_am_rp_changed(pim);
pim_upstream_reeval_use_rpt(pim);
diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h
index 8a12cb076c..dd7cd5d75e 100644
--- a/pimd/pim_rp.h
+++ b/pimd/pim_rp.h
@@ -86,4 +86,5 @@ int pim_rp_list_cmp(void *v1, void *v2);
struct rp_info *pim_rp_find_match_group(struct pim_instance *pim,
const struct prefix *group);
void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up);
+void pim_rp_refresh_group_to_rp_mapping(struct pim_instance *pim);
#endif
diff --git a/tests/topotests/bgp_lu_topo1/R1/bgpd.conf b/tests/topotests/bgp_lu_topo1/R1/bgpd.conf
new file mode 100644
index 0000000000..1bdb4c7a3e
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R1/bgpd.conf
@@ -0,0 +1,21 @@
+!
+debug bgp labelpool
+debug bgp zebra
+!
+router bgp 1
+ bgp router-id 10.0.0.1
+ timers bgp 3 9
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 10.0.0.2 remote-as 2
+ neighbor 10.0.0.2 solo
+ neighbor 10.0.0.2 timers connect 10
+!
+ address-family ipv4 unicast
+ no neighbor 10.0.0.2 activate
+ exit-address-family
+ !
+ address-family ipv4 labeled-unicast
+ neighbor 10.0.0.2 activate
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json
new file mode 100644
index 0000000000..29e6c2cbf7
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R1/labelpool.summ.json
@@ -0,0 +1,8 @@
+{
+ "Ledger":506,
+ "InUse":506,
+ "Requests":0,
+ "LabelChunks":11,
+ "Pending":0,
+ "Reconnects":0
+}
diff --git a/tests/topotests/bgp_lu_topo1/R1/zebra.conf b/tests/topotests/bgp_lu_topo1/R1/zebra.conf
new file mode 100644
index 0000000000..4f6fee579f
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R1/zebra.conf
@@ -0,0 +1,6 @@
+debug zebra events
+debug zebra dplane
+debug zebra mpls
+!
+interface R1-eth0
+ ip address 10.0.0.1/24
diff --git a/tests/topotests/bgp_lu_topo1/R2/bgpd.conf b/tests/topotests/bgp_lu_topo1/R2/bgpd.conf
new file mode 100644
index 0000000000..bac608e1c3
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R2/bgpd.conf
@@ -0,0 +1,23 @@
+debug bgp labelpool
+debug bgp zebra
+!
+router bgp 2
+ bgp router-id 10.0.0.2
+ timers bgp 3 9
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 10.0.1.3 remote-as 2
+ neighbor 10.0.1.3 update-source 10.0.1.2
+ neighbor 10.0.1.3 timers connect 10
+ neighbor 10.0.0.1 remote-as 1
+ neighbor 10.0.0.1 timers connect 10
+!
+ address-family ipv4 unicast
+ neighbor 10.0.1.3 activate
+ no neighbor 10.0.0.1 activate
+ exit-address-family
+ !
+ address-family ipv4 labeled-unicast
+ neighbor 10.0.0.1 activate
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
new file mode 100644
index 0000000000..29e6c2cbf7
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R2/labelpool.summ.json
@@ -0,0 +1,8 @@
+{
+ "Ledger":506,
+ "InUse":506,
+ "Requests":0,
+ "LabelChunks":11,
+ "Pending":0,
+ "Reconnects":0
+}
diff --git a/tests/topotests/bgp_lu_topo1/R2/zebra.conf b/tests/topotests/bgp_lu_topo1/R2/zebra.conf
new file mode 100644
index 0000000000..33ee53efe7
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R2/zebra.conf
@@ -0,0 +1,11 @@
+!
+debug zebra events
+debug zebra dplane
+debug zebra mpls
+!
+interface R2-eth0
+ ip address 10.0.0.2/24
+!
+interface R2-eth1
+ ip address 10.0.1.2/24
+! \ No newline at end of file
diff --git a/tests/topotests/bgp_lu_topo1/R3/bgpd.conf b/tests/topotests/bgp_lu_topo1/R3/bgpd.conf
new file mode 100644
index 0000000000..b42df022e0
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R3/bgpd.conf
@@ -0,0 +1,523 @@
+log file /tmp/bgpd.log
+!
+debug bgp updates
+!
+router bgp 2
+ bgp router-id 10.0.1.3
+ timers bgp 3 9
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 10.0.1.2 remote-as 2
+ neighbor 10.0.1.2 timers connect 10
+ !
+ address-family ipv4 unicast
+ neighbor 10.0.1.2 activate
+ network 11.0.0.1/32
+ network 11.0.0.2/32
+ network 11.0.0.3/32
+ network 11.0.0.4/32
+ network 11.0.0.5/32
+ network 11.0.0.6/32
+ network 11.0.0.7/32
+ network 11.0.0.8/32
+ network 11.0.0.9/32
+ network 11.0.0.10/32
+ network 11.0.0.11/32
+ network 11.0.0.12/32
+ network 11.0.0.13/32
+ network 11.0.0.14/32
+ network 11.0.0.15/32
+ network 11.0.0.16/32
+ network 11.0.0.17/32
+ network 11.0.0.18/32
+ network 11.0.0.19/32
+ network 11.0.0.20/32
+ network 11.0.0.21/32
+ network 11.0.0.22/32
+ network 11.0.0.23/32
+ network 11.0.0.24/32
+ network 11.0.0.25/32
+ network 11.0.0.26/32
+ network 11.0.0.27/32
+ network 11.0.0.28/32
+ network 11.0.0.29/32
+ network 11.0.0.30/32
+ network 11.0.0.31/32
+ network 11.0.0.32/32
+ network 11.0.0.33/32
+ network 11.0.0.34/32
+ network 11.0.0.35/32
+ network 11.0.0.36/32
+ network 11.0.0.37/32
+ network 11.0.0.38/32
+ network 11.0.0.39/32
+ network 11.0.0.40/32
+ network 11.0.0.41/32
+ network 11.0.0.42/32
+ network 11.0.0.43/32
+ network 11.0.0.44/32
+ network 11.0.0.45/32
+ network 11.0.0.46/32
+ network 11.0.0.47/32
+ network 11.0.0.48/32
+ network 11.0.0.49/32
+ network 11.0.0.50/32
+ network 11.0.0.51/32
+ network 11.0.0.52/32
+ network 11.0.0.53/32
+ network 11.0.0.54/32
+ network 11.0.0.55/32
+ network 11.0.0.56/32
+ network 11.0.0.57/32
+ network 11.0.0.58/32
+ network 11.0.0.59/32
+ network 11.0.0.60/32
+ network 11.0.0.61/32
+ network 11.0.0.62/32
+ network 11.0.0.63/32
+ network 11.0.0.64/32
+ network 11.0.0.65/32
+ network 11.0.0.66/32
+ network 11.0.0.67/32
+ network 11.0.0.68/32
+ network 11.0.0.69/32
+ network 11.0.0.70/32
+ network 11.0.0.71/32
+ network 11.0.0.72/32
+ network 11.0.0.73/32
+ network 11.0.0.74/32
+ network 11.0.0.75/32
+ network 11.0.0.76/32
+ network 11.0.0.77/32
+ network 11.0.0.78/32
+ network 11.0.0.79/32
+ network 11.0.0.80/32
+ network 11.0.0.81/32
+ network 11.0.0.82/32
+ network 11.0.0.83/32
+ network 11.0.0.84/32
+ network 11.0.0.85/32
+ network 11.0.0.86/32
+ network 11.0.0.87/32
+ network 11.0.0.88/32
+ network 11.0.0.89/32
+ network 11.0.0.90/32
+ network 11.0.0.91/32
+ network 11.0.0.92/32
+ network 11.0.0.93/32
+ network 11.0.0.94/32
+ network 11.0.0.95/32
+ network 11.0.0.96/32
+ network 11.0.0.97/32
+ network 11.0.0.98/32
+ network 11.0.0.99/32
+ network 11.0.0.100/32
+ network 11.0.0.101/32
+ network 11.0.0.102/32
+ network 11.0.0.103/32
+ network 11.0.0.104/32
+ network 11.0.0.105/32
+ network 11.0.0.106/32
+ network 11.0.0.107/32
+ network 11.0.0.108/32
+ network 11.0.0.109/32
+ network 11.0.0.110/32
+ network 11.0.0.111/32
+ network 11.0.0.112/32
+ network 11.0.0.113/32
+ network 11.0.0.114/32
+ network 11.0.0.115/32
+ network 11.0.0.116/32
+ network 11.0.0.117/32
+ network 11.0.0.118/32
+ network 11.0.0.119/32
+ network 11.0.0.120/32
+ network 11.0.0.121/32
+ network 11.0.0.122/32
+ network 11.0.0.123/32
+ network 11.0.0.124/32
+ network 11.0.0.125/32
+ network 11.0.0.126/32
+ network 11.0.0.127/32
+ network 11.0.0.128/32
+ network 11.0.0.129/32
+ network 11.0.0.130/32
+ network 11.0.0.131/32
+ network 11.0.0.132/32
+ network 11.0.0.133/32
+ network 11.0.0.134/32
+ network 11.0.0.135/32
+ network 11.0.0.136/32
+ network 11.0.0.137/32
+ network 11.0.0.138/32
+ network 11.0.0.139/32
+ network 11.0.0.140/32
+ network 11.0.0.141/32
+ network 11.0.0.142/32
+ network 11.0.0.143/32
+ network 11.0.0.144/32
+ network 11.0.0.145/32
+ network 11.0.0.146/32
+ network 11.0.0.147/32
+ network 11.0.0.148/32
+ network 11.0.0.149/32
+ network 11.0.0.150/32
+ network 11.0.0.151/32
+ network 11.0.0.152/32
+ network 11.0.0.153/32
+ network 11.0.0.154/32
+ network 11.0.0.155/32
+ network 11.0.0.156/32
+ network 11.0.0.157/32
+ network 11.0.0.158/32
+ network 11.0.0.159/32
+ network 11.0.0.160/32
+ network 11.0.0.161/32
+ network 11.0.0.162/32
+ network 11.0.0.163/32
+ network 11.0.0.164/32
+ network 11.0.0.165/32
+ network 11.0.0.166/32
+ network 11.0.0.167/32
+ network 11.0.0.168/32
+ network 11.0.0.169/32
+ network 11.0.0.170/32
+ network 11.0.0.171/32
+ network 11.0.0.172/32
+ network 11.0.0.173/32
+ network 11.0.0.174/32
+ network 11.0.0.175/32
+ network 11.0.0.176/32
+ network 11.0.0.177/32
+ network 11.0.0.178/32
+ network 11.0.0.179/32
+ network 11.0.0.180/32
+ network 11.0.0.181/32
+ network 11.0.0.182/32
+ network 11.0.0.183/32
+ network 11.0.0.184/32
+ network 11.0.0.185/32
+ network 11.0.0.186/32
+ network 11.0.0.187/32
+ network 11.0.0.188/32
+ network 11.0.0.189/32
+ network 11.0.0.190/32
+ network 11.0.0.191/32
+ network 11.0.0.192/32
+ network 11.0.0.193/32
+ network 11.0.0.194/32
+ network 11.0.0.195/32
+ network 11.0.0.196/32
+ network 11.0.0.197/32
+ network 11.0.0.198/32
+ network 11.0.0.199/32
+ network 11.0.0.200/32
+ network 11.0.0.201/32
+ network 11.0.0.202/32
+ network 11.0.0.203/32
+ network 11.0.0.204/32
+ network 11.0.0.205/32
+ network 11.0.0.206/32
+ network 11.0.0.207/32
+ network 11.0.0.208/32
+ network 11.0.0.209/32
+ network 11.0.0.210/32
+ network 11.0.0.211/32
+ network 11.0.0.212/32
+ network 11.0.0.213/32
+ network 11.0.0.214/32
+ network 11.0.0.215/32
+ network 11.0.0.216/32
+ network 11.0.0.217/32
+ network 11.0.0.218/32
+ network 11.0.0.219/32
+ network 11.0.0.220/32
+ network 11.0.0.221/32
+ network 11.0.0.222/32
+ network 11.0.0.223/32
+ network 11.0.0.224/32
+ network 11.0.0.225/32
+ network 11.0.0.226/32
+ network 11.0.0.227/32
+ network 11.0.0.228/32
+ network 11.0.0.229/32
+ network 11.0.0.230/32
+ network 11.0.0.231/32
+ network 11.0.0.232/32
+ network 11.0.0.233/32
+ network 11.0.0.234/32
+ network 11.0.0.235/32
+ network 11.0.0.236/32
+ network 11.0.0.237/32
+ network 11.0.0.238/32
+ network 11.0.0.239/32
+ network 11.0.0.240/32
+ network 11.0.0.241/32
+ network 11.0.0.242/32
+ network 11.0.0.243/32
+ network 11.0.0.244/32
+ network 11.0.0.245/32
+ network 11.0.0.246/32
+ network 11.0.0.247/32
+ network 11.0.0.248/32
+ network 11.0.0.249/32
+ network 11.0.0.250/32
+ network 11.0.0.251/32
+ network 11.0.0.252/32
+ network 11.0.0.253/32
+ network 11.0.1.1/32
+ network 11.0.1.2/32
+ network 11.0.1.3/32
+ network 11.0.1.4/32
+ network 11.0.1.5/32
+ network 11.0.1.6/32
+ network 11.0.1.7/32
+ network 11.0.1.8/32
+ network 11.0.1.9/32
+ network 11.0.1.10/32
+ network 11.0.1.11/32
+ network 11.0.1.12/32
+ network 11.0.1.13/32
+ network 11.0.1.14/32
+ network 11.0.1.15/32
+ network 11.0.1.16/32
+ network 11.0.1.17/32
+ network 11.0.1.18/32
+ network 11.0.1.19/32
+ network 11.0.1.20/32
+ network 11.0.1.21/32
+ network 11.0.1.22/32
+ network 11.0.1.23/32
+ network 11.0.1.24/32
+ network 11.0.1.25/32
+ network 11.0.1.26/32
+ network 11.0.1.27/32
+ network 11.0.1.28/32
+ network 11.0.1.29/32
+ network 11.0.1.30/32
+ network 11.0.1.31/32
+ network 11.0.1.32/32
+ network 11.0.1.33/32
+ network 11.0.1.34/32
+ network 11.0.1.35/32
+ network 11.0.1.36/32
+ network 11.0.1.37/32
+ network 11.0.1.38/32
+ network 11.0.1.39/32
+ network 11.0.1.40/32
+ network 11.0.1.41/32
+ network 11.0.1.42/32
+ network 11.0.1.43/32
+ network 11.0.1.44/32
+ network 11.0.1.45/32
+ network 11.0.1.46/32
+ network 11.0.1.47/32
+ network 11.0.1.48/32
+ network 11.0.1.49/32
+ network 11.0.1.50/32
+ network 11.0.1.51/32
+ network 11.0.1.52/32
+ network 11.0.1.53/32
+ network 11.0.1.54/32
+ network 11.0.1.55/32
+ network 11.0.1.56/32
+ network 11.0.1.57/32
+ network 11.0.1.58/32
+ network 11.0.1.59/32
+ network 11.0.1.60/32
+ network 11.0.1.61/32
+ network 11.0.1.62/32
+ network 11.0.1.63/32
+ network 11.0.1.64/32
+ network 11.0.1.65/32
+ network 11.0.1.66/32
+ network 11.0.1.67/32
+ network 11.0.1.68/32
+ network 11.0.1.69/32
+ network 11.0.1.70/32
+ network 11.0.1.71/32
+ network 11.0.1.72/32
+ network 11.0.1.73/32
+ network 11.0.1.74/32
+ network 11.0.1.75/32
+ network 11.0.1.76/32
+ network 11.0.1.77/32
+ network 11.0.1.78/32
+ network 11.0.1.79/32
+ network 11.0.1.80/32
+ network 11.0.1.81/32
+ network 11.0.1.82/32
+ network 11.0.1.83/32
+ network 11.0.1.84/32
+ network 11.0.1.85/32
+ network 11.0.1.86/32
+ network 11.0.1.87/32
+ network 11.0.1.88/32
+ network 11.0.1.89/32
+ network 11.0.1.90/32
+ network 11.0.1.91/32
+ network 11.0.1.92/32
+ network 11.0.1.93/32
+ network 11.0.1.94/32
+ network 11.0.1.95/32
+ network 11.0.1.96/32
+ network 11.0.1.97/32
+ network 11.0.1.98/32
+ network 11.0.1.99/32
+ network 11.0.1.100/32
+ network 11.0.1.101/32
+ network 11.0.1.102/32
+ network 11.0.1.103/32
+ network 11.0.1.104/32
+ network 11.0.1.105/32
+ network 11.0.1.106/32
+ network 11.0.1.107/32
+ network 11.0.1.108/32
+ network 11.0.1.109/32
+ network 11.0.1.110/32
+ network 11.0.1.111/32
+ network 11.0.1.112/32
+ network 11.0.1.113/32
+ network 11.0.1.114/32
+ network 11.0.1.115/32
+ network 11.0.1.116/32
+ network 11.0.1.117/32
+ network 11.0.1.118/32
+ network 11.0.1.119/32
+ network 11.0.1.120/32
+ network 11.0.1.121/32
+ network 11.0.1.122/32
+ network 11.0.1.123/32
+ network 11.0.1.124/32
+ network 11.0.1.125/32
+ network 11.0.1.126/32
+ network 11.0.1.127/32
+ network 11.0.1.128/32
+ network 11.0.1.129/32
+ network 11.0.1.130/32
+ network 11.0.1.131/32
+ network 11.0.1.132/32
+ network 11.0.1.133/32
+ network 11.0.1.134/32
+ network 11.0.1.135/32
+ network 11.0.1.136/32
+ network 11.0.1.137/32
+ network 11.0.1.138/32
+ network 11.0.1.139/32
+ network 11.0.1.140/32
+ network 11.0.1.141/32
+ network 11.0.1.142/32
+ network 11.0.1.143/32
+ network 11.0.1.144/32
+ network 11.0.1.145/32
+ network 11.0.1.146/32
+ network 11.0.1.147/32
+ network 11.0.1.148/32
+ network 11.0.1.149/32
+ network 11.0.1.150/32
+ network 11.0.1.151/32
+ network 11.0.1.152/32
+ network 11.0.1.153/32
+ network 11.0.1.154/32
+ network 11.0.1.155/32
+ network 11.0.1.156/32
+ network 11.0.1.157/32
+ network 11.0.1.158/32
+ network 11.0.1.159/32
+ network 11.0.1.160/32
+ network 11.0.1.161/32
+ network 11.0.1.162/32
+ network 11.0.1.163/32
+ network 11.0.1.164/32
+ network 11.0.1.165/32
+ network 11.0.1.166/32
+ network 11.0.1.167/32
+ network 11.0.1.168/32
+ network 11.0.1.169/32
+ network 11.0.1.170/32
+ network 11.0.1.171/32
+ network 11.0.1.172/32
+ network 11.0.1.173/32
+ network 11.0.1.174/32
+ network 11.0.1.175/32
+ network 11.0.1.176/32
+ network 11.0.1.177/32
+ network 11.0.1.178/32
+ network 11.0.1.179/32
+ network 11.0.1.180/32
+ network 11.0.1.181/32
+ network 11.0.1.182/32
+ network 11.0.1.183/32
+ network 11.0.1.184/32
+ network 11.0.1.185/32
+ network 11.0.1.186/32
+ network 11.0.1.187/32
+ network 11.0.1.188/32
+ network 11.0.1.189/32
+ network 11.0.1.190/32
+ network 11.0.1.191/32
+ network 11.0.1.192/32
+ network 11.0.1.193/32
+ network 11.0.1.194/32
+ network 11.0.1.195/32
+ network 11.0.1.196/32
+ network 11.0.1.197/32
+ network 11.0.1.198/32
+ network 11.0.1.199/32
+ network 11.0.1.200/32
+ network 11.0.1.201/32
+ network 11.0.1.202/32
+ network 11.0.1.203/32
+ network 11.0.1.204/32
+ network 11.0.1.205/32
+ network 11.0.1.206/32
+ network 11.0.1.207/32
+ network 11.0.1.208/32
+ network 11.0.1.209/32
+ network 11.0.1.210/32
+ network 11.0.1.211/32
+ network 11.0.1.212/32
+ network 11.0.1.213/32
+ network 11.0.1.214/32
+ network 11.0.1.215/32
+ network 11.0.1.216/32
+ network 11.0.1.217/32
+ network 11.0.1.218/32
+ network 11.0.1.219/32
+ network 11.0.1.220/32
+ network 11.0.1.221/32
+ network 11.0.1.222/32
+ network 11.0.1.223/32
+ network 11.0.1.224/32
+ network 11.0.1.225/32
+ network 11.0.1.226/32
+ network 11.0.1.227/32
+ network 11.0.1.228/32
+ network 11.0.1.229/32
+ network 11.0.1.230/32
+ network 11.0.1.231/32
+ network 11.0.1.232/32
+ network 11.0.1.233/32
+ network 11.0.1.234/32
+ network 11.0.1.235/32
+ network 11.0.1.236/32
+ network 11.0.1.237/32
+ network 11.0.1.238/32
+ network 11.0.1.239/32
+ network 11.0.1.240/32
+ network 11.0.1.241/32
+ network 11.0.1.242/32
+ network 11.0.1.243/32
+ network 11.0.1.244/32
+ network 11.0.1.245/32
+ network 11.0.1.246/32
+ network 11.0.1.247/32
+ network 11.0.1.248/32
+ network 11.0.1.249/32
+ network 11.0.1.250/32
+ network 11.0.1.251/32
+ network 11.0.1.252/32
+ network 11.0.1.253/32
+ exit-address-family
+ !
+!
diff --git a/tests/topotests/bgp_lu_topo1/R3/zebra.conf b/tests/topotests/bgp_lu_topo1/R3/zebra.conf
new file mode 100644
index 0000000000..524978bff6
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/R3/zebra.conf
@@ -0,0 +1,9 @@
+log file /tmp/zebra.log
+!
+debug zebra events
+debug zebra packet detail
+debug zebra mpls
+!
+interface R3-eth0
+ ip address 10.0.1.3/24
+!
diff --git a/tests/topotests/bgp_lu_topo1/test_bgp_lu.py b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py
new file mode 100644
index 0000000000..61418d7a79
--- /dev/null
+++ b/tests/topotests/bgp_lu_topo1/test_bgp_lu.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_lu.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by Volta Networks
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_lu.py: Test BGP LU label allocation
+"""
+
+import os
+import sys
+import json
+from functools import partial
+from time import sleep
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+#Basic scenario for BGP-LU. Nodes are directly connected.
+#Node 3 is advertising many routes to 2, which advertises them
+#as BGP-LU to 1; this way we get routes with actual labels, as
+#opposed to implicit-null routes in the 2-node case.
+#
+# AS1 BGP-LU AS2 iBGP AS2
+#+-----+ +-----+ +-----+
+#| |.1 .2| |.2 .3| |
+#| 1 +----------------+ 2 +-----------------+ 3 |
+#| | 10.0.0.0/24 | | 10.0.1.0/24 | |
+#+-----+ +-----+ +-----+
+
+class TemplateTopo(Topo):
+ "Test topology builder"
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function's only purpose is to define the allocation and relationships
+ # between routers, switches and hosts.
+ #
+ #
+ # Create routers
+ tgen.add_router("R1")
+ tgen.add_router("R2")
+ tgen.add_router("R3")
+
+ # R1-R2
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["R1"])
+ switch.add_link(tgen.gears["R2"])
+
+ # R2-R3
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["R2"])
+ switch.add_link(tgen.gears["R3"])
+
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+ tgen.start_topology()
+
+ # This is a sample of configuration loading.
+ router_list = tgen.routers()
+
+ # For all registered routers, load the zebra configuration file
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ # After loading the configurations, this function loads configured daemons.
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+def check_labelpool(router):
+ json_file = "{}/{}/labelpool.summ.json".format(CWD, router.name)
+ expected = json.loads(open(json_file).read())
+
+ test_func = partial(topotest.router_json_cmp, router, "show bgp labelpool summary json", expected)
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assertmsg = '"{}" JSON output mismatches - Did not converge'.format(router.name)
+ assert result is None, assertmsg
+
+def test_converge_bgplu():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ #tgen.mininet_cli();
+ r1 = tgen.gears["R1"]
+ r2 = tgen.gears["R2"]
+
+ check_labelpool(r1)
+ check_labelpool(r2)
+
+def test_clear_bgplu():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ #tgen.mininet_cli();
+ r1 = tgen.gears["R1"]
+ r2 = tgen.gears["R2"]
+
+ r1.vtysh_cmd("clear bgp 10.0.0.2")
+ check_labelpool(r1)
+ check_labelpool(r2)
+
+ r2.vtysh_cmd("clear bgp 10.0.1.3")
+ check_labelpool(r1)
+ check_labelpool(r2)
+
+ r1.vtysh_cmd("clear bgp 10.0.0.2")
+ r2.vtysh_cmd("clear bgp 10.0.1.3")
+ check_labelpool(r1)
+ check_labelpool(r2)
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index b6ea6745e8..51ce59c477 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -1467,6 +1467,10 @@ static int fpm_nl_process(struct zebra_dplane_provider *prov)
thread_add_timer(fnc->fthread->master, fpm_process_queue,
fnc, 0, &fnc->t_dequeue);
+ /* Ensure dataplane thread is rescheduled if we hit the work limit */
+ if (counter >= limit)
+ dplane_provider_work_ready();
+
return 0;
}
diff --git a/zebra/label_manager.c b/zebra/label_manager.c
index ef51669022..2634a333ee 100644
--- a/zebra/label_manager.c
+++ b/zebra/label_manager.c
@@ -306,7 +306,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
{
struct label_manager_chunk *lmc;
struct listnode *node;
- uint32_t prev_end = 0;
+ uint32_t prev_end = MPLS_LABEL_UNRESERVED_MIN;
/* handle chunks request with a specific base label */
if (base != MPLS_LABEL_BASE_ANY)
@@ -328,8 +328,7 @@ assign_label_chunk(uint8_t proto, unsigned short instance, uint32_t session_id,
}
/* check if we hadve a "hole" behind us that we can squeeze into
*/
- if ((lmc->start > prev_end)
- && (lmc->start - prev_end >= size)) {
+ if ((lmc->start > prev_end) && (lmc->start - prev_end > size)) {
lmc = create_label_chunk(proto, instance, session_id,
keep, prev_end + 1,
prev_end + size);
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index ddab2b8742..da7f4cf64e 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -4537,6 +4537,7 @@ static int dplane_thread_loop(struct thread *event)
struct zebra_dplane_ctx *ctx, *tctx;
int limit, counter, error_counter;
uint64_t curr, high;
+ bool reschedule = false;
/* Capture work limit per cycle */
limit = zdplane_info.dg_updates_per_cycle;
@@ -4673,6 +4674,9 @@ static int dplane_thread_loop(struct thread *event)
dplane_provider_unlock(prov);
+ if (counter >= limit)
+ reschedule = true;
+
if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
zlog_debug("dplane dequeues %d completed work from provider %s",
counter, dplane_provider_get_name(prov));
@@ -4683,6 +4687,13 @@ static int dplane_thread_loop(struct thread *event)
DPLANE_UNLOCK();
}
+ /*
+ * We hit the work limit while processing at least one provider's
+ * output queue - ensure we come back and finish it.
+ */
+ if (reschedule)
+ dplane_provider_work_ready();
+
/* After all providers have been serviced, enqueue any completed
* work and any errors back to zebra so it can process the results.
*/
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index c22766cad5..697a6eecf1 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -1992,7 +1992,10 @@ static void zevpn_add_to_l3vni_list(struct hash_bucket *bucket, void *ctxt)
}
/*
- * handle transition of vni from l2 to l3 and vice versa
+ * Handle transition of vni from l2 to l3 and vice versa.
+ * This function handles only the L2VNI add/delete part of
+ * the above transition.
+ * L3VNI add/delete is handled by the calling functions.
*/
static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
int add)
@@ -2033,11 +2036,71 @@ static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
return -1;
}
} else {
- /* TODO_MITESH: This needs to be thought through. We don't have
- * enough information at this point to reprogram the vni as
- * l2-vni. One way is to store the required info in l3-vni and
- * used it solely for this purpose
- */
+ struct zebra_ns *zns;
+ struct route_node *rn;
+ struct interface *ifp;
+ struct zebra_if *zif;
+ struct zebra_l2info_vxlan *vxl;
+ struct interface *vlan_if;
+ bool found = false;
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("Adding L2-VNI %u - transition from L3-VNI",
+ vni);
+
+ /* Find VxLAN interface for this VNI. */
+ zns = zebra_ns_lookup(NS_DEFAULT);
+ for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) {
+ ifp = (struct interface *)rn->info;
+ if (!ifp)
+ continue;
+ zif = ifp->info;
+ if (!zif || zif->zif_type != ZEBRA_IF_VXLAN)
+ continue;
+
+ vxl = &zif->l2info.vxl;
+ if (vxl->vni == vni) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_err(
+ "Adding L2-VNI - Failed to find VxLAN interface for VNI %u",
+ vni);
+ return -1;
+ }
+
+ /* Create VNI hash entry for L2VNI */
+ zevpn = zebra_evpn_lookup(vni);
+ if (zevpn)
+ return 0;
+
+ zevpn = zebra_evpn_add(vni);
+ if (!zevpn) {
+ flog_err(EC_ZEBRA_VNI_ADD_FAILED,
+ "Adding L2-VNI - Failed to add VNI hash, VNI %u",
+ vni);
+
+ return -1;
+ }
+
+ /* Find bridge interface for the VNI */
+ vlan_if = zvni_map_to_svi(vxl->access_vlan,
+ zif->brslave_info.br_if);
+ if (vlan_if)
+ zevpn->vrf_id = vlan_if->vrf_id;
+
+ zevpn->vxlan_if = ifp;
+ zevpn->local_vtep_ip = vxl->vtep_ip;
+
+ /* Inform BGP if the VNI is up and mapped to a bridge. */
+ if (if_is_operative(ifp) && zif->brslave_info.br_if) {
+ zebra_evpn_send_add_to_client(zevpn);
+ zebra_evpn_read_mac_neigh(zevpn, ifp);
+ }
}
return 0;
@@ -5201,6 +5264,7 @@ int zebra_vxlan_process_vrf_vni_cmd(struct zebra_vrf *zvrf, vni_t vni,
if (add) {
+ /* Remove L2VNI if present */
zebra_vxlan_handle_vni_transition(zvrf, vni, add);
/* check if the vni is already present under zvrf */
@@ -5295,6 +5359,7 @@ int zebra_vxlan_process_vrf_vni_cmd(struct zebra_vrf *zvrf, vni_t vni,
zvrf->l3vni = 0;
zl3vni_del(zl3vni);
+ /* Add L2VNI for this VNI */
zebra_vxlan_handle_vni_transition(zvrf, vni, add);
}
return 0;