-rw-r--r--  bgpd/bgp_community.c | 58
-rw-r--r--  bgpd/bgp_evpn.c | 2
-rw-r--r--  bgpd/bgp_fsm.c | 12
-rw-r--r--  bgpd/bgp_nht.c | 3
-rw-r--r--  bgpd/bgp_packet.c | 26
-rw-r--r--  bgpd/bgp_routemap.c | 8
-rw-r--r--  bgpd/bgp_vty.c | 17
-rw-r--r--  bgpd/bgp_zebra.c | 29
-rw-r--r--  bgpd/rfapi/rfapi_import.c | 3
-rw-r--r--  doc/developer/topotests.rst | 25
-rw-r--r--  doc/user/pim.rst | 40
-rw-r--r--  doc/user/rpki.rst | 4
-rw-r--r--  isisd/isis_pfpacket.c | 29
-rw-r--r--  isisd/isis_route.c | 8
-rw-r--r--  isisd/isis_vty_fabricd.c | 2
-rw-r--r--  isisd/isisd.c | 8
-rw-r--r--  lib/command.c | 14
-rw-r--r--  lib/command_match.c | 4
-rw-r--r--  lib/debug.c | 27
-rw-r--r--  lib/debug.h | 14
-rw-r--r--  lib/libfrr.c | 8
-rw-r--r--  lib/linklist.c | 61
-rw-r--r--  lib/linklist.h | 37
-rw-r--r--  lib/log.c | 2
-rw-r--r--  lib/nexthop.c | 206
-rw-r--r--  lib/nexthop.h | 9
-rw-r--r--  lib/northbound_cli.c | 2
-rw-r--r--  lib/plist.c | 38
-rw-r--r--  lib/prefix.c | 2
-rw-r--r--  lib/table.h | 2
-rw-r--r--  lib/vty.c | 25
-rw-r--r--  ospfd/ospf_te.c | 2
-rw-r--r--  pbrd/pbr_nht.c | 2
-rw-r--r--  pimd/pim_assert.c | 2
-rw-r--r--  pimd/pim_bsm.c | 1404
-rw-r--r--  pimd/pim_bsm.h | 198
-rw-r--r--  pimd/pim_cmd.c | 962
-rw-r--r--  pimd/pim_cmd.h | 2
-rw-r--r--  pimd/pim_hello.c | 13
-rw-r--r--  pimd/pim_iface.c | 4
-rw-r--r--  pimd/pim_iface.h | 7
-rw-r--r--  pimd/pim_instance.c | 5
-rw-r--r--  pimd/pim_instance.h | 6
-rw-r--r--  pimd/pim_join.c | 6
-rw-r--r--  pimd/pim_msdp.c | 2
-rw-r--r--  pimd/pim_msg.c | 3
-rw-r--r--  pimd/pim_msg.h | 19
-rw-r--r--  pimd/pim_nht.c | 176
-rw-r--r--  pimd/pim_nht.h | 15
-rw-r--r--  pimd/pim_pim.c | 10
-rw-r--r--  pimd/pim_register.c | 4
-rw-r--r--  pimd/pim_rp.c | 339
-rw-r--r--  pimd/pim_rp.h | 23
-rw-r--r--  pimd/pim_rpf.c | 2
-rw-r--r--  pimd/pim_upstream.c | 17
-rw-r--r--  pimd/pim_upstream.h | 3
-rw-r--r--  pimd/pim_vty.c | 9
-rw-r--r--  pimd/pim_vxlan.c | 2
-rw-r--r--  pimd/pimd.h | 6
-rw-r--r--  pimd/subdir.am | 2
-rw-r--r--  ripngd/ripngd.c | 14
-rw-r--r--  snapcraft/README.snap_build.md | 8
-rw-r--r--  snapcraft/README.usage.md | 2
-rw-r--r--  snapcraft/defaults/fabricd.conf.default | 0
-rw-r--r--  snapcraft/scripts/Makefile | 1
-rw-r--r--  snapcraft/scripts/bgpd-service | 2
-rw-r--r--  snapcraft/scripts/fabricd-service | 13
-rw-r--r--  snapcraft/snapcraft.yaml.in | 51
-rw-r--r--  staticd/static_routes.c | 2
-rw-r--r--  vtysh/vtysh.c | 20
-rw-r--r--  zebra/ioctl.c | 4
-rw-r--r--  zebra/ioctl_solaris.c | 2
-rw-r--r--  zebra/kernel_socket.c | 5
-rw-r--r--  zebra/rib.h | 15
-rw-r--r--  zebra/rt_netlink.c | 2
-rw-r--r--  zebra/subdir.am | 2
-rw-r--r--  zebra/zebra_dplane.c | 325
-rw-r--r--  zebra/zebra_dplane.h | 65
-rw-r--r--  zebra/zebra_mpls.c | 213
-rw-r--r--  zebra/zebra_mpls.h | 3
-rw-r--r--  zebra/zebra_nhg.c | 511
-rw-r--r--  zebra/zebra_nhg.h | 29
-rw-r--r--  zebra/zebra_rib.c | 1053
-rw-r--r--  zebra/zebra_vty.c | 26
84 files changed, 5360 insertions, 978 deletions
diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c
index 82762072df..6fc52ff9e0 100644
--- a/bgpd/bgp_community.c
+++ b/bgpd/bgp_community.c
@@ -205,7 +205,6 @@ static void set_community_string(struct community *com, bool make_json)
{
int i;
char *str;
- char *pnt;
int len;
int first;
uint32_t comval;
@@ -297,7 +296,7 @@ static void set_community_string(struct community *com, bool make_json)
}
/* Allocate memory. */
- str = pnt = XMALLOC(MTYPE_COMMUNITY_STR, len);
+ str = XCALLOC(MTYPE_COMMUNITY_STR, len);
first = 1;
/* Fill in string. */
@@ -308,12 +307,11 @@ static void set_community_string(struct community *com, bool make_json)
if (first)
first = 0;
else
- *pnt++ = ' ';
+ strlcat(str, " ", len);
switch (comval) {
case COMMUNITY_INTERNET:
- strcpy(pnt, "internet");
- pnt += strlen("internet");
+ strlcat(str, "internet", len);
if (make_json) {
json_string =
json_object_new_string("internet");
@@ -322,8 +320,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_GSHUT:
- strcpy(pnt, "graceful-shutdown");
- pnt += strlen("graceful-shutdown");
+ strlcat(str, "graceful-shutdown", len);
if (make_json) {
json_string = json_object_new_string(
"gracefulShutdown");
@@ -332,8 +329,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_ACCEPT_OWN:
- strcpy(pnt, "accept-own");
- pnt += strlen("accept-own");
+ strlcat(str, "accept-own", len);
if (make_json) {
json_string = json_object_new_string(
"acceptown");
@@ -342,8 +338,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_ROUTE_FILTER_TRANSLATED_v4:
- strcpy(pnt, "route-filter-translated-v4");
- pnt += strlen("route-filter-translated-v4");
+ strlcat(str, "route-filter-translated-v4", len);
if (make_json) {
json_string = json_object_new_string(
"routeFilterTranslatedV4");
@@ -352,8 +347,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_ROUTE_FILTER_v4:
- strcpy(pnt, "route-filter-v4");
- pnt += strlen("route-filter-v4");
+ strlcat(str, "route-filter-v4", len);
if (make_json) {
json_string = json_object_new_string(
"routeFilterV4");
@@ -362,8 +356,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_ROUTE_FILTER_TRANSLATED_v6:
- strcpy(pnt, "route-filter-translated-v6");
- pnt += strlen("route-filter-translated-v6");
+ strlcat(str, "route-filter-translated-v6", len);
if (make_json) {
json_string = json_object_new_string(
"routeFilterTranslatedV6");
@@ -372,8 +365,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_ROUTE_FILTER_v6:
- strcpy(pnt, "route-filter-v6");
- pnt += strlen("route-filter-v6");
+ strlcat(str, "route-filter-v6", len);
if (make_json) {
json_string = json_object_new_string(
"routeFilterV6");
@@ -382,8 +374,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_LLGR_STALE:
- strcpy(pnt, "llgr-stale");
- pnt += strlen("llgr-stale");
+ strlcat(str, "llgr-stale", len);
if (make_json) {
json_string = json_object_new_string(
"llgrStale");
@@ -392,8 +383,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_NO_LLGR:
- strcpy(pnt, "no-llgr");
- pnt += strlen("no-llgr");
+ strlcat(str, "no-llgr", len);
if (make_json) {
json_string = json_object_new_string(
"noLlgr");
@@ -402,8 +392,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_ACCEPT_OWN_NEXTHOP:
- strcpy(pnt, "accept-own-nexthop");
- pnt += strlen("accept-own-nexthop");
+ strlcat(str, "accept-own-nexthop", len);
if (make_json) {
json_string = json_object_new_string(
"acceptownnexthop");
@@ -412,8 +401,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_BLACKHOLE:
- strcpy(pnt, "blackhole");
- pnt += strlen("blackhole");
+ strlcat(str, "blackhole", len);
if (make_json) {
json_string = json_object_new_string(
"blackhole");
@@ -422,8 +410,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_NO_EXPORT:
- strcpy(pnt, "no-export");
- pnt += strlen("no-export");
+ strlcat(str, "no-export", len);
if (make_json) {
json_string =
json_object_new_string("noExport");
@@ -432,8 +419,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_NO_ADVERTISE:
- strcpy(pnt, "no-advertise");
- pnt += strlen("no-advertise");
+ strlcat(str, "no-advertise", len);
if (make_json) {
json_string =
json_object_new_string("noAdvertise");
@@ -442,8 +428,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_LOCAL_AS:
- strcpy(pnt, "local-AS");
- pnt += strlen("local-AS");
+ strlcat(str, "local-AS", len);
if (make_json) {
json_string = json_object_new_string("localAs");
json_object_array_add(json_community_list,
@@ -451,8 +436,7 @@ static void set_community_string(struct community *com, bool make_json)
}
break;
case COMMUNITY_NO_PEER:
- strcpy(pnt, "no-peer");
- pnt += strlen("no-peer");
+ strlcat(str, "no-peer", len);
if (make_json) {
json_string = json_object_new_string("noPeer");
json_object_array_add(json_community_list,
@@ -462,17 +446,17 @@ static void set_community_string(struct community *com, bool make_json)
default:
as = (comval >> 16) & 0xFFFF;
val = comval & 0xFFFF;
- sprintf(pnt, "%u:%d", as, val);
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%u:%d", as, val);
+ strlcat(str, buf, len);
if (make_json) {
- json_string = json_object_new_string(pnt);
+ json_string = json_object_new_string(buf);
json_object_array_add(json_community_list,
json_string);
}
- pnt += strlen(pnt);
break;
}
}
- *pnt = '\0';
if (make_json) {
json_object_string_add(com->json, "string", str);
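The rewrite above relies on XCALLOC returning zeroed memory, so str starts out as an empty string and the first strlcat() appends cleanly while every later append stays bounded by len. A minimal standalone sketch of that pattern outside FRR (the function name and values are hypothetical; strlcat() is assumed to come from the platform libc, libbsd, or a compat shim such as FRR's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: render "asn:value" pairs into one space-separated
 * string of at most len - 1 characters. */
static char *communities_to_str(const uint32_t *vals, int count, size_t len)
{
	/* calloc() guarantees str[0] == '\0', so the first strlcat()
	 * appends to an empty string, not to uninitialized memory. */
	char *str = calloc(1, len);
	char buf[32];

	for (int i = 0; i < count; i++) {
		if (i > 0)
			strlcat(str, " ", len);
		snprintf(buf, sizeof(buf), "%u:%u",
			 (vals[i] >> 16) & 0xFFFF, vals[i] & 0xFFFF);
		strlcat(str, buf, len);	/* truncates instead of overflowing */
	}
	return str;	/* caller frees */
}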
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 112e4b836c..c4b2a606c5 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -2472,7 +2472,7 @@ static int install_evpn_route_entry_in_vrf(struct bgp *bgp_vrf,
if (bgp_debug_zebra(NULL)) {
zlog_debug(
- "installing evpn prefix %s as ip prefix %s in vrf %s",
+ "import evpn prefix %s as ip prefix %s in vrf %s",
prefix2str(evp, buf, sizeof(buf)),
prefix2str(pp, buf1, sizeof(buf)),
vrf_id_to_name(bgp_vrf->vrf_id));
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 12ae1f841a..dd765731dc 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -949,9 +949,15 @@ void bgp_fsm_change_status(struct peer *peer, int status)
else if ((peer->status == Established) && (status != Established))
bgp->established_peers--;
- if (BGP_DEBUG(neighbor_events, NEIGHBOR_EVENTS))
- zlog_debug("%s : vrf %u, established_peers %u", __func__,
- bgp->vrf_id, bgp->established_peers);
+ if (bgp_debug_neighbor_events(peer)) {
+ struct vrf *vrf = vrf_lookup_by_id(bgp->vrf_id);
+
+ zlog_debug("%s : vrf %s(%u), Status: %s established_peers %u", __func__,
+ vrf ? vrf->name : "Unknown", bgp->vrf_id,
+ lookup_msg(bgp_status_msg, status, NULL),
+ bgp->established_peers);
+ }
+
/* Set to router ID to the value provided by RIB if there are no peers
* in the established state and peer count did not change
*/
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 7e721db49d..fdfa15b445 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -474,8 +474,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
continue;
for (oldnh = bnc->nexthop; oldnh; oldnh = oldnh->next)
- if (nexthop_same_no_recurse(oldnh, nexthop) &&
- nexthop_labels_match(oldnh, nexthop))
+ if (nexthop_same(oldnh, nexthop))
break;
if (!oldnh)
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index b5934fb56e..655a4745cb 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -709,12 +709,15 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
XMALLOC(MTYPE_TMP, bgp_notify.length * 3);
for (i = 0; i < bgp_notify.length; i++)
if (first) {
- sprintf(c, " %02x", data[i]);
- strcat(bgp_notify.data, c);
+ snprintf(c, sizeof(c), " %02x",
+ data[i]);
+ strlcat(bgp_notify.data, c,
+ bgp_notify.length);
} else {
first = 1;
- sprintf(c, "%02x", data[i]);
- strcpy(bgp_notify.data, c);
+ snprintf(c, sizeof(c), "%02x", data[i]);
+ strlcpy(bgp_notify.data, c,
+ bgp_notify.length);
}
}
bgp_notify_print(peer, &bgp_notify, "sending");
@@ -1700,14 +1703,16 @@ static int bgp_notify_receive(struct peer *peer, bgp_size_t size)
XMALLOC(MTYPE_TMP, bgp_notify.length * 3);
for (i = 0; i < bgp_notify.length; i++)
if (first) {
- sprintf(c, " %02x",
+ snprintf(c, sizeof(c), " %02x",
stream_getc(peer->curr));
- strcat(bgp_notify.data, c);
+ strlcat(bgp_notify.data, c,
+ bgp_notify.length);
} else {
first = 1;
- sprintf(c, "%02x",
- stream_getc(peer->curr));
- strcpy(bgp_notify.data, c);
+ snprintf(c, sizeof(c), "%02x",
+ stream_getc(peer->curr));
+ strlcpy(bgp_notify.data, c,
+ bgp_notify.length);
}
bgp_notify.raw_data = (uint8_t *)peer->notify.data;
}
@@ -2299,6 +2304,9 @@ int bgp_process_packet(struct thread *thread)
__FUNCTION__, peer->host);
break;
default:
+ /* Suppress uninitialized variable warning */
+ mprc = 0;
+ (void)mprc;
/*
* The message type should have been sanitized before
* we ever got here. Receipt of a message with an
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index b0ae9d78d1..c8386e6cbe 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -4222,10 +4222,10 @@ DEFUN (set_community,
str = community_str(com, false);
if (additive) {
- argstr = XCALLOC(MTYPE_TMP,
- strlen(str) + strlen(" additive") + 1);
- strcpy(argstr, str);
- strcpy(argstr + strlen(str), " additive");
+ size_t argstr_sz = strlen(str) + strlen(" additive") + 1;
+ argstr = XCALLOC(MTYPE_TMP, argstr_sz);
+ strlcpy(argstr, str, argstr_sz);
+ strlcat(argstr, " additive", argstr_sz);
ret = generic_set_add(vty, VTY_GET_CONTEXT(route_map_index),
"community", argstr);
XFREE(MTYPE_TMP, argstr);
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index ae51f1d780..6451c8d8ed 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -1129,10 +1129,25 @@ DEFUN (no_router_bgp,
}
if (bgp->l3vni) {
- vty_out(vty, "%% Please unconfigure l3vni %u",
+ vty_out(vty, "%% Please unconfigure l3vni %u\n",
bgp->l3vni);
return CMD_WARNING_CONFIG_FAILED;
}
+
+ /* Cannot delete default instance if vrf instances exist */
+ if (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT) {
+ struct listnode *node;
+ struct bgp *tmp_bgp;
+
+ for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, tmp_bgp)) {
+ if (tmp_bgp->inst_type
+ == BGP_INSTANCE_TYPE_VRF) {
+ vty_out(vty,
+ "%% Cannot delete default BGP instance. Dependent VRF instances exist\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ }
+ }
}
bgp_delete(bgp);
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index a45480fdc2..5e9fc57f59 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -1435,15 +1435,29 @@ void bgp_zebra_announce(struct bgp_node *rn, struct prefix *p,
for (i = 0; i < api.nexthop_num; i++) {
api_nh = &api.nexthops[i];
- if (api_nh->type == NEXTHOP_TYPE_IFINDEX)
+ switch (api_nh->type) {
+ case NEXTHOP_TYPE_IFINDEX:
nh_buf[0] = '\0';
- else {
- if (api_nh->type == NEXTHOP_TYPE_IPV4)
- nh_family = AF_INET;
- else
- nh_family = AF_INET6;
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ nh_family = AF_INET;
inet_ntop(nh_family, &api_nh->gate, nh_buf,
sizeof(nh_buf));
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ nh_family = AF_INET6;
+ inet_ntop(nh_family, &api_nh->gate, nh_buf,
+ sizeof(nh_buf));
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ strlcpy(nh_buf, "blackhole", sizeof(nh_buf));
+ break;
+ default:
+ /* Note: add new nexthop case */
+ assert(0);
+ break;
}
label_buf[0] = '\0';
@@ -2960,6 +2974,9 @@ void bgp_zebra_announce_default(struct bgp *bgp, struct nexthop *nh,
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
api_nh = &api.nexthops[0];
+ api.distance = ZEBRA_EBGP_DISTANCE_DEFAULT;
+ SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE);
+
/* redirect IP */
if (nh->gate.ipv4.s_addr) {
char buff[PREFIX_STRLEN];
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index ad0900c2b8..b6d32d36ea 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -4111,6 +4111,9 @@ static void rfapiProcessPeerDownRt(struct peer *peer,
timer_service_func = rfapiWithdrawTimerEncap;
break;
default:
+ /* Suppress uninitialized variable warning */
+ rt = NULL;
+ timer_service_func = NULL;
assert(0);
}
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 09f12ec436..e12bc37256 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -145,30 +145,23 @@ the following env variable can be set::
export TOPOTESTS_CHECK_STDERR=Yes
-(The value doesn't matter at this time. The check is if the env variable exists
-or not) There is no pass/fail on this reporting. The Output will be reported to
-the console::
-
- export TOPOTESTS_CHECK_MEMLEAK="/home/mydir/memleak_"
-
-This will enable the check and output to console and the writing of the
-information to files with the given prefix (followed by testname), ie
-:file:`/home/mydir/memcheck_test_bgp_multiview_topo1.txt` in case of a memory
-leak.
+(The value doesn't matter at this time. The check is whether the env
+variable exists or not.) There is no pass/fail on this reporting; the
+output will be reported to the console.
Collect Memory Leak Information
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-FRR processes have the capabilities to report remaining memory allocations upon
-exit. To enable the reporting of the memory, define an environment variable
+FRR processes can report unfreed memory allocations upon exit. To
+enable the reporting of memory leaks, define an environment variable
``TOPOTESTS_CHECK_MEMLEAK`` with the file prefix, i.e.::
export TOPOTESTS_CHECK_MEMLEAK="/home/mydir/memleak_"
-This will enable the check and output to console and the writing of the
-information to files with the given prefix (followed by testname), ie
-:file:`/home/mydir/memcheck_test_bgp_multiview_topo1.txt` in case of a memory
-leak.
+This enables the check, reports to the console, and writes the
+information to files with the given prefix (followed by the test name),
+e.g. :file:`/home/mydir/memcheck_test_bgp_multiview_topo1.txt` in case
+of a memory leak.
Running Topotests with AddressSanitizer
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/user/pim.rst b/doc/user/pim.rst
index 5148d3baff..d05127059b 100644
--- a/doc/user/pim.rst
+++ b/doc/user/pim.rst
@@ -166,6 +166,20 @@ is in a vrf, enter the interface command with the vrf keyword at the end.
Turns on BFD support for PIM for this interface.
+.. index:: ip pim bsm
+.. clicmd:: ip pim bsm
+
+ Tell pim that we would like to use this interface to process bootstrap
+ messages. This is enabled by default. The 'no' form of this command
+ restricts bsm messages on this interface.
+
+.. index:: ip pim unicast-bsm
+.. clicmd:: ip pim unicast-bsm
+
+ Tell pim that we would like to allow this interface to process unicast
+ bootstrap messages. This is enabled by default. The 'no' form of this
+ command restricts processing of unicast bsm messages on this interface.
+
.. index:: ip pim drpriority (1-4294967295)
.. clicmd:: ip pim drpriority (1-4294967295)
@@ -308,6 +322,12 @@ cause great confusion.
Display information about installed into the kernel S,G mroutes and in
addition display data about packet flow for the mroutes.
+.. index:: show ip mroute summary
+.. clicmd:: show ip mroute summary
+
+ Display total number of S,G mroutes and number of S,G mroutes installed
+ into the kernel.
+
.. index:: show ip pim assert
.. clicmd:: show ip pim assert
@@ -407,6 +427,21 @@ cause great confusion.
Display upstream information for S,G's and the RPF data associated with them.
+.. index:: show ip pim bsr
+.. clicmd:: show ip pim bsr
+
+ Display current bsr, its uptime and last received bsm age.
+
+.. index:: show ip pim bsrp-info
+.. clicmd:: show ip pim bsrp-info
+
+ Display group-to-rp mappings received from E-BSR.
+
+.. index:: show ip pim bsm-database
+.. clicmd:: show ip pim bsm-database
+
+ Display all fragments of the stored bootstrap message in a user readable format.
+
.. index:: show ip rpf
.. clicmd:: show ip rpf
@@ -470,6 +505,11 @@ the config was written out.
This traces pim code and how it is running.
+.. index:: debug pim bsm
+.. clicmd:: debug pim bsm
+
+ This turns on debugging for BSR message processing.
+
.. index:: debug pim zebra
.. clicmd:: debug pim zebra
diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst
index 295a26fda9..ca6b46d3cf 100644
--- a/doc/user/rpki.rst
+++ b/doc/user/rpki.rst
@@ -188,10 +188,6 @@ Validating BGP Updates
Create a clause for a route map to match prefixes with the specified RPKI
state.
- **Note** that the matching of invalid prefixes requires that invalid
- prefixes are considered for best path selection, i.e.,
- ``bgp bestpath prefix-validate disallow-invalid`` is not enabled.
-
In the following example, the router prefers valid routes over invalid
prefixes because invalid routes have a lower local preference.
diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c
index 824acd0ff8..ea66e6950e 100644
--- a/isisd/isis_pfpacket.c
+++ b/isisd/isis_pfpacket.c
@@ -73,7 +73,6 @@ uint8_t ALL_ISS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x05};
uint8_t ALL_ESS[6] = {0x09, 0x00, 0x2B, 0x00, 0x00, 0x04};
static uint8_t discard_buff[8192];
-static uint8_t sock_buff[8192];
/*
* if level is 0 we are joining p2p multicast
@@ -277,19 +276,22 @@ int isis_recv_pdu_bcast(struct isis_circuit *circuit, uint8_t *ssnpa)
return ISIS_WARNING;
}
- /* on lan we have to read to the static buff first */
- bytesread = recvfrom(circuit->fd, sock_buff, sizeof(sock_buff),
- MSG_DONTWAIT, (struct sockaddr *)&s_addr,
- (socklen_t *)&addr_len);
+ /* Ensure that we have enough space for a pdu padded to fill the mtu */
+ unsigned int max_size =
+ circuit->interface->mtu > circuit->interface->mtu6
+ ? circuit->interface->mtu
+ : circuit->interface->mtu6;
+ uint8_t temp_buff[max_size];
+ bytesread =
+ recvfrom(circuit->fd, temp_buff, max_size, MSG_DONTWAIT,
+ (struct sockaddr *)&s_addr, (socklen_t *)&addr_len);
if (bytesread < 0) {
- zlog_warn("isis_recv_pdu_bcast(): recvfrom() failed");
+ zlog_warn("%s: recvfrom() failed", __func__);
return ISIS_WARNING;
}
-
/* then we lose the LLC */
- stream_write(circuit->rcv_stream, sock_buff + LLC_LEN,
+ stream_write(circuit->rcv_stream, temp_buff + LLC_LEN,
bytesread - LLC_LEN);
-
memcpy(ssnpa, &s_addr.sll_addr, s_addr.sll_halen);
return ISIS_OK;
@@ -337,6 +339,7 @@ int isis_send_pdu_bcast(struct isis_circuit *circuit, int level)
{
struct msghdr msg;
struct iovec iov[2];
+ char temp_buff[LLC_LEN];
/* we need to do the LLC in here because of P2P circuits, which will
* not need it
@@ -361,16 +364,16 @@ int isis_send_pdu_bcast(struct isis_circuit *circuit, int level)
/* on a broadcast circuit */
/* first we put the LLC in */
- sock_buff[0] = 0xFE;
- sock_buff[1] = 0xFE;
- sock_buff[2] = 0x03;
+ temp_buff[0] = 0xFE;
+ temp_buff[1] = 0xFE;
+ temp_buff[2] = 0x03;
memset(&msg, 0, sizeof(msg));
msg.msg_name = &sa;
msg.msg_namelen = sizeof(struct sockaddr_ll);
msg.msg_iov = iov;
msg.msg_iovlen = 2;
- iov[0].iov_base = sock_buff;
+ iov[0].iov_base = temp_buff;
iov[0].iov_len = LLC_LEN;
iov[1].iov_base = circuit->snd_stream->data;
iov[1].iov_len = stream_get_endp(circuit->snd_stream);
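The receive-path change above replaces a fixed 8 KiB static buffer with a stack buffer sized to the larger of the interface's v4/v6 MTUs, so a fully padded PDU always fits. A condensed sketch of just that sizing pattern (identifiers are hypothetical; only the VLA-plus-recvfrom shape mirrors the diff):

#include <stdint.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t read_pdu(int fd, unsigned int mtu4, unsigned int mtu6)
{
	/* Large enough for a PDU padded to fill either MTU. */
	unsigned int max_size = mtu4 > mtu6 ? mtu4 : mtu6;
	uint8_t buf[max_size];	/* C99 variable-length array */

	return recvfrom(fd, buf, max_size, MSG_DONTWAIT, NULL, NULL);
}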
diff --git a/isisd/isis_route.c b/isisd/isis_route.c
index 82005c911e..281eaf11bc 100644
--- a/isisd/isis_route.c
+++ b/isisd/isis_route.c
@@ -543,7 +543,8 @@ void isis_route_verify_merge(struct isis_area *area,
ISIS_ROUTE_FLAG_ZEBRA_SYNCED
);
continue;
- } else {
+ } else if (CHECK_FLAG(rinfo->flag,
+ ISIS_ROUTE_FLAG_ACTIVE)) {
/* Clear the ZEBRA_SYNCED flag on the L1
* route when L2 wins, otherwise L1
* won't get reinstalled when it
@@ -553,6 +554,11 @@ void isis_route_verify_merge(struct isis_area *area,
mrinfo->flag,
ISIS_ROUTE_FLAG_ZEBRA_SYNCED
);
+ } else if (
+ CHECK_FLAG(
+ mrinfo->flag,
+ ISIS_ROUTE_FLAG_ZEBRA_SYNCED)) {
+ continue;
}
}
mrnode->info = rnode->info;
diff --git a/isisd/isis_vty_fabricd.c b/isisd/isis_vty_fabricd.c
index 2476bd2552..431ad9712a 100644
--- a/isisd/isis_vty_fabricd.c
+++ b/isisd/isis_vty_fabricd.c
@@ -168,7 +168,7 @@ DEFUN (show_lsp_flooding,
area->area_tag : "null");
if (lspid) {
- struct isis_lsp *lsp = lsp_for_arg(head, lspid);
+ lsp = lsp_for_arg(head, lspid);
if (lsp)
lsp_print_flooding(vty, lsp);
diff --git a/isisd/isisd.c b/isisd/isisd.c
index 07be68d9ae..bee3b6deb5 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -272,7 +272,7 @@ int isis_area_destroy(const char *area_tag)
lsp_db_fini(&area->lspdb[1]);
/* invalidate and verify to delete all routes from zebra */
- isis_area_invalidate_routes(area, ISIS_LEVEL1 & ISIS_LEVEL2);
+ isis_area_invalidate_routes(area, area->is_type);
isis_area_verify_routes(area);
spftree_area_del(area);
@@ -738,11 +738,7 @@ DEFUN (clear_isis_neighbor_arg,
*/
void print_debug(struct vty *vty, int flags, int onoff)
{
- char onoffs[4];
- if (onoff)
- strcpy(onoffs, "on");
- else
- strcpy(onoffs, "off");
+ const char *onoffs = onoff ? "on" : "off";
if (flags & DEBUG_ADJ_PACKETS)
vty_out(vty,
diff --git a/lib/command.c b/lib/command.c
index 18426e0c51..5335969fbc 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -1760,10 +1760,10 @@ static int file_write_config(struct vty *vty)
dirfd = open(".", O_DIRECTORY | O_RDONLY);
/* if dirfd is invalid, directory sync fails, but we're still OK */
- config_file_sav = XMALLOC(
- MTYPE_TMP, strlen(config_file) + strlen(CONF_BACKUP_EXT) + 1);
- strcpy(config_file_sav, config_file);
- strcat(config_file_sav, CONF_BACKUP_EXT);
+ size_t config_file_sav_sz = strlen(config_file) + strlen(CONF_BACKUP_EXT) + 1;
+ config_file_sav = XMALLOC(MTYPE_TMP, config_file_sav_sz);
+ strlcpy(config_file_sav, config_file, config_file_sav_sz);
+ strlcat(config_file_sav, CONF_BACKUP_EXT, config_file_sav_sz);
config_file_tmp = XMALLOC(MTYPE_TMP, strlen(config_file) + 8);
@@ -2803,9 +2803,10 @@ void cmd_init(int terminal)
/* Each node's basic commands. */
install_element(VIEW_NODE, &show_version_cmd);
install_element(ENABLE_NODE, &show_startup_config_cmd);
- install_element(ENABLE_NODE, &debug_memstats_cmd);
if (terminal) {
+ install_element(ENABLE_NODE, &debug_memstats_cmd);
+
install_element(VIEW_NODE, &config_list_cmd);
install_element(VIEW_NODE, &config_exit_cmd);
install_element(VIEW_NODE, &config_quit_cmd);
@@ -2839,9 +2840,10 @@ void cmd_init(int terminal)
install_element(CONFIG_NODE, &domainname_cmd);
install_element(CONFIG_NODE, &no_domainname_cmd);
install_element(CONFIG_NODE, &frr_version_defaults_cmd);
- install_element(CONFIG_NODE, &debug_memstats_cmd);
if (terminal > 0) {
+ install_element(CONFIG_NODE, &debug_memstats_cmd);
+
install_element(CONFIG_NODE, &password_cmd);
install_element(CONFIG_NODE, &no_password_cmd);
install_element(CONFIG_NODE, &enable_password_cmd);
diff --git a/lib/command_match.c b/lib/command_match.c
index 8b34d1e3eb..9456e1585a 100644
--- a/lib/command_match.c
+++ b/lib/command_match.c
@@ -723,7 +723,7 @@ static enum match_type match_ipv4(const char *str)
if (str - sp > 3)
return no_match;
- strncpy(buf, sp, str - sp);
+ memcpy(buf, sp, str - sp);
if (atoi(buf) > 255)
return no_match;
@@ -774,7 +774,7 @@ static enum match_type match_ipv4_prefix(const char *str)
if (str - sp > 3)
return no_match;
- strncpy(buf, sp, str - sp);
+ memcpy(buf, sp, str - sp);
if (atoi(buf) > 255)
return no_match;
diff --git a/lib/debug.c b/lib/debug.c
index 72fd4648ee..3248ceb13b 100644
--- a/lib/debug.c
+++ b/lib/debug.c
@@ -18,29 +18,46 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <zebra.h>
+#include "typesafe.h"
#include "debug.h"
#include "command.h"
-static const struct debug_callbacks *callbacks;
+static struct debug_cb_list_head cb_head;
+
+DECLARE_LIST(debug_cb_list, struct debug_callbacks, item)
/* All code in this section should be reentrant and MT-safe */
DEFUN_NOSH(debug_all, debug_all_cmd, "[no] debug all",
NO_STR DEBUG_STR "Toggle all debugging output\n")
{
+ struct debug_callbacks *cb;
+
bool set = !strmatch(argv[0]->text, "no");
uint32_t mode = DEBUG_NODE2MODE(vty->node);
- if (callbacks->debug_set_all)
- callbacks->debug_set_all(mode, set);
+ frr_each (debug_cb_list, &cb_head, cb)
+ cb->debug_set_all(mode, set);
+
return CMD_SUCCESS;
}
/* ------------------------------------------------------------------------- */
-void debug_init(const struct debug_callbacks *cb)
+void debug_init(struct debug_callbacks *cb)
+{
+ static bool inited = false;
+
+ if (!inited) {
+ inited = true;
+ debug_cb_list_init(&cb_head);
+ }
+
+ debug_cb_list_add_head(&cb_head, cb);
+}
+
+void debug_init_cli(void)
{
- callbacks = cb;
install_element(ENABLE_NODE, &debug_all_cmd);
install_element(CONFIG_NODE, &debug_all_cmd);
}
diff --git a/lib/debug.h b/lib/debug.h
index ace060d057..f25cd42691 100644
--- a/lib/debug.h
+++ b/lib/debug.h
@@ -84,6 +84,7 @@ struct debug {
const char *desc;
};
+PREDECL_LIST(debug_cb_list)
/*
* Callback set for debugging code.
*
@@ -93,6 +94,11 @@ struct debug {
*/
struct debug_callbacks {
/*
+ * Linked list of Callbacks to call
+ */
+ struct debug_cb_list_item item;
+
+ /*
* flags
* flags to set on debug flag fields
*
@@ -233,7 +239,13 @@ struct debug_callbacks {
*
* MT-Safe
*/
-void debug_init(const struct debug_callbacks *cb);
+void debug_init(struct debug_callbacks *cb);
+
+/*
+ * Install the CLI commands used to turn debugs on/off.
+ * Should only be called by libfrr.
+ */
+void debug_init_cli(void);
#ifdef __cplusplus
}
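Since debug_init() now appends to a list instead of overwriting a single stored callback pointer, several libraries can hook "[no] debug all" at once (northbound_cli.c below keeps registering this way). A minimal registration sketch; the my_* names are hypothetical, and only debug_set_all and debug_init() come from this change:

static void my_debug_set_all(uint32_t flags, bool set)
{
	/* set or clear this module's debug flags for the given mode(s) */
}

static struct debug_callbacks my_dbg_cbs = {
	.debug_set_all = my_debug_set_all,
};

void my_module_debug_init(void)
{
	/* Adds my_dbg_cbs to the internal debug_cb_list; "debug all"
	 * then walks every registered callback, not just the last one. */
	debug_init(&my_dbg_cbs);
}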
diff --git a/lib/libfrr.c b/lib/libfrr.c
index 5970e70a6b..15de96feee 100644
--- a/lib/libfrr.c
+++ b/lib/libfrr.c
@@ -39,6 +39,7 @@
#include "db.h"
#include "northbound_cli.h"
#include "northbound_db.h"
+#include "debug.h"
DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm))
DEFINE_KOOH(frr_early_fini, (), ())
@@ -80,8 +81,8 @@ static void opt_extend(const struct optspec *os)
{
const struct option *lo;
- strcat(comb_optstr, os->optstr);
- strcat(comb_helpstr, os->helpstr);
+ strlcat(comb_optstr, os->optstr, sizeof(comb_optstr));
+ strlcat(comb_helpstr, os->helpstr, sizeof(comb_helpstr));
for (lo = os->longopts; lo->name; lo++)
memcpy(comb_next_lo++, lo, sizeof(*lo));
}
@@ -654,6 +655,9 @@ struct thread_master *frr_init(void)
lib_error_init();
yang_init();
+
+ debug_init_cli();
+
nb_init(master, di->yang_modules, di->n_yang_modules);
if (nb_db_init() != NB_OK)
flog_warn(EC_LIB_NB_DATABASE,
diff --git a/lib/linklist.c b/lib/linklist.c
index 43bc709325..0d1efdf3aa 100644
--- a/lib/linklist.c
+++ b/lib/linklist.c
@@ -92,6 +92,46 @@ void listnode_add_head(struct list *list, void *val)
list->count++;
}
+bool listnode_add_sort_nodup(struct list *list, void *val)
+{
+ struct listnode *n;
+ struct listnode *new;
+ int ret;
+
+ assert(val != NULL);
+
+ if (list->cmp) {
+ for (n = list->head; n; n = n->next) {
+ ret = (*list->cmp)(val, n->data);
+ if (ret < 0) {
+ new = listnode_new();
+ new->data = val;
+
+ new->next = n;
+ new->prev = n->prev;
+
+ if (n->prev)
+ n->prev->next = new;
+ else
+ list->head = new;
+ n->prev = new;
+ list->count++;
+ return true;
+ }
+ /* found duplicate return false */
+ if (ret == 0)
+ return false;
+ }
+ }
+
+ new = listnode_new();
+ new->data = val;
+
+ LISTNODE_ATTACH(list, new);
+
+ return true;
+}
+
void listnode_add_sort(struct list *list, void *val)
{
struct listnode *n;
@@ -206,7 +246,7 @@ void listnode_move_to_tail(struct list *l, struct listnode *n)
LISTNODE_ATTACH(l, n);
}
-void listnode_delete(struct list *list, void *val)
+void listnode_delete(struct list *list, const void *val)
{
struct listnode *node = listnode_lookup(list, val);
@@ -242,6 +282,23 @@ void list_delete_all_node(struct list *list)
list->count = 0;
}
+void list_filter_out_nodes(struct list *list, bool (*cond)(void *data))
+{
+ struct listnode *node;
+ struct listnode *next;
+ void *data;
+
+ assert(list);
+
+ for (ALL_LIST_ELEMENTS(list, node, next, data)) {
+ if ((cond && cond(data)) || (!cond)) {
+ if (*list->del)
+ (*list->del)(data);
+ list_delete_node(list, node);
+ }
+ }
+}
+
void list_delete(struct list **list)
{
assert(*list);
@@ -250,7 +307,7 @@ void list_delete(struct list **list)
*list = NULL;
}
-struct listnode *listnode_lookup(struct list *list, void *data)
+struct listnode *listnode_lookup(struct list *list, const void *data)
{
struct listnode *node;
diff --git a/lib/linklist.h b/lib/linklist.h
index c2b289596d..d23d425d62 100644
--- a/lib/linklist.h
+++ b/lib/linklist.h
@@ -180,7 +180,7 @@ extern void listnode_move_to_tail(struct list *list, struct listnode *node);
* data
* data to insert into list
*/
-extern void listnode_delete(struct list *list, void *data);
+extern void listnode_delete(struct list *list, const void *data);
/*
* Find the listnode corresponding to an element in a list.
@@ -194,7 +194,7 @@ extern void listnode_delete(struct list *list, void *data);
* Returns:
* pointer to listnode storing the given data if found, NULL otherwise
*/
-extern struct listnode *listnode_lookup(struct list *list, void *data);
+extern struct listnode *listnode_lookup(struct list *list, const void *data);
/*
* Retrieve the element at the head of a list.
@@ -308,6 +308,39 @@ extern void list_delete_node(struct list *list, struct listnode *node);
*/
extern void list_add_list(struct list *list, struct list *add);
+/*
+ * Delete from a list all nodes which satisfy a condition.
+ * A node is deleted if the cond function returns true for its data.
+ * If the function pointer passed is NULL, all nodes are deleted.
+ *
+ * list
+ * list to operate on
+ * cond
+ *    function pointer which takes node data as input and returns true or false
+ */
+
+extern void list_filter_out_nodes(struct list *list, bool (*cond)(void *data));
+
+/*
+ * Insert a new element into a list, keeping it sorted, unless a
+ * duplicate element is already present. This assumes the input list is
+ * sorted; if it is unsorted, duplicates are only detected up to the
+ * position where the new element would be inserted.
+ *
+ * If list->cmp is set, this function is used to determine the position to
+ * insert the new element. If it is not set, this function is equivalent to
+ * listnode_add. A duplicate element is one for which cmp returns 0.
+ *
+ * Runtime is O(N).
+ *
+ * list
+ * list to operate on
+ *
+ * val
+ * element to add
+ */
+
+extern bool listnode_add_sort_nodup(struct list *list, void *val);
/* List iteration macro.
* Usage: for (ALL_LIST_ELEMENTS (...) { ... }
* It is safe to delete the listnode using this macro.
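A compact usage sketch for the two helpers documented above; the comparator, the condition, and the stored values are hypothetical:

#include <stdbool.h>
#include "linklist.h"

static int cmp_vals(void *a, void *b)
{
	return *(int *)a - *(int *)b;
}

static bool is_stale(void *data)
{
	return *(int *)data < 0;	/* drop nodes holding negative values */
}

void linklist_example(void)
{
	static int v1 = 5, v2 = -3;
	struct list *l = list_new();

	l->cmp = cmp_vals;

	listnode_add_sort_nodup(l, &v1);	/* inserted, returns true */
	listnode_add_sort_nodup(l, &v2);	/* inserted, returns true */
	listnode_add_sort_nodup(l, &v1);	/* duplicate, returns false */

	/* Deletes every node for which the condition returns true,
	 * invoking l->del on the data first when l->del is set. */
	list_filter_out_nodes(l, is_stale);

	list_delete(&l);
}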
diff --git a/lib/log.c b/lib/log.c
index e64c00186b..5e3064a8d8 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -602,6 +602,8 @@ void zlog_backtrace_sigsafe(int priority, void *program_counter)
backtrace_symbols_fd(array, size, FD); \
}
#elif defined(HAVE_PRINTSTACK)
+ size = 0;
+
#define DUMP(FD) \
{ \
if (program_counter) \
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 8e16e70590..57a2f1daaa 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -36,40 +36,132 @@
DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop")
DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label")
-/* check if nexthops are same, non-recursive */
-int nexthop_same_no_recurse(const struct nexthop *next1,
- const struct nexthop *next2)
+static int _nexthop_labels_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
{
- if (next1->type != next2->type)
+ const struct mpls_label_stack *nhl1 = NULL;
+ const struct mpls_label_stack *nhl2 = NULL;
+
+ nhl1 = nh1->nh_label;
+ nhl2 = nh2->nh_label;
+
+ /* No labels is a match */
+ if (!nhl1 && !nhl2)
return 0;
- switch (next1->type) {
+ if (nhl1 && !nhl2)
+ return 1;
+
+ if (nhl2 && !nhl1)
+ return -1;
+
+ if (nhl1->num_labels > nhl2->num_labels)
+ return 1;
+
+ if (nhl1->num_labels < nhl2->num_labels)
+ return -1;
+
+ return memcmp(nhl1->label, nhl2->label, nhl1->num_labels);
+}
+
+static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
+ const union g_addr *addr1,
+ const union g_addr *addr2)
+{
+ int ret = 0;
+
+ switch (type) {
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (!IPV4_ADDR_SAME(&next1->gate.ipv4, &next2->gate.ipv4))
- return 0;
- if (next1->ifindex && (next1->ifindex != next2->ifindex))
- return 0;
+ ret = IPV4_ADDR_CMP(&addr1->ipv4, &addr2->ipv4);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ ret = IPV6_ADDR_CMP(&addr1->ipv6, &addr2->ipv6);
break;
case NEXTHOP_TYPE_IFINDEX:
- if (next1->ifindex != next2->ifindex)
- return 0;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* No addr here */
break;
+ }
+
+ return ret;
+}
+
+static int _nexthop_gateway_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ return _nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
+}
+
+static int _nexthop_source_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ return _nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
+}
+
+static int _nexthop_cmp_no_labels(const struct nexthop *next1,
+ const struct nexthop *next2)
+{
+ int ret = 0;
+
+ if (next1->vrf_id < next2->vrf_id)
+ return -1;
+
+ if (next1->vrf_id > next2->vrf_id)
+ return 1;
+
+ if (next1->type < next2->type)
+ return -1;
+
+ if (next1->type > next2->type)
+ return 1;
+
+ switch (next1->type) {
+ case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV6:
- if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
- return 0;
+ ret = _nexthop_gateway_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&next1->gate.ipv6, &next2->gate.ipv6))
- return 0;
- if (next1->ifindex != next2->ifindex)
- return 0;
+ ret = _nexthop_gateway_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
+ /* Intentional Fall-Through */
+ case NEXTHOP_TYPE_IFINDEX:
+ if (next1->ifindex < next2->ifindex)
+ return -1;
+
+ if (next1->ifindex > next2->ifindex)
+ return 1;
break;
- default:
- /* do nothing */
+ case NEXTHOP_TYPE_BLACKHOLE:
+ if (next1->bh_type < next2->bh_type)
+ return -1;
+
+ if (next1->bh_type > next2->bh_type)
+ return 1;
break;
}
- return 1;
+
+ ret = _nexthop_source_cmp(next1, next2);
+
+ return ret;
+}
+
+int nexthop_cmp(const struct nexthop *next1, const struct nexthop *next2)
+{
+ int ret = 0;
+
+ ret = _nexthop_cmp_no_labels(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = _nexthop_labels_cmp(next1, next2);
+
+ return ret;
}
int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2)
@@ -121,27 +213,12 @@ const char *nexthop_type_to_str(enum nexthop_types_t nh_type)
/*
* Check if the labels match for the 2 nexthops specified.
*/
-int nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
+bool nexthop_labels_match(const struct nexthop *nh1, const struct nexthop *nh2)
{
- const struct mpls_label_stack *nhl1, *nhl2;
-
- nhl1 = nh1->nh_label;
- nhl2 = nh2->nh_label;
-
- /* No labels is a match */
- if (!nhl1 && !nhl2)
- return 1;
-
- if (!nhl1 || !nhl2)
- return 0;
-
- if (nhl1->num_labels != nhl2->num_labels)
- return 0;
-
- if (memcmp(nhl1->label, nhl2->label, nhl1->num_labels))
- return 0;
+ if (_nexthop_labels_cmp(nh1, nh2) != 0)
+ return false;
- return 1;
+ return true;
}
struct nexthop *nexthop_new(void)
@@ -180,45 +257,28 @@ bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2)
if (nh1 == nh2)
return true;
- if (nh1->vrf_id != nh2->vrf_id)
+ if (nexthop_cmp(nh1, nh2) != 0)
return false;
- if (nh1->type != nh2->type)
+ return true;
+}
+
+bool nexthop_same_no_labels(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ if (nh1 && !nh2)
return false;
- switch (nh1->type) {
- case NEXTHOP_TYPE_IFINDEX:
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_IPV4:
- if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
- return false;
- break;
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nh1->gate.ipv4.s_addr != nh2->gate.ipv4.s_addr)
- return false;
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_IPV6:
- if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
- return false;
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (memcmp(&nh1->gate.ipv6, &nh2->gate.ipv6, 16))
- return false;
- if (nh1->ifindex != nh2->ifindex)
- return false;
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- if (nh1->bh_type != nh2->bh_type)
- return false;
- break;
- }
+ if (!nh1 && nh2)
+ return false;
+
+ if (nh1 == nh2)
+ return true;
+
+ if (_nexthop_cmp_no_labels(nh1, nh2) != 0)
+ return false;
- /* Compare labels too (if present) */
- return (!!nexthop_labels_match(nh1, nh2));
+ return true;
}
/* Update nexthop with label information. */
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 663acaeb69..5b6c12d4ef 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -139,12 +139,13 @@ void nexthop_del_labels(struct nexthop *);
uint32_t nexthop_hash(const struct nexthop *nexthop);
extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
+extern bool nexthop_same_no_labels(const struct nexthop *nh1,
+ const struct nexthop *nh2);
+extern int nexthop_cmp(const struct nexthop *nh1, const struct nexthop *nh2);
extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type);
-extern int nexthop_same_no_recurse(const struct nexthop *next1,
- const struct nexthop *next2);
-extern int nexthop_labels_match(const struct nexthop *nh1,
- const struct nexthop *nh2);
+extern bool nexthop_labels_match(const struct nexthop *nh1,
+ const struct nexthop *nh2);
extern int nexthop_same_firsthop(struct nexthop *next1, struct nexthop *next2);
extern const char *nexthop2str(const struct nexthop *nexthop,
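nexthop_same() is now a thin wrapper around the new three-way nexthop_cmp(), which orders nexthops by vrf_id, type, gateway/ifindex (or blackhole type), source address, and finally the label stack; the same comparator therefore serves both equality tests and sorting. A brief sketch of using it that way (the wrapper names are hypothetical):

#include <stdlib.h>
#include "nexthop.h"	/* lib/nexthop.h */

/* Hypothetical qsort() comparator over an array of struct nexthop *. */
static int nh_ptr_cmp(const void *a, const void *b)
{
	return nexthop_cmp(*(const struct nexthop *const *)a,
			   *(const struct nexthop *const *)b);
}

/* True when two nexthops differ only in their MPLS label stacks. */
static bool nh_differs_only_in_labels(const struct nexthop *a,
				      const struct nexthop *b)
{
	return !nexthop_same(a, b) && nexthop_same_no_labels(a, b);
}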
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index ae1b0578a0..7b7b526af0 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -1722,8 +1722,8 @@ void nb_cli_init(struct thread_master *tm)
/* Initialize the shared candidate configuration. */
vty_shared_candidate_config = nb_config_new(NULL);
- /* Install debug commands */
debug_init(&nb_dbg_cbs);
+
install_node(&nb_debug_node, nb_debug_config_write);
install_element(ENABLE_NODE, &debug_nb_cmd);
install_element(CONFIG_NODE, &debug_nb_cmd);
diff --git a/lib/plist.c b/lib/plist.c
index 2a97e1e5b2..9957ff1f51 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -998,22 +998,36 @@ static int vty_prefix_list_uninstall(struct vty *vty, afi_t afi,
return CMD_SUCCESS;
}
- /* We must have, at a minimum, both the type and prefix here */
- if ((typestr == NULL) || (prefix == NULL)) {
- vty_out(vty, "%% Both prefix and type required\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
-
/* Check sequence number. */
if (seq)
seqnum = (int64_t)atol(seq);
+ /* Sequence number specified, but nothing else. */
+ if (seq && typestr == NULL && prefix == NULL && ge == NULL
+ && le == NULL) {
+ pentry = prefix_seq_check(plist, seqnum);
+
+ if (pentry == NULL) {
+ vty_out(vty,
+ "%% Can't find prefix-list %s with sequence number %lu\n",
+ name, seqnum);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ prefix_list_entry_delete(plist, pentry, 1);
+ return CMD_SUCCESS;
+ }
+
/* ge and le number */
if (ge)
genum = atoi(ge);
if (le)
lenum = atoi(le);
+ /* We must have, at a minimum, both the type and prefix here */
+ if ((typestr == NULL) || (prefix == NULL))
+ return CMD_WARNING_CONFIG_FAILED;
+
/* Check of filter type. */
if (strncmp("permit", typestr, 1) == 0)
type = PREFIX_PERMIT;
@@ -1375,6 +1389,17 @@ DEFPY (no_ip_prefix_list,
action, dest, ge_str, le_str);
}
+DEFPY(no_ip_prefix_list_seq, no_ip_prefix_list_seq_cmd,
+ "no ip prefix-list WORD seq (1-4294967295)",
+ NO_STR IP_STR PREFIX_LIST_STR
+ "Name of a prefix list\n"
+ "sequence number of an entry\n"
+ "Sequence number\n")
+{
+ return vty_prefix_list_uninstall(vty, AFI_IP, prefix_list, seq_str,
+ NULL, NULL, NULL, NULL);
+}
+
DEFPY (no_ip_prefix_list_all,
no_ip_prefix_list_all_cmd,
"no ip prefix-list WORD",
@@ -2059,6 +2084,7 @@ static void prefix_list_init_ipv4(void)
install_element(CONFIG_NODE, &ip_prefix_list_cmd);
install_element(CONFIG_NODE, &no_ip_prefix_list_cmd);
+ install_element(CONFIG_NODE, &no_ip_prefix_list_seq_cmd);
install_element(CONFIG_NODE, &no_ip_prefix_list_all_cmd);
install_element(CONFIG_NODE, &ip_prefix_list_description_cmd);
diff --git a/lib/prefix.c b/lib/prefix.c
index d2a4c3a432..42d202ddbc 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -1365,7 +1365,7 @@ void prefix_mcast_inet4_dump(const char *onfail, struct in_addr addr,
int save_errno = errno;
if (addr.s_addr == INADDR_ANY)
- strcpy(buf, "*");
+ strlcpy(buf, "*", buf_size);
else {
if (!inet_ntop(AF_INET, &addr, buf, buf_size)) {
if (onfail)
diff --git a/lib/table.h b/lib/table.h
index 14be7ab656..eefd992546 100644
--- a/lib/table.h
+++ b/lib/table.h
@@ -298,6 +298,8 @@ static inline struct route_node *route_table_iter_next(route_table_iter_t *iter)
return NULL;
default:
+ /* Suppress uninitialized variable warning */
+ node = NULL;
assert(0);
}
diff --git a/lib/vty.c b/lib/vty.c
index 0ee9b78b91..2d97cca351 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -84,7 +84,7 @@ static char *vty_ipv6_accesslist_name = NULL;
static vector Vvty_serv_thread;
/* Current directory. */
-char *vty_cwd = NULL;
+char vty_cwd[MAXPATHLEN];
/* Login password check. */
static int no_password_check = 0;
@@ -998,7 +998,7 @@ static void vty_describe_fold(struct vty *vty, int cmd_width,
if (pos == 0)
break;
- strncpy(buf, p, pos);
+ memcpy(buf, p, pos);
buf[pos] = '\0';
vty_out(vty, " %-*s %s\n", cmd_width, cmd, buf);
@@ -1659,7 +1659,7 @@ static struct vty *vty_create(int vty_sock, union sockunion *su)
/* configurable parameters not part of basic init */
vty->v_timeout = vty_timeout_val;
- strcpy(vty->address, buf);
+ strlcpy(vty->address, buf, sizeof(vty->address));
if (no_password_check) {
if (host.advanced)
vty->node = ENABLE_NODE;
@@ -1795,7 +1795,7 @@ struct vty *vty_stdio(void (*atclose)(int isexit))
*/
vty->node = ENABLE_NODE;
vty->v_timeout = 0;
- strcpy(vty->address, "console");
+ strlcpy(vty->address, "console", sizeof(vty->address));
vty_stdio_resume();
return vty;
@@ -2384,9 +2384,10 @@ static FILE *vty_use_backup_config(const char *fullpath)
int c;
char buffer[512];
- fullpath_sav = malloc(strlen(fullpath) + strlen(CONF_BACKUP_EXT) + 1);
- strcpy(fullpath_sav, fullpath);
- strcat(fullpath_sav, CONF_BACKUP_EXT);
+ size_t fullpath_sav_sz = strlen(fullpath) + strlen(CONF_BACKUP_EXT) + 1;
+ fullpath_sav = malloc(fullpath_sav_sz);
+ strlcpy(fullpath_sav, fullpath, fullpath_sav_sz);
+ strlcat(fullpath_sav, CONF_BACKUP_EXT, fullpath_sav_sz);
sav = open(fullpath_sav, O_RDONLY);
if (sav < 0) {
@@ -3055,10 +3056,9 @@ void vty_reset(void)
static void vty_save_cwd(void)
{
- char cwd[MAXPATHLEN];
char *c;
- c = getcwd(cwd, MAXPATHLEN);
+ c = getcwd(vty_cwd, sizeof(vty_cwd));
if (!c) {
/*
@@ -3072,15 +3072,12 @@ static void vty_save_cwd(void)
SYSCONFDIR, errno);
exit(-1);
}
- if (getcwd(cwd, MAXPATHLEN) == NULL) {
+ if (getcwd(vty_cwd, sizeof(vty_cwd)) == NULL) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"Failure to getcwd, errno: %d", errno);
exit(-1);
}
}
-
- vty_cwd = XMALLOC(MTYPE_TMP, strlen(cwd) + 1);
- strcpy(vty_cwd, cwd);
}
char *vty_get_cwd(void)
@@ -3146,7 +3143,7 @@ void vty_init(struct thread_master *master_thread)
void vty_terminate(void)
{
- XFREE(MTYPE_TMP, vty_cwd);
+ memset(vty_cwd, 0x00, sizeof(vty_cwd));
if (vtyvec && Vvty_serv_thread) {
vty_reset();
diff --git a/ospfd/ospf_te.c b/ospfd/ospf_te.c
index 1488aa88cd..e683636639 100644
--- a/ospfd/ospf_te.c
+++ b/ospfd/ospf_te.c
@@ -2535,7 +2535,7 @@ DEFUN (show_ip_ospf_mpls_te_link,
struct interface *ifp = NULL;
struct listnode *node;
char *vrf_name = NULL;
- bool all_vrf;
+ bool all_vrf = false;
int inst = 0;
int idx_vrf = 0;
struct ospf *ospf = NULL;
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index 52506542bc..fc78b8ed1f 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -191,7 +191,7 @@ static void *pbr_nhgc_alloc(void *p)
new = XCALLOC(MTYPE_PBR_NHG, sizeof(*new));
- strcpy(new->name, pnhgc->name);
+ strlcpy(new->name, pnhgc->name, sizeof(pnhgc->name));
new->table_id = pbr_nht_get_next_tableid(false);
DEBUGD(&pbr_dbg_nht, "%s: NHT: %s assigned Table ID: %u",
diff --git a/pimd/pim_assert.c b/pimd/pim_assert.c
index 0a450834e3..438a0c9b64 100644
--- a/pimd/pim_assert.c
+++ b/pimd/pim_assert.c
@@ -418,7 +418,7 @@ int pim_assert_build_msg(uint8_t *pim_msg, int buf_size, struct interface *ifp,
Add PIM header
*/
pim_msg_size = pim_msg_curr - pim_msg;
- pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT);
+ pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_ASSERT, false);
return pim_msg_size;
}
diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c
new file mode 100644
index 0000000000..4ba8d08fe3
--- /dev/null
+++ b/pimd/pim_bsm.c
@@ -0,0 +1,1404 @@
+/*
+ * pim_bsm.c: PIM BSM handling routines
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+#include "if.h"
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_instance.h"
+#include "pim_rpf.h"
+#include "pim_hello.h"
+#include "pim_pim.h"
+#include "pim_nht.h"
+#include "pim_bsm.h"
+#include "pim_time.h"
+
+/* Functions forward declaration */
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout);
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time);
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+ int hold_time);
+
+/* Memory Types */
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSGRP_NODE, "PIM BSR advertised grp info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSRP_NODE, "PIM BSR advertised RP info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_INFO, "PIM BSM Info")
+DEFINE_MTYPE_STATIC(PIMD, PIM_BSM_PKT_VAR_MEM, "PIM BSM Packet")
+
+/* All forwarded bsm packets shall fit within the ip mtu less the max ip header */
+#define MAX_IP_HDR_LEN 24
+
+/* pim_bsm_write_config - Write the interface pim bsm configuration.*/
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (pim_ifp) {
+ if (!pim_ifp->bsm_enable)
+ vty_out(vty, " no ip pim bsm\n");
+ if (!pim_ifp->ucast_bsm_accept)
+ vty_out(vty, " no ip pim unicast-bsm\n");
+ }
+}
+
+static void pim_free_bsgrp_data(struct bsgrp_node *bsgrp_node)
+{
+ if (bsgrp_node->bsrp_list)
+ list_delete(&bsgrp_node->bsrp_list);
+ if (bsgrp_node->partial_bsrp_list)
+ list_delete(&bsgrp_node->partial_bsrp_list);
+ XFREE(MTYPE_PIM_BSGRP_NODE, bsgrp_node);
+}
+
+static void pim_free_bsgrp_node(struct route_table *rt, struct prefix *grp)
+{
+ struct route_node *rn;
+
+ rn = route_node_lookup(rt, grp);
+ if (rn) {
+ rn->info = NULL;
+ route_unlock_node(rn);
+ route_unlock_node(rn);
+ }
+}
+
+static void pim_bsm_node_free(struct bsm_info *bsm)
+{
+ if (bsm->bsm)
+ XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, bsm->bsm);
+ XFREE(MTYPE_PIM_BSM_INFO, bsm);
+}
+
+static int pim_g2rp_list_compare(struct bsm_rpinfo *node1,
+ struct bsm_rpinfo *node2)
+{
+ /* RP election Algo :
+ * Step-1 : Lowest RP priority will have higher precedence.
+ * Step-2 : If the priority is the same, then the higher hash value
+ *          will have higher precedence.
+ * Step-3 : If the hash value is also the same, then the highest RP
+ *          address becomes the elected RP.
+ */
+ if (node1->rp_prio < node2->rp_prio)
+ return -1;
+ if (node1->rp_prio > node2->rp_prio)
+ return 1;
+ if (node1->hash < node2->hash)
+ return 1;
+ if (node1->hash > node2->hash)
+ return -1;
+ if (node1->rp_address.s_addr < node2->rp_address.s_addr)
+ return 1;
+ if (node1->rp_address.s_addr > node2->rp_address.s_addr)
+ return -1;
+ return 0;
+}
+
+static void pim_free_bsrp_node(struct bsm_rpinfo *bsrp_info)
+{
+ if (bsrp_info->g2rp_timer)
+ THREAD_OFF(bsrp_info->g2rp_timer);
+ XFREE(MTYPE_PIM_BSRP_NODE, bsrp_info);
+}
+
+static struct list *pim_alloc_bsrp_list(void)
+{
+ struct list *new_list = NULL;
+
+ new_list = list_new();
+
+ if (!new_list)
+ return NULL;
+
+ new_list->cmp = (int (*)(void *, void *))pim_g2rp_list_compare;
+ new_list->del = (void (*)(void *))pim_free_bsrp_node;
+
+ return new_list;
+}
+
+static struct bsgrp_node *pim_bsm_new_bsgrp_node(struct route_table *rt,
+ struct prefix *grp)
+{
+ struct route_node *rn;
+ struct bsgrp_node *bsgrp;
+
+ rn = route_node_get(rt, grp);
+ if (!rn) {
+ zlog_warn("%s: route node creation failed",
+ __PRETTY_FUNCTION__);
+ return NULL;
+ }
+ bsgrp = XCALLOC(MTYPE_PIM_BSGRP_NODE, sizeof(struct bsgrp_node));
+
+ if (!bsgrp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: bsgrp alloc failed",
+ __PRETTY_FUNCTION__);
+ route_unlock_node(rn);
+ return NULL;
+ }
+
+ rn->info = bsgrp;
+ bsgrp->bsrp_list = pim_alloc_bsrp_list();
+ bsgrp->partial_bsrp_list = pim_alloc_bsrp_list();
+
+ if ((!bsgrp->bsrp_list) || (!bsgrp->partial_bsrp_list)) {
+ route_unlock_node(rn);
+ pim_free_bsgrp_data(bsgrp);
+ return NULL;
+ }
+
+ prefix_copy(&bsgrp->group, grp);
+ return bsgrp;
+}
+
+static int pim_on_bs_timer(struct thread *t)
+{
+ struct route_node *rn;
+ struct bsm_scope *scope;
+ struct bsgrp_node *bsgrp_node;
+ struct bsm_rpinfo *bsrp;
+ struct prefix nht_p;
+ char buf[PREFIX2STR_BUFFER];
+ bool is_bsr_tracking = true;
+
+ scope = THREAD_ARG(t);
+ THREAD_OFF(scope->bs_timer);
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Bootstrap Timer expired for scope: %d",
+ __PRETTY_FUNCTION__, scope->sz_id);
+
+ /* Remove next hop tracking for the bsr */
+ nht_p.family = AF_INET;
+ nht_p.prefixlen = IPV4_MAX_BITLEN;
+ nht_p.u.prefix4 = scope->current_bsr;
+ if (PIM_DEBUG_BSM) {
+ prefix2str(&nht_p, buf, sizeof(buf));
+ zlog_debug("%s: Deregister BSR addr %s with Zebra NHT",
+ __PRETTY_FUNCTION__, buf);
+ }
+ pim_delete_tracked_nexthop(scope->pim, &nht_p, NULL, NULL,
+ is_bsr_tracking);
+
+ /* Reset scope zone data */
+ scope->accept_nofwd_bsm = false;
+ scope->state = ACCEPT_ANY;
+ scope->current_bsr.s_addr = INADDR_ANY;
+ scope->current_bsr_prio = 0;
+ scope->current_bsr_first_ts = 0;
+ scope->current_bsr_last_ts = 0;
+ scope->bsm_frag_tag = 0;
+ list_delete_all_node(scope->bsm_list);
+
+ for (rn = route_top(scope->bsrp_table); rn; rn = route_next(rn)) {
+
+ bsgrp_node = (struct bsgrp_node *)rn->info;
+ if (!bsgrp_node) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: bsgrp_node is null",
+ __PRETTY_FUNCTION__);
+ continue;
+ }
+ /* Give grace time for rp to continue for another hold time */
+ if ((bsgrp_node->bsrp_list) && (bsgrp_node->bsrp_list->count)) {
+ bsrp = listnode_head(bsgrp_node->bsrp_list);
+ pim_g2rp_timer_restart(bsrp, bsrp->rp_holdtime);
+ }
+ /* clear pending list */
+ if ((bsgrp_node->partial_bsrp_list)
+ && (bsgrp_node->partial_bsrp_list->count)) {
+ list_delete_all_node(bsgrp_node->partial_bsrp_list);
+ bsgrp_node->pend_rp_cnt = 0;
+ }
+ }
+ return 0;
+}
+
+static void pim_bs_timer_stop(struct bsm_scope *scope)
+{
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : BS timer being stopped of sz: %d",
+ __PRETTY_FUNCTION__, scope->sz_id);
+ THREAD_OFF(scope->bs_timer);
+}
+
+static void pim_bs_timer_start(struct bsm_scope *scope, int bs_timeout)
+{
+ if (!scope) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Invalid scope(NULL).",
+ __PRETTY_FUNCTION__);
+ return;
+ }
+ THREAD_OFF(scope->bs_timer);
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : starting bs timer for scope %d with timeout %d secs",
+ __PRETTY_FUNCTION__, scope->sz_id, bs_timeout);
+ thread_add_timer(router->master, pim_on_bs_timer, scope, bs_timeout,
+ &scope->bs_timer);
+}
+
+static inline void pim_bs_timer_restart(struct bsm_scope *scope, int bs_timeout)
+{
+ pim_bs_timer_start(scope, bs_timeout);
+}
+
+void pim_bsm_proc_init(struct pim_instance *pim)
+{
+ memset(&pim->global_scope, 0, sizeof(struct bsm_scope));
+
+ pim->global_scope.sz_id = PIM_GBL_SZ_ID;
+ pim->global_scope.bsrp_table = route_table_init();
+ pim->global_scope.accept_nofwd_bsm = true;
+ pim->global_scope.state = NO_INFO;
+ pim->global_scope.pim = pim;
+ pim->global_scope.bsm_list = list_new();
+ pim->global_scope.bsm_list->del = (void (*)(void *))pim_bsm_node_free;
+ pim_bs_timer_start(&pim->global_scope, PIM_BS_TIME);
+}
+
+void pim_bsm_proc_free(struct pim_instance *pim)
+{
+ struct route_node *rn;
+ struct bsgrp_node *bsgrp;
+
+ pim_bs_timer_stop(&pim->global_scope);
+
+ if (pim->global_scope.bsm_list)
+ list_delete(&pim->global_scope.bsm_list);
+
+ for (rn = route_top(pim->global_scope.bsrp_table); rn;
+ rn = route_next(rn)) {
+ bsgrp = rn->info;
+ if (!bsgrp)
+ continue;
+ pim_free_bsgrp_data(bsgrp);
+ }
+
+ if (pim->global_scope.bsrp_table)
+ route_table_finish(pim->global_scope.bsrp_table);
+}
+
+static bool is_hold_time_elapsed(void *data)
+{
+ struct bsm_rpinfo *bsrp;
+
+ bsrp = data;
+
+ if (bsrp->elapse_time < bsrp->rp_holdtime)
+ return false;
+ else
+ return true;
+}
+
+static int pim_on_g2rp_timer(struct thread *t)
+{
+ struct bsm_rpinfo *bsrp;
+ struct bsm_rpinfo *bsrp_node;
+ struct bsgrp_node *bsgrp_node;
+ struct listnode *bsrp_ln;
+ struct pim_instance *pim;
+ struct rp_info *rp_info;
+ struct route_node *rn;
+ uint16_t elapse;
+ struct in_addr bsrp_addr;
+
+ bsrp = THREAD_ARG(t);
+ THREAD_OFF(bsrp->g2rp_timer);
+ bsgrp_node = bsrp->bsgrp_node;
+
+ /* elapse time is the hold time of expired node */
+ elapse = bsrp->rp_holdtime;
+ bsrp_addr = bsrp->rp_address;
+
+ /* update elapse for all bsrp nodes */
+ for (ALL_LIST_ELEMENTS_RO(bsgrp_node->bsrp_list, bsrp_ln, bsrp_node))
+ bsrp_node->elapse_time += elapse;
+
+ /* remove the expired nodes from the list */
+ list_filter_out_nodes(bsgrp_node->bsrp_list, is_hold_time_elapsed);
+
+ /* Get the next elected rp node */
+ bsrp = listnode_head(bsgrp_node->bsrp_list);
+ pim = bsgrp_node->scope->pim;
+ rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
+ if (!rn) {
+ zlog_warn("%s: Route node doesn't exist", __PRETTY_FUNCTION__);
+ return 0;
+ }
+
+ rp_info = (struct rp_info *)rn->info;
+
+ if (!rp_info) {
+ route_unlock_node(rn);
+ return 0;
+ }
+
+ if (rp_info->rp_src != RP_SRC_STATIC) {
+ /* If new rp available, change it else delete the existing */
+ if (bsrp) {
+ bsrp_addr = bsrp->rp_address;
+ pim_g2rp_timer_start(
+ bsrp, (bsrp->rp_holdtime - bsrp->elapse_time));
+ pim_rp_change(pim, bsrp_addr, bsgrp_node->group,
+ RP_SRC_BSR);
+ } else {
+ pim_rp_del(pim, bsrp_addr, bsgrp_node->group, NULL,
+ RP_SRC_BSR);
+ }
+ }
+
+ if ((!bsgrp_node->bsrp_list->count)
+ && (!bsgrp_node->partial_bsrp_list->count)) {
+ pim_free_bsgrp_node(pim->global_scope.bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ }
+
+ return 0;
+}
+
+static void pim_g2rp_timer_start(struct bsm_rpinfo *bsrp, int hold_time)
+{
+ if (!bsrp) {
+ if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Invalid bsrp(NULL).",
+ __PRETTY_FUNCTION__);
+ return;
+ }
+ THREAD_OFF(bsrp->g2rp_timer);
+ if (PIM_DEBUG_BSM) {
+ char buf[48];
+
+ zlog_debug(
+			"%s : starting g2rp timer for grp: %s - rp: %s with timeout %d secs (Actual Hold time: %d secs)",
+ __PRETTY_FUNCTION__,
+ prefix2str(&bsrp->bsgrp_node->group, buf, 48),
+ inet_ntoa(bsrp->rp_address), hold_time,
+ bsrp->rp_holdtime);
+ }
+
+ thread_add_timer(router->master, pim_on_g2rp_timer, bsrp, hold_time,
+ &bsrp->g2rp_timer);
+}
+
+static inline void pim_g2rp_timer_restart(struct bsm_rpinfo *bsrp,
+ int hold_time)
+{
+ pim_g2rp_timer_start(bsrp, hold_time);
+}
+
+static void pim_g2rp_timer_stop(struct bsm_rpinfo *bsrp)
+{
+ if (!bsrp)
+ return;
+
+ if (PIM_DEBUG_BSM) {
+ char buf[48];
+
+ zlog_debug("%s : stopping g2rp timer for grp: %s - rp: %s",
+ __PRETTY_FUNCTION__,
+ prefix2str(&bsrp->bsgrp_node->group, buf, 48),
+ inet_ntoa(bsrp->rp_address));
+ }
+
+ THREAD_OFF(bsrp->g2rp_timer);
+}
+
+static bool is_hold_time_zero(void *data)
+{
+ struct bsm_rpinfo *bsrp;
+
+ bsrp = data;
+
+ if (bsrp->rp_holdtime)
+ return false;
+ else
+ return true;
+}
+
+static void pim_instate_pend_list(struct bsgrp_node *bsgrp_node)
+{
+ struct bsm_rpinfo *active;
+ struct bsm_rpinfo *pend;
+ struct list *temp;
+ struct rp_info *rp_info;
+ struct route_node *rn;
+ struct pim_instance *pim;
+ struct rp_info *rp_all;
+ struct prefix group_all;
+ bool had_rp_node = true;
+
+ pim = bsgrp_node->scope->pim;
+ active = listnode_head(bsgrp_node->bsrp_list);
+
+ /* Remove nodes with hold time 0 & check if list still has a head */
+ list_filter_out_nodes(bsgrp_node->partial_bsrp_list, is_hold_time_zero);
+ pend = listnode_head(bsgrp_node->partial_bsrp_list);
+
+ if (!str2prefix("224.0.0.0/4", &group_all))
+ return;
+
+ rp_all = pim_rp_find_match_group(pim, &group_all);
+ rn = route_node_lookup(pim->rp_table, &bsgrp_node->group);
+
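+	/* Start the g2rp holdtime timer on the head of the pending list,
+	 * i.e. the RP currently elected for this group (if any).
+	 */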
+ if (pend)
+ pim_g2rp_timer_start(pend, pend->rp_holdtime);
+
+	/* If the rp node doesn't exist, or exists but is not configured
+	 * (rp_all), install the rp from the head (if any) of the partial
+	 * list. The list is sorted such that the head is the elected RP
+	 * for the group.
+	 */
+ if (!rn || (prefix_same(&rp_all->group, &bsgrp_node->group)
+ && pim_rpf_addr_is_inaddr_none(&rp_all->rp))) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Route node doesn't exist",
+ __PRETTY_FUNCTION__);
+ if (pend)
+ pim_rp_new(pim, pend->rp_address, bsgrp_node->group,
+ NULL, RP_SRC_BSR);
+ had_rp_node = false;
+ } else {
+ rp_info = (struct rp_info *)rn->info;
+ if (!rp_info) {
+ route_unlock_node(rn);
+ if (pend)
+ pim_rp_new(pim, pend->rp_address,
+ bsgrp_node->group, NULL, RP_SRC_BSR);
+ had_rp_node = false;
+ }
+ }
+
+	/* We didn't have an rp node and the pending list is empty
+	 * (unlikely), cleanup
+	 */
+ if ((!had_rp_node) && (!pend)) {
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+
+ if ((had_rp_node) && (rp_info->rp_src != RP_SRC_STATIC)) {
+ /* This means we searched and got rp node, needs unlock */
+ route_unlock_node(rn);
+
+ if (active && pend) {
+ if ((active->rp_address.s_addr
+ != pend->rp_address.s_addr))
+ pim_rp_change(pim, pend->rp_address,
+ bsgrp_node->group, RP_SRC_BSR);
+ }
+
+ /* Possible when the first BSM has group with 0 rp count */
+ if ((!active) && (!pend)) {
+ if (PIM_DEBUG_BSM) {
+ zlog_debug(
+ "%s: Both bsrp and partial list are empty",
+ __PRETTY_FUNCTION__);
+ }
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+
+ /* Possible when a group with 0 rp count received in BSM */
+ if ((active) && (!pend)) {
+ pim_rp_del(pim, active->rp_address, bsgrp_node->group,
+ NULL, RP_SRC_BSR);
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ if (PIM_DEBUG_BSM) {
+				zlog_debug("%s: Pend list is null, del grp node",
+ __PRETTY_FUNCTION__);
+ }
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+ }
+
+ if ((had_rp_node) && (rp_info->rp_src == RP_SRC_STATIC)) {
+		/* We need to unlock rn in this case */
+ route_unlock_node(rn);
+		/* There is a chance that a static rp exists while the bsrp
+		 * was cleaned, so clean the bsgrp node if the pending list
+		 * is empty.
+		 */
+ if (!pend) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: Partial list is empty, static rp exists",
+ __PRETTY_FUNCTION__);
+ pim_free_bsgrp_node(bsgrp_node->scope->bsrp_table,
+ &bsgrp_node->group);
+ pim_free_bsgrp_data(bsgrp_node);
+ return;
+ }
+ }
+
+	/* Swap the lists & delete all nodes in the partial list (old
+	 * bsrp_list).
+	 * Before swap:
+	 *   active is head of bsrp list
+	 *   pend is head of partial list
+	 * After swap:
+	 *   active is head of partial list
+	 *   pend is head of bsrp list
+	 * So check the appropriate head after swap and clean the new
+	 * partial list.
+	 */
+ temp = bsgrp_node->bsrp_list;
+ bsgrp_node->bsrp_list = bsgrp_node->partial_bsrp_list;
+ bsgrp_node->partial_bsrp_list = temp;
+
+ if (active) {
+ pim_g2rp_timer_stop(active);
+ list_delete_all_node(bsgrp_node->partial_bsrp_list);
+ }
+}
+
+static bool pim_bsr_rpf_check(struct pim_instance *pim, struct in_addr bsr,
+ struct in_addr ip_src_addr)
+{
+ struct pim_nexthop nexthop;
+ int result;
+
+ memset(&nexthop, 0, sizeof(nexthop));
+
+	/* New BSR received */
+ if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+ result = pim_nexthop_match(pim, bsr, ip_src_addr);
+
+		/* Nexthop lookup passed for the new BSR address */
+ if (result)
+ return true;
+
+ if (PIM_DEBUG_BSM) {
+ char bsr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<bsr?>", bsr, bsr_str, sizeof(bsr_str));
+ zlog_debug("%s : No route to BSR address %s",
+ __PRETTY_FUNCTION__, bsr_str);
+ }
+ return false;
+ }
+
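+	/* BSR unchanged: validate the packet source against the nexthop
+	 * cache that is already being tracked for this BSR.
+	 */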
+ return pim_nexthop_match_nht_cache(pim, bsr, ip_src_addr);
+}
+
+static bool is_preferred_bsr(struct pim_instance *pim, struct in_addr bsr,
+ uint32_t bsr_prio)
+{
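+	/* The current BSR always stays acceptable; otherwise a higher
+	 * priority wins and, on a priority tie, the higher address wins.
+	 */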
+ if (bsr.s_addr == pim->global_scope.current_bsr.s_addr)
+ return true;
+
+ if (bsr_prio > pim->global_scope.current_bsr_prio)
+ return true;
+
+ else if (bsr_prio == pim->global_scope.current_bsr_prio) {
+ if (bsr.s_addr >= pim->global_scope.current_bsr.s_addr)
+ return true;
+ else
+ return false;
+ } else
+ return false;
+}
+
+static void pim_bsm_update(struct pim_instance *pim, struct in_addr bsr,
+ uint32_t bsr_prio)
+{
+ struct pim_nexthop_cache pnc;
+
+ if (bsr.s_addr != pim->global_scope.current_bsr.s_addr) {
+ struct prefix nht_p;
+ char buf[PREFIX2STR_BUFFER];
+ bool is_bsr_tracking = true;
+
+ /* De-register old BSR and register new BSR with Zebra NHT */
+ nht_p.family = AF_INET;
+ nht_p.prefixlen = IPV4_MAX_BITLEN;
+
+ if (pim->global_scope.current_bsr.s_addr != INADDR_ANY) {
+ nht_p.u.prefix4 = pim->global_scope.current_bsr;
+ if (PIM_DEBUG_BSM) {
+ prefix2str(&nht_p, buf, sizeof(buf));
+ zlog_debug(
+ "%s: Deregister BSR addr %s with Zebra NHT",
+ __PRETTY_FUNCTION__, buf);
+ }
+ pim_delete_tracked_nexthop(pim, &nht_p, NULL, NULL,
+ is_bsr_tracking);
+ }
+
+ nht_p.u.prefix4 = bsr;
+ if (PIM_DEBUG_BSM) {
+ prefix2str(&nht_p, buf, sizeof(buf));
+ zlog_debug(
+ "%s: NHT Register BSR addr %s with Zebra NHT",
+ __PRETTY_FUNCTION__, buf);
+ }
+
+ memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, NULL,
+ is_bsr_tracking, &pnc);
+ pim->global_scope.current_bsr = bsr;
+ pim->global_scope.current_bsr_first_ts =
+ pim_time_monotonic_sec();
+ pim->global_scope.state = ACCEPT_PREFERRED;
+ }
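+	/* Priority and last-seen timestamp are refreshed for every accepted
+	 * BSM, even when the BSR itself is unchanged.
+	 */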
+ pim->global_scope.current_bsr_prio = bsr_prio;
+ pim->global_scope.current_bsr_last_ts = pim_time_monotonic_sec();
+}
+
+static bool pim_bsm_send_intf(uint8_t *buf, int len, struct interface *ifp,
+ struct in_addr dst_addr)
+{
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Pim interface not available for %s",
+ __PRETTY_FUNCTION__, ifp->name);
+ return false;
+ }
+
+ if (pim_ifp->pim_sock_fd == -1) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Pim sock not available for %s",
+ __PRETTY_FUNCTION__, ifp->name);
+ return false;
+ }
+
+ pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address, dst_addr,
+ buf, len, ifp->name);
+ pim_ifp->pim_ifstat_bsm_tx++;
+ pim_ifp->pim->bsm_sent++;
+ return true;
+}
+
+static bool pim_bsm_frag_send(uint8_t *buf, uint32_t len, struct interface *ifp,
+ uint32_t pim_mtu, struct in_addr dst_addr,
+ bool no_fwd)
+{
+ struct bsmmsg_grpinfo *grpinfo, *curgrp;
+ uint8_t *firstgrp_ptr;
+ uint8_t *pkt;
+ uint8_t *pak_start;
+ uint32_t parsed_len = 0;
+ uint32_t this_pkt_rem;
+ uint32_t copy_byte_count;
+ uint32_t this_pkt_len;
+ uint8_t total_rp_cnt;
+ uint8_t this_rp_cnt;
+ uint8_t frag_rp_cnt;
+ uint8_t rp_fit_cnt;
+ bool pak_pending = false;
+
+ /* MTU passed here is PIM MTU (IP MTU less IP Hdr) */
+ if (pim_mtu < (PIM_MIN_BSM_LEN)) {
+ zlog_warn(
+			"%s: mtu(pim mtu: %d) size less than minimum bootstrap len",
+ __PRETTY_FUNCTION__, pim_mtu);
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+				"%s: mtu (pim mtu:%d) less than minimum bootstrap len",
+ __PRETTY_FUNCTION__, pim_mtu);
+ return false;
+ }
+
+ pak_start = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, pim_mtu);
+
+ if (!pak_start) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: malloc failed", __PRETTY_FUNCTION__);
+ return false;
+ }
+
+ pkt = pak_start;
+
+ /* Fill PIM header later before sending packet to calc checksum */
+ pkt += PIM_MSG_HEADER_LEN;
+ buf += PIM_MSG_HEADER_LEN;
+
+ /* copy bsm header to new packet at offset of pim hdr */
+ memcpy(pkt, buf, PIM_BSM_HDR_LEN);
+ pkt += PIM_BSM_HDR_LEN;
+ buf += PIM_BSM_HDR_LEN;
+ parsed_len += (PIM_MSG_HEADER_LEN + PIM_BSM_HDR_LEN);
+
+	/* Store the position of the first group ptr, which can be reused
+	 * when filling groups into the next packet. The old bsm header and
+	 * pim hdr remain in place, so they need not be filled again for
+	 * subsequent packets.
+	 */
+ firstgrp_ptr = pkt;
+
+ /* we received mtu excluding IP hdr len as param
+ * now this_pkt_rem is mtu excluding
+ * PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN
+ */
+ this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN);
+
+ /* For each group till the packet length parsed */
+ while (parsed_len < len) {
+		/* pkt ---> fragment's current pointer
+		 * buf ---> input buffer's current pointer
+		 * mtu ---> size of the pim packet - PIM header
+		 * curgrp ---> current group on the fragment
+		 * grpinfo ---> current group on the input buffer
+		 * this_pkt_rem ---> bytes remaining on the current fragment
+		 * rp_fit_cnt ---> number of RPs of the current grp that
+		 *                 fit in this frag
+		 * total_rp_cnt ---> total RPs present for the group in the buf
+		 * frag_rp_cnt ---> number of RPs of the group to be fit in
+		 *                  this frag
+		 * this_rp_cnt ---> how many RPs have been parsed so far
+		 */
+ grpinfo = (struct bsmmsg_grpinfo *)buf;
+ memcpy(pkt, buf, PIM_BSM_GRP_LEN);
+ curgrp = (struct bsmmsg_grpinfo *)pkt;
+ parsed_len += PIM_BSM_GRP_LEN;
+ pkt += PIM_BSM_GRP_LEN;
+ buf += PIM_BSM_GRP_LEN;
+ this_pkt_rem -= PIM_BSM_GRP_LEN;
+
+ /* initialize rp count and total_rp_cnt before the rp loop */
+ this_rp_cnt = 0;
+ total_rp_cnt = grpinfo->frag_rp_count;
+
+ /* Loop till all RPs for the group parsed */
+ while (this_rp_cnt < total_rp_cnt) {
+			/* All RPs of a group are processed here.
+			 * The group is pointed to by grpinfo.
+			 * At this point buf must be pointing to an RP
+			 * within the group.
+			 */
+ rp_fit_cnt = this_pkt_rem / PIM_BSM_RP_LEN;
+
+			/* calculate how many RPs are going to be copied in
+			 * this frag
+			 */
+ if (rp_fit_cnt > (total_rp_cnt - this_rp_cnt))
+ frag_rp_cnt = total_rp_cnt - this_rp_cnt;
+ else
+ frag_rp_cnt = rp_fit_cnt;
+
+ /* populate the frag rp count for the current grp */
+ curgrp->frag_rp_count = frag_rp_cnt;
+ copy_byte_count = frag_rp_cnt * PIM_BSM_RP_LEN;
+
+ /* copy all the rp that we are fitting in this
+ * frag for the grp
+ */
+ memcpy(pkt, buf, copy_byte_count);
+ this_rp_cnt += frag_rp_cnt;
+ buf += copy_byte_count;
+ pkt += copy_byte_count;
+ parsed_len += copy_byte_count;
+ this_pkt_rem -= copy_byte_count;
+
+			/* Either we couldn't fit all RPs for the group or
+			 * the mtu was reached
+			 */
+ if ((this_rp_cnt < total_rp_cnt)
+ || (this_pkt_rem
+ < (PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN))) {
+ /* No space to fit in more rp, send this pkt */
+ this_pkt_len = pim_mtu - this_pkt_rem;
+ pim_msg_build_header(pak_start, this_pkt_len,
+ PIM_MSG_TYPE_BOOTSTRAP,
+ no_fwd);
+ pim_bsm_send_intf(pak_start, this_pkt_len, ifp,
+ dst_addr);
+
+ /* Construct next fragment. Reuse old packet */
+ pkt = firstgrp_ptr;
+ this_pkt_rem = pim_mtu - (PIM_BSM_HDR_LEN
+ + PIM_MSG_HEADER_LEN);
+
+				/* If all RPs of this group have already been
+				 * sent, break out of this inner loop and
+				 * continue with the next group
+				 */
+ if (total_rp_cnt == this_rp_cnt)
+ break;
+
+ /* If some more RPs for the same group pending,
+ * fill grp hdr
+ */
+ memcpy(pkt, (uint8_t *)grpinfo,
+ PIM_BSM_GRP_LEN);
+ curgrp = (struct bsmmsg_grpinfo *)pkt;
+ pkt += PIM_BSM_GRP_LEN;
+ this_pkt_rem -= PIM_BSM_GRP_LEN;
+ pak_pending = false;
+ } else {
+ /* We filled something but not yet sent out */
+ pak_pending = true;
+ }
+ } /* while RP count */
+ } /*while parsed len */
+
+ /* Send if we have any unsent packet */
+ if (pak_pending) {
+ this_pkt_len = pim_mtu - this_pkt_rem;
+ pim_msg_build_header(pak_start, this_pkt_len,
+ PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+ pim_bsm_send_intf(pak_start, (pim_mtu - this_pkt_rem), ifp,
+ dst_addr);
+ }
+ XFREE(MTYPE_PIM_BSM_PKT_VAR_MEM, pak_start);
+ return true;
+}
+
+static void pim_bsm_fwd_whole_sz(struct pim_instance *pim, uint8_t *buf,
+ uint32_t len, int sz)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct in_addr dst_addr;
+ uint32_t pim_mtu;
+	bool no_fwd = false;
+	bool ret = false;
+
+ /* For now only global scope zone is supported, so send on all
+ * pim interfaces in the vrf
+ */
+ dst_addr = qpim_all_pim_routers_addr;
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ pim_ifp = ifp->info;
+ if ((!pim_ifp) || (!pim_ifp->bsm_enable))
+ continue;
+ pim_hello_require(ifp);
+ pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+ if (pim_mtu < len) {
+ ret = pim_bsm_frag_send(buf, len, ifp, pim_mtu,
+ dst_addr, no_fwd);
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: pim_bsm_frag_send returned %s",
+ __PRETTY_FUNCTION__,
+ ret ? "TRUE" : "FALSE");
+ } else {
+ pim_msg_build_header(buf, len, PIM_MSG_TYPE_BOOTSTRAP,
+ no_fwd);
+ if (!pim_bsm_send_intf(buf, len, ifp, dst_addr)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: pim_bsm_send_intf returned FALSE",
+ __PRETTY_FUNCTION__);
+ }
+ }
+ }
+}
+
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
+{
+ struct in_addr dst_addr;
+ struct pim_interface *pim_ifp;
+ struct bsm_scope *scope;
+ struct listnode *bsm_ln;
+ struct bsm_info *bsminfo;
+ char neigh_src_str[INET_ADDRSTRLEN];
+ uint32_t pim_mtu;
+ bool no_fwd = true;
+ bool ret = false;
+
+ if (PIM_DEBUG_BSM) {
+ pim_inet4_dump("<src?>", neigh->source_addr, neigh_src_str,
+ sizeof(neigh_src_str));
+ zlog_debug("%s: New neighbor %s seen on %s",
+ __PRETTY_FUNCTION__, neigh_src_str, ifp->name);
+ }
+
+ pim_ifp = ifp->info;
+
+	/* Only the DR forwards the BSM packet */
+	if (pim_ifp->pim_dr_addr.s_addr != pim_ifp->primary_address.s_addr) {
+		if (PIM_DEBUG_BSM)
+			zlog_debug(
+				"%s: This router is not the DR, so don't forward the BSM packet",
+				__PRETTY_FUNCTION__);
+		return ret;
+	}
+
+ if (!pim_ifp->bsm_enable) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: BSM proc not enabled on %s",
+ __PRETTY_FUNCTION__, ifp->name);
+ return ret;
+ }
+
+ scope = &pim_ifp->pim->global_scope;
+
+ if (!scope->bsm_list->count) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: BSM list for the scope is empty",
+ __PRETTY_FUNCTION__);
+ return ret;
+ }
+
+ if (!pim_ifp->ucast_bsm_accept) {
+ dst_addr = qpim_all_pim_routers_addr;
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Sending BSM mcast to %s",
+ __PRETTY_FUNCTION__, neigh_src_str);
+ } else {
+ dst_addr = neigh->source_addr;
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Sending BSM ucast to %s",
+ __PRETTY_FUNCTION__, neigh_src_str);
+ }
+ pim_mtu = ifp->mtu - MAX_IP_HDR_LEN;
+ pim_hello_require(ifp);
+
+ for (ALL_LIST_ELEMENTS_RO(scope->bsm_list, bsm_ln, bsminfo)) {
+ if (pim_mtu < bsminfo->size) {
+ ret = pim_bsm_frag_send(bsminfo->bsm, bsminfo->size,
+ ifp, pim_mtu, dst_addr, no_fwd);
+ if (!ret) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: pim_bsm_frag_send failed",
+ __PRETTY_FUNCTION__);
+ }
+ } else {
+ /* Pim header needs to be constructed */
+ pim_msg_build_header(bsminfo->bsm, bsminfo->size,
+ PIM_MSG_TYPE_BOOTSTRAP, no_fwd);
+ ret = pim_bsm_send_intf(bsminfo->bsm, bsminfo->size,
+ ifp, dst_addr);
+ if (!ret) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s: pim_bsm_frag_send failed",
+ __PRETTY_FUNCTION__);
+ }
+ }
+ }
+ return ret;
+}
+
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+ struct prefix *grp)
+{
+ struct route_node *rn;
+ struct bsgrp_node *bsgrp;
+
+ rn = route_node_lookup(scope->bsrp_table, grp);
+ if (!rn) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: Route node doesn't exist for the group",
+ __PRETTY_FUNCTION__);
+ return NULL;
+ }
+ bsgrp = rn->info;
+ route_unlock_node(rn);
+
+ return bsgrp;
+}
+
+static uint32_t hash_calc_on_grp_rp(struct prefix group, struct in_addr rp,
+ uint8_t hashmasklen)
+{
+ uint64_t temp;
+ uint32_t hash;
+ uint32_t grpaddr;
+ uint32_t rp_add;
+ uint32_t mask = 0xffffffff;
+
+ /* mask to be made zero if hashmasklen is 0 because mask << 32
+ * may not give 0. hashmasklen can be 0 to 32.
+ */
+ if (hashmasklen == 0)
+ mask = 0;
+
+ /* in_addr stores ip in big endian, hence network byte order
+ * convert to uint32 before processing hash
+ */
+ grpaddr = ntohl(group.u.prefix4.s_addr);
+ /* Avoid shifting by 32 bit on a 32 bit register */
+ if (hashmasklen)
+ grpaddr = grpaddr & ((mask << (32 - hashmasklen)));
+ else
+ grpaddr = grpaddr & mask;
+ rp_add = ntohl(rp.s_addr);
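+	/* Hash value as per RFC 7761 4.7.2, truncated to 31 bits below */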
+ temp = 1103515245 * ((1103515245 * grpaddr + 12345) ^ rp_add) + 12345;
+ hash = temp & (0x7fffffff);
+ return hash;
+}
+
+static bool pim_install_bsm_grp_rp(struct pim_instance *pim,
+ struct bsgrp_node *grpnode,
+ struct bsmmsg_rpinfo *rp)
+{
+ struct bsm_rpinfo *bsm_rpinfo;
+ uint8_t hashMask_len = pim->global_scope.hashMasklen;
+
+	/* memory allocation for bsm_rpinfo */
+ bsm_rpinfo = XCALLOC(MTYPE_PIM_BSRP_NODE, sizeof(*bsm_rpinfo));
+
+ if (!bsm_rpinfo) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s, Memory allocation failed.\r\n",
+ __PRETTY_FUNCTION__);
+ return false;
+ }
+
+ bsm_rpinfo->rp_prio = rp->rp_pri;
+ bsm_rpinfo->rp_holdtime = rp->rp_holdtime;
+ memcpy(&bsm_rpinfo->rp_address, &rp->rpaddr.addr,
+ sizeof(struct in_addr));
+ bsm_rpinfo->elapse_time = 0;
+
+ /* Back pointer to the group node. */
+ bsm_rpinfo->bsgrp_node = grpnode;
+
+ /* update hash for this rp node */
+ bsm_rpinfo->hash = hash_calc_on_grp_rp(grpnode->group, rp->rpaddr.addr,
+ hashMask_len);
+ if (listnode_add_sort_nodup(grpnode->partial_bsrp_list, bsm_rpinfo)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, bs_rpinfo node added to the partial bs_rplist.\r\n",
+ __PRETTY_FUNCTION__);
+ return true;
+ }
+
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: list node not added\n", __PRETTY_FUNCTION__);
+
+ XFREE(MTYPE_PIM_BSRP_NODE, bsm_rpinfo);
+ return false;
+}
+
+static void pim_update_pending_rp_cnt(struct bsm_scope *sz,
+ struct bsgrp_node *bsgrp,
+ uint16_t bsm_frag_tag,
+ uint32_t total_rp_count)
+{
+ if (bsgrp->pend_rp_cnt) {
+		/* If the frag tag differs, the received bsm is a new
+		 * packet, not another fragment of the same one.
+		 */
+ if (bsm_frag_tag != bsgrp->frag_tag) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+					"%s, Received a new BSM, so clear the pending bs_rpinfo list.\r\n",
+ __PRETTY_FUNCTION__);
+ list_delete_all_node(bsgrp->partial_bsrp_list);
+ bsgrp->pend_rp_cnt = total_rp_count;
+ }
+ } else
+ bsgrp->pend_rp_cnt = total_rp_count;
+
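+	/* Remember the frag tag so later fragments of the same BSM are
+	 * recognized.
+	 */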
+ bsgrp->frag_tag = bsm_frag_tag;
+}
+
+/* Parse the BSM packet and add the g2rp mappings to the partial list of the
+ * corresponding bsgrp node
+ */
+static bool pim_bsm_parse_install_g2rp(struct bsm_scope *scope, uint8_t *buf,
+ int buflen, uint16_t bsm_frag_tag)
+{
+ struct bsmmsg_grpinfo grpinfo;
+ struct bsmmsg_rpinfo rpinfo;
+ struct prefix group;
+ struct bsgrp_node *bsgrp = NULL;
+ int frag_rp_cnt = 0;
+ int offset = 0;
+ int ins_count = 0;
+
+ while (buflen > offset) {
+ /* Extract Group tlv from BSM */
+ memcpy(&grpinfo, buf, sizeof(struct bsmmsg_grpinfo));
+
+ if (PIM_DEBUG_BSM) {
+ char grp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Group?>", grpinfo.group.addr, grp_str,
+ sizeof(grp_str));
+ zlog_debug(
+ "%s, Group %s Rpcount:%d Fragment-Rp-count:%d\r\n",
+ __PRETTY_FUNCTION__, grp_str, grpinfo.rp_count,
+ grpinfo.frag_rp_count);
+ }
+
+ buf += sizeof(struct bsmmsg_grpinfo);
+ offset += sizeof(struct bsmmsg_grpinfo);
+
+ if (grpinfo.rp_count == 0) {
+ if (PIM_DEBUG_BSM) {
+ char grp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Group?>", grpinfo.group.addr,
+ grp_str, sizeof(grp_str));
+ zlog_debug(
+ "%s, Rp count is zero for group: %s\r\n",
+ __PRETTY_FUNCTION__, grp_str);
+ }
+ return false;
+ }
+
+ group.family = AF_INET;
+ group.prefixlen = grpinfo.group.mask;
+ group.u.prefix4.s_addr = grpinfo.group.addr.s_addr;
+
+ /* Get the Group node for the BSM rp table */
+ bsgrp = pim_bsm_get_bsgrp_node(scope, &group);
+
+ if (!bsgrp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s, Create new BSM Group node.\r\n",
+ __PRETTY_FUNCTION__);
+
+ /* create a new node to be added to the tree. */
+ bsgrp = pim_bsm_new_bsgrp_node(scope->bsrp_table,
+ &group);
+
+ if (!bsgrp) {
+ zlog_debug(
+ "%s, Failed to get the BSM group node.\r\n",
+ __PRETTY_FUNCTION__);
+ continue;
+ }
+
+ bsgrp->scope = scope;
+ }
+
+ pim_update_pending_rp_cnt(scope, bsgrp, bsm_frag_tag,
+ grpinfo.rp_count);
+ frag_rp_cnt = grpinfo.frag_rp_count;
+ ins_count = 0;
+
+ while (frag_rp_cnt--) {
+ /* Extract RP address tlv from BSM */
+ memcpy(&rpinfo, buf, sizeof(struct bsmmsg_rpinfo));
+ rpinfo.rp_holdtime = ntohs(rpinfo.rp_holdtime);
+ buf += sizeof(struct bsmmsg_rpinfo);
+ offset += sizeof(struct bsmmsg_rpinfo);
+
+ if (PIM_DEBUG_BSM) {
+ char rp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Rpaddr?>", rpinfo.rpaddr.addr,
+ rp_str, sizeof(rp_str));
+ zlog_debug(
+ "%s, Rp address - %s; pri:%d hold:%d\r\n",
+ __PRETTY_FUNCTION__, rp_str,
+ rpinfo.rp_pri, rpinfo.rp_holdtime);
+ }
+
+ /* Call Install api to update grp-rp mappings */
+ if (pim_install_bsm_grp_rp(scope->pim, bsgrp, &rpinfo))
+ ins_count++;
+ }
+
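+		/* Account for the RPs installed from this fragment */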
+ bsgrp->pend_rp_cnt -= ins_count;
+
+ if (!bsgrp->pend_rp_cnt) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+					"%s, Received all the RPs for this group, so replace the bsrp list with the pending RP list.",
+ __PRETTY_FUNCTION__);
+ /* replace the bsrp_list with pending list */
+ pim_instate_pend_list(bsgrp);
+ }
+ }
+ return true;
+}
+
+int pim_bsm_process(struct interface *ifp, struct ip *ip_hdr, uint8_t *buf,
+ uint32_t buf_size, bool no_fwd)
+{
+ struct bsm_hdr *bshdr;
+ int sz = PIM_GBL_SZ_ID;
+ struct bsmmsg_grpinfo *msg_grp;
+ struct pim_interface *pim_ifp = NULL;
+ struct bsm_info *bsminfo;
+ struct pim_instance *pim;
+ char bsr_str[INET_ADDRSTRLEN];
+ uint16_t frag_tag;
+	bool empty_bsm = false;
+
+ /* BSM Packet acceptance validation */
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s: multicast not enabled on interface %s",
+ __PRETTY_FUNCTION__, ifp->name);
+ return -1;
+ }
+
+ pim_ifp->pim_ifstat_bsm_rx++;
+ pim = pim_ifp->pim;
+ pim->bsm_rcvd++;
+
+ /* Drop if bsm processing is disabled on interface */
+ if (!pim_ifp->bsm_enable) {
+ zlog_warn("%s: BSM not enabled on interface %s",
+ __PRETTY_FUNCTION__, ifp->name);
+ pim_ifp->pim_ifstat_bsm_cfg_miss++;
+ pim->bsm_dropped++;
+ return -1;
+ }
+
+ bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
+ pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
+ sizeof(bsr_str));
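+	/* Remember the advertised hash mask length; it is used when
+	 * computing the g2rp hash for RPs carried in this BSM.
+	 */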
+ pim->global_scope.hashMasklen = bshdr->hm_len;
+ frag_tag = ntohs(bshdr->frag_tag);
+
+ /* Identify empty BSM */
+ if ((buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN) < PIM_BSM_GRP_LEN)
+ empty_bsm = true;
+
+ if (!empty_bsm) {
+ msg_grp = (struct bsmmsg_grpinfo *)(buf + PIM_MSG_HEADER_LEN
+ + PIM_BSM_HDR_LEN);
+ /* Currently we don't support scope zoned BSM */
+ if (msg_grp->group.sz) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s : Administratively scoped range BSM received",
+ __PRETTY_FUNCTION__);
+ pim_ifp->pim_ifstat_bsm_invalid_sz++;
+ pim->bsm_dropped++;
+ return -1;
+ }
+ }
+
+ /* Drop if bsr is not preferred bsr */
+ if (!is_preferred_bsr(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Received a non-preferred BSM",
+ __PRETTY_FUNCTION__);
+ pim->bsm_dropped++;
+ return -1;
+ }
+
+ if (no_fwd) {
+ /* only accept no-forward BSM if quick refresh on startup */
+ if ((pim->global_scope.accept_nofwd_bsm)
+ || (frag_tag == pim->global_scope.bsm_frag_tag)) {
+ pim->global_scope.accept_nofwd_bsm = false;
+ } else {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+					"%s : nofwd_bsm received from BSR %s when accept_nofwd_bsm is false",
+ __PRETTY_FUNCTION__, bsr_str);
+ pim->bsm_dropped++;
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+ return -1;
+ }
+ }
+
+	/* Multicast BSM received */
+ if (ip_hdr->ip_dst.s_addr == qpim_all_pim_routers_addr.s_addr) {
+ if (!no_fwd) {
+ if (!pim_bsr_rpf_check(pim, bshdr->bsr_addr.addr,
+ ip_hdr->ip_src)) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s : RPF check fail for BSR address %s",
+ __PRETTY_FUNCTION__, bsr_str);
+ pim->bsm_dropped++;
+ return -1;
+ }
+ }
+ } else if (if_lookup_exact_address(&ip_hdr->ip_dst, AF_INET,
+ pim->vrf_id)) {
+ /* Unicast BSM received - if ucast bsm not enabled on
+ * the interface, drop it
+ */
+ if (!pim_ifp->ucast_bsm_accept) {
+ if (PIM_DEBUG_BSM)
+ zlog_debug(
+ "%s : Unicast BSM not enabled on interface %s",
+ __PRETTY_FUNCTION__, ifp->name);
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss++;
+ pim->bsm_dropped++;
+ return -1;
+ }
+
+ } else {
+ if (PIM_DEBUG_BSM)
+ zlog_debug("%s : Invalid destination address",
+ __PRETTY_FUNCTION__);
+ pim->bsm_dropped++;
+ return -1;
+ }
+
+ if (empty_bsm) {
+ if (PIM_DEBUG_BSM)
+			zlog_debug("%s : Empty preferred BSM received",
+ __PRETTY_FUNCTION__);
+ }
+ /* Parse Update bsm rp table and install/uninstall rp if required */
+ if (!pim_bsm_parse_install_g2rp(
+ &pim_ifp->pim->global_scope,
+ (buf + PIM_BSM_HDR_LEN + PIM_MSG_HEADER_LEN),
+ (buf_size - PIM_BSM_HDR_LEN - PIM_MSG_HEADER_LEN),
+ frag_tag)) {
+ if (PIM_DEBUG_BSM) {
+ zlog_debug("%s, Parsing BSM failed.\r\n",
+ __PRETTY_FUNCTION__);
+ }
+ pim->bsm_dropped++;
+ return -1;
+ }
+ /* Restart the bootstrap timer */
+ pim_bs_timer_restart(&pim_ifp->pim->global_scope,
+ PIM_BSR_DEFAULT_TIMEOUT);
+
+ /* If new BSM received, clear the old bsm database */
+ if (pim_ifp->pim->global_scope.bsm_frag_tag != frag_tag) {
+ if (PIM_DEBUG_BSM) {
+			zlog_debug("%s: Current frag tag: %d Frag tag rcvd: %d",
+ __PRETTY_FUNCTION__,
+ pim_ifp->pim->global_scope.bsm_frag_tag,
+ frag_tag);
+ }
+ list_delete_all_node(pim_ifp->pim->global_scope.bsm_list);
+ pim_ifp->pim->global_scope.bsm_frag_tag = frag_tag;
+ }
+
+ /* update the scope information from bsm */
+ pim_bsm_update(pim, bshdr->bsr_addr.addr, bshdr->bsr_prio);
+
+ if (!no_fwd) {
+ pim_bsm_fwd_whole_sz(pim_ifp->pim, buf, buf_size, sz);
+ bsminfo = XCALLOC(MTYPE_PIM_BSM_INFO, sizeof(struct bsm_info));
+ if (!bsminfo) {
+ zlog_warn("%s: bsminfo alloc failed",
+ __PRETTY_FUNCTION__);
+ return 0;
+ }
+
+ bsminfo->bsm = XCALLOC(MTYPE_PIM_BSM_PKT_VAR_MEM, buf_size);
+ if (!bsminfo->bsm) {
+ zlog_warn("%s: bsm alloc failed", __PRETTY_FUNCTION__);
+ XFREE(MTYPE_PIM_BSM_INFO, bsminfo);
+ return 0;
+ }
+
+ bsminfo->size = buf_size;
+ memcpy(bsminfo->bsm, buf, buf_size);
+ listnode_add(pim_ifp->pim->global_scope.bsm_list, bsminfo);
+ }
+
+ return 0;
+}
diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h
new file mode 100644
index 0000000000..0758c94f19
--- /dev/null
+++ b/pimd/pim_bsm.h
@@ -0,0 +1,198 @@
+/*
+ * pim_bsm.h: PIM BSM handling related
+ *
+ * Copyright (C) 2018-19 Vmware, Inc.
+ * Saravanan K
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#ifndef __PIM_BSM_H__
+#define __PIM_BSM_H__
+
+#include "if.h"
+#include "vty.h"
+#include "linklist.h"
+#include "table.h"
+#include "pim_rp.h"
+#include "pim_msg.h"
+
+/* Defines */
+#define PIM_GBL_SZ_ID 0 /* global scope zone id set to 0 */
+#define PIM_BS_TIME 60 /* RFC 5059 - Sec 5 */
+#define PIM_BSR_DEFAULT_TIMEOUT 130 /* RFC 5059 - Sec 5 */
+
+/* These structures are IPv4-specific encodings only */
+#define PIM_BSM_HDR_LEN sizeof(struct bsm_hdr)
+#define PIM_BSM_GRP_LEN sizeof(struct bsmmsg_grpinfo)
+#define PIM_BSM_RP_LEN sizeof(struct bsmmsg_rpinfo)
+
+#define PIM_MIN_BSM_LEN \
+ (PIM_HDR_LEN + PIM_BSM_HDR_LEN + PIM_BSM_GRP_LEN + PIM_BSM_RP_LEN)
+
+/* Datastructures
+ * ==============
+ */
+
+/* Non candidate BSR states */
+enum ncbsr_state {
+ NO_INFO = 0,
+ ACCEPT_ANY,
+ ACCEPT_PREFERRED
+};
+
+/* BSM scope - bsm processing is per scope */
+struct bsm_scope {
+ int sz_id; /* scope zone id */
+ enum ncbsr_state state; /* non candidate BSR state */
+ bool accept_nofwd_bsm; /* no fwd bsm accepted for scope */
+ struct in_addr current_bsr; /* current elected BSR for the sz */
+ uint32_t current_bsr_prio; /* current BSR priority */
+ int64_t current_bsr_first_ts; /* current BSR elected time */
+ int64_t current_bsr_last_ts; /* Last BSM received from E-BSR */
+ uint16_t bsm_frag_tag; /* Last received frag tag from E-BSR */
+ uint8_t hashMasklen; /* Mask in hash calc RFC 7761 4.7.2 */
+ struct pim_instance *pim; /* Back pointer to pim instance */
+	struct list *bsm_list; /* list of bsm frag for forwarding */
+ struct route_table *bsrp_table; /* group2rp mapping rcvd from BSR */
+	struct thread *bs_timer; /* Bootstrap timer */
+ struct thread *sz_timer;
+};
+
+/* BSM packet - this is stored as list in bsm_list inside scope
+ * This is used for forwarding to new neighbors or restarting mcast routers
+ */
+struct bsm_info {
+ uint32_t size; /* size of the packet */
+ unsigned char *bsm; /* Actual packet */
+};
+
+/* This is the group node of the bsrp table in scope.
+ * this node maintains the list of rp for the group.
+ */
+struct bsgrp_node {
+ struct prefix group; /* Group range */
+ struct bsm_scope *scope; /* Back ptr to scope */
+ struct list *bsrp_list; /* list of RPs adv by BSR */
+ struct list *partial_bsrp_list; /* maintained until all RPs received */
+ int pend_rp_cnt; /* Total RP - Received RP */
+ uint16_t frag_tag; /* frag tag to identify the fragment */
+};
+
+/* This is the list node of bsrp_list and partial bsrp list in
+ * bsgrp_node. Hold info of each RP received for the group
+ */
+struct bsm_rpinfo {
+ uint32_t hash; /* Hash Value as per RFC 7761 4.7.2 */
+ uint32_t elapse_time; /* upd at expiry of elected RP node */
+ uint16_t rp_prio; /* RP priority */
+ uint16_t rp_holdtime; /* RP holdtime - g2rp timer value */
+ struct in_addr rp_address; /* RP Address */
+ struct bsgrp_node *bsgrp_node; /* Back ptr to bsgrp_node */
+ struct thread *g2rp_timer; /* Run only for elected RP node */
+};
+
+/* Structures to extract Bootstrap Message header and Grp to RP Mappings
+ * =====================================================================
+ * BSM Format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type |N| Reserved | Checksum | PIM HDR
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Fragment Tag | Hash Mask Len | BSR Priority | BS HDR(1)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | BSR Address (Encoded-Unicast format) | BS HDR(2)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address 1 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Count 1 | Frag RP Cnt 1 | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 1 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP1 Holdtime | RP1 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 2 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP2 Holdtime | RP2 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address m (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RPm Holdtime | RPm Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address 2 (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Group Address n (Encoded-Group format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Count n | Frag RP Cnt n | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 1 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP1 Holdtime | RP1 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address 2 (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP2 Holdtime | RP2 Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | . |
+ * | . |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RP Address m (Encoded-Unicast format) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | RPm Holdtime | RPm Priority | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct bsm_hdr {
+ uint16_t frag_tag;
+ uint8_t hm_len;
+ uint8_t bsr_prio;
+ struct pim_encoded_ipv4_unicast bsr_addr;
+} __attribute__((packed));
+
+struct bsmmsg_grpinfo {
+ struct pim_encoded_group_ipv4 group;
+ uint8_t rp_count;
+ uint8_t frag_rp_count;
+ uint16_t reserved;
+} __attribute__((packed));
+
+struct bsmmsg_rpinfo {
+ struct pim_encoded_ipv4_unicast rpaddr;
+ uint16_t rp_holdtime;
+ uint8_t rp_pri;
+ uint8_t reserved;
+} __attribute__((packed));
+
+/* API */
+void pim_bsm_proc_init(struct pim_instance *pim);
+void pim_bsm_proc_free(struct pim_instance *pim);
+void pim_bsm_write_config(struct vty *vty, struct interface *ifp);
+int pim_bsm_process(struct interface *ifp,
+ struct ip *ip_hdr,
+ uint8_t *buf,
+ uint32_t buf_size,
+ bool no_fwd);
+bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp);
+struct bsgrp_node *pim_bsm_get_bsgrp_node(struct bsm_scope *scope,
+ struct prefix *grp);
+#endif
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index cb2ba87ec6..e6e9c2d0c8 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -62,6 +62,7 @@
#include "pim_bfd.h"
#include "pim_vxlan.h"
#include "bfd.h"
+#include "pim_bsm.h"
#ifndef VTYSH_EXTRACT_PL
#include "pimd/pim_cmd_clippy.c"
@@ -1477,13 +1478,14 @@ static void pim_show_interface_traffic(struct pim_instance *pim,
json = json_object_new_object();
else {
vty_out(vty, "\n");
- vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+ vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
"Interface", " HELLO", " JOIN",
" PRUNE", " REGISTER", "REGISTER-STOP",
- " ASSERT");
- vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
+ " ASSERT", " BSM");
+ vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
" Rx/Tx", " Rx/Tx", " Rx/Tx",
- " Rx/Tx", " Rx/Tx", " Rx/Tx");
+ " Rx/Tx", " Rx/Tx", " Rx/Tx",
+ " Rx/Tx");
vty_out(vty,
"---------------------------------------------------------------------------------------------------------------\n");
}
@@ -1518,12 +1520,15 @@ static void pim_show_interface_traffic(struct pim_instance *pim,
json_object_int_add(json_row, "assertRx",
pim_ifp->pim_ifstat_assert_recv);
json_object_int_add(json_row, "assertTx",
- pim_ifp->pim_ifstat_assert_send);
-
+ pim_ifp->pim_ifstat_assert_send);
+ json_object_int_add(json_row, "bsmRx",
+ pim_ifp->pim_ifstat_bsm_rx);
+ json_object_int_add(json_row, "bsmTx",
+ pim_ifp->pim_ifstat_bsm_tx);
json_object_object_add(json, ifp->name, json_row);
} else {
vty_out(vty,
- "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u \n",
+ "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7lu/%-7lu \n",
ifp->name, pim_ifp->pim_ifstat_hello_recv,
pim_ifp->pim_ifstat_hello_sent,
pim_ifp->pim_ifstat_join_recv,
@@ -1535,7 +1540,9 @@ static void pim_show_interface_traffic(struct pim_instance *pim,
pim_ifp->pim_ifstat_reg_stop_recv,
pim_ifp->pim_ifstat_reg_stop_send,
pim_ifp->pim_ifstat_assert_recv,
- pim_ifp->pim_ifstat_assert_send);
+ pim_ifp->pim_ifstat_assert_send,
+ pim_ifp->pim_ifstat_bsm_rx,
+ pim_ifp->pim_ifstat_bsm_tx);
}
}
if (uj) {
@@ -1559,14 +1566,15 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
json = json_object_new_object();
else {
vty_out(vty, "\n");
- vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s\n",
+ vty_out(vty, "%-16s%-17s%-17s%-17s%-17s%-17s%-17s%-17s\n",
"Interface", " HELLO", " JOIN", " PRUNE",
- " REGISTER", " REGISTER-STOP", " ASSERT");
- vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s\n", "",
+ " REGISTER", " REGISTER-STOP", " ASSERT",
+ " BSM");
+ vty_out(vty, "%-14s%-18s%-17s%-17s%-17s%-17s%-17s%-17s\n", "",
" Rx/Tx", " Rx/Tx", " Rx/Tx", " Rx/Tx",
- " Rx/Tx", " Rx/Tx");
+ " Rx/Tx", " Rx/Tx", " Rx/Tx");
vty_out(vty,
- "---------------------------------------------------------------------------------------------------------------------\n");
+ "-------------------------------------------------------------------------------------------------------------------------------\n");
}
FOR_ALL_INTERFACES (pim->vrf, ifp) {
@@ -1605,11 +1613,15 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
pim_ifp->pim_ifstat_assert_recv);
json_object_int_add(json_row, "assertTx",
pim_ifp->pim_ifstat_assert_send);
+ json_object_int_add(json_row, "bsmRx",
+ pim_ifp->pim_ifstat_bsm_rx);
+ json_object_int_add(json_row, "bsmTx",
+ pim_ifp->pim_ifstat_bsm_tx);
json_object_object_add(json, ifp->name, json_row);
} else {
vty_out(vty,
- "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u \n",
+ "%-16s %8u/%-8u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7u/%-7u %7lu/%-7lu \n",
ifp->name, pim_ifp->pim_ifstat_hello_recv,
pim_ifp->pim_ifstat_hello_sent,
pim_ifp->pim_ifstat_join_recv,
@@ -1621,7 +1633,9 @@ static void pim_show_interface_traffic_single(struct pim_instance *pim,
pim_ifp->pim_ifstat_reg_stop_recv,
pim_ifp->pim_ifstat_reg_stop_send,
pim_ifp->pim_ifstat_assert_recv,
- pim_ifp->pim_ifstat_assert_send);
+ pim_ifp->pim_ifstat_assert_send,
+ pim_ifp->pim_ifstat_bsm_rx,
+ pim_ifp->pim_ifstat_bsm_tx);
}
}
if (uj) {
@@ -1987,9 +2001,9 @@ static void pim_show_state(struct pim_instance *pim, struct vty *vty,
ifp_in = pim_if_find_by_vif_index(pim, c_oil->oil.mfcc_parent);
if (ifp_in)
- strcpy(in_ifname, ifp_in->name);
+ strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
else
- strcpy(in_ifname, "<iif?>");
+ strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
if (src_or_group) {
if (strcmp(src_or_group, src_str)
@@ -2071,9 +2085,9 @@ static void pim_show_state(struct pim_instance *pim, struct vty *vty,
now - c_oil->oif_creation[oif_vif_index]);
if (ifp_out)
- strcpy(out_ifname, ifp_out->name);
+ strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname));
else
- strcpy(out_ifname, "<oif?>");
+ strlcpy(out_ifname, "<oif?>", sizeof(out_ifname));
if (uj) {
json_ifp_out = json_object_new_object();
@@ -2352,37 +2366,37 @@ static void json_object_pim_upstream_add(json_object *json,
static const char *
pim_upstream_state2brief_str(enum pim_upstream_state join_state,
- char *state_str)
+ char *state_str, size_t state_str_len)
{
switch (join_state) {
case PIM_UPSTREAM_NOTJOINED:
- strcpy(state_str, "NotJ");
+ strlcpy(state_str, "NotJ", state_str_len);
break;
case PIM_UPSTREAM_JOINED:
- strcpy(state_str, "J");
+ strlcpy(state_str, "J", state_str_len);
break;
default:
- strcpy(state_str, "Unk");
+ strlcpy(state_str, "Unk", state_str_len);
}
return state_str;
}
static const char *pim_reg_state2brief_str(enum pim_reg_state reg_state,
- char *state_str)
+ char *state_str, size_t state_str_len)
{
switch (reg_state) {
case PIM_REG_NOINFO:
- strcpy(state_str, "RegNI");
+ strlcpy(state_str, "RegNI", state_str_len);
break;
case PIM_REG_JOIN:
- strcpy(state_str, "RegJ");
+ strlcpy(state_str, "RegJ", state_str_len);
break;
case PIM_REG_JOIN_PENDING:
case PIM_REG_PRUNE:
- strcpy(state_str, "RegP");
+ strlcpy(state_str, "RegP", state_str_len);
break;
default:
- strcpy(state_str, "Unk");
+ strlcpy(state_str, "Unk", state_str_len);
}
return state_str;
}
@@ -2450,13 +2464,13 @@ static void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
pim_time_timer_to_hhmmss(msdp_reg_timer, sizeof(msdp_reg_timer),
up->t_msdp_reg_timer);
- pim_upstream_state2brief_str(up->join_state, state_str);
+ pim_upstream_state2brief_str(up->join_state, state_str, sizeof(state_str));
if (up->reg_state != PIM_REG_NOINFO) {
char tmp_str[PIM_REG_STATE_STR_LEN];
sprintf(state_str + strlen(state_str), ",%s",
- pim_reg_state2brief_str(up->reg_state,
- tmp_str));
+ pim_reg_state2brief_str(up->reg_state, tmp_str,
+ sizeof(tmp_str)));
}
if (uj) {
@@ -2507,7 +2521,7 @@ static void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
pim_upstream_state2str(up->join_state));
json_object_string_add(
json_row, "regState",
- pim_reg_state2str(up->reg_state, state_str));
+ pim_reg_state2str(up->reg_state, state_str, sizeof(state_str)));
json_object_string_add(json_row, "upTime", uptime);
json_object_string_add(json_row, "joinTimer",
join_timer);
@@ -2888,6 +2902,413 @@ static void pim_show_nexthop(struct pim_instance *pim, struct vty *vty)
hash_walk(pim->rpf_hash, pim_print_pnc_cache_walkcb, &cwd);
}
+/* Display the bsm database details */
+static void pim_show_bsm_db(struct pim_instance *pim, struct vty *vty, bool uj)
+{
+ struct listnode *bsmnode;
+ int count = 0;
+ int fragment = 1;
+ struct bsm_info *bsm;
+ json_object *json = NULL;
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ count = pim->global_scope.bsm_list->count;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_int_add(json, "Number of the fragments", count);
+ } else {
+ vty_out(vty, "Scope Zone: Global\n");
+ vty_out(vty, "Number of the fragments: %d\n", count);
+ vty_out(vty, "\n");
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->global_scope.bsm_list, bsmnode, bsm)) {
+ char grp_str[INET_ADDRSTRLEN];
+ char rp_str[INET_ADDRSTRLEN];
+ char bsr_str[INET_ADDRSTRLEN];
+ struct bsmmsg_grpinfo *group;
+ struct bsmmsg_rpinfo *rpaddr;
+ struct prefix grp;
+ struct bsm_hdr *hdr;
+ uint32_t offset = 0;
+ uint8_t *buf;
+ uint32_t len = 0;
+ uint32_t frag_rp_cnt = 0;
+
+ buf = bsm->bsm;
+ len = bsm->size;
+
+ /* skip pim header */
+ buf += PIM_MSG_HEADER_LEN;
+ len -= PIM_MSG_HEADER_LEN;
+
+ hdr = (struct bsm_hdr *)buf;
+
+ /* BSM starts with bsr header */
+ buf += sizeof(struct bsm_hdr);
+ len -= sizeof(struct bsm_hdr);
+
+ pim_inet4_dump("<BSR Address?>", hdr->bsr_addr.addr, bsr_str,
+ sizeof(bsr_str));
+
+
+ if (uj) {
+ json_object_string_add(json, "BSR address", bsr_str);
+ json_object_int_add(json, "BSR priority",
+ hdr->bsr_prio);
+ json_object_int_add(json, "Hashmask Length",
+ hdr->hm_len);
+ json_object_int_add(json, "Fragment Tag",
+ ntohs(hdr->frag_tag));
+ } else {
+ vty_out(vty, "BSM Fragment : %d\n", fragment);
+ vty_out(vty, "------------------\n");
+ vty_out(vty, "%-15s %-15s %-15s %-15s\n", "BSR-Address",
+ "BSR-Priority", "Hashmask-len", "Fragment-Tag");
+ vty_out(vty, "%-15s %-15d %-15d %-15d\n", bsr_str,
+ hdr->bsr_prio, hdr->hm_len,
+ ntohs(hdr->frag_tag));
+ }
+
+ vty_out(vty, "\n");
+
+ while (offset < len) {
+ group = (struct bsmmsg_grpinfo *)buf;
+
+ if (group->group.family == PIM_MSG_ADDRESS_FAMILY_IPV4)
+ grp.family = AF_INET;
+
+ grp.prefixlen = group->group.mask;
+ grp.u.prefix4.s_addr = group->group.addr.s_addr;
+
+ prefix2str(&grp, grp_str, sizeof(grp_str));
+
+ buf += sizeof(struct bsmmsg_grpinfo);
+ offset += sizeof(struct bsmmsg_grpinfo);
+
+ if (uj) {
+ json_object_object_get_ex(json, grp_str,
+ &json_group);
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_int_add(json_group,
+ "Rp Count",
+ group->rp_count);
+ json_object_int_add(
+ json_group, "Fragment Rp count",
+ group->frag_rp_count);
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+ } else {
+ vty_out(vty, "Group : %s\n", grp_str);
+ vty_out(vty, "-------------------\n");
+ vty_out(vty, "Rp Count:%d\n", group->rp_count);
+ vty_out(vty, "Fragment Rp Count : %d\n",
+ group->frag_rp_count);
+ }
+
+ frag_rp_cnt = group->frag_rp_count;
+
+ if (!frag_rp_cnt)
+ continue;
+
+ if (!uj)
+ vty_out(vty,
+ "RpAddress HoldTime Priority\n");
+
+ while (frag_rp_cnt--) {
+ rpaddr = (struct bsmmsg_rpinfo *)buf;
+
+ buf += sizeof(struct bsmmsg_rpinfo);
+ offset += sizeof(struct bsmmsg_rpinfo);
+
+ pim_inet4_dump("<Rp addr?>",
+ rpaddr->rpaddr.addr, rp_str,
+ sizeof(rp_str));
+
+ if (uj) {
+ json_row = json_object_new_object();
+ json_object_string_add(
+ json_row, "Rp Address", rp_str);
+ json_object_int_add(
+ json_row, "Rp HoldTime",
+ ntohs(rpaddr->rp_holdtime));
+ json_object_int_add(json_row,
+ "Rp Priority",
+ rpaddr->rp_pri);
+ json_object_object_add(
+ json_group, rp_str, json_row);
+ } else {
+ vty_out(vty, "%-15s %-12d %d\n", rp_str,
+ ntohs(rpaddr->rp_holdtime),
+ rpaddr->rp_pri);
+ }
+ }
+ vty_out(vty, "\n");
+ }
+
+ fragment++;
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+/* Display the group-rp mappings */
+static void pim_show_group_rp_mappings_info(struct pim_instance *pim,
+ struct vty *vty, bool uj)
+{
+ struct bsgrp_node *bsgrp;
+ struct listnode *rpnode;
+ struct bsm_rpinfo *bsm_rp;
+ struct route_node *rn;
+ char bsr_str[INET_ADDRSTRLEN];
+ json_object *json = NULL;
+ json_object *json_group = NULL;
+ json_object *json_row = NULL;
+
+ if (pim->global_scope.current_bsr.s_addr == INADDR_ANY)
+ strncpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+
+ else
+ pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr, bsr_str,
+ sizeof(bsr_str));
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_string_add(json, "BSR Address", bsr_str);
+ } else {
+ vty_out(vty, "BSR Address %s\n", bsr_str);
+ }
+
+ for (rn = route_top(pim->global_scope.bsrp_table); rn;
+ rn = route_next(rn)) {
+ bsgrp = (struct bsgrp_node *)rn->info;
+
+ if (!bsgrp)
+ continue;
+
+ char grp_str[INET_ADDRSTRLEN];
+
+ prefix2str(&bsgrp->group, grp_str, sizeof(grp_str));
+
+ if (uj) {
+ json_object_object_get_ex(json, grp_str, &json_group);
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+ } else {
+ vty_out(vty, "Group Address %s\n", grp_str);
+ vty_out(vty, "--------------------------\n");
+ vty_out(vty, "%-15s %-15s %-15s %-15s\n", "Rp Address",
+ "priority", "Holdtime", "Hash");
+
+ vty_out(vty, "(ACTIVE)\n");
+ }
+
+ if (bsgrp->bsrp_list) {
+ for (ALL_LIST_ELEMENTS_RO(bsgrp->bsrp_list, rpnode,
+ bsm_rp)) {
+ char rp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Rp Address?>",
+ bsm_rp->rp_address, rp_str,
+ sizeof(rp_str));
+
+ if (uj) {
+ json_row = json_object_new_object();
+ json_object_string_add(
+ json_row, "Rp Address", rp_str);
+ json_object_int_add(
+ json_row, "Rp HoldTime",
+ bsm_rp->rp_holdtime);
+ json_object_int_add(json_row,
+ "Rp Priority",
+ bsm_rp->rp_prio);
+ json_object_int_add(json_row,
+ "Hash Val",
+ bsm_rp->hash);
+ json_object_object_add(
+ json_group, rp_str, json_row);
+
+ } else {
+ vty_out(vty,
+ "%-15s %-15u %-15u %-15u\n",
+ rp_str, bsm_rp->rp_prio,
+ bsm_rp->rp_holdtime,
+ bsm_rp->hash);
+ }
+ }
+ if (!bsgrp->bsrp_list->count && !uj)
+ vty_out(vty, "Active List is empty.\n");
+ }
+
+ if (uj) {
+ json_object_int_add(json_group, "Pending RP count",
+ bsgrp->pend_rp_cnt);
+ } else {
+ vty_out(vty, "(PENDING)\n");
+ vty_out(vty, "Pending RP count :%d\n",
+ bsgrp->pend_rp_cnt);
+ if (bsgrp->pend_rp_cnt)
+ vty_out(vty, "%-15s %-15s %-15s %-15s\n",
+ "Rp Address", "priority", "Holdtime",
+ "Hash");
+ }
+
+ if (bsgrp->partial_bsrp_list) {
+ for (ALL_LIST_ELEMENTS_RO(bsgrp->partial_bsrp_list,
+ rpnode, bsm_rp)) {
+ char rp_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<Rp Addr?>", bsm_rp->rp_address,
+ rp_str, sizeof(rp_str));
+
+ if (uj) {
+ json_row = json_object_new_object();
+ json_object_string_add(
+ json_row, "Rp Address", rp_str);
+ json_object_int_add(
+ json_row, "Rp HoldTime",
+ bsm_rp->rp_holdtime);
+ json_object_int_add(json_row,
+ "Rp Priority",
+ bsm_rp->rp_prio);
+ json_object_int_add(json_row,
+ "Hash Val",
+ bsm_rp->hash);
+ json_object_object_add(
+ json_group, rp_str, json_row);
+ } else {
+ vty_out(vty,
+ "%-15s %-15u %-15u %-15u\n",
+ rp_str, bsm_rp->rp_prio,
+ bsm_rp->rp_holdtime,
+ bsm_rp->hash);
+ }
+ }
+ if (!bsgrp->partial_bsrp_list->count && !uj)
+ vty_out(vty, "Partial List is empty\n");
+ }
+
+ if (!uj)
+ vty_out(vty, "\n");
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+/* pim statistics - only bsm related stats are added for now.
+ * We can continue to add all pim related stats here.
+ */
+static void pim_show_statistics(struct pim_instance *pim, struct vty *vty,
+ const char *ifname, bool uj)
+{
+ json_object *json = NULL;
+ struct interface *ifp;
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_int_add(json, "Number of Received BSMs",
+ pim->bsm_rcvd);
+		json_object_int_add(json, "Number of Forwarded BSMs",
+ pim->bsm_sent);
+ json_object_int_add(json, "Number of Dropped BSMs",
+ pim->bsm_dropped);
+ } else {
+ vty_out(vty, "BSM Statistics :\n");
+ vty_out(vty, "----------------\n");
+ vty_out(vty, "Number of Received BSMs : %ld\n", pim->bsm_rcvd);
+		vty_out(vty, "Number of Forwarded BSMs : %ld\n", pim->bsm_sent);
+ vty_out(vty, "Number of Dropped BSMs : %ld\n",
+ pim->bsm_dropped);
+ }
+
+ vty_out(vty, "\n");
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (ifname && strcmp(ifname, ifp->name))
+ continue;
+
+ if (!pim_ifp)
+ continue;
+
+ if (!uj) {
+ vty_out(vty, "Interface : %s\n", ifp->name);
+ vty_out(vty, "-------------------\n");
+ vty_out(vty,
+ "Number of BSMs dropped due to config miss : %u\n",
+ pim_ifp->pim_ifstat_bsm_cfg_miss);
+ vty_out(vty, "Number of unicast BSMs dropped : %u\n",
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+ vty_out(vty,
+ "Number of BSMs dropped due to invalid scope zone : %u\n",
+ pim_ifp->pim_ifstat_bsm_invalid_sz);
+ } else {
+
+ json_object *json_row = NULL;
+
+ json_row = json_object_new_object();
+
+ json_object_string_add(json_row, "If Name", ifp->name);
+ json_object_int_add(
+ json_row,
+ "Number of BSMs dropped due to config miss",
+ pim_ifp->pim_ifstat_bsm_cfg_miss);
+ json_object_int_add(
+ json_row, "Number of unicast BSMs dropped",
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
+ json_object_int_add(json_row,
+ "Number of BSMs dropped due to invalid scope zone",
+ pim_ifp->pim_ifstat_bsm_invalid_sz);
+ json_object_object_add(json, ifp->name, json_row);
+ }
+ vty_out(vty, "\n");
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+static void clear_pim_statistics(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ pim->bsm_rcvd = 0;
+ pim->bsm_sent = 0;
+ pim->bsm_dropped = 0;
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
+ pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
+ }
+}
+
static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct interface *ifp;
@@ -3207,6 +3628,82 @@ static void igmp_show_source_retransmission(struct pim_instance *pim,
} /* scan interfaces */
}
+static void pim_show_bsr(struct pim_instance *pim,
+ struct vty *vty,
+ bool uj)
+{
+ char uptime[10];
+ char last_bsm_seen[10];
+ time_t now;
+ char bsr_state[20];
+ char bsr_str[PREFIX_STRLEN];
+ json_object *json = NULL;
+
+ vty_out(vty, "PIMv2 Bootstrap information\n");
+
+ if (pim->global_scope.current_bsr.s_addr == INADDR_ANY) {
+ strncpy(bsr_str, "0.0.0.0", sizeof(bsr_str));
+ pim_time_uptime(uptime, sizeof(uptime),
+ pim->global_scope.current_bsr_first_ts);
+ pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+ pim->global_scope.current_bsr_last_ts);
+ }
+
+ else {
+ pim_inet4_dump("<bsr?>", pim->global_scope.current_bsr,
+ bsr_str, sizeof(bsr_str));
+ now = pim_time_monotonic_sec();
+ pim_time_uptime(uptime, sizeof(uptime),
+ (now - pim->global_scope.current_bsr_first_ts));
+ pim_time_uptime(last_bsm_seen, sizeof(last_bsm_seen),
+ now - pim->global_scope.current_bsr_last_ts);
+ }
+
+ switch (pim->global_scope.state) {
+ case NO_INFO:
+ strncpy(bsr_state, "NO_INFO", sizeof(bsr_state));
+ break;
+ case ACCEPT_ANY:
+ strncpy(bsr_state, "ACCEPT_ANY", sizeof(bsr_state));
+ break;
+ case ACCEPT_PREFERRED:
+ strncpy(bsr_state, "ACCEPT_PREFERRED", sizeof(bsr_state));
+ break;
+ default:
+ strncpy(bsr_state, "", sizeof(bsr_state));
+ }
+
+ if (uj) {
+ json = json_object_new_object();
+ json_object_string_add(json, "bsr", bsr_str);
+ json_object_int_add(json, "priority",
+ pim->global_scope.current_bsr_prio);
+ json_object_int_add(json, "fragment_tag",
+ pim->global_scope.bsm_frag_tag);
+ json_object_string_add(json, "state", bsr_state);
+ json_object_string_add(json, "upTime", uptime);
+ json_object_string_add(json, "last_bsm_seen", last_bsm_seen);
+ }
+
+ else {
+ vty_out(vty, "Current preferred BSR address: %s\n", bsr_str);
+ vty_out(vty,
+ "Priority Fragment-Tag State UpTime\n");
+ vty_out(vty, " %-12d %-12d %-13s %7s\n",
+ pim->global_scope.current_bsr_prio,
+ pim->global_scope.bsm_frag_tag,
+ bsr_state,
+ uptime);
+ vty_out(vty, "Last BSM seen: %s\n", last_bsm_seen);
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
static void clear_igmp_interfaces(struct pim_instance *pim)
{
struct interface *ifp;
@@ -3282,6 +3779,25 @@ DEFUN (clear_ip_igmp_interfaces,
return CMD_SUCCESS;
}
+DEFUN (clear_ip_pim_statistics,
+ clear_ip_pim_statistics_cmd,
+ "clear ip pim statistics [vrf NAME]",
+ CLEAR_STR
+ IP_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM statistics\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ clear_pim_statistics(vrf->info);
+ return CMD_SUCCESS;
+}
+
static void mroute_add_all(struct pim_instance *pim)
{
struct listnode *node;
@@ -3400,6 +3916,8 @@ DEFUN (clear_ip_pim_interface_traffic,
pim_ifp->pim_ifstat_reg_stop_send = 0;
pim_ifp->pim_ifstat_assert_recv = 0;
pim_ifp->pim_ifstat_assert_send = 0;
+ pim_ifp->pim_ifstat_bsm_rx = 0;
+ pim_ifp->pim_ifstat_bsm_tx = 0;
}
return CMD_SUCCESS;
@@ -4459,6 +4977,76 @@ DEFUN (show_ip_pim_interface_traffic,
return CMD_SUCCESS;
}
+DEFUN (show_ip_pim_bsm_db,
+ show_ip_pim_bsm_db_cmd,
+ "show ip pim bsm-database [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached bsm packets information\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_bsm_db(vrf->info, vty, uj);
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_bsrp,
+ show_ip_pim_bsrp_cmd,
+ "show ip pim bsrp-info [vrf NAME] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM cached group-rp mappings information\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_group_rp_mappings_info(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_pim_statistics,
+ show_ip_pim_statistics_cmd,
+ "show ip pim [vrf NAME] statistics [interface WORD] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "PIM statistics\n"
+ "interface\n"
+ "PIM interface\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ if (argv_find(argv, argc, "WORD", &idx))
+ pim_show_statistics(vrf->info, vty, argv[idx]->arg, uj);
+ else
+ pim_show_statistics(vrf->info, vty, NULL, uj);
+
+ return CMD_SUCCESS;
+}
+
static void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty)
{
struct interface *ifp;
@@ -4646,9 +5234,9 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
ifp_in = pim_if_find_by_vif_index(pim, c_oil->oil.mfcc_parent);
if (ifp_in)
- strcpy(in_ifname, ifp_in->name);
+ strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
else
- strcpy(in_ifname, "<iif?>");
+ strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
if (uj) {
@@ -4703,9 +5291,9 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
found_oif = 1;
if (ifp_out)
- strcpy(out_ifname, ifp_out->name);
+ strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname));
else
- strcpy(out_ifname, "<oif?>");
+ strlcpy(out_ifname, "<oif?>", sizeof(out_ifname));
if (uj) {
json_ifp_out = json_object_new_object();
@@ -4763,27 +5351,27 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
} else {
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_PIM) {
- strcpy(proto, "PIM");
+ strlcpy(proto, "PIM", sizeof(proto));
}
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_IGMP) {
- strcpy(proto, "IGMP");
+ strlcpy(proto, "IGMP", sizeof(proto));
}
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_VXLAN) {
- strcpy(proto, "VxLAN");
+ strlcpy(proto, "VxLAN", sizeof(proto));
}
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_SOURCE) {
- strcpy(proto, "SRC");
+ strlcpy(proto, "SRC", sizeof(proto));
}
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_STAR) {
- strcpy(proto, "STAR");
+ strlcpy(proto, "STAR", sizeof(proto));
}
vty_out(vty,
@@ -4822,9 +5410,9 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
found_oif = 0;
if (ifp_in)
- strcpy(in_ifname, ifp_in->name);
+ strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
else
- strcpy(in_ifname, "<iif?>");
+ strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
if (uj) {
@@ -4851,7 +5439,7 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
json_object_string_add(json_source, "iif", in_ifname);
json_oil = NULL;
} else {
- strcpy(proto, "STATIC");
+ strlcpy(proto, "STATIC", sizeof(proto));
}
for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
@@ -4873,9 +5461,9 @@ static void show_mroute(struct pim_instance *pim, struct vty *vty,
found_oif = 1;
if (ifp_out)
- strcpy(out_ifname, ifp_out->name);
+ strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname));
else
- strcpy(out_ifname, "<oif?>");
+ strlcpy(out_ifname, "<oif?>", sizeof(out_ifname));
if (uj) {
json_ifp_out = json_object_new_object();
@@ -5117,6 +5705,97 @@ DEFUN (show_ip_mroute_count_vrf_all,
return CMD_SUCCESS;
}
+static void show_mroute_summary(struct pim_instance *pim, struct vty *vty)
+{
+ struct listnode *node;
+ struct channel_oil *c_oil;
+ struct static_route *s_route;
+ uint32_t starg_sw_mroute_cnt = 0;
+ uint32_t sg_sw_mroute_cnt = 0;
+ uint32_t starg_hw_mroute_cnt = 0;
+ uint32_t sg_hw_mroute_cnt = 0;
+
+ vty_out(vty, "Mroute Type Installed/Total\n");
+
+ for (ALL_LIST_ELEMENTS_RO(pim->channel_oil_list, node, c_oil)) {
+ if (!c_oil->installed) {
+ if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY)
+ starg_sw_mroute_cnt++;
+ else
+ sg_sw_mroute_cnt++;
+ } else {
+ if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY)
+ starg_hw_mroute_cnt++;
+ else
+ sg_hw_mroute_cnt++;
+ }
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+ if (!s_route->c_oil.installed) {
+ if (s_route->c_oil.oil.mfcc_origin.s_addr == INADDR_ANY)
+ starg_sw_mroute_cnt++;
+ else
+ sg_sw_mroute_cnt++;
+ } else {
+ if (s_route->c_oil.oil.mfcc_origin.s_addr == INADDR_ANY)
+ starg_hw_mroute_cnt++;
+ else
+ sg_hw_mroute_cnt++;
+ }
+ }
+
+ vty_out(vty, "%-20s %d/%d\n", "(*, G)", starg_hw_mroute_cnt,
+ starg_sw_mroute_cnt + starg_hw_mroute_cnt);
+ vty_out(vty, "%-20s %d/%d\n", "(S, G)", sg_hw_mroute_cnt,
+ sg_sw_mroute_cnt + sg_hw_mroute_cnt);
+ vty_out(vty, "------\n");
+ vty_out(vty, "%-20s %d/%d\n", "Total",
+ (starg_hw_mroute_cnt + sg_hw_mroute_cnt),
+ (starg_sw_mroute_cnt +
+ starg_hw_mroute_cnt +
+ sg_sw_mroute_cnt +
+ sg_hw_mroute_cnt));
+}
+
+DEFUN (show_ip_mroute_summary,
+ show_ip_mroute_summary_cmd,
+ "show ip mroute [vrf NAME] summary",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n")
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ show_mroute_summary(vrf->info, vty);
+ return CMD_SUCCESS;
+}
+
+DEFUN (show_ip_mroute_summary_vrf_all,
+ show_ip_mroute_summary_vrf_all_cmd,
+ "show ip mroute vrf all summary",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n")
+{
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ show_mroute_summary(vrf->info, vty);
+ }
+
+ return CMD_SUCCESS;
+}
+
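Illustrative output of the new summary command (counts invented), following the "%-20s %d/%d" format strings above:

    router# show ip mroute summary
    Mroute Type          Installed/Total
    (*, G)               2/3
    (S, G)               5/5
    ------
    Total                7/8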
DEFUN (show_ip_rib,
show_ip_rib_cmd,
"show ip rib [vrf NAME] A.B.C.D",
@@ -5233,7 +5912,13 @@ static int pim_rp_cmd_worker(struct pim_instance *pim, struct vty *vty,
{
int result;
- result = pim_rp_new(pim, rp, group, plist);
+ result = pim_rp_new_config(pim, rp, group, plist);
+
+ if (result == PIM_GROUP_BAD_ADDR_MASK_COMBO) {
+ vty_out(vty, "%% Inconsistent address and mask: %s\n",
+ group);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
if (result == PIM_GROUP_BAD_ADDRESS) {
vty_out(vty, "%% Bad group address specified: %s\n", group);
@@ -5559,7 +6244,7 @@ static int pim_no_rp_cmd_worker(struct pim_instance *pim, struct vty *vty,
const char *rp, const char *group,
const char *plist)
{
- int result = pim_rp_del(pim, rp, group, plist);
+ int result = pim_rp_del_config(pim, rp, group, plist);
if (result == PIM_GROUP_BAD_ADDRESS) {
vty_out(vty, "%% Bad group address specified: %s\n", group);
@@ -5778,6 +6463,27 @@ DEFUN (show_ip_pim_group_type,
return CMD_SUCCESS;
}
+DEFUN (show_ip_pim_bsr,
+ show_ip_pim_bsr_cmd,
+ "show ip pim bsr [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ "boot-strap router information\n"
+ JSON_STR)
+{
+ int idx = 2;
+ struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ bool uj = use_json(argc, argv);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_bsr(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
DEFUN (ip_ssmpingd,
ip_ssmpingd_cmd,
"ip ssmpingd [A.B.C.D]",
@@ -7226,6 +7932,7 @@ DEFUN (debug_pim,
PIM_DO_DEBUG_PIM_TRACE;
PIM_DO_DEBUG_MSDP_EVENTS;
PIM_DO_DEBUG_MSDP_PACKETS;
+ PIM_DO_DEBUG_BSM;
return CMD_SUCCESS;
}
@@ -7244,6 +7951,7 @@ DEFUN (no_debug_pim,
PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+ PIM_DONT_DEBUG_BSM;
return CMD_SUCCESS;
}
@@ -7629,6 +8337,30 @@ DEFUN (no_debug_mtrace,
return CMD_SUCCESS;
}
+DEFUN (debug_bsm,
+ debug_bsm_cmd,
+ "debug pim bsm",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_BSM_STR)
+{
+ PIM_DO_DEBUG_BSM;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_bsm,
+ no_debug_bsm_cmd,
+ "no debug pim bsm",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_BSM_STR)
+{
+ PIM_DONT_DEBUG_BSM;
+ return CMD_SUCCESS;
+}
+
DEFUN_NOSH (show_debugging_pim,
show_debugging_pim_cmd,
"show debugging [pim]",
@@ -7752,6 +8484,94 @@ DEFUN (no_ip_pim_bfd,
return CMD_SUCCESS;
}
+DEFUN (ip_pim_bsm,
+ ip_pim_bsm_cmd,
+ "ip pim bsm",
+ IP_STR
+ PIM_STR
+ "Enables BSM support on the interface\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ if (!pim_cmd_interface_add(ifp)) {
+ vty_out(vty, "Could not enable PIM SM on interface\n");
+ return CMD_WARNING;
+ }
+ }
+
+ pim_ifp = ifp->info;
+ pim_ifp->bsm_enable = true;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_ip_pim_bsm,
+ no_ip_pim_bsm_cmd,
+ "no ip pim bsm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Disables BSM support\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ vty_out(vty, "Pim not enabled on this interface\n");
+ return CMD_WARNING;
+ }
+
+ pim_ifp->bsm_enable = false;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (ip_pim_ucast_bsm,
+ ip_pim_ucast_bsm_cmd,
+ "ip pim unicast-bsm",
+ IP_STR
+ PIM_STR
+ "Accept/Send unicast BSM on the interface\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ if (!pim_cmd_interface_add(ifp)) {
+ vty_out(vty, "Could not enable PIM SM on interface\n");
+ return CMD_WARNING;
+ }
+ }
+
+ pim_ifp = ifp->info;
+ pim_ifp->ucast_bsm_accept = true;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_ip_pim_ucast_bsm,
+ no_ip_pim_ucast_bsm_cmd,
+ "no ip pim unicast-bsm",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "Block send/receive unicast BSM on this interface\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp) {
+ vty_out(vty, "Pim not enabled on this interface\n");
+ return CMD_WARNING;
+ }
+
+ pim_ifp->ucast_bsm_accept = false;
+
+ return CMD_SUCCESS;
+}
+
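An illustrative vtysh sequence exercising the four new interface commands (interface name hypothetical); both settings default to enabled, as set in the pim_iface.c hunk further down, so only the no-forms change behavior:

    router(config)# interface eth0
    router(config-if)# no ip pim bsm
    router(config-if)# no ip pim unicast-bsm
    router(config-if)# ip pim bsm
    router(config-if)# ip pim unicast-bsm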
#if HAVE_BFDD > 0
DEFUN_HIDDEN(
#else
@@ -8269,7 +9089,7 @@ static void ip_msdp_show_peers(struct pim_instance *pim, struct vty *vty,
pim_time_uptime(timebuf, sizeof(timebuf),
now - mp->uptime);
} else {
- strcpy(timebuf, "-");
+ strlcpy(timebuf, "-", sizeof(timebuf));
}
pim_inet4_dump("<peer?>", mp->peer, peer_str, sizeof(peer_str));
pim_inet4_dump("<local?>", mp->local, local_str,
@@ -8326,7 +9146,7 @@ static void ip_msdp_show_peers_detail(struct pim_instance *pim, struct vty *vty,
pim_time_uptime(timebuf, sizeof(timebuf),
now - mp->uptime);
} else {
- strcpy(timebuf, "-");
+ strlcpy(timebuf, "-", sizeof(timebuf));
}
pim_inet4_dump("<local?>", mp->local, local_str,
sizeof(local_str));
@@ -8505,18 +9325,18 @@ static void ip_msdp_show_sa(struct pim_instance *pim, struct vty *vty, bool uj)
if (sa->flags & PIM_MSDP_SAF_PEER) {
pim_inet4_dump("<rp?>", sa->rp, rp_str, sizeof(rp_str));
if (sa->up) {
- strcpy(spt_str, "yes");
+ strlcpy(spt_str, "yes", sizeof(spt_str));
} else {
- strcpy(spt_str, "no");
+ strlcpy(spt_str, "no", sizeof(spt_str));
}
} else {
- strcpy(rp_str, "-");
- strcpy(spt_str, "-");
+ strlcpy(rp_str, "-", sizeof(rp_str));
+ strlcpy(spt_str, "-", sizeof(spt_str));
}
if (sa->flags & PIM_MSDP_SAF_LOCAL) {
- strcpy(local_str, "yes");
+ strlcpy(local_str, "yes", sizeof(local_str));
} else {
- strcpy(local_str, "no");
+ strlcpy(local_str, "no", sizeof(local_str));
}
if (uj) {
json_object_object_get_ex(json, grp_str, &json_group);
@@ -8570,19 +9390,19 @@ static void ip_msdp_show_sa_entry_detail(struct pim_msdp_sa *sa,
pim_inet4_dump("<rp?>", sa->rp, rp_str, sizeof(rp_str));
pim_inet4_dump("<peer?>", sa->peer, peer_str, sizeof(peer_str));
if (sa->up) {
- strcpy(spt_str, "yes");
+ strlcpy(spt_str, "yes", sizeof(spt_str));
} else {
- strcpy(spt_str, "no");
+ strlcpy(spt_str, "no", sizeof(spt_str));
}
} else {
- strcpy(rp_str, "-");
- strcpy(peer_str, "-");
- strcpy(spt_str, "-");
+ strlcpy(rp_str, "-", sizeof(rp_str));
+ strlcpy(peer_str, "-", sizeof(peer_str));
+ strlcpy(spt_str, "-", sizeof(spt_str));
}
if (sa->flags & PIM_MSDP_SAF_LOCAL) {
- strcpy(local_str, "yes");
+ strlcpy(local_str, "yes", sizeof(local_str));
} else {
- strcpy(local_str, "no");
+ strlcpy(local_str, "no", sizeof(local_str));
}
pim_time_timer_to_hhmmss(statetimer, sizeof(statetimer),
sa->sa_state_timer);
@@ -9349,17 +10169,23 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_pim_upstream_rpf_cmd);
install_element(VIEW_NODE, &show_ip_pim_rp_cmd);
install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsr_cmd);
install_element(VIEW_NODE, &show_ip_multicast_cmd);
install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_mroute_cmd);
install_element(VIEW_NODE, &show_ip_mroute_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_mroute_count_cmd);
install_element(VIEW_NODE, &show_ip_mroute_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_summary_cmd);
+ install_element(VIEW_NODE, &show_ip_mroute_summary_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_rib_cmd);
install_element(VIEW_NODE, &show_ip_ssmpingd_cmd);
install_element(VIEW_NODE, &show_debugging_pim_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_cmd);
install_element(VIEW_NODE, &show_ip_pim_nexthop_lookup_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsrp_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_bsm_db_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_statistics_cmd);
install_element(ENABLE_NODE, &clear_ip_interfaces_cmd);
install_element(ENABLE_NODE, &clear_ip_igmp_interfaces_cmd);
@@ -9367,6 +10193,7 @@ void pim_cmd_init(void)
install_element(ENABLE_NODE, &clear_ip_pim_interfaces_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_interface_traffic_cmd);
install_element(ENABLE_NODE, &clear_ip_pim_oil_cmd);
+ install_element(ENABLE_NODE, &clear_ip_pim_statistics_cmd);
install_element(ENABLE_NODE, &debug_igmp_cmd);
install_element(ENABLE_NODE, &no_debug_igmp_cmd);
@@ -9414,6 +10241,8 @@ void pim_cmd_init(void)
install_element(ENABLE_NODE, &no_debug_msdp_packets_cmd);
install_element(ENABLE_NODE, &debug_mtrace_cmd);
install_element(ENABLE_NODE, &no_debug_mtrace_cmd);
+ install_element(ENABLE_NODE, &debug_bsm_cmd);
+ install_element(ENABLE_NODE, &no_debug_bsm_cmd);
install_element(CONFIG_NODE, &debug_igmp_cmd);
install_element(CONFIG_NODE, &no_debug_igmp_cmd);
@@ -9457,6 +10286,8 @@ void pim_cmd_init(void)
install_element(CONFIG_NODE, &no_debug_msdp_packets_cmd);
install_element(CONFIG_NODE, &debug_mtrace_cmd);
install_element(CONFIG_NODE, &no_debug_mtrace_cmd);
+ install_element(CONFIG_NODE, &debug_bsm_cmd);
+ install_element(CONFIG_NODE, &no_debug_bsm_cmd);
install_element(CONFIG_NODE, &ip_msdp_mesh_group_member_cmd);
install_element(VRF_NODE, &ip_msdp_mesh_group_member_cmd);
@@ -9480,6 +10311,11 @@ void pim_cmd_init(void)
install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_work_cmd);
install_element(INTERFACE_NODE, &interface_pim_use_source_cmd);
install_element(INTERFACE_NODE, &interface_no_pim_use_source_cmd);
+ /* Install BSM command */
+ install_element(INTERFACE_NODE, &ip_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_bsm_cmd);
+ install_element(INTERFACE_NODE, &ip_pim_ucast_bsm_cmd);
+ install_element(INTERFACE_NODE, &no_ip_pim_ucast_bsm_cmd);
/* Install BFD command */
install_element(INTERFACE_NODE, &ip_pim_bfd_cmd);
install_element(INTERFACE_NODE, &ip_pim_bfd_param_cmd);
diff --git a/pimd/pim_cmd.h b/pimd/pim_cmd.h
index 67d6e43c34..558f28231b 100644
--- a/pimd/pim_cmd.h
+++ b/pimd/pim_cmd.h
@@ -65,6 +65,8 @@
#define DEBUG_MSDP_INTERNAL_STR "MSDP protocol internal\n"
#define DEBUG_MSDP_PACKETS_STR "MSDP protocol packets\n"
#define DEBUG_MTRACE_STR "Mtrace protocol activity\n"
+#define DEBUG_PIM_BSM_STR "BSR message processing activity\n"
+
void pim_cmd_init(void);
diff --git a/pimd/pim_hello.c b/pimd/pim_hello.c
index e482d321a4..721d153d76 100644
--- a/pimd/pim_hello.c
+++ b/pimd/pim_hello.c
@@ -31,6 +31,7 @@
#include "pim_iface.h"
#include "pim_neighbor.h"
#include "pim_upstream.h"
+#include "pim_bsm.h"
static void on_trace(const char *label, struct interface *ifp,
struct in_addr src)
@@ -367,6 +368,12 @@ int pim_hello_recv(struct interface *ifp, struct in_addr src_addr,
}
FREE_ADDR_LIST_THEN_RETURN(-8);
}
+ /* Forward BSM if required */
+ if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug("%s: forwarding bsm to new nbr failed",
+ __PRETTY_FUNCTION__);
+ }
/* actual addr list has been saved under neighbor */
return 0;
@@ -420,6 +427,12 @@ int pim_hello_recv(struct interface *ifp, struct in_addr src_addr,
}
FREE_ADDR_LIST_THEN_RETURN(-9);
}
+ /* Forward BSM if required */
+ if (!pim_bsm_new_nbr_fwd(neigh, ifp)) {
+ if (PIM_DEBUG_PIM_HELLO)
+ zlog_debug("%s: forwarding bsm to new nbr failed",
+ __PRETTY_FUNCTION__);
+ }
/* actual addr list is saved under neighbor */
return 0;
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 0fb7f176ce..08be38c138 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -132,6 +132,10 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
pim_ifp->igmp_specific_query_max_response_time_dsec =
IGMP_SPECIFIC_QUERY_MAX_RESPONSE_TIME_DSEC;
+ /* BSM config on interface: TRUE by default */
+ pim_ifp->bsm_enable = true;
+ pim_ifp->ucast_bsm_accept = true;
+
/*
RFC 3376: 8.3. Query Response Interval
The number of seconds represented by the [Query Response Interval]
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index fe96c07758..ab138589bd 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -128,6 +128,8 @@ struct pim_interface {
bool activeactive;
int64_t pim_ifstat_start; /* start timestamp for stats */
+ uint64_t pim_ifstat_bsm_rx;
+ uint64_t pim_ifstat_bsm_tx;
uint32_t pim_ifstat_hello_sent;
uint32_t pim_ifstat_hello_sendfail;
uint32_t pim_ifstat_hello_recv;
@@ -142,7 +144,12 @@ struct pim_interface {
uint32_t pim_ifstat_reg_stop_send;
uint32_t pim_ifstat_assert_recv;
uint32_t pim_ifstat_assert_send;
+ uint32_t pim_ifstat_bsm_cfg_miss;
+ uint32_t pim_ifstat_ucast_bsm_cfg_miss;
+ uint32_t pim_ifstat_bsm_invalid_sz;
struct bfd_info *bfd_info;
+ bool bsm_enable; /* bsm processing enable */
+ bool ucast_bsm_accept; /* ucast bsm processing */
};
/*
diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c
index a2bf3d2783..6848d2dabb 100644
--- a/pimd/pim_instance.c
+++ b/pimd/pim_instance.c
@@ -33,6 +33,7 @@
#include "pim_static.h"
#include "pim_ssmpingd.h"
#include "pim_vty.h"
+#include "pim_bsm.h"
static void pim_instance_terminate(struct pim_instance *pim)
{
@@ -50,6 +51,8 @@ static void pim_instance_terminate(struct pim_instance *pim)
pim_rp_free(pim);
+ pim_bsm_proc_free(pim);
+
/* Traverse and cleanup rpf_hash */
if (pim->rpf_hash) {
hash_clean(pim->rpf_hash, (void *)pim_rp_list_hash_clean);
@@ -106,6 +109,8 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
pim_rp_init(pim);
+ pim_bsm_proc_init(pim);
+
pim_oil_init(pim);
pim_upstream_init(pim);
diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h
index 1740bcc790..06d41c4b53 100644
--- a/pimd/pim_instance.h
+++ b/pimd/pim_instance.h
@@ -26,6 +26,7 @@
#include "pim_str.h"
#include "pim_msdp.h"
#include "pim_assert.h"
+#include "pim_bsm.h"
#include "pim_vxlan_instance.h"
#if defined(HAVE_LINUX_MROUTE_H)
@@ -121,6 +122,11 @@ struct pim_instance {
bool ecmp_enable;
bool ecmp_rebalance_enable;
+ /* Bsm related */
+ struct bsm_scope global_scope;
+ uint64_t bsm_rcvd;
+ uint64_t bsm_sent;
+ uint64_t bsm_dropped;
/* If we need to rescan all our upstreams */
struct thread *rpf_cache_refresher;
diff --git a/pimd/pim_join.c b/pimd/pim_join.c
index cbacaf3ea8..5e1a4f0c5e 100644
--- a/pimd/pim_join.c
+++ b/pimd/pim_join.c
@@ -519,7 +519,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
group_size = pim_msg_get_jp_group_size(group->sources);
if (group_size > packet_left) {
pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE);
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd,
pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
@@ -576,7 +576,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
if (packet_left < sizeof(struct pim_jp_groups)
|| msg->num_groups == 255) {
pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE);
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd,
pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
@@ -596,7 +596,7 @@ int pim_joinprune_send(struct pim_rpf *rpf, struct list *groups)
if (!new_packet) {
// msg->num_groups = htons (msg->num_groups);
pim_msg_build_header(pim_msg, packet_size,
- PIM_MSG_TYPE_JOIN_PRUNE);
+ PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
packet_size,
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
index 3287e13719..74a3a9836b 100644
--- a/pimd/pim_msdp.c
+++ b/pimd/pim_msdp.c
@@ -1078,7 +1078,7 @@ static enum pim_msdp_err pim_msdp_peer_new(struct pim_instance *pim,
mp->mesh_group_name = XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
mp->state = PIM_MSDP_INACTIVE;
mp->fd = -1;
- strcpy(mp->last_reset, "-");
+ strlcpy(mp->last_reset, "-", sizeof(mp->last_reset));
/* higher IP address is listener */
if (ntohl(mp->local.s_addr) > ntohl(mp->peer.s_addr)) {
mp->flags |= PIM_MSDP_PEERF_LISTENER;
diff --git a/pimd/pim_msg.c b/pimd/pim_msg.c
index 63688f87e0..2e467502b1 100644
--- a/pimd/pim_msg.c
+++ b/pimd/pim_msg.c
@@ -39,7 +39,7 @@
#include "pim_oil.h"
void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
- uint8_t pim_msg_type)
+ uint8_t pim_msg_type, bool no_fwd)
{
struct pim_msg_header *header = (struct pim_msg_header *)pim_msg;
@@ -48,6 +48,7 @@ void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
*/
header->ver = PIM_PROTO_VERSION;
header->type = pim_msg_type;
+ header->Nbit = no_fwd;
header->reserved = 0;
diff --git a/pimd/pim_msg.h b/pimd/pim_msg.h
index ad9b5d9c01..5f50303967 100644
--- a/pimd/pim_msg.h
+++ b/pimd/pim_msg.h
@@ -23,6 +23,8 @@
#include <netinet/in.h>
#include "pim_jp_agg.h"
+
+#define PIM_HDR_LEN sizeof(struct pim_msg_header)
/*
Number Description
---------- ------------------
@@ -41,11 +43,20 @@ enum pim_msg_address_family {
/*
* Network Order pim_msg_hdr
+ * =========================
+ * PIM header definition as per RFC 5059. The N bit indicates the
+ * no-forward option in a PIM Bootstrap Message.
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |PIM Ver| Type |N| Reserved | Checksum |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct pim_msg_header {
uint8_t type : 4;
uint8_t ver : 4;
- uint8_t reserved;
+ uint8_t Nbit : 1; /* No Fwd Bit */
+ uint8_t reserved : 7;
uint16_t checksum;
} __attribute__((packed));
@@ -58,7 +69,9 @@ struct pim_encoded_ipv4_unicast {
struct pim_encoded_group_ipv4 {
uint8_t ne;
uint8_t family;
- uint8_t reserved;
+ uint8_t bidir : 1; /* Bidir bit */
+ uint8_t reserved : 6; /* Reserved */
+ uint8_t sz : 1; /* scope zone bit */
uint8_t mask;
struct in_addr addr;
} __attribute__((packed));
@@ -88,7 +101,7 @@ struct pim_jp {
} __attribute__((packed));
void pim_msg_build_header(uint8_t *pim_msg, size_t pim_msg_size,
- uint8_t pim_msg_type);
+ uint8_t pim_msg_type, bool no_fwd);
uint8_t *pim_msg_addr_encode_ipv4_ucast(uint8_t *buf, struct in_addr addr);
uint8_t *pim_msg_addr_encode_ipv4_group(uint8_t *buf, struct in_addr addr);
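A minimal sketch of a caller using the extended header API; bsm_buf and body_len are hypothetical, only pim_msg_build_header(), PIM_HDR_LEN and PIM_MSG_TYPE_BOOTSTRAP come from this patch:

    uint8_t bsm_buf[1024];                    /* hypothetical send buffer */
    size_t body_len = 100;                    /* hypothetical body length */
    size_t bsm_len = PIM_HDR_LEN + body_len;

    /* Per RFC 5059 a BSM relayed unicast to a new neighbor carries the
     * No-Forward bit; all pre-existing callers keep passing false. */
    pim_msg_build_header(bsm_buf, bsm_len, PIM_MSG_TYPE_BOOTSTRAP, true);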
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index 48b9f1f284..1a2f451524 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -121,6 +121,7 @@ static struct pim_nexthop_cache *pim_nexthop_cache_add(struct pim_instance *pim,
*/
int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
struct pim_upstream *up, struct rp_info *rp,
+ bool bsr_track_needed,
struct pim_nexthop_cache *out_pnc)
{
struct pim_nexthop_cache *pnc = NULL;
@@ -157,6 +158,9 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
if (up != NULL)
hash_get(pnc->upstream_hash, up, hash_alloc_intern);
+ if (bsr_track_needed)
+ pnc->bsr_tracking = true;
+
if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
if (out_pnc)
memcpy(out_pnc, pnc, sizeof(struct pim_nexthop_cache));
@@ -167,7 +171,8 @@ int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
}
void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
- struct pim_upstream *up, struct rp_info *rp)
+ struct pim_upstream *up, struct rp_info *rp,
+ bool del_bsr_tracking)
{
struct pim_nexthop_cache *pnc = NULL;
struct pim_nexthop_cache lookup;
@@ -208,6 +213,9 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
if (up)
hash_release(pnc->upstream_hash, up);
+ if (del_bsr_tracking)
+ pnc->bsr_tracking = false;
+
if (PIM_DEBUG_PIM_NHT) {
char buf[PREFIX_STRLEN];
prefix2str(addr, buf, sizeof buf);
@@ -218,7 +226,8 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
}
if (pnc->rp_list->count == 0
- && pnc->upstream_hash->count == 0) {
+ && pnc->upstream_hash->count == 0
+ && pnc->bsr_tracking == false) {
pim_sendmsg_zebra_rnh(pim, zclient, pnc,
ZEBRA_NEXTHOP_UNREGISTER);
@@ -233,6 +242,169 @@ void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
}
}
+/* Given a source address and a neighbor address, check whether the neighbor
+ * is one of the next hops to reach the source. The lookup is done against
+ * the zebra route database.
+ */
+bool pim_nexthop_match(struct pim_instance *pim, struct in_addr addr,
+ struct in_addr ip_src)
+{
+ struct pim_zlookup_nexthop nexthop_tab[MULTIPATH_NUM];
+ int i = 0;
+ ifindex_t first_ifindex = 0;
+ struct interface *ifp = NULL;
+ struct pim_neighbor *nbr = NULL;
+ int num_ifindex;
+
+ if (addr.s_addr == INADDR_NONE)
+ return 0;
+
+ memset(nexthop_tab, 0,
+ sizeof(struct pim_zlookup_nexthop) * MULTIPATH_NUM);
+ num_ifindex = zclient_lookup_nexthop(pim, nexthop_tab, MULTIPATH_NUM,
+ addr, PIM_NEXTHOP_LOOKUP_MAX);
+ if (num_ifindex < 1) {
+ char addr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<addr?>", addr, addr_str, sizeof(addr_str));
+ zlog_warn(
+ "%s %s: could not find nexthop ifindex for address %s",
+ __FILE__, __PRETTY_FUNCTION__, addr_str);
+ return 0;
+ }
+
+ while (i < num_ifindex) {
+ first_ifindex = nexthop_tab[i].ifindex;
+
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf_id);
+ if (!ifp) {
+ if (PIM_DEBUG_ZEBRA) {
+ char addr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<addr?>", addr, addr_str,
+ sizeof(addr_str));
+ zlog_debug(
+ "%s %s: could not find interface for ifindex %d (address %s)",
+ __FILE__, __PRETTY_FUNCTION__,
+ first_ifindex, addr_str);
+ }
+ i++;
+ continue;
+ }
+
+ if (!ifp->info) {
+ if (PIM_DEBUG_ZEBRA) {
+ char addr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<addr?>", addr, addr_str,
+ sizeof(addr_str));
+ zlog_debug(
+ "%s: multicast not enabled on input interface %s (ifindex=%d, RPF for source %s)",
+ __PRETTY_FUNCTION__, ifp->name,
+ first_ifindex, addr_str);
+ }
+ i++;
+ continue;
+ }
+
+ if (!pim_if_connected_to_source(ifp, addr)) {
+ nbr = pim_neighbor_find(
+ ifp, nexthop_tab[i].nexthop_addr.u.prefix4);
+ if (PIM_DEBUG_PIM_TRACE_DETAIL)
+ zlog_debug("ifp name: %s, pim nbr: %p",
+ ifp->name, nbr);
+ if (!nbr && !if_is_loopback(ifp)) {
+ i++;
+ continue;
+ }
+ }
+
+ if (nexthop_tab[i].nexthop_addr.u.prefix4.s_addr
+ == ip_src.s_addr)
+ return 1;
+
+ i++;
+ }
+
+ return 0;
+}
+
+/* Given a source address and a neighbor address, check whether the neighbor
+ * is one of the next hops to reach the source. The lookup is done against
+ * the PIM nexthop cache.
+ */
+bool pim_nexthop_match_nht_cache(struct pim_instance *pim, struct in_addr addr,
+ struct in_addr ip_src)
+{
+ struct pim_rpf rpf;
+ ifindex_t first_ifindex;
+ struct interface *ifp = NULL;
+ uint8_t nh_iter = 0;
+ struct pim_neighbor *nbr = NULL;
+ struct nexthop *nh_node = NULL;
+ struct pim_nexthop_cache *pnc = NULL;
+
+ memset(&rpf, 0, sizeof(struct pim_rpf));
+ rpf.rpf_addr.family = AF_INET;
+ rpf.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
+ rpf.rpf_addr.u.prefix4 = addr;
+
+ pnc = pim_nexthop_cache_find(pim, &rpf);
+ if (!pnc || !pnc->nexthop_num)
+ return 0;
+
+ for (nh_node = pnc->nexthop; nh_node; nh_node = nh_node->next) {
+ first_ifindex = nh_node->ifindex;
+ ifp = if_lookup_by_index(first_ifindex, pim->vrf_id);
+ if (!ifp) {
+ if (PIM_DEBUG_PIM_NHT) {
+ char addr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<addr?>", addr, addr_str,
+ sizeof(addr_str));
+ zlog_debug(
+ "%s %s: could not find interface for ifindex %d (address %s(%s))",
+ __FILE__, __PRETTY_FUNCTION__,
+ first_ifindex, addr_str,
+ pim->vrf->name);
+ }
+ nh_iter++;
+ continue;
+ }
+ if (!ifp->info) {
+ if (PIM_DEBUG_PIM_NHT) {
+ char addr_str[INET_ADDRSTRLEN];
+
+ pim_inet4_dump("<addr?>", addr, addr_str,
+ sizeof(addr_str));
+ zlog_debug(
+ "%s: multicast not enabled on input interface %s(%s) (ifindex=%d, RPF for source %s)",
+ __PRETTY_FUNCTION__, ifp->name,
+ pim->vrf->name, first_ifindex,
+ addr_str);
+ }
+ nh_iter++;
+ continue;
+ }
+
+ if (!pim_if_connected_to_source(ifp, addr)) {
+ nbr = pim_neighbor_find(ifp, nh_node->gate.ipv4);
+ if (!nbr && !if_is_loopback(ifp)) {
+ if (PIM_DEBUG_PIM_NHT)
+ zlog_debug(
+ "%s: pim nbr not found on input interface %s(%s)",
+ __PRETTY_FUNCTION__, ifp->name,
+ pim->vrf->name);
+ nh_iter++;
+ continue;
+ }
+ }
+
+ if (nh_node->gate.ipv4.s_addr == ip_src.s_addr)
+ return 1;
+ }
+
+ return 0;
+}
+
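A minimal usage sketch for the two helpers above (bsr_addr and ip_src are hypothetical): a unicast BSM would typically be accepted only when its sender lies on the RPF path toward the BSR address.

    if (!pim_nexthop_match(pim, bsr_addr, ip_src)) {
        if (PIM_DEBUG_BSM)
            zlog_debug("%s: BSM sender is not an RPF neighbor toward the BSR",
                       __PRETTY_FUNCTION__);
        return -1;
    }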
void pim_rp_nexthop_del(struct rp_info *rp_info)
{
rp_info->rp.source_nexthop.interface = NULL;
diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h
index e7a5fa7720..12dbf167d1 100644
--- a/pimd/pim_nht.h
+++ b/pimd/pim_nht.h
@@ -45,14 +45,22 @@ struct pim_nexthop_cache {
struct list *rp_list;
struct hash *upstream_hash;
+ /* Ideally this should be a list of scope zones, but for now a single
+ * bool (bsr_tracking) is sufficient. It can later be turned into a list
+ * of scope zones so that the same BSR can be tracked for multiple
+ * scope zones.
+ */
+ bool bsr_tracking;
};
int pim_parse_nexthop_update(ZAPI_CALLBACK_ARGS);
int pim_find_or_track_nexthop(struct pim_instance *pim, struct prefix *addr,
struct pim_upstream *up, struct rp_info *rp,
+ bool bsr_track_needed,
struct pim_nexthop_cache *out_pnc);
void pim_delete_tracked_nexthop(struct pim_instance *pim, struct prefix *addr,
- struct pim_upstream *up, struct rp_info *rp);
+ struct pim_upstream *up, struct rp_info *rp,
+ bool del_bsr_tracking);
struct pim_nexthop_cache *pim_nexthop_cache_find(struct pim_instance *pim,
struct pim_rpf *rpf);
uint32_t pim_compute_ecmp_hash(struct prefix *src, struct prefix *grp);
@@ -64,4 +72,9 @@ void pim_sendmsg_zebra_rnh(struct pim_instance *pim, struct zclient *zclient,
int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
struct prefix *src, struct prefix *grp);
void pim_rp_nexthop_del(struct rp_info *rp_info);
+bool pim_nexthop_match(struct pim_instance *pim, struct in_addr addr,
+ struct in_addr ip_src);
+bool pim_nexthop_match_nht_cache(struct pim_instance *pim, struct in_addr addr,
+ struct in_addr ip_src);
+
#endif
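A minimal sketch of how the new bsr_track_needed/del_bsr_tracking arguments are meant to be used; the prefix setup is illustrative and current_bsr is only an example source of the tracked address:

    struct prefix bsr_p;

    bsr_p.family = AF_INET;
    bsr_p.prefixlen = IPV4_MAX_BITLEN;
    bsr_p.u.prefix4 = pim->global_scope.current_bsr;

    /* keep the NHT cache entry alive purely for BSR tracking, even when
     * no RP or upstream references it */
    pim_find_or_track_nexthop(pim, &bsr_p, NULL, NULL, true, NULL);

    /* drop the BSR tracking reference, e.g. when the BSR expires */
    pim_delete_tracked_nexthop(pim, &bsr_p, NULL, NULL, true);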
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
index 71b0d47928..12b28ed9af 100644
--- a/pimd/pim_pim.c
+++ b/pimd/pim_pim.c
@@ -39,6 +39,7 @@
#include "pim_msg.h"
#include "pim_register.h"
#include "pim_errors.h"
+#include "pim_bsm.h"
static int on_pim_hello_send(struct thread *t);
static int pim_hello_send(struct interface *ifp, uint16_t holdtime);
@@ -148,6 +149,7 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
uint16_t checksum; /* computed checksum */
struct pim_neighbor *neigh;
struct pim_msg_header *header;
+ bool no_fwd;
if (len < sizeof(*ip_hdr)) {
if (PIM_DEBUG_PIM_PACKETS)
@@ -185,6 +187,7 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
/* for computing checksum */
header->checksum = 0;
+ no_fwd = header->Nbit;
if (header->type == PIM_MSG_TYPE_REGISTER) {
/* First 8 byte header checksum */
@@ -273,6 +276,11 @@ int pim_pim_packet(struct interface *ifp, uint8_t *buf, size_t len)
pim_msg + PIM_MSG_HEADER_LEN,
pim_msg_len - PIM_MSG_HEADER_LEN);
break;
+ case PIM_MSG_TYPE_BOOTSTRAP:
+ return pim_bsm_process(ifp, ip_hdr, pim_msg, pim_msg_len,
+ no_fwd);
+
default:
if (PIM_DEBUG_PIM_PACKETS) {
zlog_debug(
@@ -634,7 +642,7 @@ static int hello_send(struct interface *ifp, uint16_t holdtime)
zassert(pim_msg_size >= PIM_PIM_MIN_LEN);
zassert(pim_msg_size <= PIM_PIM_BUFSIZE_WRITE);
- pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO);
+ pim_msg_build_header(pim_msg, pim_msg_size, PIM_MSG_TYPE_HELLO, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index 431236eebe..3fe7e8bf64 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -94,7 +94,7 @@ void pim_register_stop_send(struct interface *ifp, struct prefix_sg *sg,
b1length += length;
pim_msg_build_header(buffer, b1length + PIM_MSG_REGISTER_STOP_LEN,
- PIM_MSG_TYPE_REG_STOP);
+ PIM_MSG_TYPE_REG_STOP, false);
pinfo = (struct pim_interface *)ifp->info;
if (!pinfo) {
@@ -208,7 +208,7 @@ void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,
memcpy(b1, (const unsigned char *)buf, buf_size);
pim_msg_build_header(buffer, buf_size + PIM_MSG_REGISTER_LEN,
- PIM_MSG_TYPE_REGISTER);
+ PIM_MSG_TYPE_REGISTER, false);
++pinfo->pim_ifstat_reg_send;
diff --git a/pimd/pim_rp.c b/pimd/pim_rp.c
index 14643743ad..ca865d28c3 100644
--- a/pimd/pim_rp.c
+++ b/pimd/pim_rp.c
@@ -48,6 +48,7 @@
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_zebra.h"
+#include "pim_bsm.h"
/* Cleanup pim->rpf_hash each node data */
void pim_rp_list_hash_clean(void *data)
@@ -372,7 +373,7 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
zlog_debug("%s: Deregister upstream %s addr %s with Zebra NHT",
__PRETTY_FUNCTION__, up->sg_str, buf);
}
- pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+ pim_delete_tracked_nexthop(pim, &nht_p, up, NULL, false);
}
/* Update the upstream address */
@@ -406,10 +407,45 @@ void pim_upstream_update(struct pim_instance *pim, struct pim_upstream *up)
pim_zebra_update_all_interfaces(pim);
}
-int pim_rp_new(struct pim_instance *pim, const char *rp,
- const char *group_range, const char *plist)
+int pim_rp_new_config(struct pim_instance *pim, const char *rp,
+ const char *group_range, const char *plist)
{
int result = 0;
+ struct prefix group;
+ struct in_addr rp_addr;
+
+ if (group_range == NULL)
+ result = str2prefix("224.0.0.0/4", &group);
+ else {
+ result = str2prefix(group_range, &group);
+ if (result) {
+ struct prefix temp;
+
+ prefix_copy(&temp, &group);
+ apply_mask(&temp);
+ if (!prefix_same(&group, &temp))
+ return PIM_GROUP_BAD_ADDR_MASK_COMBO;
+ }
+ }
+
+ if (!result)
+ return PIM_GROUP_BAD_ADDRESS;
+
+ result = inet_pton(AF_INET, rp, &rp_addr);
+
+ if (result <= 0)
+ return PIM_RP_BAD_ADDRESS;
+
+ result = pim_rp_new(pim, rp_addr, group, plist, RP_SRC_STATIC);
+ return result;
+}
+
+int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag)
+{
+ int result = 0;
+ char rp[INET_ADDRSTRLEN];
struct rp_info *rp_info;
struct rp_info *rp_all;
struct prefix group_all;
@@ -417,41 +453,19 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
struct rp_info *tmp_rp_info;
char buffer[BUFSIZ];
struct prefix nht_p;
- struct prefix temp;
struct route_node *rn;
struct pim_upstream *up;
struct listnode *upnode;
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
- if (group_range == NULL)
- result = str2prefix("224.0.0.0/4", &rp_info->group);
- else {
- result = str2prefix(group_range, &rp_info->group);
- if (result) {
- prefix_copy(&temp, &rp_info->group);
- apply_mask(&temp);
- if (!prefix_same(&rp_info->group, &temp)) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_BAD_ADDR_MASK_COMBO;
- }
- }
- }
-
- if (!result) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_BAD_ADDRESS;
- }
-
rp_info->rp.rpf_addr.family = AF_INET;
rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
- result = inet_pton(rp_info->rp.rpf_addr.family, rp,
- &rp_info->rp.rpf_addr.u.prefix4);
+ rp_info->rp.rpf_addr.u.prefix4 = rp_addr;
+ prefix_copy(&rp_info->group, &group);
+ rp_info->rp_src = rp_src_flag;
- if (result <= 0) {
- XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_RP_BAD_ADDRESS;
- }
+ inet_ntop(AF_INET, &rp_info->rp.rpf_addr.u.prefix4, rp, sizeof(rp));
if (plist) {
/*
@@ -479,10 +493,10 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
if (rp_info->rp.rpf_addr.u.prefix4.s_addr
== tmp_rp_info->rp.rpf_addr.u.prefix4.s_addr) {
if (tmp_rp_info->plist)
- pim_rp_del(pim, rp, NULL,
- tmp_rp_info->plist);
+ pim_rp_del_config(pim, rp, NULL,
+ tmp_rp_info->plist);
else
- pim_rp_del(
+ pim_rp_del_config(
pim, rp,
prefix2str(&tmp_rp_info->group,
buffer, BUFSIZ),
@@ -516,7 +530,8 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
&& rp_info->rp.rpf_addr.u.prefix4.s_addr
== tmp_rp_info->rp.rpf_addr.u.prefix4
.s_addr) {
- pim_rp_del(pim, rp, NULL, tmp_rp_info->plist);
+ pim_rp_del_config(pim, rp, NULL,
+ tmp_rp_info->plist);
}
}
@@ -526,6 +541,7 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
if (prefix_same(&rp_all->group, &rp_info->group)
&& pim_rpf_addr_is_inaddr_none(&rp_all->rp)) {
rp_all->rp.rpf_addr = rp_info->rp.rpf_addr;
+ rp_all->rp_src = rp_src_flag;
XFREE(MTYPE_PIM_RP, rp_info);
/* Register addr with Zebra NHT */
@@ -556,8 +572,8 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
grp.family = AF_INET;
grp.prefixlen = IPV4_MAX_BITLEN;
grp.u.prefix4 = up->sg.grp;
- trp_info = pim_rp_find_match_group(pim,
- &grp);
+ trp_info = pim_rp_find_match_group(
+ pim, &grp);
if (trp_info == rp_all)
pim_upstream_update(pim, up);
}
@@ -565,24 +581,27 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
pim_rp_check_interfaces(pim, rp_all);
pim_rp_refresh_group_to_rp_mapping(pim);
-
pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_all,
- NULL);
+ false, NULL);
+
if (!pim_ecmp_nexthop_lookup(pim,
&rp_all->rp.source_nexthop,
&nht_p, &rp_all->group, 1))
return PIM_RP_NO_PATH;
-
return PIM_SUCCESS;
}
/*
* Return if the group is already configured for this RP
*/
- if (pim_rp_find_exact(pim, rp_info->rp.rpf_addr.u.prefix4,
- &rp_info->group)) {
+ tmp_rp_info = pim_rp_find_exact(
+ pim, rp_info->rp.rpf_addr.u.prefix4, &rp_info->group);
+ if (tmp_rp_info) {
+ if ((tmp_rp_info->rp_src != rp_src_flag)
+ && (rp_src_flag == RP_SRC_STATIC))
+ tmp_rp_info->rp_src = rp_src_flag;
XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_SUCCESS;
+ return result;
}
/*
@@ -604,8 +623,20 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
*/
if (prefix_same(&rp_info->group,
&tmp_rp_info->group)) {
+ if ((rp_src_flag == RP_SRC_STATIC)
+ && (tmp_rp_info->rp_src
+ == RP_SRC_STATIC)) {
+ XFREE(MTYPE_PIM_RP, rp_info);
+ return PIM_GROUP_OVERLAP;
+ }
+
+ result = pim_rp_change(
+ pim,
+ rp_info->rp.rpf_addr.u.prefix4,
+ tmp_rp_info->group,
+ rp_src_flag);
XFREE(MTYPE_PIM_RP, rp_info);
- return PIM_GROUP_OVERLAP;
+ return result;
}
}
}
@@ -654,8 +685,7 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
__PRETTY_FUNCTION__, buf, buf1);
}
-
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
&rp_info->group, 1))
return PIM_RP_NO_PATH;
@@ -663,21 +693,12 @@ int pim_rp_new(struct pim_instance *pim, const char *rp,
return PIM_SUCCESS;
}
-int pim_rp_del(struct pim_instance *pim, const char *rp,
- const char *group_range, const char *plist)
+int pim_rp_del_config(struct pim_instance *pim, const char *rp,
+ const char *group_range, const char *plist)
{
struct prefix group;
struct in_addr rp_addr;
- struct prefix g_all;
- struct rp_info *rp_info;
- struct rp_info *rp_all;
int result;
- struct prefix nht_p;
- struct route_node *rn;
- bool was_plist = false;
- struct rp_info *trp_info;
- struct pim_upstream *up;
- struct listnode *upnode;
if (group_range == NULL)
result = str2prefix("224.0.0.0/4", &group);
@@ -691,6 +712,32 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
if (result <= 0)
return PIM_RP_BAD_ADDRESS;
+ result = pim_rp_del(pim, rp_addr, group, plist, RP_SRC_STATIC);
+ return result;
+}
+
+int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag)
+{
+ struct prefix g_all;
+ struct rp_info *rp_info;
+ struct rp_info *rp_all;
+ struct prefix nht_p;
+ struct route_node *rn;
+ bool was_plist = false;
+ struct rp_info *trp_info;
+ struct pim_upstream *up;
+ struct listnode *upnode;
+ struct bsgrp_node *bsgrp = NULL;
+ struct bsm_rpinfo *bsrp = NULL;
+ char grp_str[PREFIX2STR_BUFFER];
+ char rp_str[INET_ADDRSTRLEN];
+
+ if (!inet_ntop(AF_INET, &rp_addr, rp_str, sizeof(rp_str)))
+ sprintf(rp_str, "<rp?>");
+ prefix2str(&group, grp_str, sizeof(grp_str));
+
if (plist)
rp_info = pim_rp_find_prefix_list(pim, rp_addr, plist);
else
@@ -704,6 +751,42 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
was_plist = true;
}
+ if (PIM_DEBUG_TRACE)
+ zlog_debug("%s: Delete RP %s for the group %s",
+ __PRETTY_FUNCTION__, rp_str, grp_str);
+
+ /* While a static RP is being deleted, check whether a dynamic RP is
+ * present for the same group in the BSM RP table; if so, install that
+ * dynamic RP for the group node into the main RP table.
+ */
+ if (rp_src_flag == RP_SRC_STATIC) {
+ bsgrp = pim_bsm_get_bsgrp_node(&pim->global_scope, &group);
+
+ if (bsgrp) {
+ bsrp = listnode_head(bsgrp->bsrp_list);
+ if (bsrp) {
+ if (PIM_DEBUG_TRACE) {
+ char bsrp_str[INET_ADDRSTRLEN];
+
+ if (!inet_ntop(AF_INET, &bsrp->rp_address,
+ bsrp_str, sizeof(bsrp_str)))
+ sprintf(bsrp_str, "<bsrp?>");
+
+ zlog_debug("%s: BSM RP %s found for the group %s",
+ __PRETTY_FUNCTION__,
+ bsrp_str, grp_str);
+ }
+ return pim_rp_change(pim, bsrp->rp_address,
+ group, RP_SRC_BSR);
+ }
+ } else {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug(
+ "%s: BSM RP not found for the group %s",
+ __PRETTY_FUNCTION__, grp_str);
+ }
+ }
+
/* Deregister addr with Zebra NHT */
nht_p.family = AF_INET;
nht_p.prefixlen = IPV4_MAX_BITLEN;
@@ -714,7 +797,7 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
zlog_debug("%s: Deregister RP addr %s with Zebra ",
__PRETTY_FUNCTION__, buf);
}
- pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info);
+ pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
if (!str2prefix("224.0.0.0/4", &g_all))
return PIM_RP_BAD_ADDRESS;
@@ -726,8 +809,9 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
/* Find the upstream (*, G) whose upstream address is
* same as the deleted RP
*/
- if ((up->upstream_addr.s_addr == rp_addr.s_addr) &&
- (up->sg.src.s_addr == INADDR_ANY)) {
+ if ((up->upstream_addr.s_addr
+ == rp_info->rp.rpf_addr.u.prefix4.s_addr)
+ && (up->sg.src.s_addr == INADDR_ANY)) {
struct prefix grp;
grp.family = AF_INET;
grp.prefixlen = IPV4_MAX_BITLEN;
@@ -777,8 +861,9 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
/* Find the upstream (*, G) whose upstream address is same as
* the deleted RP
*/
- if ((up->upstream_addr.s_addr == rp_addr.s_addr) &&
- (up->sg.src.s_addr == INADDR_ANY)) {
+ if ((up->upstream_addr.s_addr
+ == rp_info->rp.rpf_addr.u.prefix4.s_addr)
+ && (up->sg.src.s_addr == INADDR_ANY)) {
struct prefix grp;
grp.family = AF_INET;
@@ -790,9 +875,9 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
/* RP not found for the group grp */
if (pim_rpf_addr_is_inaddr_none(&trp_info->rp)) {
pim_upstream_rpf_clear(pim, up);
- pim_rp_set_upstream_addr(pim,
- &up->upstream_addr,
- up->sg.src, up->sg.grp);
+ pim_rp_set_upstream_addr(
+ pim, &up->upstream_addr, up->sg.src,
+ up->sg.grp);
}
/* RP found for the group grp */
@@ -805,6 +890,105 @@ int pim_rp_del(struct pim_instance *pim, const char *rp,
return PIM_SUCCESS;
}
+int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+ struct prefix group, enum rp_source rp_src_flag)
+{
+ struct prefix nht_p;
+ struct route_node *rn;
+ int result = 0;
+ struct rp_info *rp_info = NULL;
+ struct pim_upstream *up;
+ struct listnode *upnode;
+
+ rn = route_node_lookup(pim->rp_table, &group);
+ if (!rn) {
+ result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+ return result;
+ }
+
+ rp_info = rn->info;
+
+ if (!rp_info) {
+ route_unlock_node(rn);
+ result = pim_rp_new(pim, new_rp_addr, group, NULL, rp_src_flag);
+ return result;
+ }
+
+ if (rp_info->rp.rpf_addr.u.prefix4.s_addr == new_rp_addr.s_addr) {
+ if (rp_info->rp_src != rp_src_flag) {
+ rp_info->rp_src = rp_src_flag;
+ route_unlock_node(rn);
+ return PIM_SUCCESS;
+ }
+ }
+
+ /* Deregister old RP addr with Zebra NHT */
+ if (rp_info->rp.rpf_addr.u.prefix4.s_addr != INADDR_ANY) {
+ nht_p.family = AF_INET;
+ nht_p.prefixlen = IPV4_MAX_BITLEN;
+ nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ if (PIM_DEBUG_PIM_NHT_RP) {
+ char buf[PREFIX2STR_BUFFER];
+
+ prefix2str(&nht_p, buf, sizeof(buf));
+ zlog_debug("%s: Deregister RP addr %s with Zebra ",
+ __PRETTY_FUNCTION__, buf);
+ }
+ pim_delete_tracked_nexthop(pim, &nht_p, NULL, rp_info, false);
+ }
+
+ pim_rp_nexthop_del(rp_info);
+ listnode_delete(pim->rp_list, rp_info);
+ /* Update the new RP address*/
+ rp_info->rp.rpf_addr.u.prefix4 = new_rp_addr;
+ rp_info->rp_src = rp_src_flag;
+ rp_info->i_am_rp = 0;
+
+ listnode_add_sort(pim->rp_list, rp_info);
+
+ for (ALL_LIST_ELEMENTS_RO(pim->upstream_list, upnode, up)) {
+ if (up->sg.src.s_addr == INADDR_ANY) {
+ struct prefix grp;
+ struct rp_info *trp_info;
+
+ grp.family = AF_INET;
+ grp.prefixlen = IPV4_MAX_BITLEN;
+ grp.u.prefix4 = up->sg.grp;
+ trp_info = pim_rp_find_match_group(pim, &grp);
+
+ if (trp_info == rp_info)
+ pim_upstream_update(pim, up);
+ }
+ }
+
+ /* Register new RP addr with Zebra NHT */
+ nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
+ if (PIM_DEBUG_PIM_NHT_RP) {
+ char buf[PREFIX2STR_BUFFER];
+ char buf1[PREFIX2STR_BUFFER];
+
+ prefix2str(&nht_p, buf, sizeof(buf));
+ prefix2str(&rp_info->group, buf1, sizeof(buf1));
+ zlog_debug("%s: NHT Register RP addr %s grp %s with Zebra ",
+ __PRETTY_FUNCTION__, buf, buf1);
+ }
+
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false, NULL);
+ if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop, &nht_p,
+ &rp_info->group, 1)) {
+ route_unlock_node(rn);
+ return PIM_RP_NO_PATH;
+ }
+
+ pim_rp_check_interfaces(pim, rp_info);
+
+ route_unlock_node(rn);
+
+ pim_rp_refresh_group_to_rp_mapping(pim);
+
+ return result;
+}
+
void pim_rp_setup(struct pim_instance *pim)
{
struct listnode *node;
@@ -819,7 +1003,8 @@ void pim_rp_setup(struct pim_instance *pim)
nht_p.prefixlen = IPV4_MAX_BITLEN;
nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false,
+ NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
&nht_p, &rp_info->group, 1))
if (PIM_DEBUG_PIM_NHT_RP)
@@ -969,8 +1154,8 @@ struct pim_rpf *pim_rp_g(struct pim_instance *pim, struct in_addr group)
"%s: NHT Register RP addr %s grp %s with Zebra",
__PRETTY_FUNCTION__, buf, buf1);
}
-
- pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info, false,
+ NULL);
pim_rpf_set_refresh_time(pim);
(void)pim_ecmp_nexthop_lookup(pim, &rp_info->rp.source_nexthop,
&nht_p, &rp_info->group, 1);
@@ -1030,6 +1215,9 @@ int pim_rp_config_write(struct pim_instance *pim, struct vty *vty,
if (pim_rpf_addr_is_inaddr_none(&rp_info->rp))
continue;
+ if (rp_info->rp_src == RP_SRC_BSR)
+ continue;
+
if (rp_info->plist)
vty_out(vty, "%sip pim rp %s prefix-list %s\n", spaces,
inet_ntop(AF_INET,
@@ -1062,6 +1250,7 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
struct rp_info *rp_info;
struct rp_info *prev_rp_info = NULL;
struct listnode *node;
+ char source[7];
json_object *json = NULL;
json_object *json_rp_rows = NULL;
@@ -1071,12 +1260,17 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
json = json_object_new_object();
else
vty_out(vty,
- "RP address group/prefix-list OIF I am RP\n");
-
+ "RP address group/prefix-list OIF I am RP Source\n");
for (ALL_LIST_ELEMENTS_RO(pim->rp_list, node, rp_info)) {
if (!pim_rpf_addr_is_inaddr_none(&rp_info->rp)) {
char buf[48];
+ if (rp_info->rp_src == RP_SRC_STATIC)
+ strlcpy(source, "Static", sizeof(source));
+ else if (rp_info->rp_src == RP_SRC_BSR)
+ strlcpy(source, "BSR", sizeof(source));
+ else
+ strlcpy(source, "None", sizeof(source));
if (uj) {
/*
* If we have moved on to a new RP then add the
@@ -1119,6 +1313,8 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
json_row, "group",
prefix2str(&rp_info->group, buf,
48));
+ json_object_string_add(json_row, "source",
+ source);
json_object_array_add(json_rp_rows, json_row);
} else {
@@ -1143,9 +1339,10 @@ void pim_rp_show_information(struct pim_instance *pim, struct vty *vty, bool uj)
if (rp_info->i_am_rp)
vty_out(vty, "yes\n");
else
- vty_out(vty, "no\n");
- }
+ vty_out(vty, "no");
+ vty_out(vty, "%14s\n", source);
+ }
prev_rp_info = rp_info;
}
}
@@ -1180,7 +1377,7 @@ void pim_resolve_rp_nh(struct pim_instance *pim, struct pim_neighbor *nbr)
nht_p.u.prefix4 = rp_info->rp.rpf_addr.u.prefix4;
memset(&pnc, 0, sizeof(struct pim_nexthop_cache));
if (!pim_find_or_track_nexthop(pim, &nht_p, NULL, rp_info,
- &pnc))
+ false, &pnc))
continue;
for (nh_node = pnc.nexthop; nh_node; nh_node = nh_node->next) {
diff --git a/pimd/pim_rp.h b/pimd/pim_rp.h
index 402ec30aba..6dc26c07a9 100644
--- a/pimd/pim_rp.h
+++ b/pimd/pim_rp.h
@@ -27,9 +27,16 @@
#include "pim_iface.h"
#include "pim_rpf.h"
+enum rp_source {
+ RP_SRC_NONE = 0,
+ RP_SRC_STATIC,
+ RP_SRC_BSR
+};
+
struct rp_info {
struct prefix group;
struct pim_rpf rp;
+ enum rp_source rp_src;
int i_am_rp;
char *plist;
};
@@ -39,10 +46,18 @@ void pim_rp_free(struct pim_instance *pim);
void pim_rp_list_hash_clean(void *data);
-int pim_rp_new(struct pim_instance *pim, const char *rp, const char *group,
- const char *plist);
-int pim_rp_del(struct pim_instance *pim, const char *rp, const char *group,
- const char *plist);
+int pim_rp_new_config(struct pim_instance *pim, const char *rp,
+ const char *group, const char *plist);
+int pim_rp_new(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag);
+int pim_rp_del_config(struct pim_instance *pim, const char *rp,
+ const char *group, const char *plist);
+int pim_rp_del(struct pim_instance *pim, struct in_addr rp_addr,
+ struct prefix group, const char *plist,
+ enum rp_source rp_src_flag);
+int pim_rp_change(struct pim_instance *pim, struct in_addr new_rp_addr,
+ struct prefix group, enum rp_source rp_src_flag);
void pim_rp_prefix_list_update(struct pim_instance *pim,
struct prefix_list *plist);
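A minimal sketch of the split API (group and RP values hypothetical): the CLI keeps going through the *_config wrappers with RP_SRC_STATIC, while a BSM-learned mapping can call pim_rp_new() directly with RP_SRC_BSR and is then skipped by pim_rp_config_write():

    struct prefix grp;
    struct in_addr bsr_rp;

    str2prefix("225.1.1.0/24", &grp);          /* hypothetical group range */
    inet_pton(AF_INET, "192.0.2.10", &bsr_rp); /* hypothetical RP address */

    /* install or update the group-to-RP mapping learned via BSM */
    pim_rp_new(pim, bsr_rp, grp, NULL, RP_SRC_BSR);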
diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c
index dba46e63f0..d388802454 100644
--- a/pimd/pim_rpf.c
+++ b/pimd/pim_rpf.c
@@ -238,7 +238,7 @@ enum pim_rpf_result pim_rpf_update(struct pim_instance *pim,
if ((up->sg.src.s_addr == INADDR_ANY && I_am_RP(pim, up->sg.grp)) ||
PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
neigh_needed = FALSE;
- pim_find_or_track_nexthop(pim, &nht_p, up, NULL, NULL);
+ pim_find_or_track_nexthop(pim, &nht_p, up, NULL, false, NULL);
if (!pim_ecmp_nexthop_lookup(pim, &rpf->source_nexthop, &src, &grp,
neigh_needed))
return PIM_RPF_FAILURE;
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index 55d998f270..44b8ecbfea 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -237,7 +237,7 @@ struct pim_upstream *pim_upstream_del(struct pim_instance *pim,
zlog_debug("%s: Deregister upstream %s addr %s with Zebra NHT",
__PRETTY_FUNCTION__, up->sg_str, buf);
}
- pim_delete_tracked_nexthop(pim, &nht_p, up, NULL);
+ pim_delete_tracked_nexthop(pim, &nht_p, up, NULL, false);
}
XFREE(MTYPE_PIM_UPSTREAM, up);
@@ -1399,23 +1399,24 @@ const char *pim_upstream_state2str(enum pim_upstream_state join_state)
return "Unknown";
}
-const char *pim_reg_state2str(enum pim_reg_state reg_state, char *state_str)
+const char *pim_reg_state2str(enum pim_reg_state reg_state, char *state_str,
+ size_t state_str_len)
{
switch (reg_state) {
case PIM_REG_NOINFO:
- strcpy(state_str, "RegNoInfo");
+ strlcpy(state_str, "RegNoInfo", state_str_len);
break;
case PIM_REG_JOIN:
- strcpy(state_str, "RegJoined");
+ strlcpy(state_str, "RegJoined", state_str_len);
break;
case PIM_REG_JOIN_PENDING:
- strcpy(state_str, "RegJoinPend");
+ strlcpy(state_str, "RegJoinPend", state_str_len);
break;
case PIM_REG_PRUNE:
- strcpy(state_str, "RegPrune");
+ strlcpy(state_str, "RegPrune", state_str_len);
break;
default:
- strcpy(state_str, "RegUnknown");
+ strlcpy(state_str, "RegUnknown", state_str_len);
}
return state_str;
}
@@ -1432,7 +1433,7 @@ static int pim_upstream_register_stop_timer(struct thread *t)
char state_str[PIM_REG_STATE_STR_LEN];
zlog_debug("%s: (S,G)=%s[%s] upstream register stop timer %s",
__PRETTY_FUNCTION__, up->sg_str, pim->vrf->name,
- pim_reg_state2str(up->reg_state, state_str));
+ pim_reg_state2str(up->reg_state, state_str, sizeof(state_str)));
}
switch (up->reg_state) {
diff --git a/pimd/pim_upstream.h b/pimd/pim_upstream.h
index 102826ac71..02ae998290 100644
--- a/pimd/pim_upstream.h
+++ b/pimd/pim_upstream.h
@@ -283,7 +283,8 @@ void pim_upstream_switch(struct pim_instance *pim, struct pim_upstream *up,
const char *pim_upstream_state2str(enum pim_upstream_state join_state);
#define PIM_REG_STATE_STR_LEN 12
-const char *pim_reg_state2str(enum pim_reg_state state, char *state_str);
+const char *pim_reg_state2str(enum pim_reg_state state, char *state_str,
+ size_t state_str_len);
int pim_upstream_inherited_olist_decide(struct pim_instance *pim,
struct pim_upstream *up);
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index 2654ebc588..8d40f85132 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -39,6 +39,7 @@
#include "pim_msdp.h"
#include "pim_ssm.h"
#include "pim_bfd.h"
+#include "pim_bsm.h"
#include "pim_vxlan.h"
int pim_debug_config_write(struct vty *vty)
@@ -120,6 +121,11 @@ int pim_debug_config_write(struct vty *vty)
++writes;
}
+ if (PIM_DEBUG_BSM) {
+ vty_out(vty, "debug pim bsm\n");
+ ++writes;
+ }
+
if (PIM_DEBUG_VXLAN) {
vty_out(vty, "debug pim vxlan\n");
++writes;
@@ -383,7 +389,10 @@ int pim_interface_config_write(struct vty *vty)
writes +=
pim_static_write_mroute(pim, vty, ifp);
+ pim_bsm_write_config(vty, ifp);
+ ++writes;
pim_bfd_write_config(vty, ifp);
+ ++writes;
}
vty_endframe(vty, "!\n");
++writes;
diff --git a/pimd/pim_vxlan.c b/pimd/pim_vxlan.c
index 1c6b56568f..09669e206e 100644
--- a/pimd/pim_vxlan.c
+++ b/pimd/pim_vxlan.c
@@ -344,7 +344,7 @@ static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
nht_p.prefixlen = IPV4_MAX_BITLEN;
nht_p.u.prefix4 = up->upstream_addr;
pim_delete_tracked_nexthop(vxlan_sg->pim,
- &nht_p, up, NULL);
+ &nht_p, up, NULL, false);
}
pim_upstream_ref(up, flags, __PRETTY_FUNCTION__);
vxlan_sg->up = up;
diff --git a/pimd/pimd.h b/pimd/pimd.h
index 2f2a870371..cdeaed3977 100644
--- a/pimd/pimd.h
+++ b/pimd/pimd.h
@@ -114,6 +114,7 @@
#define PIM_MASK_PIM_NHT_RP (1 << 24)
#define PIM_MASK_MTRACE (1 << 25)
#define PIM_MASK_VXLAN (1 << 26)
+#define PIM_MASK_BSM_PROC (1 << 27)
/* Remember 32 bits!!! */
/* PIM error codes */
@@ -182,11 +183,12 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DEBUG_PIM_NHT_RP (router->debugs & PIM_MASK_PIM_NHT_RP)
#define PIM_DEBUG_MTRACE (router->debugs & PIM_MASK_MTRACE)
#define PIM_DEBUG_VXLAN (router->debugs & PIM_MASK_VXLAN)
+#define PIM_DEBUG_BSM (router->debugs & PIM_MASK_BSM_PROC)
#define PIM_DEBUG_EVENTS \
(router->debugs \
& (PIM_MASK_PIM_EVENTS | PIM_MASK_IGMP_EVENTS \
- | PIM_MASK_MSDP_EVENTS))
+ | PIM_MASK_MSDP_EVENTS | PIM_MASK_BSM_PROC))
#define PIM_DEBUG_PACKETS \
(router->debugs \
& (PIM_MASK_PIM_PACKETS | PIM_MASK_IGMP_PACKETS \
@@ -212,6 +214,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DO_DEBUG_SSMPINGD (router->debugs |= PIM_MASK_SSMPINGD)
#define PIM_DO_DEBUG_MROUTE (router->debugs |= PIM_MASK_MROUTE)
#define PIM_DO_DEBUG_MROUTE_DETAIL (router->debugs |= PIM_MASK_MROUTE_DETAIL)
+#define PIM_DO_DEBUG_BSM (router->debugs |= PIM_MASK_BSM_PROC)
#define PIM_DO_DEBUG_PIM_HELLO (router->debugs |= PIM_MASK_PIM_HELLO)
#define PIM_DO_DEBUG_PIM_J_P (router->debugs |= PIM_MASK_PIM_J_P)
#define PIM_DO_DEBUG_PIM_REG (router->debugs |= PIM_MASK_PIM_REG)
@@ -253,6 +256,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DONT_DEBUG_PIM_NHT_RP (router->debugs &= ~PIM_MASK_PIM_NHT_RP)
#define PIM_DONT_DEBUG_MTRACE (router->debugs &= ~PIM_MASK_MTRACE)
#define PIM_DONT_DEBUG_VXLAN (router->debugs &= ~PIM_MASK_VXLAN)
+#define PIM_DONT_DEBUG_BSM (router->debugs &= ~PIM_MASK_BSM_PROC)
void pim_router_init(void);
void pim_router_terminate(void);
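The new PIM_DEBUG_BSM guard is intended to gate BSM-related logging in the pim_bsm.c code added by this commit, in the usual pimd style. A minimal sketch of a caller (the message text and variables are illustrative, not taken from the tree):

	if (PIM_DEBUG_BSM)
		zlog_debug("%s: bootstrap message received on %s",
			   __PRETTY_FUNCTION__, ifp->name);

The matching "no debug pim bsm" handling would use PIM_DONT_DEBUG_BSM to clear the same mask bit.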
diff --git a/pimd/subdir.am b/pimd/subdir.am
index 7f4810722b..240b62804f 100644
--- a/pimd/subdir.am
+++ b/pimd/subdir.am
@@ -17,6 +17,7 @@ pimd_libpim_a_SOURCES = \
pimd/pim_assert.c \
pimd/pim_bfd.c \
pimd/pim_br.c \
+ pimd/pim_bsm.c \
pimd/pim_cmd.c \
pimd/pim_errors.c \
pimd/pim_hello.c \
@@ -68,6 +69,7 @@ noinst_HEADERS += \
pimd/pim_assert.h \
pimd/pim_bfd.h \
pimd/pim_br.h \
+ pimd/pim_bsm.h \
pimd/pim_cmd.h \
pimd/pim_errors.h \
pimd/pim_hello.h \
diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c
index 411689a7a7..71bc43049a 100644
--- a/ripngd/ripngd.c
+++ b/ripngd/ripngd.c
@@ -2011,26 +2011,26 @@ static char *ripng_route_subtype_print(struct ripng_info *rinfo)
memset(str, 0, 3);
if (rinfo->suppress)
- strcat(str, "S");
+ strlcat(str, "S", sizeof(str));
switch (rinfo->sub_type) {
case RIPNG_ROUTE_RTE:
- strcat(str, "n");
+ strlcat(str, "n", sizeof(str));
break;
case RIPNG_ROUTE_STATIC:
- strcat(str, "s");
+ strlcat(str, "s", sizeof(str));
break;
case RIPNG_ROUTE_DEFAULT:
- strcat(str, "d");
+ strlcat(str, "d", sizeof(str));
break;
case RIPNG_ROUTE_REDISTRIBUTE:
- strcat(str, "r");
+ strlcat(str, "r", sizeof(str));
break;
case RIPNG_ROUTE_INTERFACE:
- strcat(str, "i");
+ strlcat(str, "i", sizeof(str));
break;
default:
- strcat(str, "?");
+ strlcat(str, "?", sizeof(str));
break;
}
diff --git a/snapcraft/README.snap_build.md b/snapcraft/README.snap_build.md
index 7c42848451..e43f63f2d9 100644
--- a/snapcraft/README.snap_build.md
+++ b/snapcraft/README.snap_build.md
@@ -92,6 +92,14 @@ All the commands are prefixed with frr.
frr.ripngd-debug
frr.ldp-debug
frr.zebra-debug
+ frr.pimd-debug
+ frr.nhrpd-debug
+ frr.babeld-debug
+ frr.eigrpd-debug
+ frr.pbrd-debug
+ frr.staticd-debug
+ frr.bfdd-debug
+ frr.fabricd-debug
vtysh can be accessed as frr.vtysh (Make sure you have /snap/bin in your
path). If access as `vtysh` instead of `frr.vtysh` is needed, you can enable it
diff --git a/snapcraft/README.usage.md b/snapcraft/README.usage.md
index a7b51a5656..6a0864c8c5 100644
--- a/snapcraft/README.usage.md
+++ b/snapcraft/README.usage.md
@@ -66,6 +66,8 @@ depend on them). These are mainly intended to debug the Snap
Starts staticd daemon in foreground
- `frr.bfdd-debug`:
Starts bfdd daemon in foreground
+- `frr.fabricd-debug`:
+ Starts fabricd daemon in foreground
MPLS (LDP)
----------
diff --git a/snapcraft/defaults/fabricd.conf.default b/snapcraft/defaults/fabricd.conf.default
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/snapcraft/defaults/fabricd.conf.default
diff --git a/snapcraft/scripts/Makefile b/snapcraft/scripts/Makefile
index e3a7708f23..7ddb7f0769 100644
--- a/snapcraft/scripts/Makefile
+++ b/snapcraft/scripts/Makefile
@@ -17,6 +17,7 @@ install:
install -D -m 0755 pbrd-service $(DESTDIR)/bin/
install -D -m 0755 staticd-service $(DESTDIR)/bin/
install -D -m 0755 bfdd-service $(DESTDIR)/bin/
+ install -D -m 0755 fabricd-service $(DESTDIR)/bin/
install -D -m 0755 set-options $(DESTDIR)/bin/
install -D -m 0755 show_version $(DESTDIR)/bin/
diff --git a/snapcraft/scripts/bgpd-service b/snapcraft/scripts/bgpd-service
index 6c3a6f5959..64273d9f80 100644
--- a/snapcraft/scripts/bgpd-service
+++ b/snapcraft/scripts/bgpd-service
@@ -10,7 +10,7 @@ fi
if ! [ -e $SNAP_DATA/rpki.conf ]; then
echo "-M rpki" > $SNAP_DATA/rpki.conf
fi
-EXTRA_OPTIONS="`cat $SNAP_DATA/rpki.conf`"
+EXTRA_OPTIONS="`$SNAP/bin/cat $SNAP_DATA/rpki.conf`"
exec $SNAP/sbin/bgpd \
-f $SNAP_DATA/bgpd.conf \
--pid_file $SNAP_DATA/bgpd.pid \
diff --git a/snapcraft/scripts/fabricd-service b/snapcraft/scripts/fabricd-service
new file mode 100644
index 0000000000..586f061ef0
--- /dev/null
+++ b/snapcraft/scripts/fabricd-service
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e -x
+
+if ! [ -e $SNAP_DATA/fabricd.conf ]; then
+ cp $SNAP/etc/frr/fabricd.conf.default $SNAP_DATA/fabricd.conf
+fi
+exec $SNAP/sbin/fabricd \
+ -f $SNAP_DATA/fabricd.conf \
+ --pid_file $SNAP_DATA/fabricd.pid \
+ --socket $SNAP_DATA/zsock \
+ --vty_socket $SNAP_DATA
+
diff --git a/snapcraft/snapcraft.yaml.in b/snapcraft/snapcraft.yaml.in
index b70d6efee2..d8071e8cfe 100644
--- a/snapcraft/snapcraft.yaml.in
+++ b/snapcraft/snapcraft.yaml.in
@@ -4,8 +4,8 @@ summary: FRRouting BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing da
description: BGP/OSPFv2/OSPFv3/ISIS/RIP/RIPng/PIM/LDP/EIGRP/BFD routing daemon
FRRouting (FRR) is free software which manages TCP/IP based routing
protocols. It supports BGP4, BGP4+, OSPFv2, OSPFv3, IS-IS, RIPv1, RIPv2,
- RIPng, PIM, LDP, Babel, EIGRP, PBR (Policy-based routing) and BFD as well as
- the IPv6 versions of these.
+ RIPng, PIM, LDP, Babel, EIGRP, PBR (Policy-based routing), BFD and OpenFabric
+ as well as the IPv6 versions of these.
FRRouting (frr) is a fork of Quagga.
confinement: strict
grade: devel
@@ -127,6 +127,13 @@ apps:
- network
- network-bind
- network-control
+ fabricd:
+ command: bin/fabricd-service
+ daemon: simple
+ plugs:
+ - network
+ - network-bind
+ - network-control
set:
command: bin/set-options
zebra-debug:
@@ -136,7 +143,7 @@ apps:
- network-bind
- network-control
bgpd-debug:
- command: sbin/bgpd -f $SNAP_DATA/bgpd.conf --pid_file $SNAP_DATA/bgpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA --moduledir $SNAP/lib/frr/modules `cat $SNAP_DATA/rpki.conf 2> /dev/null`
+ command: sbin/bgpd -f $SNAP_DATA/bgpd.conf --pid_file $SNAP_DATA/bgpd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA --moduledir $SNAP/lib/frr/modules
plugs:
- network
- network-bind
@@ -219,6 +226,12 @@ apps:
- network
- network-bind
- network-control
+ fabricd-debug:
+ command: sbin/fabricd -f $SNAP_DATA/fabricd.conf --pid_file $SNAP_DATA/fabricd.pid --socket $SNAP_DATA/zsock --vty_socket $SNAP_DATA
+ plugs:
+ - network
+ - network-bind
+ - network-control
parts:
rtrlib:
@@ -230,16 +243,33 @@ parts:
stage-packages:
- libssh-4
prime:
- - lib/x86_64-linux-gnu/librtr.so*
+ - lib/librtr.so*
- usr/lib/x86_64-linux-gnu/libssh.so*
source: https://github.com/rtrlib/rtrlib.git
source-type: git
- source-tag: v0.5.0
+ source-tag: v0.6.3
plugin: cmake
configflags:
- -DCMAKE_BUILD_TYPE=Release
+ libyang:
+ build-packages:
+ - cmake
+ - make
+ - gcc
+ - libpcre3-dev
+ stage-packages:
+ - libpcre3
+ source: https://github.com/CESNET/libyang.git
+ source-type: git
+ source-tag: v0.16-r3
+ plugin: cmake
+ configflags:
+ - -DCMAKE_INSTALL_PREFIX:PATH=/usr
+ - -DENABLE_LYD_PRIV=ON
+ - -DENABLE_CACHE=OFF
+ - -DCMAKE_BUILD_TYPE:String="Release"
frr:
- after: [rtrlib]
+ after: [rtrlib,libyang]
build-packages:
- gcc
- autoconf
@@ -272,13 +302,13 @@ parts:
- iproute2
- logrotate
- libcap2
- - libc6
- libtinfo5
- libreadline6
- libjson-c2
- libc-ares2
- libatm1
- libprotobuf-c1
+ - libdb5.3
plugin: autotools
source: ../frr-@PACKAGE_VERSION@.tar.gz
configflags:
@@ -322,7 +352,9 @@ parts:
eigrpd.conf.default: etc/frr/eigrpd.conf.default
pbrd.conf.default: etc/frr/pbrd.conf.default
bfdd.conf.default: etc/frr/bfdd.conf.default
+ fabricd.conf.default: etc/frr/fabricd.conf.default
vtysh.conf.default: etc/frr/vtysh.conf.default
+ staticd.conf.default: etc/frr/staticd.conf.default
frr-scripts:
plugin: make
source: scripts
@@ -344,3 +376,8 @@ parts:
README.snap_build.md: doc/README.snap_build.md
extra_version_info.txt: doc/extra_version_info.txt
+passthrough:
+ layout:
+ /usr/lib/x86_64-linux-gnu/libyang:
+ bind: $SNAP/usr/lib/x86_64-linux-gnu/libyang
+
diff --git a/staticd/static_routes.c b/staticd/static_routes.c
index cde31df14f..5f9ecad694 100644
--- a/staticd/static_routes.c
+++ b/staticd/static_routes.c
@@ -127,7 +127,7 @@ int static_add_route(afi_t afi, safi_t safi, uint8_t type, struct prefix *p,
si->tag = tag;
si->vrf_id = svrf->vrf->vrf_id;
si->nh_vrf_id = nh_svrf->vrf->vrf_id;
- strcpy(si->nh_vrfname, nh_svrf->vrf->name);
+ strlcpy(si->nh_vrfname, nh_svrf->vrf->name, sizeof(si->nh_vrfname));
si->table_id = table_id;
si->onlink = onlink;
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index a0b119c3eb..1ab70eeed2 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -681,7 +681,7 @@ int vtysh_mark_file(const char *filename)
while (fgets(vty->buf, VTY_BUFSIZ, confp)) {
lineno++;
tried = 0;
- strcpy(vty_buf_copy, vty->buf);
+ strlcpy(vty_buf_copy, vty->buf, VTY_BUFSIZ);
vty_buf_trimmed = trim(vty_buf_copy);
switch (vty->node) {
@@ -2553,6 +2553,15 @@ DEFUNSH(VTYSH_ALL, vtysh_log_timestamp_precision,
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_ALL, vtysh_debug_memstats,
+ vtysh_debug_memstats_cmd, "[no] debug memstats-at-exit",
+ NO_STR
+ "Debug\n"
+ "Print memory statistics at exit\n")
+{
+ return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_ALL, no_vtysh_log_timestamp_precision,
no_vtysh_log_timestamp_precision_cmd, "no log timestamp precision",
NO_STR
@@ -2702,9 +2711,10 @@ static void backup_config_file(const char *fbackup)
{
char *integrate_sav = NULL;
- integrate_sav = malloc(strlen(fbackup) + strlen(CONF_BACKUP_EXT) + 1);
- strcpy(integrate_sav, fbackup);
- strcat(integrate_sav, CONF_BACKUP_EXT);
+ size_t integrate_sav_sz = strlen(fbackup) + strlen(CONF_BACKUP_EXT) + 1;
+ integrate_sav = malloc(integrate_sav_sz);
+ strlcpy(integrate_sav, fbackup, integrate_sav_sz);
+ strlcat(integrate_sav, CONF_BACKUP_EXT, integrate_sav_sz);
/* Move current configuration file to backup config file. */
if (unlink(integrate_sav) != 0) {
@@ -3848,6 +3858,8 @@ void vtysh_init_vty(void)
install_element(VIEW_NODE, &vtysh_show_debugging_hashtable_cmd);
install_element(ENABLE_NODE, &vtysh_debug_all_cmd);
install_element(CONFIG_NODE, &vtysh_debug_all_cmd);
+ install_element(ENABLE_NODE, &vtysh_debug_memstats_cmd);
+ install_element(CONFIG_NODE, &vtysh_debug_memstats_cmd);
/* misc lib show commands */
install_element(VIEW_NODE, &vtysh_show_memory_cmd);
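The backup_config_file() change above shows the pattern applied throughout this commit: size the destination once and pass that bound to both strlcpy() and strlcat(), which always NUL-terminate and never write past the buffer. A standalone sketch of the same pattern (buffer name and size are illustrative):

	char dest[PATH_MAX];

	strlcpy(dest, fbackup, sizeof(dest));
	strlcat(dest, CONF_BACKUP_EXT, sizeof(dest));	/* bound is the whole buffer, not the remaining space */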
diff --git a/zebra/ioctl.c b/zebra/ioctl.c
index 322527015b..8202e076af 100644
--- a/zebra/ioctl.c
+++ b/zebra/ioctl.c
@@ -245,7 +245,7 @@ static int if_set_prefix_ctx(const struct zebra_dplane_ctx *ctx)
p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
memset(&addreq, 0, sizeof(addreq));
- strncpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
+ strlcpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
sizeof(addreq.ifra_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
@@ -296,7 +296,7 @@ static int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
memset(&addreq, 0, sizeof(addreq));
- strncpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
+ strlcpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
sizeof(addreq.ifra_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
diff --git a/zebra/ioctl_solaris.c b/zebra/ioctl_solaris.c
index ccfa7a4a4c..1f96fa23ea 100644
--- a/zebra/ioctl_solaris.c
+++ b/zebra/ioctl_solaris.c
@@ -286,7 +286,7 @@ static int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
- strncpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
+ strlcpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
sizeof(ifreq.ifr_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index 5f4bd3bbc6..156ce50725 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -304,12 +304,13 @@ size_t rta_getattr(caddr_t sap, void *destp, size_t destlen)
size_t rta_getsdlname(caddr_t sap, void *destp, short *destlen)
{
struct sockaddr_dl *sdl = (struct sockaddr_dl *)sap;
- struct sockaddr *sa = (struct sockaddr *)sap;
uint8_t *dest = destp;
size_t tlen, copylen;
copylen = sdl->sdl_nlen;
#ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
+ struct sockaddr *sa = (struct sockaddr *)sap;
+
tlen = (sa->sa_len == 0) ? sizeof(ROUNDUP_TYPE) : ROUNDUP(sa->sa_len);
#else /* !HAVE_STRUCT_SOCKADDR_SA_LEN */
tlen = SAROUNDUP(sap);
@@ -522,7 +523,7 @@ static enum zebra_link_type sdl_to_zebra_link_type(unsigned int sdlt)
int ifm_read(struct if_msghdr *ifm)
{
struct interface *ifp = NULL;
- struct sockaddr_dl *sdl;
+ struct sockaddr_dl *sdl = NULL;
char ifname[IFNAMSIZ];
short ifnlen = 0;
int maskbit;
diff --git a/zebra/rib.h b/zebra/rib.h
index 0353c9bb99..a54e164d98 100644
--- a/zebra/rib.h
+++ b/zebra/rib.h
@@ -87,9 +87,12 @@ struct route_entry {
/* Link list. */
struct re_list_item next;
- /* Nexthop structure */
+ /* Nexthop structure (from RIB) */
struct nexthop_group ng;
+ /* Nexthop group from FIB (optional) */
+ struct nexthop_group fib_ng;
+
/* Tag */
route_tag_t tag;
@@ -529,6 +532,16 @@ static inline void rib_tables_iter_cleanup(rib_tables_iter_t *iter)
DECLARE_HOOK(rib_update, (struct route_node * rn, const char *reason),
(rn, reason))
+/*
+ * Access active nexthop-group, either RIB or FIB version
+ */
+static inline struct nexthop_group *rib_active_nhg(struct route_entry *re)
+{
+ if (re->fib_ng.nexthop)
+ return &(re->fib_ng);
+ else
+ return &(re->ng);
+}
extern void zebra_vty_init(void);
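Callers that care about the nexthops actually programmed are expected to go through the new helper instead of reading re->ng directly, so that a FIB-specific group reported by an async notification takes precedence when present. A minimal sketch, assuming the usual zebra route_entry/nexthop types:

	struct nexthop_group *nhg = rib_active_nhg(re);
	struct nexthop *nexthop;
	int installed = 0;	/* local counter, for illustration only */

	for (nexthop = nhg->nexthop; nexthop; nexthop = nexthop->next) {
		if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
			installed++;
	}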
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index def5bf7d88..92c78a4cbb 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -813,7 +813,7 @@ static int netlink_route_change_read_multicast(struct nlmsghdr *h,
sprintf(temp, "%s(%d) ", ifp ? ifp->name : "Unknown",
oif[count]);
- strcat(oif_list, temp);
+ strlcat(oif_list, temp, sizeof(oif_list));
}
struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(vrf);
ifp = if_lookup_by_index(iif, vrf);
diff --git a/zebra/subdir.am b/zebra/subdir.am
index 1e36d020a3..25040a2717 100644
--- a/zebra/subdir.am
+++ b/zebra/subdir.am
@@ -75,6 +75,7 @@ zebra_zebra_SOURCES = \
zebra/zebra_mpls_null.c \
zebra/zebra_mpls_vty.c \
zebra/zebra_mroute.c \
+ zebra/zebra_nhg.c \
zebra/zebra_ns.c \
zebra/zebra_pbr.c \
zebra/zebra_ptm.c \
@@ -135,6 +136,7 @@ noinst_HEADERS += \
zebra/zebra_memory.h \
zebra/zebra_mpls.h \
zebra/zebra_mroute.h \
+ zebra/zebra_nhg.h \
zebra/zebra_ns.h \
zebra/zebra_pbr.h \
zebra/zebra_ptm.h \
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 6fc62147c8..1707d3a68b 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -172,6 +172,11 @@ struct zebra_dplane_ctx {
uint32_t zd_seq;
uint32_t zd_old_seq;
+ /* Some updates may be generated by notifications: allow the
+ * plugin to notice and ignore results from its own notifications.
+ */
+ uint32_t zd_notif_provider;
+
/* TODO -- internal/sub-operation status? */
enum zebra_dplane_result zd_remote_status;
enum zebra_dplane_result zd_kernel_status;
@@ -222,6 +227,8 @@ struct zebra_dplane_provider {
/* Flags */
int dp_flags;
+ int (*dp_start)(struct zebra_dplane_provider *prov);
+
int (*dp_fp)(struct zebra_dplane_provider *prov);
int (*dp_fini)(struct zebra_dplane_provider *prov, bool early_p);
@@ -351,7 +358,7 @@ struct thread_master *dplane_get_thread_master(void)
/*
* Allocate a dataplane update context
*/
-static struct zebra_dplane_ctx *dplane_ctx_alloc(void)
+struct zebra_dplane_ctx *dplane_ctx_alloc(void)
{
struct zebra_dplane_ctx *p;
@@ -392,6 +399,7 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
case DPLANE_OP_ROUTE_DELETE:
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
+ case DPLANE_OP_ROUTE_NOTIFY:
/* Free allocated nexthops */
if ((*pctx)->u.rinfo.zd_ng.nexthop) {
@@ -413,6 +421,7 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
case DPLANE_OP_LSP_INSTALL:
case DPLANE_OP_LSP_UPDATE:
case DPLANE_OP_LSP_DELETE:
+ case DPLANE_OP_LSP_NOTIFY:
{
zebra_nhlfe_t *nhlfe, *next;
@@ -543,6 +552,12 @@ bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx)
return CHECK_FLAG(ctx->zd_flags, DPLANE_CTX_FLAG_NO_KERNEL);
}
+void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op)
+{
+ DPLANE_CTX_VALID(ctx);
+ ctx->zd_op = op;
+}
+
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -569,6 +584,9 @@ const char *dplane_op2str(enum dplane_op_e op)
case DPLANE_OP_ROUTE_DELETE:
ret = "ROUTE_DELETE";
break;
+ case DPLANE_OP_ROUTE_NOTIFY:
+ ret = "ROUTE_NOTIFY";
+ break;
case DPLANE_OP_LSP_INSTALL:
ret = "LSP_INSTALL";
@@ -579,6 +597,9 @@ const char *dplane_op2str(enum dplane_op_e op)
case DPLANE_OP_LSP_DELETE:
ret = "LSP_DELETE";
break;
+ case DPLANE_OP_LSP_NOTIFY:
+ ret = "LSP_NOTIFY";
+ break;
case DPLANE_OP_PW_INSTALL:
ret = "PW_INSTALL";
@@ -625,6 +646,14 @@ const char *dplane_res2str(enum zebra_dplane_result res)
return ret;
}
+void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
+ const struct prefix *dest)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ prefix_copy(&(ctx->u.rinfo.zd_dest), dest);
+}
+
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -632,6 +661,16 @@ const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx)
return &(ctx->u.rinfo.zd_dest);
}
+void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ if (src)
+ prefix_copy(&(ctx->u.rinfo.zd_src), src);
+ else
+ memset(&(ctx->u.rinfo.zd_src), 0, sizeof(struct prefix));
+}
+
/* Source prefix is a little special - return NULL for "no src prefix" */
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx)
{
@@ -666,6 +705,13 @@ uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx)
return ctx->zd_old_seq;
}
+void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->zd_vrf_id = vrf;
+}
+
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -673,6 +719,35 @@ vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx)
return ctx->zd_vrf_id;
}
+bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return (ctx->zd_notif_provider != 0);
+}
+
+uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return ctx->zd_notif_provider;
+}
+
+void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
+ uint32_t id)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->zd_notif_provider = id;
+}
+
+void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_type = type;
+}
+
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -687,6 +762,13 @@ int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_old_type;
}
+void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_afi = afi;
+}
+
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -694,6 +776,13 @@ afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_afi;
}
+void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_safi = safi;
+}
+
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -701,6 +790,13 @@ safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_safi;
}
+void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->zd_table_id = table;
+}
+
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -715,6 +811,13 @@ route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_tag;
}
+void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_tag = tag;
+}
+
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -729,6 +832,13 @@ uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_instance;
}
+void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_instance = instance;
+}
+
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -771,6 +881,13 @@ uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_distance;
}
+void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.rinfo.zd_distance = distance;
+}
+
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -778,6 +895,17 @@ uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx)
return ctx->u.rinfo.zd_old_distance;
}
+void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ if (ctx->u.rinfo.zd_ng.nexthop) {
+ nexthops_free(ctx->u.rinfo.zd_ng.nexthop);
+ ctx->u.rinfo.zd_ng.nexthop = NULL;
+ }
+ copy_nexthops(&(ctx->u.rinfo.zd_ng.nexthop), nh, NULL);
+}
+
const struct nexthop_group *dplane_ctx_get_ng(
const struct zebra_dplane_ctx *ctx)
{
@@ -811,6 +939,13 @@ mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx)
return ctx->u.lsp.ile.in_label;
}
+void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx, mpls_label_t label)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.lsp.ile.in_label = label;
+}
+
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -818,6 +953,14 @@ uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx)
return ctx->u.lsp.addr_family;
}
+void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
+ uint8_t family)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.lsp.addr_family = family;
+}
+
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -825,6 +968,14 @@ uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx)
return ctx->u.lsp.flags;
}
+void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
+ uint32_t flags)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.lsp.flags = flags;
+}
+
const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -832,6 +983,24 @@ const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
return ctx->u.lsp.nhlfe_list;
}
+zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type,
+ union g_addr *gate,
+ ifindex_t ifindex,
+ mpls_label_t out_label)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ DPLANE_CTX_VALID(ctx);
+
+ nhlfe = zebra_mpls_lsp_add_nhlfe(&(ctx->u.lsp),
+ lsp_type, nh_type, gate,
+ ifindex, out_label);
+
+ return nhlfe;
+}
+
const zebra_nhlfe_t *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
@@ -840,6 +1009,16 @@ dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
return ctx->u.lsp.best_nhlfe;
}
+const zebra_nhlfe_t *
+dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
+ zebra_nhlfe_t *nhlfe)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ ctx->u.lsp.best_nhlfe = nhlfe;
+ return ctx->u.lsp.best_nhlfe;
+}
+
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
@@ -1497,6 +1676,59 @@ done:
}
/*
+ * Update from an async notification, to bring other fibs up-to-date.
+ */
+enum zebra_dplane_result
+dplane_route_notif_update(struct route_node *rn,
+ struct route_entry *re,
+ enum dplane_op_e op,
+ struct zebra_dplane_ctx *ctx)
+{
+ enum zebra_dplane_result ret = ZEBRA_DPLANE_REQUEST_FAILURE;
+ struct zebra_dplane_ctx *new_ctx = NULL;
+ struct nexthop *nexthop;
+
+ if (rn == NULL || re == NULL)
+ goto done;
+
+ new_ctx = dplane_ctx_alloc();
+ if (new_ctx == NULL)
+ goto done;
+
+ /* Init context with info from zebra data structs */
+ dplane_ctx_route_init(new_ctx, op, rn, re);
+
+ /* For add/update, need to adjust the nexthops so that we match
+ * the notification state, which may not be the route-entry/RIB
+ * state.
+ */
+ if (op == DPLANE_OP_ROUTE_UPDATE ||
+ op == DPLANE_OP_ROUTE_INSTALL) {
+
+ nexthops_free(new_ctx->u.rinfo.zd_ng.nexthop);
+ new_ctx->u.rinfo.zd_ng.nexthop = NULL;
+
+ copy_nexthops(&(new_ctx->u.rinfo.zd_ng.nexthop),
+ (rib_active_nhg(re))->nexthop, NULL);
+
+ for (ALL_NEXTHOPS(new_ctx->u.rinfo.zd_ng, nexthop))
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
+
+ }
+
+ /* Capture info about the source of the notification, in 'ctx' */
+ dplane_ctx_set_notif_provider(new_ctx,
+ dplane_ctx_get_notif_provider(ctx));
+
+ dplane_route_enqueue(new_ctx);
+
+ ret = ZEBRA_DPLANE_REQUEST_QUEUED;
+
+done:
+ return ret;
+}
+
+/*
* Enqueue LSP add for the dataplane.
*/
enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
@@ -1529,6 +1761,50 @@ enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
return ret;
}
+/* Update or un-install resulting from an async notification */
+enum zebra_dplane_result
+dplane_lsp_notif_update(zebra_lsp_t *lsp,
+ enum dplane_op_e op,
+ struct zebra_dplane_ctx *notif_ctx)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+ int ret = EINVAL;
+ struct zebra_dplane_ctx *ctx = NULL;
+
+ /* Obtain context block */
+ ctx = dplane_ctx_alloc();
+ if (ctx == NULL) {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ ret = dplane_ctx_lsp_init(ctx, op, lsp);
+ if (ret != AOK)
+ goto done;
+
+ /* Capture info about the source of the notification */
+ dplane_ctx_set_notif_provider(
+ ctx,
+ dplane_ctx_get_notif_provider(notif_ctx));
+
+ ret = dplane_route_enqueue(ctx);
+
+done:
+ /* Update counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_lsps_in, 1,
+ memory_order_relaxed);
+
+ if (ret == AOK)
+ result = ZEBRA_DPLANE_REQUEST_QUEUED;
+ else {
+ atomic_fetch_add_explicit(&zdplane_info.dg_lsp_errors, 1,
+ memory_order_relaxed);
+ if (ctx)
+ dplane_ctx_free(&ctx);
+ }
+ return result;
+}
+
/*
* Enqueue pseudowire install for the dataplane.
*/
@@ -1823,6 +2099,7 @@ int dplane_show_provs_helper(struct vty *vty, bool detailed)
int dplane_provider_register(const char *name,
enum dplane_provider_prio prio,
int flags,
+ int (*start_fp)(struct zebra_dplane_provider *),
int (*fp)(struct zebra_dplane_provider *),
int (*fini_fp)(struct zebra_dplane_provider *,
bool early),
@@ -1853,6 +2130,7 @@ int dplane_provider_register(const char *name,
p->dp_priority = prio;
p->dp_fp = fp;
+ p->dp_start = start_fp;
p->dp_fini = fini_fp;
p->dp_data = data;
@@ -2046,6 +2324,20 @@ int dplane_provider_work_ready(void)
}
/*
+ * Enqueue a context directly to zebra main.
+ */
+void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
+{
+ struct dplane_ctx_q temp_list;
+
+ /* Zebra's api takes a list, so we need to use a temporary list */
+ TAILQ_INIT(&temp_list);
+
+ TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
+ (zdplane_info.dg_results_cb)(&temp_list);
+}
+
+/*
* Kernel dataplane provider
*/
@@ -2207,9 +2499,11 @@ static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
res = kernel_dplane_address_update(ctx);
break;
- /* Ignore system 'notifications' - the kernel already knows */
+ /* Ignore 'notifications' - no-op */
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
+ case DPLANE_OP_ROUTE_NOTIFY:
+ case DPLANE_OP_LSP_NOTIFY:
res = ZEBRA_DPLANE_REQUEST_SUCCESS;
break;
@@ -2320,7 +2614,7 @@ static void dplane_provider_init(void)
ret = dplane_provider_register("Kernel",
DPLANE_PRIO_KERNEL,
- DPLANE_PROV_FLAGS_DEFAULT,
+ DPLANE_PROV_FLAGS_DEFAULT, NULL,
kernel_dplane_process_func,
NULL,
NULL, NULL);
@@ -2333,7 +2627,7 @@ static void dplane_provider_init(void)
/* Optional test provider ... */
ret = dplane_provider_register("Test",
DPLANE_PRIO_PRE_KERNEL,
- DPLANE_PROV_FLAGS_DEFAULT,
+ DPLANE_PROV_FLAGS_DEFAULT, NULL,
test_dplane_process_func,
test_dplane_shutdown_func,
NULL /* data */, NULL);
@@ -2652,7 +2946,6 @@ static int dplane_thread_loop(struct thread *event)
TAILQ_INIT(&error_list);
-
/* Call through to zebra main */
(zdplane_info.dg_results_cb)(&work_list);
@@ -2717,13 +3010,14 @@ static void zebra_dplane_init_internal(void)
*/
void zebra_dplane_start(void)
{
- /* Start dataplane pthread */
-
+ struct zebra_dplane_provider *prov;
struct frr_pthread_attr pattr = {
.start = frr_pthread_attr_default.start,
.stop = frr_pthread_attr_default.stop
};
+ /* Start dataplane pthread */
+
zdplane_info.dg_pthread = frr_pthread_new(&pattr, "Zebra dplane thread",
"Zebra dplane");
@@ -2735,6 +3029,23 @@ void zebra_dplane_start(void)
thread_add_event(zdplane_info.dg_master, dplane_thread_loop, NULL, 0,
&zdplane_info.dg_t_update);
+ /* Call start callbacks for registered providers */
+
+ DPLANE_LOCK();
+ prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+ DPLANE_UNLOCK();
+
+ while (prov) {
+
+ if (prov->dp_start)
+ (prov->dp_start)(prov);
+
+ /* Locate next provider */
+ DPLANE_LOCK();
+ prov = TAILQ_NEXT(prov, dp_prov_link);
+ DPLANE_UNLOCK();
+ }
+
frr_pthread_run(zdplane_info.dg_pthread, NULL);
}
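For context, a provider that observes a route change outside zebra could report it through the new notification path roughly as follows; dplane_provider_get_id() is assumed to be the existing id accessor, and the prefix/nexthop variables are placeholders:

	struct zebra_dplane_ctx *ctx = dplane_ctx_alloc();

	dplane_ctx_set_op(ctx, DPLANE_OP_ROUTE_NOTIFY);
	dplane_ctx_set_dest(ctx, &p);		/* prefix as seen in the provider's fib */
	dplane_ctx_set_vrf(ctx, VRF_DEFAULT);
	dplane_ctx_set_nexthops(ctx, nh);	/* nexthops as actually installed */
	dplane_ctx_set_notif_provider(ctx, dplane_provider_get_id(prov));

	/* Deliver straight to zebra main, bypassing the provider pipeline */
	dplane_provider_enqueue_to_zebra(ctx);

Zebra can then re-distribute that state to other fibs via dplane_route_notif_update(), and the zd_notif_provider id lets the originating plugin recognize and ignore results from its own notification.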
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index d45628fdd0..6238026bcf 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -105,11 +105,13 @@ enum dplane_op_e {
DPLANE_OP_ROUTE_INSTALL,
DPLANE_OP_ROUTE_UPDATE,
DPLANE_OP_ROUTE_DELETE,
+ DPLANE_OP_ROUTE_NOTIFY,
/* LSP update */
DPLANE_OP_LSP_INSTALL,
DPLANE_OP_LSP_UPDATE,
DPLANE_OP_LSP_DELETE,
+ DPLANE_OP_LSP_NOTIFY,
/* Pseudowire update */
DPLANE_OP_PW_INSTALL,
@@ -139,6 +141,9 @@ void dplane_enable_sys_route_notifs(void);
*/
TAILQ_HEAD(dplane_ctx_q, zebra_dplane_ctx);
+/* Allocate a context object */
+struct zebra_dplane_ctx *dplane_ctx_alloc(void);
+
/* Return a dataplane results context block after use; the caller's pointer will
* be cleared.
*/
@@ -169,9 +174,12 @@ void dplane_ctx_set_status(struct zebra_dplane_ctx *ctx,
const char *dplane_res2str(enum zebra_dplane_result res);
enum dplane_op_e dplane_ctx_get_op(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_op(struct zebra_dplane_ctx *ctx, enum dplane_op_e op);
const char *dplane_op2str(enum dplane_op_e op);
const struct prefix *dplane_ctx_get_dest(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_dest(struct zebra_dplane_ctx *ctx,
+ const struct prefix *dest);
/* Retrieve last/current provider id */
uint32_t dplane_ctx_get_provider(const struct zebra_dplane_ctx *ctx);
@@ -186,29 +194,44 @@ bool dplane_ctx_is_skip_kernel(const struct zebra_dplane_ctx *ctx);
* to mean "no src prefix"
*/
const struct prefix *dplane_ctx_get_src(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_src(struct zebra_dplane_ctx *ctx, const struct prefix *src);
bool dplane_ctx_is_update(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_seq(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_old_seq(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_vrf(struct zebra_dplane_ctx *ctx, vrf_id_t vrf);
vrf_id_t dplane_ctx_get_vrf(const struct zebra_dplane_ctx *ctx);
+bool dplane_ctx_is_from_notif(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_notif_provider(struct zebra_dplane_ctx *ctx,
+ uint32_t id);
+uint32_t dplane_ctx_get_notif_provider(const struct zebra_dplane_ctx *ctx);
+
/* Accessors for route update information */
+void dplane_ctx_set_type(struct zebra_dplane_ctx *ctx, int type);
int dplane_ctx_get_type(const struct zebra_dplane_ctx *ctx);
int dplane_ctx_get_old_type(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_afi(struct zebra_dplane_ctx *ctx, afi_t afi);
afi_t dplane_ctx_get_afi(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_safi(struct zebra_dplane_ctx *ctx, safi_t safi);
safi_t dplane_ctx_get_safi(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_table(struct zebra_dplane_ctx *ctx, uint32_t table);
uint32_t dplane_ctx_get_table(const struct zebra_dplane_ctx *ctx);
route_tag_t dplane_ctx_get_tag(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_tag(struct zebra_dplane_ctx *ctx, route_tag_t tag);
route_tag_t dplane_ctx_get_old_tag(const struct zebra_dplane_ctx *ctx);
uint16_t dplane_ctx_get_instance(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_instance(struct zebra_dplane_ctx *ctx, uint16_t instance);
uint16_t dplane_ctx_get_old_instance(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_metric(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_old_metric(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_mtu(const struct zebra_dplane_ctx *ctx);
uint32_t dplane_ctx_get_nh_mtu(const struct zebra_dplane_ctx *ctx);
uint8_t dplane_ctx_get_distance(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_distance(struct zebra_dplane_ctx *ctx, uint8_t distance);
uint8_t dplane_ctx_get_old_distance(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_nexthops(struct zebra_dplane_ctx *ctx, struct nexthop *nh);
const struct nexthop_group *dplane_ctx_get_ng(
const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_old_ng(
@@ -216,11 +239,26 @@ const struct nexthop_group *dplane_ctx_get_old_ng(
/* Accessors for LSP information */
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx,
+ mpls_label_t label);
uint8_t dplane_ctx_get_addr_family(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
+ uint8_t family);
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx);
+void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
+ uint32_t flags);
const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx);
+zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type,
+ union g_addr *gate,
+ ifindex_t ifindex,
+ mpls_label_t out_label);
+
const zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(
const struct zebra_dplane_ctx *ctx);
+const zebra_nhlfe_t *dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
+ zebra_nhlfe_t *nhlfe);
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx);
/* Accessors for pseudowire information */
@@ -282,6 +320,13 @@ enum zebra_dplane_result dplane_sys_route_add(struct route_node *rn,
enum zebra_dplane_result dplane_sys_route_del(struct route_node *rn,
struct route_entry *re);
+/* Update from an async notification, to bring other fibs up-to-date */
+enum zebra_dplane_result dplane_route_notif_update(
+ struct route_node *rn,
+ struct route_entry *re,
+ enum dplane_op_e op,
+ struct zebra_dplane_ctx *ctx);
+
/*
* Enqueue LSP change operations for the dataplane.
*/
@@ -289,6 +334,11 @@ enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp);
enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp);
enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp);
+/* Update or un-install resulting from an async notification */
+enum zebra_dplane_result dplane_lsp_notif_update(zebra_lsp_t *lsp,
+ enum dplane_op_e op,
+ struct zebra_dplane_ctx *ctx);
+
/*
* Enqueue pseudowire operations for the dataplane.
*/
@@ -321,7 +371,6 @@ uint32_t dplane_get_in_queue_len(void);
int dplane_show_helper(struct vty *vty, bool detailed);
int dplane_show_provs_helper(struct vty *vty, bool detailed);
-
/*
* Dataplane providers: modules that process or consume dataplane events.
*/
@@ -363,7 +412,13 @@ enum dplane_provider_prio {
* then checks the provider's outbound queue for completed work.
*/
-/* Providers offer an entry-point for shutdown and cleanup. This is called
+/*
+ * Providers can offer a 'start' callback; if present, the dataplane will
+ * call it when it is starting - when its pthread and event-scheduling
+ * thread_master are available.
+ */
+
+/* Providers can offer an entry-point for shutdown and cleanup. This is called
* with 'early' during shutdown, to indicate that the dataplane subsystem
* is allowing work to move through the providers and finish.
* When called without 'early', the provider should release
@@ -372,6 +427,7 @@ enum dplane_provider_prio {
int dplane_provider_register(const char *name,
enum dplane_provider_prio prio,
int flags,
+ int (*start_fp)(struct zebra_dplane_provider *),
int (*fp)(struct zebra_dplane_provider *),
int (*fini_fp)(struct zebra_dplane_provider *,
bool early),
@@ -409,10 +465,13 @@ struct zebra_dplane_ctx *dplane_provider_dequeue_in_ctx(
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
struct dplane_ctx_q *listp);
-/* Enqueue, maintain associated counter and locking */
+/* Enqueue completed work, maintain associated counter and locking */
void dplane_provider_enqueue_out_ctx(struct zebra_dplane_provider *prov,
struct zebra_dplane_ctx *ctx);
+/* Enqueue a context directly to zebra main. */
+void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx);
+
/*
* Initialize the dataplane modules at zebra startup. This is currently called
* by the rib module. Zebra registers a results callback with the dataplane.
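With the extended registration signature, a provider that wants the new start hook would register along these lines (provider name and callbacks are hypothetical):

static int example_start(struct zebra_dplane_provider *prov)
{
	/* Runs once the dataplane pthread and its thread_master exist */
	return 0;
}

static int example_process(struct zebra_dplane_provider *prov)
{
	/* Dequeue, handle and complete contexts as usual */
	return 0;
}

	dplane_provider_register("Example", DPLANE_PRIO_PRE_KERNEL,
				 DPLANE_PROV_FLAGS_DEFAULT,
				 example_start, example_process,
				 NULL /* fini */, NULL /* data */,
				 NULL /* handle */);

Passing NULL for start_fp, as the built-in Kernel and Test providers do above, keeps the previous behaviour.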
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 5356a7f498..5214f1f22d 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -1693,8 +1693,9 @@ static char *snhlfe2str(zebra_snhlfe_t *snhlfe, char *buf, int size)
case NEXTHOP_TYPE_IPV6_IFINDEX:
inet_ntop(AF_INET6, &snhlfe->gate.ipv6, buf, size);
if (snhlfe->ifindex)
- strcat(buf,
- ifindex2ifname(snhlfe->ifindex, VRF_DEFAULT));
+ strlcat(buf,
+ ifindex2ifname(snhlfe->ifindex, VRF_DEFAULT),
+ size);
break;
default:
break;
@@ -1810,6 +1811,214 @@ void zebra_mpls_lsp_dplane_result(struct zebra_dplane_ctx *ctx)
}
/*
+ * Process async dplane notifications.
+ */
+void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
+{
+ struct zebra_vrf *zvrf;
+ zebra_ile_t tmp_ile;
+ struct hash *lsp_table;
+ zebra_lsp_t *lsp;
+ zebra_nhlfe_t *nhlfe;
+ const zebra_nhlfe_t *ctx_nhlfe;
+ struct nexthop *nexthop;
+ const struct nexthop *ctx_nexthop;
+ int start_count = 0, end_count = 0; /* Installed counts */
+ bool changed_p = false;
+ bool is_debug = (IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_MPLS);
+
+ if (is_debug)
+ zlog_debug("LSP dplane notif, in-label %u",
+ dplane_ctx_get_in_label(ctx));
+
+ /* Look for zebra LSP object */
+ zvrf = vrf_info_lookup(VRF_DEFAULT);
+ if (zvrf == NULL)
+ goto done;
+
+ lsp_table = zvrf->lsp_table;
+
+ tmp_ile.in_label = dplane_ctx_get_in_label(ctx);
+ lsp = hash_lookup(lsp_table, &tmp_ile);
+ if (lsp == NULL) {
+ if (is_debug)
+ zlog_debug("dplane LSP notif: in-label %u not found",
+ dplane_ctx_get_in_label(ctx));
+ goto done;
+ }
+
+ /*
+ * The dataplane/forwarding plane is notifying zebra about the state
+ * of the nexthops associated with this LSP. First, we take a
+ * pre-scan pass to determine whether the LSP has transitioned
+ * from installed -> uninstalled. In that case, we need to have
+ * the existing state of the LSP objects available before making
+ * any changes.
+ */
+ for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ char buf[NEXTHOP_STRLEN];
+
+ nexthop = nhlfe->nexthop;
+ if (!nexthop)
+ continue;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ start_count++;
+
+ ctx_nexthop = NULL;
+ for (ctx_nhlfe = dplane_ctx_get_nhlfe(ctx);
+ ctx_nhlfe; ctx_nhlfe = ctx_nhlfe->next) {
+
+ ctx_nexthop = ctx_nhlfe->nexthop;
+ if (!ctx_nexthop)
+ continue;
+
+ if ((ctx_nexthop->type == nexthop->type) &&
+ nexthop_same(ctx_nexthop, nexthop)) {
+ /* Matched */
+ break;
+ }
+ }
+
+ if (is_debug)
+ nexthop2str(nexthop, buf, sizeof(buf));
+
+ if (ctx_nhlfe && ctx_nexthop) {
+ if (is_debug) {
+ const char *tstr = "";
+
+ if (!CHECK_FLAG(ctx_nhlfe->flags,
+ NHLFE_FLAG_INSTALLED))
+ tstr = "not ";
+
+ zlog_debug("LSP dplane notif: matched nh %s (%sinstalled)",
+ buf, tstr);
+ }
+
+ /* Test zebra nhlfe install state */
+ if (CHECK_FLAG(ctx_nhlfe->flags,
+ NHLFE_FLAG_INSTALLED)) {
+
+ if (!CHECK_FLAG(nhlfe->flags,
+ NHLFE_FLAG_INSTALLED))
+ changed_p = true;
+
+ /* Update counter */
+ end_count++;
+ } else {
+
+ if (CHECK_FLAG(nhlfe->flags,
+ NHLFE_FLAG_INSTALLED))
+ changed_p = true;
+ }
+
+ } else {
+ /* Not mentioned in lfib set -> uninstalled */
+ if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED) ||
+ CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) ||
+ CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB)) {
+ changed_p = true;
+ }
+
+ if (is_debug)
+ zlog_debug("LSP dplane notif: no match, nh %s",
+ buf);
+ }
+ }
+
+ if (is_debug)
+ zlog_debug("LSP dplane notif: lfib start_count %d, end_count %d%s",
+ start_count, end_count,
+ changed_p ? ", changed" : "");
+
+ /*
+ * Has the LSP become uninstalled?
+ */
+ if (start_count > 0 && end_count == 0) {
+ /* Inform other lfibs */
+ dplane_lsp_notif_update(lsp, DPLANE_OP_LSP_DELETE, ctx);
+ }
+
+ /*
+ * Now we take a second pass and bring the zebra
+ * nexthop state into sync with the forwarding-plane state.
+ */
+ for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ char buf[NEXTHOP_STRLEN];
+
+ nexthop = nhlfe->nexthop;
+ if (!nexthop)
+ continue;
+
+ ctx_nexthop = NULL;
+ for (ctx_nhlfe = dplane_ctx_get_nhlfe(ctx);
+ ctx_nhlfe; ctx_nhlfe = ctx_nhlfe->next) {
+
+ ctx_nexthop = ctx_nhlfe->nexthop;
+ if (!ctx_nexthop)
+ continue;
+
+ if ((ctx_nexthop->type == nexthop->type) &&
+ nexthop_same(ctx_nexthop, nexthop)) {
+ /* Matched */
+ break;
+ }
+ }
+
+ if (is_debug)
+ nexthop2str(nexthop, buf, sizeof(buf));
+
+ if (ctx_nhlfe && ctx_nexthop) {
+
+ /* Bring zebra nhlfe install state into sync */
+ if (CHECK_FLAG(ctx_nhlfe->flags,
+ NHLFE_FLAG_INSTALLED)) {
+
+ SET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED);
+
+ } else {
+
+ UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED);
+ }
+
+ if (CHECK_FLAG(ctx_nhlfe->nexthop->flags,
+ NEXTHOP_FLAG_FIB)) {
+ SET_FLAG(nhlfe->nexthop->flags,
+ NEXTHOP_FLAG_ACTIVE);
+ SET_FLAG(nhlfe->nexthop->flags,
+ NEXTHOP_FLAG_FIB);
+ } else {
+ UNSET_FLAG(nhlfe->nexthop->flags,
+ NEXTHOP_FLAG_ACTIVE);
+ UNSET_FLAG(nhlfe->nexthop->flags,
+ NEXTHOP_FLAG_FIB);
+ }
+
+ } else {
+ /* Not mentioned in lfib set -> uninstalled */
+
+ UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED);
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ }
+ }
+
+ if (end_count > 0) {
+ SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
+
+ if (changed_p)
+ dplane_lsp_notif_update(lsp, DPLANE_OP_LSP_UPDATE, ctx);
+
+ } else {
+ UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
+ clear_nhlfe_installed(lsp);
+ }
+
+done:
+ dplane_ctx_fini(&ctx);
+}
+
+/*
* Install dynamic LSP entry.
*/
int zebra_mpls_lsp_install(struct zebra_vrf *zvrf, struct route_node *rn,
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index 3a131e1aaf..d983221cb5 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -352,6 +352,9 @@ struct zebra_dplane_ctx;
void zebra_mpls_lsp_dplane_result(struct zebra_dplane_ctx *ctx);
+/* Process async dplane notifications. */
+void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx);
+
/*
* Schedule all MPLS label forwarding entries for processing.
* Called upon changes that may affect one or more of them such as
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
new file mode 100644
index 0000000000..f2a76d1c52
--- /dev/null
+++ b/zebra/zebra_nhg.c
@@ -0,0 +1,511 @@
+/* Zebra Nexthop Group Code.
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ * Stephen Worley
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FRR; see the file COPYING. If not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <zebra.h>
+
+#include "lib/nexthop.h"
+#include "lib/routemap.h"
+
+#include "zebra/connected.h"
+#include "zebra/debug.h"
+#include "zebra/zebra_router.h"
+#include "zebra/zebra_nhg.h"
+#include "zebra/zebra_rnh.h"
+#include "zebra/zebra_routemap.h"
+#include "zebra/rt.h"
+
+static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
+ struct nexthop *nexthop)
+{
+ struct nexthop *resolved_hop;
+
+ resolved_hop = nexthop_new();
+ SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
+
+ resolved_hop->vrf_id = nexthop->vrf_id;
+ switch (newhop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ /* If the resolving route specifies a gateway, use it */
+ resolved_hop->type = newhop->type;
+ resolved_hop->gate.ipv4 = newhop->gate.ipv4;
+
+ if (newhop->ifindex) {
+ resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ resolved_hop->ifindex = newhop->ifindex;
+ }
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ resolved_hop->type = newhop->type;
+ resolved_hop->gate.ipv6 = newhop->gate.ipv6;
+
+ if (newhop->ifindex) {
+ resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ resolved_hop->ifindex = newhop->ifindex;
+ }
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ /* If the resolving route is an interface route,
+ * it means the gateway we are looking up is connected
+ * to that interface. (The actual network is _not_ onlink).
+ * Therefore, the resolved route should have the original
+ * gateway as nexthop as it is directly connected.
+ *
+ * On Linux, we have to set the onlink netlink flag because
+ * otherwise, the kernel won't accept the route.
+ */
+ resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
+ if (afi == AFI_IP) {
+ resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
+ } else if (afi == AFI_IP6) {
+ resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
+ }
+ resolved_hop->ifindex = newhop->ifindex;
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
+ resolved_hop->bh_type = nexthop->bh_type;
+ break;
+ }
+
+ if (newhop->flags & NEXTHOP_FLAG_ONLINK)
+ resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
+
+ /* Copy labels of the resolved route */
+ if (newhop->nh_label)
+ nexthop_add_labels(resolved_hop, newhop->nh_label_type,
+ newhop->nh_label->num_labels,
+ &newhop->nh_label->label[0]);
+
+ resolved_hop->rparent = nexthop;
+ nexthop_add(&nexthop->resolved, resolved_hop);
+}
+
+/*
+ * Given a nexthop we need to properly recursively resolve
+ * the route. As such, do a table lookup to find and match
+ * if at all possible. Set the nexthop->ifindex as appropriate
+ */
+static int nexthop_active(afi_t afi, struct route_entry *re,
+ struct nexthop *nexthop, struct route_node *top)
+{
+ struct prefix p;
+ struct route_table *table;
+ struct route_node *rn;
+ struct route_entry *match = NULL;
+ int resolved;
+ struct nexthop *newhop;
+ struct interface *ifp;
+ rib_dest_t *dest;
+
+ if ((nexthop->type == NEXTHOP_TYPE_IPV4)
+ || nexthop->type == NEXTHOP_TYPE_IPV6)
+ nexthop->ifindex = 0;
+
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
+ nexthops_free(nexthop->resolved);
+ nexthop->resolved = NULL;
+ re->nexthop_mtu = 0;
+
+ /*
+ * If the kernel has sent us a route, then
+ * by golly gee whiz it's a good route.
+ */
+ if (re->type == ZEBRA_ROUTE_KERNEL || re->type == ZEBRA_ROUTE_SYSTEM)
+ return 1;
+
+ /*
+ * Check to see if we should trust the passed in information
+ * for UNNUMBERED interfaces as that we won't find the GW
+ * address in the routing table.
+ * This check should suffice to handle IPv4 or IPv6 routes
+ * sourced from EVPN routes which are installed with the
+ * next hop as the remote VTEP IP.
+ */
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
+ ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
+ if (!ifp) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+ "\t%s: Onlink and interface: %u[%u] does not exist",
+ __PRETTY_FUNCTION__, nexthop->ifindex,
+ nexthop->vrf_id);
+ return 0;
+ }
+ if (connected_is_unnumbered(ifp)) {
+ if (if_is_operative(ifp))
+ return 1;
+ else {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+ "\t%s: Onlink and interface %s is not operative",
+ __PRETTY_FUNCTION__, ifp->name);
+ return 0;
+ }
+ }
+ if (!if_is_operative(ifp)) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+					"\t%s: Interface %s is not operative",
+ __PRETTY_FUNCTION__, ifp->name);
+ return 0;
+ }
+ }
+
+ /* Make lookup prefix. */
+ memset(&p, 0, sizeof(struct prefix));
+ switch (afi) {
+ case AFI_IP:
+ p.family = AF_INET;
+ p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.u.prefix4 = nexthop->gate.ipv4;
+ break;
+ case AFI_IP6:
+ p.family = AF_INET6;
+ p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.u.prefix6 = nexthop->gate.ipv6;
+ break;
+ default:
+ assert(afi != AFI_IP && afi != AFI_IP6);
+ break;
+ }
+ /* Lookup table. */
+ table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
+ if (!table) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("\t%s: Table not found",
+ __PRETTY_FUNCTION__);
+ return 0;
+ }
+
+ rn = route_node_match(table, (struct prefix *)&p);
+ while (rn) {
+ route_unlock_node(rn);
+
+ /* Lookup should halt if we've matched against ourselves ('top',
+		 * if specified) - i.e., a nexthop NH1 cannot be resolved
+		 * by a route whose own nexthop is NH1. The exception is if the route is a
+ * host route.
+ */
+ if (top && rn == top)
+ if (((afi == AFI_IP) && (rn->p.prefixlen != 32))
+ || ((afi == AFI_IP6) && (rn->p.prefixlen != 128))) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+ "\t%s: Matched against ourself and prefix length is not max bit length",
+ __PRETTY_FUNCTION__);
+ return 0;
+ }
+
+ /* Pick up selected route. */
+ /* However, do not resolve over default route unless explicitly
+ * allowed. */
+ if (is_default_prefix(&rn->p)
+ && !rnh_resolve_via_default(p.family)) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+					"\t%s: Resolved against default route",
+ __PRETTY_FUNCTION__);
+ return 0;
+ }
+
+ dest = rib_dest_from_rnode(rn);
+ if (dest && dest->selected_fib
+ && !CHECK_FLAG(dest->selected_fib->status,
+ ROUTE_ENTRY_REMOVED)
+ && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
+ match = dest->selected_fib;
+
+ /* If there is no selected route or matched route is EGP, go up
+ tree. */
+ if (!match) {
+ do {
+ rn = rn->parent;
+ } while (rn && rn->info == NULL);
+ if (rn)
+ route_lock_node(rn);
+
+ continue;
+ }
+
+ if (match->type == ZEBRA_ROUTE_CONNECT) {
+ /* Directly point connected route. */
+ newhop = match->ng.nexthop;
+ if (newhop) {
+ if (nexthop->type == NEXTHOP_TYPE_IPV4
+ || nexthop->type == NEXTHOP_TYPE_IPV6)
+ nexthop->ifindex = newhop->ifindex;
+ }
+ return 1;
+ } else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
+ resolved = 0;
+ for (ALL_NEXTHOPS(match->ng, newhop)) {
+ if (!CHECK_FLAG(match->status,
+ ROUTE_ENTRY_INSTALLED))
+ continue;
+ if (CHECK_FLAG(newhop->flags,
+ NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ SET_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_RECURSIVE);
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_NEXTHOPS_CHANGED);
+ nexthop_set_resolved(afi, newhop, nexthop);
+ resolved = 1;
+ }
+ if (resolved)
+ re->nexthop_mtu = match->mtu;
+ if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("\t%s: Recursion failed to find",
+ __PRETTY_FUNCTION__);
+ return resolved;
+ } else if (re->type == ZEBRA_ROUTE_STATIC) {
+ resolved = 0;
+ for (ALL_NEXTHOPS(match->ng, newhop)) {
+ if (!CHECK_FLAG(match->status,
+ ROUTE_ENTRY_INSTALLED))
+ continue;
+ if (CHECK_FLAG(newhop->flags,
+ NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ SET_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_RECURSIVE);
+ nexthop_set_resolved(afi, newhop, nexthop);
+ resolved = 1;
+ }
+ if (resolved)
+ re->nexthop_mtu = match->mtu;
+
+ if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug(
+ "\t%s: Static route unable to resolve",
+ __PRETTY_FUNCTION__);
+ return resolved;
+ } else {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
+ zlog_debug(
+ "\t%s: Route Type %s has not turned on recursion",
+ __PRETTY_FUNCTION__,
+ zebra_route_string(re->type));
+ if (re->type == ZEBRA_ROUTE_BGP
+ && !CHECK_FLAG(re->flags, ZEBRA_FLAG_IBGP))
+ zlog_debug(
+ "\tEBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
+ }
+ return 0;
+ }
+ }
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+		zlog_debug("\t%s: Nexthop did not resolve in table",
+ __PRETTY_FUNCTION__);
+ return 0;
+}
+
+/* This function verifies reachability of one given nexthop, which can be
+ * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
+ * in nexthop->flags field. The nexthop->ifindex will be updated
+ * appropriately as well. An existing route map can turn
+ * (otherwise active) nexthop into inactive, but not vice versa.
+ *
+ * The return value is the final value of 'ACTIVE' flag.
+ */
+static unsigned nexthop_active_check(struct route_node *rn,
+ struct route_entry *re,
+ struct nexthop *nexthop)
+{
+ struct interface *ifp;
+ route_map_result_t ret = RMAP_MATCH;
+ int family;
+ char buf[SRCDEST2STR_BUFFER];
+ const struct prefix *p, *src_p;
+ struct zebra_vrf *zvrf;
+
+ srcdest_rnode_prefixes(rn, &p, &src_p);
+
+ if (rn->p.family == AF_INET)
+ family = AFI_IP;
+ else if (rn->p.family == AF_INET6)
+ family = AFI_IP6;
+ else
+ family = 0;
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
+ if (ifp && if_is_operative(ifp))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ else
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ family = AFI_IP;
+ if (nexthop_active(AFI_IP, re, nexthop, rn))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ else
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ family = AFI_IP6;
+ if (nexthop_active(AFI_IP6, re, nexthop, rn))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ else
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ break;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ /* RFC 5549, v4 prefix with v6 NH */
+ if (rn->p.family != AF_INET)
+ family = AFI_IP6;
+ if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
+ ifp = if_lookup_by_index(nexthop->ifindex,
+ nexthop->vrf_id);
+ if (ifp && if_is_operative(ifp))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ else
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ } else {
+ if (nexthop_active(AFI_IP6, re, nexthop, rn))
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ else
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ }
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ break;
+ default:
+ break;
+ }
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+			zlog_debug("\t%s: Unable to find an active nexthop",
+ __PRETTY_FUNCTION__);
+ return 0;
+ }
+
+ /* XXX: What exactly do those checks do? Do we support
+ * e.g. IPv4 routes with IPv6 nexthops or vice versa?
+ */
+ if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
+ || (family == AFI_IP6 && p->family != AF_INET6))
+ return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+
+ /* The original code didn't determine the family correctly
+ * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
+ * from the rib_table_info in those cases.
+ * Possibly it may be better to use only the rib_table_info
+ * in every case.
+ */
+ if (!family) {
+ rib_table_info_t *info;
+
+ info = srcdest_rnode_table_info(rn);
+ family = info->afi;
+ }
+
+ memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));
+
+ zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
+ if (!zvrf) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("\t%s: zvrf is NULL", __PRETTY_FUNCTION__);
+ return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ }
+
+ /* It'll get set if required inside */
+ ret = zebra_route_map_check(family, re->type, re->instance, p, nexthop,
+ zvrf, re->tag);
+ if (ret == RMAP_DENYMATCH) {
+ if (IS_ZEBRA_DEBUG_RIB) {
+ srcdest_rnode2str(rn, buf, sizeof(buf));
+ zlog_debug(
+ "%u:%s: Filtering out with NH out %s due to route map",
+ re->vrf_id, buf,
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ }
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ }
+ return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+}
+
+/*
+ * Iterate over all nexthops of the given RIB entry and refresh their
+ * ACTIVE flag. re->nexthop_active_num is updated accordingly. If any
+ * nexthop is found to toggle the ACTIVE flag, the whole re structure
+ * is flagged with ROUTE_ENTRY_CHANGED.
+ *
+ * Return value is the new number of active nexthops.
+ */
+int nexthop_active_update(struct route_node *rn, struct route_entry *re)
+{
+ struct nexthop *nexthop;
+ union g_addr prev_src;
+ unsigned int prev_active, new_active;
+ ifindex_t prev_index;
+
+ re->nexthop_active_num = 0;
+ UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+
+ for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next) {
+ /* No protocol daemon provides src and so we're skipping
+ * tracking it */
+ prev_src = nexthop->rmap_src;
+ prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ prev_index = nexthop->ifindex;
+ /*
+		 * We need to respect the multipath_num here:
+		 * what we are able to install from a multipath
+		 * perspective should not be a data plane
+		 * decision point.
+ */
+ new_active = nexthop_active_check(rn, re, nexthop);
+ if (new_active
+ && re->nexthop_active_num >= zrouter.multipath_num) {
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ new_active = 0;
+ }
+ if (new_active)
+ re->nexthop_active_num++;
+ /* Don't allow src setting on IPv6 addr for now */
+ if (prev_active != new_active || prev_index != nexthop->ifindex
+ || ((nexthop->type >= NEXTHOP_TYPE_IFINDEX
+ && nexthop->type < NEXTHOP_TYPE_IPV6)
+ && prev_src.ipv4.s_addr
+ != nexthop->rmap_src.ipv4.s_addr)
+ || ((nexthop->type >= NEXTHOP_TYPE_IPV6
+ && nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
+ && !(IPV6_ADDR_SAME(&prev_src.ipv6,
+ &nexthop->rmap_src.ipv6)))
+ || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED)) {
+ SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+ SET_FLAG(re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);
+ }
+ }
+
+ return re->nexthop_active_num;
+}
+
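/*
 * [Editor's sketch] The loop above caps the number of ACTIVE nexthops at
 * zrouter.multipath_num.  The following minimal, self-contained C sketch
 * illustrates just that clamping idea; the "toy_" names are hypothetical
 * and exist only for illustration - they are not part of zebra.
 */
#include <stdbool.h>

struct toy_nexthop {
	struct toy_nexthop *next;
	bool active;
};

/* Walk a nexthop list and keep at most 'multipath_num' entries active,
 * deactivating any beyond the ECMP limit; returns the active count.
 */
static unsigned int toy_clamp_active(struct toy_nexthop *head,
				     unsigned int multipath_num)
{
	unsigned int active = 0;
	struct toy_nexthop *nh;

	for (nh = head; nh; nh = nh->next) {
		if (!nh->active)
			continue;
		if (active >= multipath_num) {
			nh->active = false; /* over the multipath limit */
			continue;
		}
		active++;
	}

	return active;
}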
diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h
new file mode 100644
index 0000000000..ff2351c759
--- /dev/null
+++ b/zebra/zebra_nhg.h
@@ -0,0 +1,29 @@
+/* Zebra Nexthop Group header.
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ * Stephen Worley
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FRR; see the file COPYING. If not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef __ZEBRA_NHG_H__
+#define __ZEBRA_NHG_H__
+
+#include "zebra/rib.h"
+
+extern int nexthop_active_update(struct route_node *rn, struct route_entry *re);
+#endif
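/*
 * [Editor's sketch] A hedged illustration of how a caller in zebra_rib.c
 * might consume the function exported above; "example_refresh_nexthops"
 * is an illustration-only name and not part of this change.
 */
#include "zebra/zebra_nhg.h"

static void example_refresh_nexthops(struct route_node *rn,
				     struct route_entry *re)
{
	/* Re-evaluate ACTIVE flags; the return value is the number of
	 * usable (active) nexthops for this route entry.
	 */
	if (nexthop_active_update(rn, re) == 0) {
		/* No usable nexthops - a caller would typically skip
		 * installing this route entry.
		 */
	}
}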
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 391917ec68..83eb5f4223 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -54,6 +54,7 @@
#include "zebra/zebra_vxlan.h"
#include "zebra/zapi_msg.h"
#include "zebra/zebra_dplane.h"
+#include "zebra/zebra_nhg.h"
/*
* Event, list, and mutex for delivery of dataplane results
@@ -128,7 +129,7 @@ _rnode_zlog(const char *_func, vrf_id_t vrf_id, struct route_node *rn,
srcdest_rnode2str(rn, buf, sizeof(buf));
if (info->safi == SAFI_MULTICAST)
- strcat(buf, " (MRIB)");
+ strlcat(buf, " (MRIB)", sizeof(buf));
} else {
snprintf(buf, sizeof(buf), "{(route_node *) NULL}");
}
@@ -336,298 +337,6 @@ struct nexthop *route_entry_nexthop_blackhole_add(struct route_entry *re,
return nexthop;
}
-static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
- struct nexthop *nexthop)
-{
- struct nexthop *resolved_hop;
-
- resolved_hop = nexthop_new();
- SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
-
- resolved_hop->vrf_id = nexthop->vrf_id;
- switch (newhop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- /* If the resolving route specifies a gateway, use it */
- resolved_hop->type = newhop->type;
- resolved_hop->gate.ipv4 = newhop->gate.ipv4;
-
- if (newhop->ifindex) {
- resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- resolved_hop->ifindex = newhop->ifindex;
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- resolved_hop->type = newhop->type;
- resolved_hop->gate.ipv6 = newhop->gate.ipv6;
-
- if (newhop->ifindex) {
- resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- resolved_hop->ifindex = newhop->ifindex;
- }
- break;
- case NEXTHOP_TYPE_IFINDEX:
- /* If the resolving route is an interface route,
- * it means the gateway we are looking up is connected
- * to that interface. (The actual network is _not_ onlink).
- * Therefore, the resolved route should have the original
- * gateway as nexthop as it is directly connected.
- *
- * On Linux, we have to set the onlink netlink flag because
- * otherwise, the kernel won't accept the route.
- */
- resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
- if (afi == AFI_IP) {
- resolved_hop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- resolved_hop->gate.ipv4 = nexthop->gate.ipv4;
- } else if (afi == AFI_IP6) {
- resolved_hop->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- resolved_hop->gate.ipv6 = nexthop->gate.ipv6;
- }
- resolved_hop->ifindex = newhop->ifindex;
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- resolved_hop->type = NEXTHOP_TYPE_BLACKHOLE;
- resolved_hop->bh_type = nexthop->bh_type;
- break;
- }
-
- if (newhop->flags & NEXTHOP_FLAG_ONLINK)
- resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
-
- /* Copy labels of the resolved route */
- if (newhop->nh_label)
- nexthop_add_labels(resolved_hop, newhop->nh_label_type,
- newhop->nh_label->num_labels,
- &newhop->nh_label->label[0]);
-
- resolved_hop->rparent = nexthop;
- nexthop_add(&nexthop->resolved, resolved_hop);
-}
-
-/*
- * Given a nexthop we need to properly recursively resolve
- * the route. As such, do a table lookup to find and match
- * if at all possible. Set the nexthop->ifindex as appropriate
- */
-static int nexthop_active(afi_t afi, struct route_entry *re,
- struct nexthop *nexthop,
- struct route_node *top)
-{
- struct prefix p;
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *match = NULL;
- int resolved;
- struct nexthop *newhop;
- struct interface *ifp;
- rib_dest_t *dest;
-
- if ((nexthop->type == NEXTHOP_TYPE_IPV4)
- || nexthop->type == NEXTHOP_TYPE_IPV6)
- nexthop->ifindex = 0;
-
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
- nexthops_free(nexthop->resolved);
- nexthop->resolved = NULL;
- re->nexthop_mtu = 0;
-
- /*
- * If the kernel has sent us a route, then
- * by golly gee whiz it's a good route.
- */
- if (re->type == ZEBRA_ROUTE_KERNEL ||
- re->type == ZEBRA_ROUTE_SYSTEM)
- return 1;
-
- /*
- * Check to see if we should trust the passed in information
- * for UNNUMBERED interfaces as that we won't find the GW
- * address in the routing table.
- * This check should suffice to handle IPv4 or IPv6 routes
- * sourced from EVPN routes which are installed with the
- * next hop as the remote VTEP IP.
- */
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)) {
- ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
- if (!ifp) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- "\t%s: Onlink and interface: %u[%u] does not exist",
- __PRETTY_FUNCTION__, nexthop->ifindex,
- nexthop->vrf_id);
- return 0;
- }
- if (connected_is_unnumbered(ifp)) {
- if (if_is_operative(ifp))
- return 1;
- else {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- "\t%s: Onlink and interface %s is not operative",
- __PRETTY_FUNCTION__, ifp->name);
- return 0;
- }
- }
- if (!if_is_operative(ifp)) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- "\t%s: Interface %s is not unnumbered",
- __PRETTY_FUNCTION__, ifp->name);
- return 0;
- }
- }
-
- /* Make lookup prefix. */
- memset(&p, 0, sizeof(struct prefix));
- switch (afi) {
- case AFI_IP:
- p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
- p.u.prefix4 = nexthop->gate.ipv4;
- break;
- case AFI_IP6:
- p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_PREFIXLEN;
- p.u.prefix6 = nexthop->gate.ipv6;
- break;
- default:
- assert(afi != AFI_IP && afi != AFI_IP6);
- break;
- }
- /* Lookup table. */
- table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
- if (!table) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("\t%s: Table not found",
- __PRETTY_FUNCTION__);
- return 0;
- }
-
- rn = route_node_match(table, (struct prefix *)&p);
- while (rn) {
- route_unlock_node(rn);
-
- /* Lookup should halt if we've matched against ourselves ('top',
- * if specified) - i.e., we cannot have a nexthop NH1 is
- * resolved by a route NH1. The exception is if the route is a
- * host route.
- */
- if (top && rn == top)
- if (((afi == AFI_IP) && (rn->p.prefixlen != 32))
- || ((afi == AFI_IP6) && (rn->p.prefixlen != 128))) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- "\t%s: Matched against ourself and prefix length is not max bit length",
- __PRETTY_FUNCTION__);
- return 0;
- }
-
- /* Pick up selected route. */
- /* However, do not resolve over default route unless explicitly
- * allowed. */
- if (is_default_prefix(&rn->p)
- && !rnh_resolve_via_default(p.family)) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- "\t:%s: Resolved against default route",
- __PRETTY_FUNCTION__);
- return 0;
- }
-
- dest = rib_dest_from_rnode(rn);
- if (dest && dest->selected_fib
- && !CHECK_FLAG(dest->selected_fib->status,
- ROUTE_ENTRY_REMOVED)
- && dest->selected_fib->type != ZEBRA_ROUTE_TABLE)
- match = dest->selected_fib;
-
- /* If there is no selected route or matched route is EGP, go up
- tree. */
- if (!match) {
- do {
- rn = rn->parent;
- } while (rn && rn->info == NULL);
- if (rn)
- route_lock_node(rn);
-
- continue;
- }
-
- if (match->type == ZEBRA_ROUTE_CONNECT) {
- /* Directly point connected route. */
- newhop = match->ng.nexthop;
- if (newhop) {
- if (nexthop->type == NEXTHOP_TYPE_IPV4
- || nexthop->type == NEXTHOP_TYPE_IPV6)
- nexthop->ifindex = newhop->ifindex;
- }
- return 1;
- } else if (CHECK_FLAG(re->flags, ZEBRA_FLAG_ALLOW_RECURSION)) {
- resolved = 0;
- for (ALL_NEXTHOPS(match->ng, newhop)) {
- if (!CHECK_FLAG(match->status,
- ROUTE_ENTRY_INSTALLED))
- continue;
- if (CHECK_FLAG(newhop->flags,
- NEXTHOP_FLAG_RECURSIVE))
- continue;
-
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_RECURSIVE);
- SET_FLAG(re->status,
- ROUTE_ENTRY_NEXTHOPS_CHANGED);
- nexthop_set_resolved(afi, newhop, nexthop);
- resolved = 1;
- }
- if (resolved)
- re->nexthop_mtu = match->mtu;
- if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("\t%s: Recursion failed to find",
- __PRETTY_FUNCTION__);
- return resolved;
- } else if (re->type == ZEBRA_ROUTE_STATIC) {
- resolved = 0;
- for (ALL_NEXTHOPS(match->ng, newhop)) {
- if (!CHECK_FLAG(match->status,
- ROUTE_ENTRY_INSTALLED))
- continue;
- if (CHECK_FLAG(newhop->flags,
- NEXTHOP_FLAG_RECURSIVE))
- continue;
-
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_RECURSIVE);
- nexthop_set_resolved(afi, newhop, nexthop);
- resolved = 1;
- }
- if (resolved)
- re->nexthop_mtu = match->mtu;
-
- if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug(
- "\t%s: Static route unable to resolve",
- __PRETTY_FUNCTION__);
- return resolved;
- } else {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
- zlog_debug("\t%s: Route Type %s has not turned on recursion",
- __PRETTY_FUNCTION__,
- zebra_route_string(re->type));
- if (re->type == ZEBRA_ROUTE_BGP &&
- !CHECK_FLAG(re->flags, ZEBRA_FLAG_IBGP))
- zlog_debug("\tEBGP: see \"disable-ebgp-connected-route-check\" or \"disable-connected-check\"");
- }
- return 0;
- }
- }
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("\t%s: Nexthop did not lookup in table",
- __PRETTY_FUNCTION__);
- return 0;
-}
-
struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
union g_addr *addr, struct route_node **rn_out)
{
@@ -798,190 +507,6 @@ struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p, vrf_id_t vrf_id)
return NULL;
}
-/* This function verifies reachability of one given nexthop, which can be
- * numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
- * in nexthop->flags field. The nexthop->ifindex will be updated
- * appropriately as well. An existing route map can turn
- * (otherwise active) nexthop into inactive, but not vice versa.
- *
- * The return value is the final value of 'ACTIVE' flag.
- */
-static unsigned nexthop_active_check(struct route_node *rn,
- struct route_entry *re,
- struct nexthop *nexthop)
-{
- struct interface *ifp;
- route_map_result_t ret = RMAP_MATCH;
- int family;
- char buf[SRCDEST2STR_BUFFER];
- const struct prefix *p, *src_p;
- struct zebra_vrf *zvrf;
-
- srcdest_rnode_prefixes(rn, &p, &src_p);
-
- if (rn->p.family == AF_INET)
- family = AFI_IP;
- else if (rn->p.family == AF_INET6)
- family = AFI_IP6;
- else
- family = 0;
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IFINDEX:
- ifp = if_lookup_by_index(nexthop->ifindex, nexthop->vrf_id);
- if (ifp && if_is_operative(ifp))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- else
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- break;
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- family = AFI_IP;
- if (nexthop_active(AFI_IP, re, nexthop, rn))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- else
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- break;
- case NEXTHOP_TYPE_IPV6:
- family = AFI_IP6;
- if (nexthop_active(AFI_IP6, re, nexthop, rn))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- else
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- break;
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- /* RFC 5549, v4 prefix with v6 NH */
- if (rn->p.family != AF_INET)
- family = AFI_IP6;
- if (IN6_IS_ADDR_LINKLOCAL(&nexthop->gate.ipv6)) {
- ifp = if_lookup_by_index(nexthop->ifindex,
- nexthop->vrf_id);
- if (ifp && if_is_operative(ifp))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- else
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- } else {
- if (nexthop_active(AFI_IP6, re, nexthop, rn))
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- else
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- }
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- break;
- default:
- break;
- }
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("\t%s: Unable to find a active nexthop",
- __PRETTY_FUNCTION__);
- return 0;
- }
-
- /* XXX: What exactly do those checks do? Do we support
- * e.g. IPv4 routes with IPv6 nexthops or vice versa?
- */
- if (RIB_SYSTEM_ROUTE(re) || (family == AFI_IP && p->family != AF_INET)
- || (family == AFI_IP6 && p->family != AF_INET6))
- return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
-
- /* The original code didn't determine the family correctly
- * e.g. for NEXTHOP_TYPE_IFINDEX. Retrieve the correct afi
- * from the rib_table_info in those cases.
- * Possibly it may be better to use only the rib_table_info
- * in every case.
- */
- if (!family) {
- rib_table_info_t *info;
-
- info = srcdest_rnode_table_info(rn);
- family = info->afi;
- }
-
- memset(&nexthop->rmap_src.ipv6, 0, sizeof(union g_addr));
-
- zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
- if (!zvrf) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("\t%s: zvrf is NULL", __PRETTY_FUNCTION__);
- return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- }
-
- /* It'll get set if required inside */
- ret = zebra_route_map_check(family, re->type, re->instance, p,
- nexthop, zvrf, re->tag);
- if (ret == RMAP_DENYMATCH) {
- if (IS_ZEBRA_DEBUG_RIB) {
- srcdest_rnode2str(rn, buf, sizeof(buf));
- zlog_debug(
- "%u:%s: Filtering out with NH out %s due to route map",
- re->vrf_id, buf,
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- }
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- }
- return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
-}
-
-/*
- * Iterate over all nexthops of the given RIB entry and refresh their
- * ACTIVE flag. re->nexthop_active_num is updated accordingly. If any
- * nexthop is found to toggle the ACTIVE flag, the whole re structure
- * is flagged with ROUTE_ENTRY_CHANGED.
- *
- * Return value is the new number of active nexthops.
- */
-static int nexthop_active_update(struct route_node *rn, struct route_entry *re)
-{
- struct nexthop *nexthop;
- union g_addr prev_src;
- unsigned int prev_active, new_active;
- ifindex_t prev_index;
-
- re->nexthop_active_num = 0;
- UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
-
- for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next) {
- /* No protocol daemon provides src and so we're skipping
- * tracking it */
- prev_src = nexthop->rmap_src;
- prev_active = CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- prev_index = nexthop->ifindex;
- /*
- * We need to respect the multipath_num here
- * as that what we should be able to install from
- * a multipath perpsective should not be a data plane
- * decision point.
- */
- new_active = nexthop_active_check(rn, re, nexthop);
- if (new_active
- && re->nexthop_active_num >= zrouter.multipath_num) {
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
- new_active = 0;
- }
- if (new_active)
- re->nexthop_active_num++;
- /* Don't allow src setting on IPv6 addr for now */
- if (prev_active != new_active || prev_index != nexthop->ifindex
- || ((nexthop->type >= NEXTHOP_TYPE_IFINDEX
- && nexthop->type < NEXTHOP_TYPE_IPV6)
- && prev_src.ipv4.s_addr
- != nexthop->rmap_src.ipv4.s_addr)
- || ((nexthop->type >= NEXTHOP_TYPE_IPV6
- && nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
- && !(IPV6_ADDR_SAME(&prev_src.ipv6,
- &nexthop->rmap_src.ipv6)))
- || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED)) {
- SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
- SET_FLAG(re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);
- }
- }
-
- return re->nexthop_active_num;
-}
-
/*
* Is this RIB labeled-unicast? It must be of type BGP and all paths
* (nexthops) must have a label.
@@ -1062,8 +587,25 @@ void rib_install_kernel(struct route_node *rn, struct route_entry *re,
switch (ret) {
case ZEBRA_DPLANE_REQUEST_QUEUED:
SET_FLAG(re->status, ROUTE_ENTRY_QUEUED);
- if (old)
+
+ if (old) {
SET_FLAG(old->status, ROUTE_ENTRY_QUEUED);
+
+ /* Free old FIB nexthop group */
+ if (old->fib_ng.nexthop) {
+ nexthops_free(old->fib_ng.nexthop);
+ old->fib_ng.nexthop = NULL;
+ }
+
+ if (!RIB_SYSTEM_ROUTE(old)) {
+ /* Clear old route's FIB flags */
+ for (ALL_NEXTHOPS(old->ng, nexthop)) {
+ UNSET_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_FIB);
+ }
+ }
+ }
+
if (zvrf)
zvrf->installs_queued++;
break;
@@ -1149,6 +691,12 @@ static void rib_uninstall(struct route_node *rn, struct route_entry *re)
dest->selected_fib = NULL;
+ /* Free FIB nexthop group, if present */
+ if (re->fib_ng.nexthop) {
+ nexthops_free(re->fib_ng.nexthop);
+ re->fib_ng.nexthop = NULL;
+ }
+
for (ALL_NEXTHOPS(re->ng, nexthop))
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
}
@@ -1841,21 +1389,239 @@ static void zebra_rib_fixup_system(struct route_node *rn)
}
/*
- * Route-update results processing after async dataplane update.
+ * Update a route from a dplane context. This consolidates common code
+ * that can be used in processing of results from FIB updates, and in
+ * async notification processing.
+ * The return is 'true' if the installed nexthops changed; 'false' otherwise.
*/
-static void rib_process_result(struct zebra_dplane_ctx *ctx)
+static bool rib_update_re_from_ctx(struct route_entry *re,
+ struct route_node *rn,
+ struct zebra_dplane_ctx *ctx)
+{
+ char dest_str[PREFIX_STRLEN] = "";
+ char nh_str[NEXTHOP_STRLEN];
+ struct nexthop *nexthop, *ctx_nexthop;
+ bool matched;
+ const struct nexthop_group *ctxnhg;
+ bool is_selected = false; /* Is 're' currently the selected re? */
+ bool changed_p = false; /* Change to nexthops? */
+ rib_dest_t *dest;
+
+ /* Note well: only capturing the prefix string if debug is enabled here;
+ * unconditional log messages will have to generate the string.
+ */
+ if (IS_ZEBRA_DEBUG_RIB)
+ prefix2str(&(rn->p), dest_str, sizeof(dest_str));
+
+ dest = rib_dest_from_rnode(rn);
+ if (dest)
+ is_selected = (re == dest->selected_fib);
+
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED)
+ zlog_debug("update_from_ctx: %u:%s: %sSELECTED",
+ re->vrf_id, dest_str, (is_selected ? "" : "NOT "));
+
+ /* Update zebra's nexthop FIB flag for each nexthop that was installed.
+ * If the installed set differs from the set requested by the rib/owner,
+ * we use the fib-specific nexthop-group to record the actual FIB
+ * status.
+ */
+
+ /*
+ * First check the fib nexthop-group, if it's present. The comparison
+ * here is quite strict: we require that the fib sets match exactly.
+ */
+ matched = false;
+ do {
+ if (re->fib_ng.nexthop == NULL)
+ break;
+
+ matched = true;
+
+ /* First check the route's fib nexthops */
+ for (ALL_NEXTHOPS(re->fib_ng, nexthop)) {
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ ctx_nexthop = NULL;
+ for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
+ ctx_nexthop)) {
+ if (nexthop_same(ctx_nexthop, nexthop))
+ break;
+ }
+
+ if (ctx_nexthop == NULL) {
+ /* Nexthop not in the new installed set */
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
+ nexthop2str(nexthop, nh_str,
+ sizeof(nh_str));
+ zlog_debug("update_from_ctx: no match for fib nh %s",
+ nh_str);
+ }
+
+ matched = false;
+ break;
+ }
+ }
+
+ if (!matched)
+ break;
+
+ /* Check the new installed set */
+ ctx_nexthop = NULL;
+ for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx), ctx_nexthop)) {
+
+ if (CHECK_FLAG(ctx_nexthop->flags,
+ NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ /* Compare with the current group's nexthops */
+ nexthop = NULL;
+ for (ALL_NEXTHOPS(re->fib_ng, nexthop)) {
+ if (nexthop_same(nexthop, ctx_nexthop))
+ break;
+ }
+
+ if (nexthop == NULL) {
+ /* Nexthop not in the old installed set */
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
+ nexthop2str(ctx_nexthop, nh_str,
+ sizeof(nh_str));
+ zlog_debug("update_from_ctx: no fib match for notif nh %s",
+ nh_str);
+ }
+ matched = false;
+ break;
+ }
+ }
+
+ } while (0);
+
+ /* If the new FIB set matches the existing FIB set, we're done. */
+ if (matched) {
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%u:%s update_from_ctx(): existing fib nhg, no change",
+ re->vrf_id, dest_str);
+ goto done;
+
+ } else if (re->fib_ng.nexthop) {
+ /*
+ * Free stale fib list and move on to check the rib nhg.
+ */
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%u:%s update_from_ctx(): replacing fib nhg",
+ re->vrf_id, dest_str);
+ nexthops_free(re->fib_ng.nexthop);
+ re->fib_ng.nexthop = NULL;
+
+ /* Note that the installed nexthops have changed */
+ changed_p = true;
+ } else {
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%u:%s update_from_ctx(): no fib nhg",
+ re->vrf_id, dest_str);
+ }
+
+ /*
+ * Compare with the rib nexthop group. The comparison here is different:
+ * the RIB group may be a superset of the list installed in the FIB. We
+ * walk the RIB group, looking for the 'installable' candidate
+ * nexthops, and then check those against the set
+ * that is actually installed.
+ */
+ matched = true;
+ for (ALL_NEXTHOPS(re->ng, nexthop)) {
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ continue;
+
+ /* Check for a FIB nexthop corresponding to the RIB nexthop */
+ ctx_nexthop = NULL;
+ for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx), ctx_nexthop)) {
+ if (nexthop_same(ctx_nexthop, nexthop))
+ break;
+ }
+
+ /* If the FIB doesn't know about the nexthop,
+ * it's not installed
+ */
+ if (ctx_nexthop == NULL) {
+ if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
+ nexthop2str(nexthop, nh_str, sizeof(nh_str));
+ zlog_debug("update_from_ctx: no notif match for rib nh %s",
+ nh_str);
+ }
+ matched = false;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ changed_p = true;
+
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
+ break;
+ }
+
+ if (CHECK_FLAG(ctx_nexthop->flags, NEXTHOP_FLAG_FIB)) {
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ changed_p = true;
+
+ SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
+ } else {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ changed_p = true;
+
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
+ }
+ }
+
+ /* If all nexthops were processed, we're done */
+ if (matched) {
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%u:%s update_from_ctx(): rib nhg matched, changed '%s'",
+ re->vrf_id, dest_str,
+ (changed_p ? "true" : "false"));
+ goto done;
+ }
+
+ /* FIB nexthop set differs from the RIB set:
+ * create a fib-specific nexthop-group
+ */
+ if (IS_ZEBRA_DEBUG_RIB)
+ zlog_debug("%u:%s update_from_ctx(): changed %s, adding new fib nhg",
+ re->vrf_id, dest_str,
+ (changed_p ? "true" : "false"));
+
+ ctxnhg = dplane_ctx_get_ng(ctx);
+
+ if (ctxnhg->nexthop)
+ copy_nexthops(&(re->fib_ng.nexthop), ctxnhg->nexthop, NULL);
+ else {
+ /* Bit of a special case when the fib has _no_ installed
+ * nexthops.
+ */
+ nexthop = nexthop_new();
+ nexthop->type = NEXTHOP_TYPE_IPV4;
+ nexthop_add(&(re->fib_ng.nexthop), nexthop);
+ }
+
+done:
+ return changed_p;
+}
+
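/*
 * [Editor's sketch] The first comparison in rib_update_re_from_ctx() above
 * is a strict, two-directional set match between the route's fib
 * nexthop-group and the set reported by the dataplane.  This standalone
 * sketch shows the same idea with illustration-only "toy_" names.
 */
#include <stdbool.h>

struct toy_nh {
	struct toy_nh *next;
	int id; /* stand-in for full nexthop identity (nexthop_same()) */
};

static bool toy_list_contains(const struct toy_nh *list, int id)
{
	for (; list; list = list->next)
		if (list->id == id)
			return true;
	return false;
}

/* Exact set equality: every element of 'a' must appear in 'b' and
 * every element of 'b' must appear in 'a'.
 */
static bool toy_sets_match(const struct toy_nh *a, const struct toy_nh *b)
{
	const struct toy_nh *nh;

	for (nh = a; nh; nh = nh->next)
		if (!toy_list_contains(b, nh->id))
			return false;

	for (nh = b; nh; nh = nh->next)
		if (!toy_list_contains(a, nh->id))
			return false;

	return true;
}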
+/*
+ * Helper to locate a zebra route-node from a dplane context. This is used
+ * when processing dplane results. Note well: the route-node is returned
+ * with a ref held - route_unlock_node() must be called eventually.
+ */
+static struct route_node *
+rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx)
{
struct route_table *table = NULL;
- struct zebra_vrf *zvrf = NULL;
struct route_node *rn = NULL;
- struct route_entry *re = NULL, *old_re = NULL, *rib;
- bool is_update = false;
- struct nexthop *nexthop, *ctx_nexthop;
- char dest_str[PREFIX_STRLEN] = "";
- enum dplane_op_e op;
- enum zebra_dplane_result status;
const struct prefix *dest_pfx, *src_pfx;
- uint32_t seq;
/* Locate rn and re(s) from ctx */
@@ -1865,7 +1631,7 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx)
dplane_ctx_get_table(ctx));
if (table == NULL) {
if (IS_ZEBRA_DEBUG_DPLANE) {
- zlog_debug("Failed to process dplane results: no table for afi %d, safi %d, vrf %u",
+ zlog_debug("Failed to find route for ctx: no table for afi %d, safi %d, vrf %u",
dplane_ctx_get_afi(ctx),
dplane_ctx_get_safi(ctx),
dplane_ctx_get_vrf(ctx));
@@ -1873,8 +1639,35 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx)
goto done;
}
- zvrf = vrf_info_lookup(dplane_ctx_get_vrf(ctx));
+ dest_pfx = dplane_ctx_get_dest(ctx);
+ src_pfx = dplane_ctx_get_src(ctx);
+
+ rn = srcdest_rnode_get(table, dest_pfx,
+ src_pfx ? (struct prefix_ipv6 *)src_pfx : NULL);
+
+done:
+ return rn;
+}
+
+
+/*
+ * Route-update results processing after async dataplane update.
+ */
+static void rib_process_result(struct zebra_dplane_ctx *ctx)
+{
+ struct zebra_vrf *zvrf = NULL;
+ struct route_node *rn = NULL;
+ struct route_entry *re = NULL, *old_re = NULL, *rib;
+ bool is_update = false;
+ char dest_str[PREFIX_STRLEN] = "";
+ enum dplane_op_e op;
+ enum zebra_dplane_result status;
+ const struct prefix *dest_pfx, *src_pfx;
+ uint32_t seq;
+ bool fib_changed = false;
+
+ zvrf = vrf_info_lookup(dplane_ctx_get_vrf(ctx));
dest_pfx = dplane_ctx_get_dest(ctx);
/* Note well: only capturing the prefix string if debug is enabled here;
@@ -1883,9 +1676,8 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx)
if (IS_ZEBRA_DEBUG_DPLANE)
prefix2str(dest_pfx, dest_str, sizeof(dest_str));
- src_pfx = dplane_ctx_get_src(ctx);
- rn = srcdest_rnode_get(table, dplane_ctx_get_dest(ctx),
- src_pfx ? (struct prefix_ipv6 *)src_pfx : NULL);
+ /* Locate rn and re(s) from ctx */
+ rn = rib_find_rn_from_ctx(ctx);
if (rn == NULL) {
if (IS_ZEBRA_DEBUG_DPLANE) {
zlog_debug("Failed to process dplane results: no route for %u:%s",
@@ -1979,34 +1771,25 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx)
UNSET_FLAG(old_re->status,
ROUTE_ENTRY_INSTALLED);
}
- /* Update zebra nexthop FIB flag for each
- * nexthop that was installed.
- */
- for (ALL_NEXTHOPS_PTR(dplane_ctx_get_ng(ctx),
- ctx_nexthop)) {
- if (!re)
- continue;
-
- for (ALL_NEXTHOPS(re->ng, nexthop)) {
- if (nexthop_same(ctx_nexthop, nexthop))
- break;
+ /* Update zebra route based on the results in
+ * the context struct.
+ */
+ if (re) {
+ fib_changed =
+ rib_update_re_from_ctx(re, rn, ctx);
+
+ if (!fib_changed) {
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
+ zlog_debug("%u:%s no fib change for re",
+ dplane_ctx_get_vrf(
+ ctx),
+ dest_str);
}
- if (nexthop == NULL)
- continue;
-
- if (CHECK_FLAG(nexthop->flags,
- NEXTHOP_FLAG_RECURSIVE))
- continue;
-
- if (CHECK_FLAG(ctx_nexthop->flags,
- NEXTHOP_FLAG_FIB))
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_FIB);
- else
- UNSET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_FIB);
+ /* Redistribute */
+ redistribute_update(dest_pfx, src_pfx,
+ re, NULL);
}
/*
@@ -2023,19 +1806,6 @@ static void rib_process_result(struct zebra_dplane_ctx *ctx)
if (zvrf)
zvrf->installs++;
- /* Redistribute */
- /*
- * TODO -- still calling the redist api using the
- * route_entries, and there's a corner-case here:
- * if there's no client for the 'new' route, a redist
- * deleting the 'old' route will be sent. But if the
- * 'old' context info was stale, 'old_re' will be
- * NULL here and that delete will not be sent.
- */
- if (re)
- redistribute_update(dest_pfx, src_pfx,
- re, old_re);
-
/* Notify route owner */
zsend_route_notify_owner_ctx(ctx, ZAPI_ROUTE_INSTALLED);
@@ -2110,6 +1880,179 @@ done:
dplane_ctx_fini(&ctx);
}
+/*
+ * Handle notification from async dataplane: the dataplane has detected
+ * some change to a route, and notifies zebra so that the control plane
+ * can reflect that change.
+ */
+static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
+{
+ struct route_node *rn = NULL;
+ struct route_entry *re = NULL;
+ struct nexthop *nexthop;
+ char dest_str[PREFIX_STRLEN] = "";
+ const struct prefix *dest_pfx, *src_pfx;
+ rib_dest_t *dest;
+ bool fib_changed = false;
+	bool debug_p = IS_ZEBRA_DEBUG_DPLANE || IS_ZEBRA_DEBUG_RIB;
+	int start_count, end_count;
+
+	dest_pfx = dplane_ctx_get_dest(ctx);
+
+ /* Note well: only capturing the prefix string if debug is enabled here;
+ * unconditional log messages will have to generate the string.
+ */
+ if (debug_p)
+ prefix2str(dest_pfx, dest_str, sizeof(dest_str));
+
+ /* Locate rn and re(s) from ctx */
+ rn = rib_find_rn_from_ctx(ctx);
+ if (rn == NULL) {
+ if (debug_p) {
+ zlog_debug("Failed to process dplane notification: no routes for %u:%s",
+ dplane_ctx_get_vrf(ctx), dest_str);
+ }
+ goto done;
+ }
+
+ dest = rib_dest_from_rnode(rn);
+ srcdest_rnode_prefixes(rn, &dest_pfx, &src_pfx);
+
+ if (debug_p)
+ zlog_debug("%u:%s Processing dplane notif ctx %p",
+ dplane_ctx_get_vrf(ctx), dest_str, ctx);
+
+ /*
+ * Take a pass through the routes, look for matches with the context
+ * info.
+ */
+ RNODE_FOREACH_RE(rn, re) {
+ if (rib_route_match_ctx(re, ctx, false /*!update*/))
+ break;
+ }
+
+ /* No match? Nothing we can do */
+ if (re == NULL) {
+ if (debug_p)
+ zlog_debug("%u:%s Unable to process dplane notification: no entry for type %s",
+ dplane_ctx_get_vrf(ctx), dest_str,
+ zebra_route_string(
+ dplane_ctx_get_type(ctx)));
+
+ goto done;
+ }
+
+ /* Is this a notification that ... matters? We only really care about
+ * the route that is currently selected for installation.
+ */
+ if (re != dest->selected_fib) {
+ /* TODO -- don't skip processing entirely? We might like to
+ * at least report on the event.
+ */
+ if (debug_p)
+ zlog_debug("%u:%s dplane notif, but type %s not selected_fib",
+ dplane_ctx_get_vrf(ctx), dest_str,
+ zebra_route_string(
+ dplane_ctx_get_type(ctx)));
+ goto done;
+ }
+
+ /* We'll want to determine whether the installation status of the
+ * route has changed: we'll check the status before processing,
+ * and then again if there's been a change.
+ */
+ start_count = 0;
+ for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ start_count++;
+ }
+
+ /* Update zebra's nexthop FIB flags based on the context struct's
+ * nexthops.
+ */
+ fib_changed = rib_update_re_from_ctx(re, rn, ctx);
+
+ if (!fib_changed) {
+ if (debug_p)
+ zlog_debug("%u:%s No change from dplane notification",
+ dplane_ctx_get_vrf(ctx), dest_str);
+
+ goto done;
+ }
+
+ /*
+ * Perform follow-up work if the actual status of the prefix
+ * changed.
+ */
+
+ end_count = 0;
+ for (ALL_NEXTHOPS_PTR(rib_active_nhg(re), nexthop)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ end_count++;
+ }
+
+ /* Various fib transitions: changed nexthops; from installed to
+ * not-installed; or not-installed to installed.
+ */
+ if (start_count > 0 && end_count > 0) {
+
+ /* Changed nexthops - update kernel/others */
+ dplane_route_notif_update(rn, re,
+ DPLANE_OP_ROUTE_UPDATE, ctx);
+
+ } else if (start_count == 0 && end_count > 0) {
+ if (debug_p)
+ zlog_debug("%u:%s installed transition from dplane notification",
+ dplane_ctx_get_vrf(ctx), dest_str);
+
+ /* We expect this to be the selected route, so we want
+		 * to tell others about this transition.
+ */
+ SET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
+
+ /* Changed nexthops - update kernel/others */
+ dplane_route_notif_update(rn, re, DPLANE_OP_ROUTE_INSTALL, ctx);
+
+ /* Redistribute, lsp, and nht update */
+ redistribute_update(dest_pfx, src_pfx, re, NULL);
+
+ zebra_rib_evaluate_rn_nexthops(
+ rn, zebra_router_get_next_sequence());
+
+ zebra_rib_evaluate_mpls(rn);
+
+ } else if (start_count > 0 && end_count == 0) {
+ if (debug_p)
+ zlog_debug("%u:%s un-installed transition from dplane notification",
+ dplane_ctx_get_vrf(ctx), dest_str);
+
+ /* Transition from _something_ installed to _nothing_
+ * installed.
+ */
+ /* We expect this to be the selected route, so we want
+		 * to tell others about this transition.
+ */
+ UNSET_FLAG(re->status, ROUTE_ENTRY_INSTALLED);
+
+ /* Changed nexthops - update kernel/others */
+ dplane_route_notif_update(rn, re, DPLANE_OP_ROUTE_DELETE, ctx);
+
+ /* Redistribute, lsp, and nht update */
+ redistribute_delete(dest_pfx, src_pfx, re);
+
+ zebra_rib_evaluate_rn_nexthops(
+ rn, zebra_router_get_next_sequence());
+
+ zebra_rib_evaluate_mpls(rn);
+ }
+
+done:
+ if (rn)
+ route_unlock_node(rn);
+
+ /* Return context to dataplane module */
+ dplane_ctx_fini(&ctx);
+}
+
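/*
 * [Editor's sketch] The notification handler above classifies the result
 * purely by the count of FIB-installed nexthops before and after the
 * update.  This tiny sketch restates that decision table; the "toy_"
 * names are illustration-only.
 */
enum toy_fib_transition {
	TOY_FIB_NO_CHANGE,	   /* was empty, still empty */
	TOY_FIB_STILL_INSTALLED,   /* nexthop set changed, still installed */
	TOY_FIB_BECAME_INSTALLED,  /* nothing installed -> something installed */
	TOY_FIB_BECAME_UNINSTALLED /* something installed -> nothing installed */
};

static enum toy_fib_transition toy_classify(int start_count, int end_count)
{
	if (start_count > 0 && end_count > 0)
		return TOY_FIB_STILL_INSTALLED;
	if (start_count == 0 && end_count > 0)
		return TOY_FIB_BECAME_INSTALLED;
	if (start_count > 0 && end_count == 0)
		return TOY_FIB_BECAME_UNINSTALLED;
	return TOY_FIB_NO_CHANGE;
}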
/* Take a list of route_node structs and return 1, if there was a record
* picked from it and processed by rib_process(). Don't process more,
* than one RN record; operate only in the specified sub-queue.
@@ -2133,6 +2076,7 @@ static unsigned int process_subq(struct list *subq, uint8_t qindex)
if (IS_ZEBRA_DEBUG_RIB_DETAILED) {
char buf[SRCDEST2STR_BUFFER];
+
srcdest_rnode2str(rnode, buf, sizeof(buf));
zlog_debug("%u:%s: rn %p dequeued from sub-queue %u",
zvrf ? zvrf_id(zvrf) : 0, buf, rnode, qindex);
@@ -2468,6 +2412,8 @@ void rib_unlink(struct route_node *rn, struct route_entry *re)
dest->selected_fib = NULL;
nexthops_free(re->ng.nexthop);
+ nexthops_free(re->fib_ng.nexthop);
+
XFREE(MTYPE_RE, re);
}
@@ -2704,15 +2650,9 @@ int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
apply_mask_ipv6(src_p);
/* Set default distance by route type. */
- if (re->distance == 0) {
+ if (re->distance == 0)
re->distance = route_distance(re->type);
- /* iBGP distance is 200. */
- if (re->type == ZEBRA_ROUTE_BGP
- && CHECK_FLAG(re->flags, ZEBRA_FLAG_IBGP))
- re->distance = 200;
- }
-
/* Lookup route node.*/
rn = srcdest_rnode_get(table, p, src_p);
@@ -2855,7 +2795,11 @@ void rib_delete(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type,
break;
}
for (ALL_NEXTHOPS(re->ng, rtnh))
- if (nexthop_same_no_recurse(rtnh, nh)) {
+ /*
+			 * No guarantee all kernels send nh with labels
+ * on delete.
+ */
+ if (nexthop_same_no_labels(rtnh, nh)) {
same = re;
break;
}
@@ -3327,13 +3271,40 @@ static int rib_process_dplane_results(struct thread *thread)
case DPLANE_OP_ROUTE_INSTALL:
case DPLANE_OP_ROUTE_UPDATE:
case DPLANE_OP_ROUTE_DELETE:
- rib_process_result(ctx);
+ {
+		/* Bit of a special case for route updates
+ * that were generated by async notifications:
+ * we don't want to continue processing these
+ * in the rib.
+ */
+ if (dplane_ctx_get_notif_provider(ctx) == 0)
+ rib_process_result(ctx);
+ else
+ dplane_ctx_fini(&ctx);
+ }
+ break;
+
+ case DPLANE_OP_ROUTE_NOTIFY:
+ rib_process_dplane_notify(ctx);
break;
case DPLANE_OP_LSP_INSTALL:
case DPLANE_OP_LSP_UPDATE:
case DPLANE_OP_LSP_DELETE:
- zebra_mpls_lsp_dplane_result(ctx);
+ {
+		/* Bit of a special case for LSP updates
+ * that were generated by async notifications:
+ * we don't want to continue processing these.
+ */
+ if (dplane_ctx_get_notif_provider(ctx) == 0)
+ zebra_mpls_lsp_dplane_result(ctx);
+ else
+ dplane_ctx_fini(&ctx);
+ }
+ break;
+
+ case DPLANE_OP_LSP_NOTIFY:
+ zebra_mpls_process_dplane_notify(ctx);
break;
case DPLANE_OP_PW_INSTALL:
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index ece8f40dcf..257fb168d2 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -382,7 +382,8 @@ static void vty_show_ip_route_detail(struct vty *vty, struct route_node *rn,
}
static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
- struct route_entry *re, json_object *json)
+ struct route_entry *re, json_object *json,
+ bool is_fib)
{
struct nexthop *nexthop;
int len = 0;
@@ -394,11 +395,20 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
time_t uptime;
struct tm *tm;
rib_dest_t *dest = rib_dest_from_rnode(rn);
+ struct nexthop_group *nhg;
uptime = monotime(NULL);
uptime -= re->uptime;
tm = gmtime(&uptime);
+ /* If showing fib information, use the fib view of the
+ * nexthops.
+ */
+ if (is_fib)
+ nhg = rib_active_nhg(re);
+ else
+ nhg = &(re->ng);
+
if (json) {
json_route = json_object_new_object();
json_nexthops = json_object_new_array();
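/*
 * [Editor's sketch] The is_fib path above relies on rib_active_nhg() to
 * pick the nexthop-group to display.  The assumption (hedged - see the
 * rib.h changes in this commit) is that it prefers the fib-specific group
 * when one has been recorded and falls back to the RIB group otherwise;
 * the "toy_" names below are illustration-only.
 */
struct toy_nhg {
	void *nexthop; /* non-NULL when the group holds nexthops */
};

struct toy_route_entry {
	struct toy_nhg ng;     /* nexthops as requested by the route owner */
	struct toy_nhg fib_ng; /* nexthops actually installed, if different */
};

static struct toy_nhg *toy_active_nhg(struct toy_route_entry *re)
{
	/* Prefer the fib-specific view when it exists */
	if (re->fib_ng.nexthop)
		return &re->fib_ng;

	return &re->ng;
}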
@@ -455,7 +465,7 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
json_object_string_add(json_route, "uptime", buf);
- for (ALL_NEXTHOPS(re->ng, nexthop)) {
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
json_nexthop = json_object_new_object();
json_object_int_add(json_nexthop, "flags",
@@ -625,8 +635,8 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
}
/* Nexthop information. */
- for (ALL_NEXTHOPS(re->ng, nexthop)) {
- if (nexthop == re->ng.nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
+ if (nexthop == nhg->nexthop) {
/* Prefix information. */
len = vty_out(vty, "%c", zebra_route_char(re->type));
if (re->instance)
@@ -779,7 +789,7 @@ static void vty_show_ip_route_detail_json(struct vty *vty,
*/
if (use_fib && re != dest->selected_fib)
continue;
- vty_show_ip_route(vty, rn, re, json_prefix);
+ vty_show_ip_route(vty, rn, re, json_prefix, use_fib);
}
prefix2str(&rn->p, buf, sizeof(buf));
@@ -865,7 +875,7 @@ static void do_show_route_helper(struct vty *vty, struct zebra_vrf *zvrf,
}
}
- vty_show_ip_route(vty, rn, re, json_prefix);
+ vty_show_ip_route(vty, rn, re, json_prefix, use_fib);
}
if (json_prefix) {
@@ -1552,7 +1562,7 @@ DEFUN (show_ipv6_mroute,
vty_out(vty, SHOW_ROUTE_V6_HEADER);
first = 0;
}
- vty_show_ip_route(vty, rn, re, NULL);
+ vty_show_ip_route(vty, rn, re, NULL, false);
}
return CMD_SUCCESS;
}
@@ -1584,7 +1594,7 @@ DEFUN (show_ipv6_mroute_vrf_all,
vty_out(vty, SHOW_ROUTE_V6_HEADER);
first = 0;
}
- vty_show_ip_route(vty, rn, re, NULL);
+ vty_show_ip_route(vty, rn, re, NULL, false);
}
}
return CMD_SUCCESS;