summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--alpine/APKBUILD.in13
-rw-r--r--bgpd/bgp_community.c16
-rw-r--r--bgpd/bgp_network.c53
-rw-r--r--doc/developer/workflow.rst9
-rw-r--r--doc/user/pbr.rst107
-rw-r--r--doc/user/snmp.rst34
-rw-r--r--isisd/isis_circuit.c3
-rw-r--r--lib/nexthop_group.c54
-rw-r--r--lib/nexthop_group.h3
-rw-r--r--lib/zclient.c76
-rw-r--r--lib/zclient.h9
-rw-r--r--ospfd/ospf_ext.c194
-rw-r--r--ospfd/ospf_ext.h4
-rw-r--r--ospfd/ospf_ri.c10
-rw-r--r--pbrd/pbr_nht.c54
-rw-r--r--pbrd/pbr_nht.h2
-rw-r--r--pbrd/pbr_vty.c159
-rw-r--r--pimd/pim_iface.c1
-rw-r--r--pimd/pim_iface.h1
-rw-r--r--pimd/pim_neighbor.c11
-rw-r--r--pimd/pim_pim.c1
-rw-r--r--pimd/pim_register.c29
-rw-r--r--pimd/pim_register.h1
-rw-r--r--pimd/pim_upstream.c21
-rw-r--r--pimd/pim_upstream.h1
-rw-r--r--sharpd/sharp_vty.c46
-rw-r--r--sharpd/sharp_zebra.c69
-rw-r--r--sharpd/sharp_zebra.h4
-rw-r--r--tests/topotests/all-protocol-startup/r1/pbrd.conf10
-rwxr-xr-xtests/topotests/all-protocol-startup/test_all_protocol_startup.py1
-rw-r--r--tests/topotests/lib/topogen.py2
-rw-r--r--tests/topotests/lib/topotest.py1
-rw-r--r--tests/topotests/ospf-sr-topo1/r2/ospfd.conf2
-rw-r--r--tests/topotests/pbr-topo1/r1/pbr-interface.json12
-rw-r--r--tests/topotests/pbr-topo1/r1/pbr-map.json60
-rw-r--r--tests/topotests/pbr-topo1/r1/pbr-nexthop-groups.json58
-rw-r--r--tests/topotests/pbr-topo1/r1/pbrd.conf33
-rw-r--r--tests/topotests/pbr-topo1/r1/zebra.conf11
-rwxr-xr-xtests/topotests/pbr-topo1/test_pbr_topo1.py180
-rw-r--r--zebra/rt_netlink.c10
-rw-r--r--zebra/zapi_msg.c61
-rw-r--r--zebra/zebra_dplane.c95
-rw-r--r--zebra/zebra_dplane.h16
-rw-r--r--zebra/zebra_mpls.c1051
-rw-r--r--zebra/zebra_mpls.h67
-rw-r--r--zebra/zebra_mpls_openbsd.c6
-rw-r--r--zebra/zebra_nhg.c14
-rw-r--r--zebra/zebra_nhg.h6
-rw-r--r--zebra/zebra_rib.c1
49 files changed, 2185 insertions, 497 deletions
diff --git a/alpine/APKBUILD.in b/alpine/APKBUILD.in
index d4657dfe55..c7d699bd48 100644
--- a/alpine/APKBUILD.in
+++ b/alpine/APKBUILD.in
@@ -6,7 +6,7 @@ pkgdesc="FRRouting is a fork of quagga"
url="https://frrouting.org/"
arch="x86_64"
license="GPL-2.0"
-depends="json-c c-ares ipsec-tools iproute2 python py-ipaddr bash"
+depends="json-c c-ares ipsec-tools iproute2 python3 py-ipaddr bash"
makedepends="ncurses-dev net-snmp-dev gawk texinfo perl
acct autoconf automake bash binutils bison bsd-compat-headers build-base
c-ares c-ares-dev ca-certificates cryptsetup-libs curl device-mapper-libs
@@ -16,9 +16,9 @@ makedepends="ncurses-dev net-snmp-dev gawk texinfo perl
libltdl libressl libssh2 libstdc++ libtool libuuid libyang-dev
linux-headers lzip lzo m4 make mkinitfs mpc1 mpfr4 mtools musl-dev
ncurses-libs ncurses-terminfo ncurses-terminfo-base patch pax-utils pcre
- perl pkgconf python2 python2-dev readline readline-dev sqlite-libs
- squashfs-tools sudo tar texinfo xorriso xz-libs py-pip rtrlib
- rtrlib-dev"
+ perl pkgconf python3 python3-dev readline readline-dev sqlite-libs
+ squashfs-tools sudo tar texinfo xorriso xz-libs py-pip rtrlib rtrlib-dev
+ py3-sphinx"
checkdepends="pytest py-setuptools"
install="$pkgname.pre-install $pkgname.pre-deinstall $pkgname.post-deinstall"
subpackages="$pkgname-dev $pkgname-doc $pkgname-dbg"
@@ -35,11 +35,6 @@ _user=frr
build() {
cd "$builddir"
- _localpythondir=$PWD/.python
- pip2 install --prefix $_localpythondir sphinx
- export PATH=${_localpythondir}/bin:$PATH
- export PYTHONPATH=${_localpythondir}/lib/python2.7/site-packages
-
./configure \
--prefix=/usr \
--sbindir=$_sbindir \
diff --git a/bgpd/bgp_community.c b/bgpd/bgp_community.c
index 0d60fbf479..b6cc2b839f 100644
--- a/bgpd/bgp_community.c
+++ b/bgpd/bgp_community.c
@@ -677,6 +677,14 @@ community_gettoken(const char *buf, enum community_token *token, uint32_t *val)
p += strlen("graceful-shutdown");
return p;
}
+ if (strncmp(p, "accept-own-nexthop",
+ strlen("accept-own-nexthop"))
+ == 0) {
+ *val = COMMUNITY_ACCEPT_OWN_NEXTHOP;
+ *token = community_token_accept_own_nexthop;
+ p += strlen("accept-own-nexthop");
+ return p;
+ }
if (strncmp(p, "accept-own", strlen("accept-own"))
== 0) {
*val = COMMUNITY_ACCEPT_OWN;
@@ -728,14 +736,6 @@ community_gettoken(const char *buf, enum community_token *token, uint32_t *val)
p += strlen("no-llgr");
return p;
}
- if (strncmp(p, "accept-own-nexthop",
- strlen("accept-own-nexthop"))
- == 0) {
- *val = COMMUNITY_ACCEPT_OWN_NEXTHOP;
- *token = community_token_accept_own_nexthop;
- p += strlen("accept-own-nexthop");
- return p;
- }
if (strncmp(p, "blackhole", strlen("blackhole"))
== 0) {
*val = COMMUNITY_BLACKHOLE;
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index 037aeec288..00cc1f67a1 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -360,10 +360,13 @@ static int bgp_accept(struct thread *thread)
sockunion_init(&su);
+ bgp = bgp_lookup_by_name(listener->name);
+
/* Register accept thread. */
accept_sock = THREAD_FD(thread);
if (accept_sock < 0) {
- flog_err_sys(EC_LIB_SOCKET, "accept_sock is nevative value %d",
+ flog_err_sys(EC_LIB_SOCKET,
+ "[Error] BGP accept socket fd is negative: %d",
accept_sock);
return -1;
}
@@ -374,10 +377,37 @@ static int bgp_accept(struct thread *thread)
/* Accept client connection. */
bgp_sock = sockunion_accept(accept_sock, &su);
+ int save_errno = errno;
if (bgp_sock < 0) {
- flog_err_sys(EC_LIB_SOCKET,
- "[Error] BGP socket accept failed (%s)",
- safe_strerror(errno));
+ if (save_errno == EINVAL) {
+ struct vrf *vrf =
+ bgp ? vrf_lookup_by_id(bgp->vrf_id) : NULL;
+
+ /*
+ * It appears that sometimes, when VRFs are deleted on
+ * the system, it takes a little while for us to get
+ * notified about that. In the meantime we endlessly
+ * loop on accept(), because the socket, having been
+ * bound to a now-deleted VRF device, is in some weird
+ * state which causes accept() to fail.
+ *
+ * To avoid this, if we see accept() fail with EINVAL,
+ * we cancel ourselves and trust that when the VRF
+ * deletion notification comes in the event handler for
+ * that will take care of cleaning us up.
+ */
+ flog_err_sys(
+ EC_LIB_SOCKET,
+ "[Error] accept() failed with error \"%s\" on BGP listener socket %d for BGP instance in VRF \"%s\"; refreshing socket",
+ safe_strerror(save_errno), accept_sock,
+ VRF_LOGNAME(vrf));
+ THREAD_OFF(listener->thread);
+ } else {
+ flog_err_sys(
+ EC_LIB_SOCKET,
+ "[Error] BGP socket accept failed (%s); retrying",
+ safe_strerror(save_errno));
+ }
return -1;
}
set_nonblocking(bgp_sock);
@@ -421,9 +451,9 @@ static int bgp_accept(struct thread *thread)
if (!peer1) {
if (bgp_debug_neighbor_events(NULL)) {
zlog_debug(
- "[Event] %s connection rejected - not configured"
- " and not valid for dynamic",
- inet_sutop(&su, buf));
+ "[Event] %s connection rejected(%s:%u:%s) - not configured and not valid for dynamic",
+ inet_sutop(&su, buf), bgp->name_pretty, bgp->as,
+ VRF_LOGNAME(vrf_lookup_by_id(bgp->vrf_id)));
}
close(bgp_sock);
return -1;
@@ -432,8 +462,9 @@ static int bgp_accept(struct thread *thread)
if (CHECK_FLAG(peer1->flags, PEER_FLAG_SHUTDOWN)) {
if (bgp_debug_neighbor_events(peer1))
zlog_debug(
- "[Event] connection from %s rejected due to admin shutdown",
- inet_sutop(&su, buf));
+ "[Event] connection from %s rejected(%s:%u:%s) due to admin shutdown",
+ inet_sutop(&su, buf), bgp->name_pretty, bgp->as,
+ VRF_LOGNAME(vrf_lookup_by_id(bgp->vrf_id)));
close(bgp_sock);
return -1;
}
@@ -887,7 +918,7 @@ void bgp_close_vrf_socket(struct bgp *bgp)
for (ALL_LIST_ELEMENTS(bm->listen_sockets, node, next, listener)) {
if (listener->bgp == bgp) {
- thread_cancel(listener->thread);
+ THREAD_OFF(listener->thread);
close(listener->fd);
listnode_delete(bm->listen_sockets, listener);
XFREE(MTYPE_BGP_LISTENER, listener->name);
@@ -909,7 +940,7 @@ void bgp_close(void)
for (ALL_LIST_ELEMENTS(bm->listen_sockets, node, next, listener)) {
if (listener->bgp)
continue;
- thread_cancel(listener->thread);
+ THREAD_OFF(listener->thread);
close(listener->fd);
listnode_delete(bm->listen_sockets, listener);
XFREE(MTYPE_BGP_LISTENER, listener->name);
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 6885a41e0f..49fa4fe832 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -1021,6 +1021,15 @@ JSON Output
All JSON keys are to be camelCased, with no spaces.
+Use of const
+^^^^^^^^^^^^
+
+Please consider using ``const`` when possible: it's a useful hint to
+callers about the limits to side-effects from your APIs, and it makes
+it possible to use your APIs in paths that involve ``const``
+objects. If you encounter existing APIs that *could* be ``const``,
+consider including changes in your own pull-request.
+
.. _documentation:
diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst
index b9b28baced..149949e863 100644
--- a/doc/user/pbr.rst
+++ b/doc/user/pbr.rst
@@ -56,10 +56,36 @@ listing of ECMP nexthops used to forward packets for when a pbr-map is matched.
Showing Nexthop Group Information
---------------------------------
-.. clicmd:: show pbr nexthop-groups [NAME]
+.. clicmd:: show pbr nexthop-groups [NAME] [json]
Display information on a PBR nexthop-group. If ``NAME`` is omitted, all
- nexthop groups are shown.
+ nexthop groups are shown. Setting ``json`` will provide the same
+ information in an array of objects which obey the schema below:
+
+ +-----------+----------------------------+---------+
+ | Key | Description | Type |
+ +===========+============================+=========+
+ | id | Unique ID | Integer |
+ +-----------+----------------------------+---------+
+ | name | Name of this group | String |
+ +-----------+----------------------------+---------+
+ | valid | Is this group well-formed? | Boolean |
+ +-----------+----------------------------+---------+
+ | installed | ... and is it installed? | Boolean |
+ +-----------+----------------------------+---------+
+ | nexthops | Nexthops within this group | Array |
+ +-----------+----------------------------+---------+
+
+ Each element within ``nexthops`` describes a single target within this
+ group, and its structure is described by the JSON below:
+
+ +---------+------------------------------+---------+
+ | Key | Description | Type |
+ +=========+==============================+=========+
+ | nexthop | Name of this nexthop | String |
+ +---------+------------------------------+---------+
+ | valid | Is this nexthop well-formed? | Boolean |
+ +---------+------------------------------+---------+
.. _pbr-maps:
@@ -115,11 +141,68 @@ end destination.
Not supported with NETNS VRF backend.
-.. clicmd:: show pbr map [NAME] [detail]
+.. clicmd:: show pbr map [NAME] [detail|json]
Display pbr maps either all or by ``NAME``. If ``detail`` is set, it will
give information about the rules unique ID used internally and some extra
debugging information about install state for the nexthop/nexthop group.
+ Setting ``json`` will provide the same information in an array of objects
+ which obey the schema below:
+
+ +----------+--------------------------------+---------+
+ | Key | Description | Type |
+ +==========+================================+=========+
+ | name | Map name | String |
+ +----------+--------------------------------+---------+
+ | valid | Is the map well-formed? | Boolean |
+ +----------+--------------------------------+---------+
+ | policies | Rules to match packets against | Array |
+ +----------+--------------------------------+---------+
+
+ Each element of the ``policies`` array is composed of a handful of objects
+ representing the policies associated with this map. Each policy is
+ described as below (not all fields are required):
+
+ +-----------------+-------------------------------------------+---------+
+ | Key | Description | Type |
+ +=================+===========================================+=========+
+ | id | Unique ID | Integer |
+ +-----------------+-------------------------------------------+---------+
+ | sequenceNumber | Order of this policy within the map | Integer |
+ +-----------------+-------------------------------------------+---------+
+ | ruleNumber | Rule number to install into | Integer |
+ +-----------------+-------------------------------------------+---------+
+ | vrfUnchanged | Use interface's VRF | Boolean |
+ +-----------------+-------------------------------------------+---------+
+ | installed | Is this policy installed? | Boolean |
+ +-----------------+-------------------------------------------+---------+
+ | installedReason | Why (or why not?) | String |
+ +-----------------+-------------------------------------------+---------+
+ | matchSrc | Match packets with this source address | String |
+ +-----------------+-------------------------------------------+---------+
+ | matchDst | ... or with this destination address | String |
+ +-----------------+-------------------------------------------+---------+
+ | matchMark | ... or with this marker | Integer |
+ +-----------------+-------------------------------------------+---------+
+ | vrfName | Associated VRF (if relevant) | String |
+ +-----------------+-------------------------------------------+---------+
+ | nexthopGroup | This policy's nexthop group (if relevant) | Object |
+ +-----------------+-------------------------------------------+---------+
+
+   Finally, the ``nexthopGroup`` object above contains information we know
+ about the configured nexthop for this policy:
+
+ +---------------------+--------------------------------------+---------+
+ | Key | Description | Type |
+ +=====================+======================================+=========+
+ | tableId | Nexthop table ID | Integer |
+ +---------------------+--------------------------------------+---------+
+ | name | Name of the nexthop group | String |
+ +---------------------+--------------------------------------+---------+
+ | installed | Is this nexthop group installed? | Boolean |
+ +---------------------+--------------------------------------+---------+
+ | installedInternally | Do we think this group is installed? | Integer |
+ +---------------------+--------------------------------------+---------+
.. _pbr-policy:
@@ -141,6 +224,24 @@ causes the policy to be installed into the kernel.
even if one is on the master. Each must have the PBR map explicitly added
to the interface.
+.. clicmd:: show pbr interface [NAME] [json]
+
+ Enumerates all interfaces which ``pbrd`` is keeping track of. Passing
+ ``json`` will return an array of interfaces; each returned interface will
+ adhere to the JSON schema below:
+
+ +--------+----------------------------+---------+
+ | Key | Description | Type |
+ +========+============================+=========+
+ | name | Interface name | String |
+ +--------+----------------------------+---------+
+ | index | Device Index | Integer |
+ +--------+----------------------------+---------+
+ | policy | PBR map for this interface | String |
+ +--------+----------------------------+---------+
+ | valid | Is the map well-formed? | Boolean |
+ +--------+----------------------------+---------+
+
.. _pbr-details:
PBR Details
diff --git a/doc/user/snmp.rst b/doc/user/snmp.rst
index 5579969c0e..d214926245 100644
--- a/doc/user/snmp.rst
+++ b/doc/user/snmp.rst
@@ -23,6 +23,40 @@ the latest version of `net-snmp` which was formerly known as `ucd-snmp`. It is
free and open software and available at `http://www.net-snmp.org/ <http://www.net-snmp.org/>`_
and as binary package for most Linux distributions.
+.. _net-snmp-configuration:
+
+NET-SNMP configuration
+======================
+
+Routers with a heavy amount of routes (e.g. BGP full table) might experience
+problems with a hanging vtysh from time to time, 100% CPU on the snmpd or
+even crashes of the frr daemon(s) due to stalls within AgentX. Once snmp
+agents connect they start receiving a heavy amount of SNMP data (all the
+routes) which cannot be handled quickly enough. It's recommended (by several
+vendors as well) to exclude these OIDs unless you really need them, which
+can be achieved by amending the default view from SNMP
+
+:file:`/etc/snmp/snmpd.conf`:
+
+::
+ # This is the default view
+ view all included .1 80
+ # Remove ipRouteTable from view
+ view all excluded .1.3.6.1.2.1.4.21
+ # Remove ipNetToMediaTable from view
+ view all excluded .1.3.6.1.2.1.4.22
+ # Remove ipNetToPhysicalPhysAddress from view
+ view all excluded .1.3.6.1.2.1.4.35
+ # Remove ipCidrRouteTable from view
+ view all excluded .1.3.6.1.2.1.4.24
+ # Optionally protect SNMP private/secret values
+ view all excluded .1.3.6.1.6.3.15
+ view all excluded .1.3.6.1.6.3.16
+ view all excluded .1.3.6.1.6.3.18
+ # Optionally allow SNMP public info (sysName, location, etc)
+ view system included .iso.org.dod.internet.mgmt.mib-2.system
+
+
.. _agentx-configuration:
AgentX configuration
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 1f76a3b2bb..003be8d682 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -267,7 +267,8 @@ void isis_circuit_add_addr(struct isis_circuit *circuit,
listnode_add(circuit->ip_addrs, ipv4);
/* Update Local IP address parameter if MPLS TE is enable */
- if (circuit->ext && IS_MPLS_TE(circuit->ext)) {
+ if (circuit->ext && circuit->area
+ && IS_MPLS_TE(circuit->area->mta)) {
circuit->ext->local_addr.s_addr = ipv4->prefix.s_addr;
SET_SUBTLV(circuit->ext, EXT_LOCAL_ADDR);
}
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index c23c57d2e1..c62096a126 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -996,6 +996,60 @@ void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh)
vty_out(vty, "\n");
}
+void nexthop_group_json_nexthop(json_object *j, struct nexthop *nh)
+{
+ char buf[100];
+ struct vrf *vrf;
+
+ switch (nh->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ json_object_string_add(j, "nexthop",
+ ifindex2ifname(nh->ifindex, nh->vrf_id));
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ json_object_string_add(j, "nexthop", inet_ntoa(nh->gate.ipv4));
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ json_object_string_add(j, "nexthop", inet_ntoa(nh->gate.ipv4));
+ json_object_string_add(j, "vrfId",
+ ifindex2ifname(nh->ifindex, nh->vrf_id));
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ json_object_string_add(
+ j, "nexthop",
+ inet_ntop(AF_INET6, &nh->gate.ipv6, buf, sizeof(buf)));
+ break;
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ json_object_string_add(
+ j, "nexthop",
+ inet_ntop(AF_INET6, &nh->gate.ipv6, buf, sizeof(buf)));
+ json_object_string_add(j, "vrfId",
+ ifindex2ifname(nh->ifindex, nh->vrf_id));
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ break;
+ }
+
+ if (nh->vrf_id != VRF_DEFAULT) {
+ vrf = vrf_lookup_by_id(nh->vrf_id);
+ json_object_string_add(j, "targetVrf", vrf->name);
+ }
+
+ if (nh->nh_label && nh->nh_label->num_labels > 0) {
+ char buf[200];
+
+ mpls_label2str(nh->nh_label->num_labels, nh->nh_label->label,
+ buf, sizeof(buf), 0);
+ json_object_string_add(j, "label", buf);
+ }
+
+ if (nh->weight)
+ json_object_int_add(j, "weight", nh->weight);
+
+ if (CHECK_FLAG(nh->flags, NEXTHOP_FLAG_HAS_BACKUP))
+ json_object_int_add(j, "backupIdx", nh->backup_idx);
+}
+
static void nexthop_group_write_nexthop_internal(struct vty *vty,
struct nexthop_hold *nh)
{
diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h
index 3a5a1299c1..9888dad982 100644
--- a/lib/nexthop_group.h
+++ b/lib/nexthop_group.h
@@ -22,6 +22,7 @@
#define __NEXTHOP_GROUP__
#include <vty.h>
+#include "json.h"
#ifdef __cplusplus
extern "C" {
@@ -136,6 +137,8 @@ extern struct nexthop_group_cmd *nhgc_find(const char *name);
extern void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh);
+extern void nexthop_group_json_nexthop(json_object *j, struct nexthop *nh);
+
/* Return the number of nexthops in this nhg */
extern uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg);
extern uint8_t
diff --git a/lib/zclient.c b/lib/zclient.c
index be2c4e54a0..02532e7069 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -1543,6 +1543,39 @@ int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh,
}
/*
+ * Format some info about a zapi nexthop, for debug or logging.
+ */
+const char *zapi_nexthop2str(const struct zapi_nexthop *znh, char *buf,
+ int bufsize)
+{
+ char tmp[INET6_ADDRSTRLEN];
+
+ switch (znh->type) {
+ case NEXTHOP_TYPE_IFINDEX:
+ snprintf(buf, bufsize, "if %u", znh->ifindex);
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ inet_ntop(AF_INET, &znh->gate.ipv4, tmp, sizeof(tmp));
+ snprintf(buf, bufsize, "%s if %u", tmp, znh->ifindex);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ inet_ntop(AF_INET6, &znh->gate.ipv6, tmp, sizeof(tmp));
+ snprintf(buf, bufsize, "%s if %u", tmp, znh->ifindex);
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ snprintf(buf, bufsize, "blackhole");
+ break;
+ default:
+ snprintf(buf, bufsize, "unknown");
+ break;
+ }
+
+ return buf;
+}
+
+/*
* Decode the nexthop-tracking update message
*/
bool zapi_nexthop_update_decode(struct stream *s, struct zapi_route *nhr)
@@ -2811,6 +2844,27 @@ int zapi_labels_encode(struct stream *s, int cmd, struct zapi_labels *zl)
return -1;
}
+ if (CHECK_FLAG(zl->message, ZAPI_LABELS_HAS_BACKUPS)) {
+
+ if (zl->backup_nexthop_num > MULTIPATH_NUM) {
+ flog_err(
+ EC_LIB_ZAPI_ENCODE,
+ "%s: label %u: can't encode %u nexthops (maximum is %u)",
+ __func__, zl->local_label, zl->nexthop_num,
+ MULTIPATH_NUM);
+ return -1;
+ }
+ stream_putw(s, zl->backup_nexthop_num);
+
+ for (int i = 0; i < zl->backup_nexthop_num; i++) {
+ znh = &zl->backup_nexthops[i];
+
+ if (zapi_nexthop_encode(s, znh, 0) < 0)
+ return -1;
+ }
+
+ }
+
/* Put length at the first point of the stream. */
stream_putw_at(s, 0, stream_get_endp(s));
@@ -2885,6 +2939,28 @@ int zapi_labels_decode(struct stream *s, struct zapi_labels *zl)
return -1;
}
+ if (CHECK_FLAG(zl->message, ZAPI_LABELS_HAS_BACKUPS)) {
+ STREAM_GETW(s, zl->backup_nexthop_num);
+
+ if (zl->backup_nexthop_num > MULTIPATH_NUM) {
+ flog_warn(
+ EC_LIB_ZAPI_ENCODE,
+ "%s: Prefix %pFX has %d backup nexthops, but we can only use the first %d",
+ __func__, &zl->route.prefix,
+ zl->backup_nexthop_num, MULTIPATH_NUM);
+ }
+
+ zl->backup_nexthop_num = MIN(MULTIPATH_NUM,
+ zl->backup_nexthop_num);
+
+ for (int i = 0; i < zl->backup_nexthop_num; i++) {
+ znh = &zl->backup_nexthops[i];
+
+ if (zapi_nexthop_decode(s, znh, 0) < 0)
+ return -1;
+ }
+ }
+
return 0;
stream_failure:
return -1;
diff --git a/lib/zclient.h b/lib/zclient.h
index e3e929c8d6..feaabc0549 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -484,7 +484,8 @@ struct zapi_route {
struct zapi_labels {
uint8_t message;
-#define ZAPI_LABELS_FTN 0x01
+#define ZAPI_LABELS_FTN 0x01
+#define ZAPI_LABELS_HAS_BACKUPS 0x02
enum lsp_types_t type;
mpls_label_t local_label;
struct {
@@ -495,6 +496,10 @@ struct zapi_labels {
uint16_t nexthop_num;
struct zapi_nexthop nexthops[MULTIPATH_NUM];
+
+ /* Backup nexthops, if present */
+ uint16_t backup_nexthop_num;
+ struct zapi_nexthop backup_nexthops[MULTIPATH_NUM];
};
struct zapi_pw {
@@ -800,6 +805,8 @@ int zapi_backup_nexthop_from_nexthop(struct zapi_nexthop *znh,
const struct nexthop *nh);
extern bool zapi_nexthop_update_decode(struct stream *s,
struct zapi_route *nhr);
+const char *zapi_nexthop2str(const struct zapi_nexthop *znh, char *buf,
+ int bufsize);
/* Decode the zebra error message */
extern bool zapi_error_decode(struct stream *s, enum zebra_error_types *error);
diff --git a/ospfd/ospf_ext.c b/ospfd/ospf_ext.c
index 47883d5f39..1543e2015d 100644
--- a/ospfd/ospf_ext.c
+++ b/ospfd/ospf_ext.c
@@ -80,7 +80,6 @@ static struct ospf_ext_lp OspfEXT;
*/
/* Extended Prefix Opaque LSA related callback functions */
-static void ospf_ext_pref_ism_change(struct ospf_interface *oi, int old_status);
static void ospf_ext_pref_show_info(struct vty *vty, struct ospf_lsa *lsa);
static int ospf_ext_pref_lsa_originate(void *arg);
static struct ospf_lsa *ospf_ext_pref_lsa_refresh(struct ospf_lsa *lsa);
@@ -89,7 +88,7 @@ static void ospf_ext_pref_lsa_schedule(struct ext_itf *exti,
/* Extended Link Opaque LSA related callback functions */
static int ospf_ext_link_new_if(struct interface *ifp);
static int ospf_ext_link_del_if(struct interface *ifp);
-static void ospf_ext_link_ism_change(struct ospf_interface *oi, int old_status);
+static void ospf_ext_ism_change(struct ospf_interface *oi, int old_status);
static void ospf_ext_link_nsm_change(struct ospf_neighbor *nbr, int old_status);
static void ospf_ext_link_show_info(struct vty *vty, struct ospf_lsa *lsa);
static int ospf_ext_link_lsa_originate(void *arg);
@@ -125,7 +124,7 @@ int ospf_ext_init(void)
OSPF_OPAQUE_AREA_LSA, OPAQUE_TYPE_EXTENDED_LINK_LSA,
ospf_ext_link_new_if, /* new if */
ospf_ext_link_del_if, /* del if */
- ospf_ext_link_ism_change, /* ism change */
+ ospf_ext_ism_change, /* ism change */
ospf_ext_link_nsm_change, /* nsm change */
NULL, /* Write router config. */
NULL, /* Write interface conf. */
@@ -148,7 +147,7 @@ int ospf_ext_init(void)
OspfEXT.scope, OPAQUE_TYPE_EXTENDED_PREFIX_LSA,
NULL, /* new if handle by link */
NULL, /* del if handle by link */
- ospf_ext_pref_ism_change, /* ism change */
+ NULL, /* ism change */
NULL, /* nsm change */
ospf_sr_config_write_router, /* Write router config. */
NULL, /* Write interface conf. */
@@ -200,7 +199,15 @@ void ospf_ext_term(void)
*/
void ospf_ext_finish(void)
{
- // list_delete_all_node(OspfEXT.iflist);
+
+ struct listnode *node;
+ struct ext_itf *exti;
+
+ /* Flush Router Info LSA */
+ for (ALL_LIST_ELEMENTS_RO(OspfEXT.iflist, node, exti))
+ if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED))
+ ospf_ext_lsa_schedule(exti, FLUSH_THIS_LSA);
+
OspfEXT.enabled = false;
}
@@ -471,11 +478,15 @@ uint32_t ospf_ext_schedule_prefix_index(struct interface *ifp, uint32_t index,
set_prefix_sid(exti, SR_ALGORITHM_SPF, index, SID_INDEX, flags);
/* Try to Schedule LSA */
- SET_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE);
- if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED))
- ospf_ext_pref_lsa_schedule(exti, REFRESH_THIS_LSA);
- else
- ospf_ext_pref_lsa_schedule(exti, REORIGINATE_THIS_LSA);
+ // SET_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE);
+ if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE)) {
+ if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED))
+ ospf_ext_pref_lsa_schedule(exti,
+ REFRESH_THIS_LSA);
+ else
+ ospf_ext_pref_lsa_schedule(
+ exti, REORIGINATE_THIS_LSA);
+ }
} else {
if (IS_DEBUG_OSPF_SR)
zlog_debug("EXT (%s): Remove prefix for interface %s",
@@ -483,8 +494,7 @@ uint32_t ospf_ext_schedule_prefix_index(struct interface *ifp, uint32_t index,
if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED)) {
ospf_ext_pref_lsa_schedule(exti, FLUSH_THIS_LSA);
- UNSET_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED);
- UNSET_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE);
+ exti->flags = EXT_LPFLG_LSA_INACTIVE;
}
}
@@ -509,18 +519,26 @@ void ospf_ext_update_sr(bool enable)
if (enable) {
OspfEXT.enabled = true;
+
/* Refresh LSAs if already engaged or originate */
- for (ALL_LIST_ELEMENTS_RO(OspfEXT.iflist, node, exti))
+ for (ALL_LIST_ELEMENTS_RO(OspfEXT.iflist, node, exti)) {
+ if (!CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE))
+ continue;
+
if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED))
ospf_ext_lsa_schedule(exti, REFRESH_THIS_LSA);
else
ospf_ext_lsa_schedule(exti,
REORIGINATE_THIS_LSA);
+ }
} else {
/* Start by Flushing engaged LSAs */
- for (ALL_LIST_ELEMENTS_RO(OspfEXT.iflist, node, exti))
+ for (ALL_LIST_ELEMENTS_RO(OspfEXT.iflist, node, exti)) {
if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED))
ospf_ext_lsa_schedule(exti, FLUSH_THIS_LSA);
+ exti->flags = EXT_LPFLG_LSA_INACTIVE;
+ }
+
/* And then disable Extended Link/Prefix */
OspfEXT.enabled = false;
}
@@ -580,39 +598,10 @@ static int ospf_ext_link_del_if(struct interface *ifp)
}
/*
- * Determine if an Interface belongs to an Extended Link Adjacency or LAN Adj.
- * type and allocate new instance value accordingly
- */
-static void ospf_ext_link_ism_change(struct ospf_interface *oi, int old_status)
-{
- struct ext_itf *exti;
-
- /* Get interface information for Segment Routing */
- exti = lookup_ext_by_ifp(oi->ifp);
- if (exti == NULL)
- return;
-
- /* Determine if interface is related to Adjacency or LAN Adj. SID */
- if (oi->type != OSPF_IFTYPE_LOOPBACK) {
- if (oi->state == ISM_DR)
- exti->stype = LAN_ADJ_SID;
- else
- exti->stype = ADJ_SID;
-
- exti->instance = get_ext_link_instance_value();
- exti->type = OPAQUE_TYPE_EXTENDED_LINK_LSA;
-
- zlog_debug("EXT (%s): Set %s SID to interface %s ", __func__,
- exti->stype == ADJ_SID ? "Adj." : "LAN Adj.",
- oi->ifp->name);
- }
-}
-
-/*
- * Determine if an Interface belongs to an Extended Prefix and
- * allocate new instance value accordingly
+ * Determine if an Interface belongs to an Extended Link Adjacency or
+ * Extended Prefix SID type and allocate new instance value accordingly
*/
-static void ospf_ext_pref_ism_change(struct ospf_interface *oi, int old_status)
+static void ospf_ext_ism_change(struct ospf_interface *oi, int old_status)
{
struct ext_itf *exti;
@@ -625,18 +614,45 @@ static void ospf_ext_pref_ism_change(struct ospf_interface *oi, int old_status)
return;
}
- /* Determine if interface is related to a Node SID */
+ /* Reset Extended information if ospf interface goes Down */
+ if (oi->state == ISM_Down) {
+ exti->area = NULL;
+ exti->flags = EXT_LPFLG_LSA_INACTIVE;
+ return;
+ }
+
+ /* Determine if interface is related to a Prefix or an Adjacency SID */
if (oi->type == OSPF_IFTYPE_LOOPBACK) {
exti->stype = PREF_SID;
- exti->instance = get_ext_pref_instance_value();
exti->type = OPAQUE_TYPE_EXTENDED_PREFIX_LSA;
+ exti->flags = EXT_LPFLG_LSA_ACTIVE;
+ exti->instance = get_ext_pref_instance_value();
+ exti->area = oi->area;
- zlog_debug("EXT (%s): Set Node SID to interface %s ", __func__,
- oi->ifp->name);
+ zlog_debug("EXT (%s): Set Prefix SID to interface %s ",
+ __func__, oi->ifp->name);
/* Complete SRDB if the interface belongs to a Prefix */
if (OspfEXT.enabled)
ospf_sr_update_prefix(oi->ifp, oi->address);
+ } else {
+ /* Determine if interface is related to Adj. or LAN Adj. SID */
+ if (oi->state == ISM_DR)
+ exti->stype = LAN_ADJ_SID;
+ else
+ exti->stype = ADJ_SID;
+
+ exti->type = OPAQUE_TYPE_EXTENDED_LINK_LSA;
+ exti->instance = get_ext_link_instance_value();
+ exti->area = oi->area;
+
+ /*
+ * Note: Adjacency SID information are completed when ospf
+ * adjacency become up see ospf_ext_link_nsm_change()
+ */
+ zlog_debug("EXT (%s): Set %sAdjacency SID for interface %s ",
+ __func__, exti->stype == ADJ_SID ? "" : "LAN-",
+ oi->ifp->name);
}
}
@@ -663,17 +679,6 @@ static void ospf_ext_link_nsm_change(struct ospf_neighbor *nbr, int old_status)
return;
}
- if (oi->area == NULL || oi->area->ospf == NULL) {
- flog_warn(EC_OSPF_EXT_LSA_UNEXPECTED,
- "EXT (%s): Cannot refer to OSPF from OI(%s)",
- __func__, IF_NAME(oi));
- return;
- }
-
- /* Keep Area information in combination with SR info. */
- exti->area = oi->area;
- OspfEXT.area = oi->area;
-
/* Process only Adjacency/LAN SID */
if (exti->stype == PREF_SID)
return;
@@ -731,19 +736,17 @@ static void ospf_ext_link_nsm_change(struct ospf_neighbor *nbr, int old_status)
break;
default:
- if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED)) {
+ if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED))
ospf_ext_link_lsa_schedule(exti, FLUSH_THIS_LSA);
- UNSET_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED);
- UNSET_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE);
- }
+ exti->flags = EXT_LPFLG_LSA_INACTIVE;
return;
}
if (IS_DEBUG_OSPF_SR)
- zlog_debug("EXT (%s): Complete %s SID to interface %s ",
- __func__,
- exti->stype == ADJ_SID ? "Adj." : "LAN Adj.",
- oi->ifp->name);
+ zlog_debug(
+ "EXT (%s): Complete %sAdjacency SID for interface %s ",
+ __func__, exti->stype == ADJ_SID ? "" : "LAN-",
+ oi->ifp->name);
/* flood this links params if everything is ok */
SET_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE);
@@ -1244,6 +1247,10 @@ static int ospf_ext_link_lsa_originate(void *arg)
|| (!IPV4_ADDR_SAME(&exti->area->area_id, &area->area_id)))
continue;
+ /* Skip Extended Link which are not Active */
+ if (!CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ACTIVE))
+ continue;
+
/* Check if LSA not already engaged */
if (CHECK_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED)) {
if (CHECK_FLAG(exti->flags,
@@ -1467,19 +1474,23 @@ static void ospf_ext_pref_lsa_schedule(struct ext_itf *exti,
opcode == FLUSH_THIS_LSA ? "Flush" : "",
exti->ifp ? exti->ifp->name : "-");
- /* Set LSA header information */
+ /* Verify Area */
if (exti->area == NULL) {
- flog_warn(
- EC_OSPF_EXT_LSA_UNEXPECTED,
- "EXT (%s): Flooding is Area scope but area is not yet set",
- __func__);
- if (OspfEXT.area == NULL) {
- top = ospf_lookup_by_vrf_id(VRF_DEFAULT);
- OspfEXT.area = ospf_area_lookup_by_area_id(
- top, OspfEXT.area_id);
+ if (IS_DEBUG_OSPF(lsa, LSA_GENERATE))
+ zlog_debug(
+ "EXT (%s): Area is not yet set. Try to use Backbone Area",
+ __func__);
+
+ top = ospf_lookup_by_vrf_id(VRF_DEFAULT);
+ struct in_addr backbone = {.s_addr = INADDR_ANY};
+ exti->area = ospf_area_lookup_by_area_id(top, backbone);
+ if (exti->area == NULL) {
+ flog_warn(EC_OSPF_EXT_LSA_UNEXPECTED,
+ "EXT (%s): Unable to set Area", __func__);
+ return;
}
- exti->area = OspfEXT.area;
}
+ /* Set LSA header information */
lsa.area = exti->area;
lsa.data = &lsah;
lsah.type = OSPF_OPAQUE_AREA_LSA;
@@ -1528,19 +1539,23 @@ static void ospf_ext_link_lsa_schedule(struct ext_itf *exti,
opcode == FLUSH_THIS_LSA ? "Flush" : "",
exti->ifp ? exti->ifp->name : "-");
- /* Set LSA header information */
+ /* Verify Area */
if (exti->area == NULL) {
- flog_warn(
- EC_OSPF_EXT_LSA_UNEXPECTED,
- "EXT (%s): Flooding is Area scope but area is not yet set",
- __func__);
- if (OspfEXT.area == NULL) {
- top = ospf_lookup_by_vrf_id(VRF_DEFAULT);
- OspfEXT.area = ospf_area_lookup_by_area_id(
- top, OspfEXT.area_id);
+ if (IS_DEBUG_OSPF(lsa, LSA_GENERATE))
+ zlog_debug(
+ "EXT (%s): Area is not yet set. Try to use Backbone Area",
+ __func__);
+
+ top = ospf_lookup_by_vrf_id(VRF_DEFAULT);
+ struct in_addr backbone = {.s_addr = INADDR_ANY};
+ exti->area = ospf_area_lookup_by_area_id(top, backbone);
+ if (exti->area == NULL) {
+ flog_warn(EC_OSPF_EXT_LSA_UNEXPECTED,
+ "EXT (%s): Unable to set Area", __func__);
+ return;
}
- exti->area = OspfEXT.area;
}
+ /* Set LSA header information */
lsa.area = exti->area;
lsa.data = &lsah;
lsah.type = OSPF_OPAQUE_AREA_LSA;
@@ -1557,7 +1572,6 @@ static void ospf_ext_link_lsa_schedule(struct ext_itf *exti,
ospf_opaque_lsa_refresh_schedule(&lsa);
break;
case FLUSH_THIS_LSA:
- UNSET_FLAG(exti->flags, EXT_LPFLG_LSA_ENGAGED);
ospf_opaque_lsa_flush_schedule(&lsa);
break;
}
diff --git a/ospfd/ospf_ext.h b/ospfd/ospf_ext.h
index c3f9ae94dc..0071584e26 100644
--- a/ospfd/ospf_ext.h
+++ b/ospfd/ospf_ext.h
@@ -151,10 +151,6 @@ struct ospf_ext_lp {
*/
uint8_t scope;
- /* area pointer if flooding is Type 10 Null if flooding is AS scope */
- struct ospf_area *area;
- struct in_addr area_id;
-
/* List of interface with Segment Routing enable */
struct list *iflist;
};
diff --git a/ospfd/ospf_ri.c b/ospfd/ospf_ri.c
index c3d53ad5ed..ececed0643 100644
--- a/ospfd/ospf_ri.c
+++ b/ospfd/ospf_ri.c
@@ -178,6 +178,14 @@ void ospf_router_info_term(void)
void ospf_router_info_finish(void)
{
+ struct listnode *node, *nnode;
+ struct ospf_ri_area_info *ai;
+
+ /* Flush Router Info LSA */
+ for (ALL_LIST_ELEMENTS(OspfRI.area_info, node, nnode, ai))
+ if (CHECK_FLAG(ai->flags, RIFLG_LSA_ENGAGED))
+ ospf_router_info_lsa_schedule(ai, FLUSH_THIS_LSA);
+
list_delete_all_node(OspfRI.pce_info.pce_domain);
list_delete_all_node(OspfRI.pce_info.pce_neighbor);
@@ -510,6 +518,8 @@ static void initialize_params(struct ospf_router_info *ori)
/* Try to get available Area's context from ospf at this step.
* Do it latter if not available */
if (OspfRI.scope == OSPF_OPAQUE_AREA_LSA) {
+ if (!list_isempty(OspfRI.area_info))
+ list_delete_all_node(OspfRI.area_info);
for (ALL_LIST_ELEMENTS(top->areas, node, nnode, area)) {
zlog_debug("RI (%s): Add area %s to Router Information",
__func__, inet_ntoa(area->area_id));
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index 2f3591ac8d..98be958fce 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -1030,8 +1030,22 @@ static void pbr_nht_show_nhg_nexthops(struct hash_bucket *b, void *data)
nexthop_group_write_nexthop(vty, pnhc->nexthop);
}
+static void pbr_nht_json_nhg_nexthops(struct hash_bucket *b, void *data)
+{
+ struct pbr_nexthop_cache *pnhc = b->data;
+ json_object *all_hops = data;
+ json_object *this_hop;
+
+ this_hop = json_object_new_object();
+ nexthop_group_json_nexthop(this_hop, pnhc->nexthop);
+ json_object_boolean_add(this_hop, "valid", pnhc->valid);
+
+ json_object_array_add(all_hops, this_hop);
+}
+
struct pbr_nht_show {
struct vty *vty;
+ json_object *json;
const char *name;
};
@@ -1051,6 +1065,36 @@ static void pbr_nht_show_nhg(struct hash_bucket *b, void *data)
hash_iterate(pnhgc->nhh, pbr_nht_show_nhg_nexthops, vty);
}
+static void pbr_nht_json_nhg(struct hash_bucket *b, void *data)
+{
+ struct pbr_nexthop_group_cache *pnhgc = b->data;
+ struct pbr_nht_show *pns = data;
+ json_object *j, *this_group, *group_hops;
+
+ if (pns->name && strcmp(pns->name, pnhgc->name) != 0)
+ return;
+
+ j = pns->json;
+ this_group = json_object_new_object();
+
+ if (!j || !this_group)
+ return;
+
+ json_object_int_add(this_group, "id", pnhgc->table_id);
+ json_object_string_add(this_group, "name", pnhgc->name);
+ json_object_boolean_add(this_group, "valid", pnhgc->valid);
+ json_object_boolean_add(this_group, "installed", pnhgc->installed);
+
+ group_hops = json_object_new_array();
+
+ if (group_hops) {
+ hash_iterate(pnhgc->nhh, pbr_nht_json_nhg_nexthops, group_hops);
+ json_object_object_add(this_group, "nexthops", group_hops);
+ }
+
+ json_object_array_add(j, this_group);
+}
+
void pbr_nht_show_nexthop_group(struct vty *vty, const char *name)
{
struct pbr_nht_show pns;
@@ -1061,6 +1105,16 @@ void pbr_nht_show_nexthop_group(struct vty *vty, const char *name)
hash_iterate(pbr_nhg_hash, pbr_nht_show_nhg, &pns);
}
+void pbr_nht_json_nexthop_group(json_object *j, const char *name)
+{
+ struct pbr_nht_show pns;
+
+ pns.name = name;
+ pns.json = j;
+
+ hash_iterate(pbr_nhg_hash, pbr_nht_json_nhg, &pns);
+}
+
void pbr_nht_init(void)
{
pbr_nhg_hash = hash_create_size(
diff --git a/pbrd/pbr_nht.h b/pbrd/pbr_nht.h
index 2533942547..cbcf71d2f5 100644
--- a/pbrd/pbr_nht.h
+++ b/pbrd/pbr_nht.h
@@ -24,6 +24,7 @@
#include <lib/nexthop_group.h>
#include "pbr_map.h"
+#include "json.h"
#define PBR_NHC_NAMELEN PBR_MAP_NAMELEN + 10
@@ -112,6 +113,7 @@ extern char *pbr_nht_nexthop_make_name(char *name, size_t l, uint32_t seqno,
char *buffer);
extern void pbr_nht_show_nexthop_group(struct vty *vty, const char *name);
+extern void pbr_nht_json_nexthop_group(json_object *j, const char *name);
/*
* When we get a callback from zebra about a nexthop changing
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index a52c2d1e30..54029206cc 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -27,6 +27,7 @@
#include "nexthop_group.h"
#include "nexthop_group_private.h"
#include "log.h"
+#include "json.h"
#include "debug.h"
#include "pbr.h"
@@ -590,6 +591,61 @@ static void vty_show_pbrms(struct vty *vty,
}
}
+static void vty_json_pbrms(json_object *j, struct vty *vty,
+ const struct pbr_map_sequence *pbrms)
+{
+ json_object *jpbrm, *nexthop_group;
+ char *nhg_name = pbrms->nhgrp_name ? pbrms->nhgrp_name
+ : pbrms->internal_nhg_name;
+ char buf[PREFIX_STRLEN];
+ char rbuf[64];
+
+ jpbrm = json_object_new_object();
+
+ json_object_int_add(jpbrm, "id", pbrms->unique);
+
+ if (pbrms->reason)
+ pbr_map_reason_string(pbrms->reason, rbuf, sizeof(rbuf));
+
+ json_object_int_add(jpbrm, "sequenceNumber", pbrms->seqno);
+ json_object_int_add(jpbrm, "ruleNumber", pbrms->ruleno);
+ json_object_boolean_add(jpbrm, "vrfUnchanged", pbrms->vrf_unchanged);
+ json_object_boolean_add(jpbrm, "installed",
+ pbr_nht_get_installed(nhg_name));
+ json_object_string_add(jpbrm, "installedReason",
+ pbrms->reason ? rbuf : "Valid");
+
+ if (nhg_name) {
+ nexthop_group = json_object_new_object();
+
+ json_object_int_add(nexthop_group, "tableId",
+ pbr_nht_get_table(nhg_name));
+ json_object_string_add(nexthop_group, "name", nhg_name);
+ json_object_boolean_add(nexthop_group, "installed",
+ pbr_nht_get_installed(nhg_name));
+ json_object_int_add(nexthop_group, "installedInternally",
+ pbrms->nhs_installed);
+
+ json_object_object_add(jpbrm, "nexthopGroup", nexthop_group);
+ }
+
+ if (pbrms->vrf_lookup)
+ json_object_string_add(jpbrm, "vrfName", pbrms->vrf_name);
+
+ if (pbrms->src)
+ json_object_string_add(
+ jpbrm, "matchSrc",
+ prefix2str(pbrms->src, buf, sizeof(buf)));
+ if (pbrms->dst)
+ json_object_string_add(
+ jpbrm, "matchDst",
+ prefix2str(pbrms->dst, buf, sizeof(buf)));
+ if (pbrms->mark)
+ json_object_int_add(jpbrm, "matchMark", pbrms->mark);
+
+ json_object_array_add(j, jpbrm);
+}
+
static void vty_show_pbr_map(struct vty *vty, const struct pbr_map *pbrm,
bool detail)
{
@@ -603,54 +659,121 @@ static void vty_show_pbr_map(struct vty *vty, const struct pbr_map *pbrm,
vty_show_pbrms(vty, pbrms, detail);
}
+static void vty_json_pbr_map(json_object *j, struct vty *vty,
+ const struct pbr_map *pbrm)
+{
+ struct pbr_map_sequence *pbrms;
+ struct listnode *node;
+ json_object *jpbrms;
+
+ json_object_string_add(j, "name", pbrm->name);
+ json_object_boolean_add(j, "valid", pbrm->valid);
+
+ jpbrms = json_object_new_array();
+
+ for (ALL_LIST_ELEMENTS_RO(pbrm->seqnumbers, node, pbrms))
+ vty_json_pbrms(jpbrms, vty, pbrms);
+
+ json_object_object_add(j, "policies", jpbrms);
+}
+
DEFPY (show_pbr_map,
show_pbr_map_cmd,
- "show pbr map [NAME$name] [detail$detail]",
+ "show pbr map [NAME$name] [detail$detail|json$json]",
SHOW_STR
PBR_STR
"PBR Map\n"
"PBR Map Name\n"
- "Detailed information\n")
+ "Detailed information\n"
+ JSON_STR)
{
struct pbr_map *pbrm;
+ json_object *j = NULL;
+
+ if (json)
+ j = json_object_new_array();
RB_FOREACH (pbrm, pbr_map_entry_head, &pbr_maps) {
+ json_object *this_map = NULL;
if (name && strcmp(name, pbrm->name) != 0)
continue;
+ if (j)
+ this_map = json_object_new_object();
+
+ if (this_map) {
+ vty_json_pbr_map(this_map, vty, pbrm);
+
+ json_object_array_add(j, this_map);
+ continue;
+ }
+
vty_show_pbr_map(vty, pbrm, detail);
}
+
+ if (j) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ j, JSON_C_TO_STRING_PRETTY));
+ json_object_free(j);
+ }
+
return CMD_SUCCESS;
}
DEFPY(show_pbr_nexthop_group,
show_pbr_nexthop_group_cmd,
- "show pbr nexthop-groups [WORD$word]",
+ "show pbr nexthop-groups [WORD$word] [json$json]",
SHOW_STR
PBR_STR
"Nexthop Groups\n"
- "Optional Name of the nexthop group\n")
+ "Optional Name of the nexthop group\n"
+ JSON_STR)
{
- pbr_nht_show_nexthop_group(vty, word);
+ json_object *j = NULL;
+
+ if (json)
+ j = json_object_new_array();
+
+ if (j) {
+ pbr_nht_json_nexthop_group(j, word);
+
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ j, JSON_C_TO_STRING_PRETTY));
+
+ json_object_free(j);
+ } else
+ pbr_nht_show_nexthop_group(vty, word);
+
return CMD_SUCCESS;
}
DEFPY (show_pbr_interface,
show_pbr_interface_cmd,
- "show pbr interface [NAME$name]",
+ "show pbr interface [NAME$name] [json$json]",
SHOW_STR
PBR_STR
"PBR Interface\n"
- "PBR Interface Name\n")
+ "PBR Interface Name\n"
+ JSON_STR)
{
struct interface *ifp;
struct vrf *vrf;
struct pbr_interface *pbr_ifp;
+ json_object *j = NULL;
+
+ if (json)
+ j = json_object_new_array();
RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) {
FOR_ALL_INTERFACES(vrf, ifp) {
struct pbr_map *pbrm;
+ json_object *this_iface = NULL;
+
+ if (j)
+ this_iface = json_object_new_object();
if (!ifp->info)
continue;
@@ -664,6 +787,21 @@ DEFPY (show_pbr_interface,
continue;
pbrm = pbrm_find(pbr_ifp->mapname);
+
+ if (this_iface) {
+ json_object_string_add(this_iface, "name",
+ ifp->name);
+ json_object_int_add(this_iface, "index",
+ ifp->ifindex);
+ json_object_string_add(this_iface, "policy",
+ pbr_ifp->mapname);
+ json_object_boolean_add(this_iface, "valid",
+ pbrm);
+
+ json_object_array_add(j, this_iface);
+ continue;
+ }
+
vty_out(vty, " %s(%d) with pbr-policy %s", ifp->name,
ifp->ifindex, pbr_ifp->mapname);
if (!pbrm)
@@ -672,6 +810,13 @@ DEFPY (show_pbr_interface,
}
}
+ if (j) {
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ j, JSON_C_TO_STRING_PRETTY));
+ json_object_free(j);
+ }
+
return CMD_SUCCESS;
}
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index b25b6eaa8c..b79fb689dc 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -138,6 +138,7 @@ struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
/* BSM config on interface: true by default */
pim_ifp->bsm_enable = true;
pim_ifp->ucast_bsm_accept = true;
+ pim_ifp->am_i_dr = false;
/*
RFC 3376: 8.3. Query Response Interval
diff --git a/pimd/pim_iface.h b/pimd/pim_iface.h
index 570bf5eac3..13aaf8d3b2 100644
--- a/pimd/pim_iface.h
+++ b/pimd/pim_iface.h
@@ -133,6 +133,7 @@ struct pim_interface {
/* Turn on Active-Active for this interface */
bool activeactive;
+ bool am_i_dr;
int64_t pim_ifstat_start; /* start timestamp for stats */
uint64_t pim_ifstat_bsm_rx;
diff --git a/pimd/pim_neighbor.c b/pimd/pim_neighbor.c
index d8a797f980..4d6625bf6f 100644
--- a/pimd/pim_neighbor.c
+++ b/pimd/pim_neighbor.c
@@ -40,6 +40,7 @@
#include "pim_join.h"
#include "pim_jp_agg.h"
#include "pim_bfd.h"
+#include "pim_register.h"
static void dr_election_by_addr(struct interface *ifp)
{
@@ -141,6 +142,16 @@ int pim_if_dr_election(struct interface *ifp)
pim_if_update_join_desired(pim_ifp);
pim_if_update_could_assert(ifp);
pim_if_update_assert_tracking_desired(ifp);
+
+ if (PIM_I_am_DR(pim_ifp))
+ pim_ifp->am_i_dr = true;
+ else {
+ if (pim_ifp->am_i_dr == true) {
+ pim_reg_del_on_couldreg_fail(ifp);
+ pim_ifp->am_i_dr = false;
+ }
+ }
+
return 1;
}
diff --git a/pimd/pim_pim.c b/pimd/pim_pim.c
index 3976b262e3..bf31d4e450 100644
--- a/pimd/pim_pim.c
+++ b/pimd/pim_pim.c
@@ -501,6 +501,7 @@ void pim_sock_reset(struct interface *ifp)
pim_ifp->pim_dr_num_nondrpri_neighbors =
0; /* neighbors without dr_pri */
pim_ifp->pim_dr_addr = pim_ifp->primary_address;
+ pim_ifp->am_i_dr = true;
pim_ifstat_reset(ifp);
}
diff --git a/pimd/pim_register.c b/pimd/pim_register.c
index cb6aae7fae..19e15f3ede 100644
--- a/pimd/pim_register.c
+++ b/pimd/pim_register.c
@@ -520,3 +520,32 @@ int pim_register_recv(struct interface *ifp, struct in_addr dest_addr,
return 0;
}
+
+/*
+ * This routine scan all upstream and update register state and remove pimreg
+ * when couldreg becomes false.
+ */
+void pim_reg_del_on_couldreg_fail(struct interface *ifp)
+{
+ struct pim_interface *pim_ifp = ifp->info;
+ struct pim_instance *pim;
+ struct pim_upstream *up;
+
+ if (!pim_ifp)
+ return;
+
+ pim = pim_ifp->pim;
+
+ frr_each (rb_pim_upstream, &pim->upstream_head, up) {
+ if (ifp != up->rpf.source_nexthop.interface)
+ continue;
+
+ if (!pim_upstream_could_register(up)
+ && (up->reg_state != PIM_REG_NOINFO)) {
+ pim_channel_del_oif(up->channel_oil, pim->regiface,
+ PIM_OIF_FLAG_PROTO_PIM, __func__);
+ THREAD_OFF(up->t_rs_timer);
+ up->reg_state = PIM_REG_NOINFO;
+ }
+ }
+}
diff --git a/pimd/pim_register.h b/pimd/pim_register.h
index c5a28fee41..caaacd9d54 100644
--- a/pimd/pim_register.h
+++ b/pimd/pim_register.h
@@ -43,5 +43,6 @@ void pim_register_stop_send(struct interface *ifp, struct prefix_sg *sg,
struct in_addr src, struct in_addr originator);
void pim_register_join(struct pim_upstream *up);
void pim_null_register_send(struct pim_upstream *up);
+void pim_reg_del_on_couldreg_fail(struct interface *ifp);
#endif
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index cf5ea2fa53..d3fb0d46de 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -414,7 +414,8 @@ void pim_upstream_join_suppress(struct pim_upstream *up,
struct in_addr rpf_addr, int holdtime)
{
long t_joinsuppress_msec;
- long join_timer_remain_msec;
+ long join_timer_remain_msec = 0;
+ struct pim_neighbor *nbr = NULL;
if (!up->rpf.source_nexthop.interface) {
if (PIM_DEBUG_PIM_TRACE)
@@ -427,7 +428,18 @@ void pim_upstream_join_suppress(struct pim_upstream *up,
MIN(pim_if_t_suppressed_msec(up->rpf.source_nexthop.interface),
1000 * holdtime);
- join_timer_remain_msec = pim_time_timer_remain_msec(up->t_join_timer);
+ if (up->t_join_timer)
+ join_timer_remain_msec =
+ pim_time_timer_remain_msec(up->t_join_timer);
+ else {
+ /* Remove it from jp agg from the nbr for suppression */
+ nbr = pim_neighbor_find(up->rpf.source_nexthop.interface,
+ up->rpf.rpf_addr.u.prefix4);
+ if (nbr) {
+ join_timer_remain_msec =
+ pim_time_timer_remain_msec(nbr->jp_timer);
+ }
+ }
if (PIM_DEBUG_PIM_TRACE) {
char rpf_str[INET_ADDRSTRLEN];
@@ -446,6 +458,9 @@ void pim_upstream_join_suppress(struct pim_upstream *up,
t_joinsuppress_msec);
}
+ if (nbr)
+ pim_jp_agg_remove_group(nbr->upstream_jp_agg, up, nbr);
+
pim_upstream_join_timer_restart_msec(up, t_joinsuppress_msec);
}
}
@@ -532,7 +547,7 @@ static void forward_off(struct pim_upstream *up)
} /* scan iface channel list */
}
-static int pim_upstream_could_register(struct pim_upstream *up)
+int pim_upstream_could_register(struct pim_upstream *up)
{
struct pim_interface *pim_ifp = NULL;
diff --git a/pimd/pim_upstream.h b/pimd/pim_upstream.h
index ca693ee73f..b3379c67b2 100644
--- a/pimd/pim_upstream.h
+++ b/pimd/pim_upstream.h
@@ -394,4 +394,5 @@ void pim_upstream_update_use_rpt(struct pim_upstream *up,
uint32_t pim_up_mlag_local_cost(struct pim_upstream *up);
uint32_t pim_up_mlag_peer_cost(struct pim_upstream *up);
void pim_upstream_reeval_use_rpt(struct pim_instance *pim);
+int pim_upstream_could_register(struct pim_upstream *up);
#endif /* PIM_UPSTREAM_H */
diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c
index e2ea773055..987588947e 100644
--- a/sharpd/sharp_vty.c
+++ b/sharpd/sharp_vty.c
@@ -410,6 +410,8 @@ DEFPY(sharp_lsp_prefix_v4, sharp_lsp_prefix_v4_cmd,
"Instance\n")
{
struct nexthop_group_cmd *nhgc = NULL;
+ struct nexthop_group_cmd *backup_nhgc = NULL;
+ struct nexthop_group *backup_nhg = NULL;
struct prefix p = {};
int type = 0;
@@ -441,9 +443,23 @@ DEFPY(sharp_lsp_prefix_v4, sharp_lsp_prefix_v4_cmd,
return CMD_WARNING;
}
+ /* Use group's backup nexthop info if present */
+ if (nhgc->backup_list_name[0]) {
+ backup_nhgc = nhgc_find(nhgc->backup_list_name);
+
+ if (!backup_nhgc) {
+ vty_out(vty,
+ "%% Backup group %s not found for group %s\n",
+ nhgc->backup_list_name,
+ nhgname);
+ return CMD_WARNING;
+ }
+ backup_nhg = &(backup_nhgc->nhg);
+ }
+
if (sharp_install_lsps_helper(true, pfx->family > 0 ? &p : NULL,
type, instance, inlabel,
- &(nhgc->nhg)) == 0)
+ &(nhgc->nhg), backup_nhg) == 0)
return CMD_SUCCESS;
else {
vty_out(vty, "%% LSP install failed!\n");
@@ -454,7 +470,7 @@ DEFPY(sharp_lsp_prefix_v4, sharp_lsp_prefix_v4_cmd,
DEFPY(sharp_remove_lsp_prefix_v4, sharp_remove_lsp_prefix_v4_cmd,
"sharp remove lsp \
(0-100000)$inlabel\
- nexthop-group NHGNAME$nhgname\
+ [nexthop-group NHGNAME$nhgname] \
[prefix A.B.C.D/M$pfx\
" FRR_IP_REDIST_STR_SHARPD "$type_str [instance (0-255)$instance]]",
"Sharp Routing Protocol\n"
@@ -472,6 +488,7 @@ DEFPY(sharp_remove_lsp_prefix_v4, sharp_remove_lsp_prefix_v4_cmd,
struct nexthop_group_cmd *nhgc = NULL;
struct prefix p = {};
int type = 0;
+ struct nexthop_group *nhg = NULL;
/* We're offered a v4 prefix */
if (pfx->family > 0 && type_str) {
@@ -489,21 +506,24 @@ DEFPY(sharp_remove_lsp_prefix_v4, sharp_remove_lsp_prefix_v4_cmd,
return CMD_WARNING;
}
- nhgc = nhgc_find(nhgname);
- if (!nhgc) {
- vty_out(vty, "%% Nexthop-group '%s' does not exist\n",
- nhgname);
- return CMD_WARNING;
- }
+ if (nhgname) {
+ nhgc = nhgc_find(nhgname);
+ if (!nhgc) {
+ vty_out(vty, "%% Nexthop-group '%s' does not exist\n",
+ nhgname);
+ return CMD_WARNING;
+ }
- if (nhgc->nhg.nexthop == NULL) {
- vty_out(vty, "%% Nexthop-group '%s' is empty\n", nhgname);
- return CMD_WARNING;
+ if (nhgc->nhg.nexthop == NULL) {
+ vty_out(vty, "%% Nexthop-group '%s' is empty\n",
+ nhgname);
+ return CMD_WARNING;
+ }
+ nhg = &(nhgc->nhg);
}
if (sharp_install_lsps_helper(false, pfx->family > 0 ? &p : NULL,
- type, instance, inlabel,
- &(nhgc->nhg)) == 0)
+ type, instance, inlabel, nhg, NULL) == 0)
return CMD_SUCCESS;
else {
vty_out(vty, "%% LSP remove failed!\n");
diff --git a/sharpd/sharp_zebra.c b/sharpd/sharp_zebra.c
index e1bd6f5722..0795096440 100644
--- a/sharpd/sharp_zebra.c
+++ b/sharpd/sharp_zebra.c
@@ -89,7 +89,8 @@ static int sharp_ifp_down(struct interface *ifp)
int sharp_install_lsps_helper(bool install_p, const struct prefix *p,
uint8_t type, int instance, uint32_t in_label,
- const struct nexthop_group *nhg)
+ const struct nexthop_group *nhg,
+ const struct nexthop_group *backup_nhg)
{
struct zapi_labels zl = {};
struct zapi_nexthop *znh;
@@ -106,32 +107,68 @@ int sharp_install_lsps_helper(bool install_p, const struct prefix *p,
zl.route.instance = instance;
}
+ /* List of nexthops is optional for delete */
i = 0;
- for (ALL_NEXTHOPS_PTR(nhg, nh)) {
- znh = &zl.nexthops[i];
+ if (nhg) {
+ for (ALL_NEXTHOPS_PTR(nhg, nh)) {
+ znh = &zl.nexthops[i];
- /* Must have labels to be useful */
- if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
- continue;
+ /* Must have labels to be useful */
+ if (nh->nh_label == NULL ||
+ nh->nh_label->num_labels == 0)
+ continue;
- if (nh->type == NEXTHOP_TYPE_IFINDEX ||
- nh->type == NEXTHOP_TYPE_BLACKHOLE)
- /* Hmm - can't really deal with these types */
- continue;
+ if (nh->type == NEXTHOP_TYPE_IFINDEX ||
+ nh->type == NEXTHOP_TYPE_BLACKHOLE)
+ /* Hmm - can't really deal with these types */
+ continue;
- ret = zapi_nexthop_from_nexthop(znh, nh);
- if (ret < 0)
- return -1;
+ ret = zapi_nexthop_from_nexthop(znh, nh);
+ if (ret < 0)
+ return -1;
- i++;
+ i++;
+ }
}
- /* Whoops - no nexthops isn't very useful */
- if (i == 0)
+ /* Whoops - no nexthops isn't very useful for install */
+ if (i == 0 && install_p)
return -1;
zl.nexthop_num = i;
+ /* Add optional backup nexthop info. Since these are used by index,
+ * we can't just skip over an invalid backup nexthop: we will
+ * invalidate the entire operation.
+ */
+ if (backup_nhg != NULL) {
+ i = 0;
+ for (ALL_NEXTHOPS_PTR(backup_nhg, nh)) {
+ znh = &zl.backup_nexthops[i];
+
+ /* Must have labels to be useful */
+ if (nh->nh_label == NULL ||
+ nh->nh_label->num_labels == 0)
+ return -1;
+
+ if (nh->type == NEXTHOP_TYPE_IFINDEX ||
+ nh->type == NEXTHOP_TYPE_BLACKHOLE)
+ /* Hmm - can't really deal with these types */
+ return -1;
+
+ ret = zapi_nexthop_from_nexthop(znh, nh);
+ if (ret < 0)
+ return -1;
+
+ i++;
+ }
+
+ if (i > 0)
+ SET_FLAG(zl.message, ZAPI_LABELS_HAS_BACKUPS);
+
+ zl.backup_nexthop_num = i;
+ }
+
if (install_p)
ret = zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_ADD,
&zl);
diff --git a/sharpd/sharp_zebra.h b/sharpd/sharp_zebra.h
index 926bff676b..2b8e19dd97 100644
--- a/sharpd/sharp_zebra.h
+++ b/sharpd/sharp_zebra.h
@@ -42,5 +42,7 @@ extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id,
int sharp_install_lsps_helper(bool install_p, const struct prefix *p,
uint8_t type, int instance, uint32_t in_label,
- const struct nexthop_group *nhg);
+ const struct nexthop_group *nhg,
+ const struct nexthop_group *backup_nhg);
+
#endif
diff --git a/tests/topotests/all-protocol-startup/r1/pbrd.conf b/tests/topotests/all-protocol-startup/r1/pbrd.conf
new file mode 100644
index 0000000000..360fb13a1b
--- /dev/null
+++ b/tests/topotests/all-protocol-startup/r1/pbrd.conf
@@ -0,0 +1,10 @@
+log file pbrd.log
+
+nexthop-group A
+ nexthop 192.168.161.4
+!
+pbr-map FOO seq 10
+ match dst-ip 4.5.6.7/32
+ match src-ip 6.7.8.8/32
+ set nexthop-group A
+! \ No newline at end of file
diff --git a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
index fb211957a7..14e00b9664 100755
--- a/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
+++ b/tests/topotests/all-protocol-startup/test_all_protocol_startup.py
@@ -123,6 +123,7 @@ def setup_module(module):
net['r%s' % i].loadConf('sharpd')
net['r%s' % i].loadConf('nhrpd', '%s/r%s/nhrpd.conf' % (thisDir, i))
net['r%s' % i].loadConf('babeld', '%s/r%s/babeld.conf' % (thisDir, i))
+ net['r%s' % i].loadConf('pbrd', '%s/r%s/pbrd.conf' % (thisDir, i))
net['r%s' % i].startRouter()
# For debugging after starting Quagga/FRR daemons, uncomment the next line
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index 673d65376f..414dc17874 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -556,6 +556,7 @@ class TopoRouter(TopoGear):
RD_BFD = 13
RD_SHARP = 14
RD_BABEL = 15
+ RD_PBRD = 16
RD = {
RD_ZEBRA: "zebra",
RD_RIP: "ripd",
@@ -572,6 +573,7 @@ class TopoRouter(TopoGear):
RD_BFD: "bfdd",
RD_SHARP: "sharpd",
RD_BABEL: "babeld",
+ RD_PBRD: "pbrd",
}
def __init__(self, tgen, cls, name, **params):
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index b35606df8f..6262082193 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -871,6 +871,7 @@ class Router(Node):
"bfdd": 0,
"sharpd": 0,
"babeld": 0,
+ "pbrd": 0,
}
self.daemons_options = {"zebra": ""}
self.reportCores = True
diff --git a/tests/topotests/ospf-sr-topo1/r2/ospfd.conf b/tests/topotests/ospf-sr-topo1/r2/ospfd.conf
index 8555ea29f1..4d6146aaa7 100644
--- a/tests/topotests/ospf-sr-topo1/r2/ospfd.conf
+++ b/tests/topotests/ospf-sr-topo1/r2/ospfd.conf
@@ -1,4 +1,6 @@
!
+debug ospf sr
+!
interface lo
ip ospf area 0.0.0.0
!
diff --git a/tests/topotests/pbr-topo1/r1/pbr-interface.json b/tests/topotests/pbr-topo1/r1/pbr-interface.json
new file mode 100644
index 0000000000..452b24dcd7
--- /dev/null
+++ b/tests/topotests/pbr-topo1/r1/pbr-interface.json
@@ -0,0 +1,12 @@
+[
+ {
+ "name":"r1-eth1",
+ "policy":"EVA",
+ "valid":true
+ },
+ {
+ "name":"r1-eth2",
+ "policy":"DONNA",
+ "valid":true
+ }
+]
diff --git a/tests/topotests/pbr-topo1/r1/pbr-map.json b/tests/topotests/pbr-topo1/r1/pbr-map.json
new file mode 100644
index 0000000000..6b9eaa9ceb
--- /dev/null
+++ b/tests/topotests/pbr-topo1/r1/pbr-map.json
@@ -0,0 +1,60 @@
+[
+ {
+ "name":"DONNA",
+ "valid":true,
+ "policies":[
+ {
+ "id":3,
+ "sequenceNumber":5,
+ "ruleNumber":304,
+ "vrfUnchanged":false,
+ "installed":true,
+ "installedReason":"Valid",
+ "nexthopGroup":{
+ "tableId":10002,
+ "name":"C",
+ "installed":true,
+ "installedInternally":1
+ },
+ "matchSrc":"1.2.0.0\/16",
+ "matchDst":"3.4.5.0\/24"
+ }
+ ]
+ },
+ {
+ "name":"EVA",
+ "valid":true,
+ "policies":[
+ {
+ "id":1,
+ "sequenceNumber":5,
+ "ruleNumber":304,
+ "vrfUnchanged":false,
+ "installed":true,
+ "installedReason":"Valid",
+ "nexthopGroup":{
+ "tableId":10003,
+ "name":"EVA5",
+ "installed":true,
+ "installedInternally":1
+ },
+ "matchSrc":"4.5.6.7\/32"
+ },
+ {
+ "id":2,
+ "sequenceNumber":10,
+ "ruleNumber":309,
+ "vrfUnchanged":false,
+ "installed":true,
+ "installedReason":"Valid",
+ "nexthopGroup":{
+ "tableId":10000,
+ "name":"A",
+ "installed":true,
+ "installedInternally":1
+ },
+ "matchDst":"9.9.9.9\/32"
+ }
+ ]
+ }
+]
diff --git a/tests/topotests/pbr-topo1/r1/pbr-nexthop-groups.json b/tests/topotests/pbr-topo1/r1/pbr-nexthop-groups.json
new file mode 100644
index 0000000000..ff85438ad5
--- /dev/null
+++ b/tests/topotests/pbr-topo1/r1/pbr-nexthop-groups.json
@@ -0,0 +1,58 @@
+[
+ {
+ "id":10000,
+ "name":"A",
+ "valid":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "nexthop":"192.168.2.2",
+ "valid":true
+ },
+ {
+ "nexthop":"192.168.3.2",
+ "valid":true
+ },
+ {
+ "nexthop":"192.168.1.2",
+ "valid":true
+ }
+ ]
+ },
+ {
+ "id":10002,
+ "name":"C",
+ "valid":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "nexthop":"192.168.1.44",
+ "valid":true
+ }
+ ]
+ },
+ {
+ "id":10001,
+ "name":"B",
+ "valid":false,
+ "installed":false,
+ "nexthops":[
+ {
+ "nexthop":"192.168.50.1",
+ "valid":false
+ }
+ ]
+ },
+ {
+ "id":10003,
+ "name":"EVA5",
+ "valid":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "nexthop":"192.168.1.5",
+ "valid":true
+ }
+ ]
+ }
+]
diff --git a/tests/topotests/pbr-topo1/r1/pbrd.conf b/tests/topotests/pbr-topo1/r1/pbrd.conf
new file mode 100644
index 0000000000..234683f307
--- /dev/null
+++ b/tests/topotests/pbr-topo1/r1/pbrd.conf
@@ -0,0 +1,33 @@
+nexthop-group A
+ nexthop 192.168.1.2
+ nexthop 192.168.2.2
+ nexthop 192.168.3.2
+ nexhtop 192.168.4.2
+!
+# This one is bogus and should
+# never work
+nexthop-group B
+ nexthop 192.168.50.1
+!
+nexthop-group C
+ nexthop 192.168.1.44
+!
+pbr-map EVA seq 5
+ match src-ip 4.5.6.7/32
+ set nexthop 192.168.1.5
+!
+pbr-map EVA seq 10
+ match dst-ip 9.9.9.9/32
+ set nexthop-group A
+!
+pbr-map DONNA seq 5
+ match dst-ip 3.4.5.0/24
+ match src-ip 1.2.0.0/16
+ set nexthop-group C
+!
+
+int r1-eth1
+ pbr-policy EVA
+!
+int r1-eth2
+ pbr-policy DONNA
diff --git a/tests/topotests/pbr-topo1/r1/zebra.conf b/tests/topotests/pbr-topo1/r1/zebra.conf
new file mode 100644
index 0000000000..f29b146a62
--- /dev/null
+++ b/tests/topotests/pbr-topo1/r1/zebra.conf
@@ -0,0 +1,11 @@
+int r1-eth0
+ ip address 192.168.1.1/24
+
+int r1-eth1
+ ip address 192.168.2.1/24
+
+int r1-eth2
+ ip address 192.168.3.1/24
+
+int r1-eth3
+ ip address 192.168.4.1/24
diff --git a/tests/topotests/pbr-topo1/test_pbr_topo1.py b/tests/topotests/pbr-topo1/test_pbr_topo1.py
new file mode 100755
index 0000000000..2853165d45
--- /dev/null
+++ b/tests/topotests/pbr-topo1/test_pbr_topo1.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+#
+# test_pbr_topo1.py
+#
+# Copyright (c) 2020 by
+# Cumulus Networks, Inc.
+# Donald Sharp
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_pbr_topo1.py: Testing PBR
+
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+#####################################################
+##
+## Network Topology Definition
+##
+#####################################################
+
+
+class NetworkTopo(Topo):
+ "PBR Topology 1"
+
+ def build(self, **_opts):
+ "Build function"
+
+ tgen = get_topogen(self)
+
+ for routern in range(1, 2):
+ tgen.add_router("r{}".format(routern))
+
+ # On main router
+ # First switch is for a dummy interface (for local network)
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r1"])
+
+ # Switches for PBR
+ # switch 2 switch is for connection to PBR router
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r1"])
+
+ # switch 4 is stub on remote PBR router
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["r1"])
+
+ # switch 3 is between PBR routers
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["r1"])
+
+
+#####################################################
+##
+## Tests starting
+##
+#####################################################
+
+
+def setup_module(module):
+ "Setup topology"
+ tgen = Topogen(NetworkTopo, module.__name__)
+ tgen.start_topology()
+
+    # Load the per-router zebra and pbrd configuration files.
+ router_list = tgen.routers()
+ for rname, router in router_list.iteritems():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_PBRD, os.path.join(CWD, "{}/pbrd.conf".format(rname))
+ )
+
+ tgen.start_router()
+    # tgen.mininet_cli()
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def test_converge_protocols():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ topotest.sleep(5, "Waiting for PBR convergence")
+
+
+def test_pbr_data():
+    "Test PBR 'show pbr' command output against expected JSON"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Verify PBR Status
+ logger.info("Verifying PBR routes")
+
+ router_list = tgen.routers().values()
+ for router in router_list:
+ intf_file = "{}/{}/pbr-interface.json".format(CWD, router.name)
+
+ logger.info(intf_file)
+ # Read expected result from file
+ expected = json.loads(open(intf_file).read())
+
+ # Actual output from router
+ actual = router.vtysh_cmd("show pbr interface json", isjson=True)
+
+ assertmsg = '"show pbr interface" mismatches on {}'.format(router.name)
+ assert topotest.json_cmp(actual, expected) is None, assertmsg
+
+ map_file = "{}/{}/pbr-map.json".format(CWD, router.name)
+ logger.info(map_file)
+ # Read expected result from file
+ expected = json.loads(open(map_file).read())
+
+ # Actual output from router
+ actual = router.vtysh_cmd("show pbr map json", isjson=True)
+
+ assertmsg = '"show pbr map" mismatches on {}'.format(router.name)
+ assert topotest.json_cmp(actual, expected) is None, assertmsg
+
+ nexthop_file = "{}/{}/pbr-nexthop-groups.json".format(CWD, router.name)
+
+ # Read expected result from file
+ expected = json.loads(open(nexthop_file).read())
+
+ # Actual output from router
+ actual = router.vtysh_cmd("show pbr nexthop-groups json", isjson=True)
+
+ assertmsg = '"show pbr nexthop-groups" mismatches on {}'.format(router.name)
+ assert topotest.json_cmp(actual, expected) is None, assertmsg
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
+
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 2e6cc7cd06..095c5570a2 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -3504,6 +3504,7 @@ enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx)
int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx)
{
mpls_lse_t lse;
+ const struct nhlfe_list_head *head;
const zebra_nhlfe_t *nhlfe;
struct nexthop *nexthop = NULL;
unsigned int nexthop_num;
@@ -3524,7 +3525,8 @@ int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx)
* or multipath case.
*/
nexthop_num = 0;
- for (nhlfe = dplane_ctx_get_nhlfe(ctx); nhlfe; nhlfe = nhlfe->next) {
+ head = dplane_ctx_get_nhlfe_list(ctx);
+ frr_each(nhlfe_list_const, head, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
@@ -3579,8 +3581,7 @@ int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx)
routedesc);
nexthop_num = 0;
- for (nhlfe = dplane_ctx_get_nhlfe(ctx);
- nhlfe; nhlfe = nhlfe->next) {
+ frr_each(nhlfe_list_const, head, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
@@ -3618,8 +3619,7 @@ int netlink_mpls_multipath(int cmd, struct zebra_dplane_ctx *ctx)
routedesc);
nexthop_num = 0;
- for (nhlfe = dplane_ctx_get_nhlfe(ctx);
- nhlfe; nhlfe = nhlfe->next) {
+ frr_each(nhlfe_list_const, head, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index ffe5ca4845..16714acc6e 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -1998,6 +1998,7 @@ static void zread_mpls_labels_add(ZAPI_HANDLER_ARGS)
{
struct stream *s;
struct zapi_labels zl;
+ int ret;
/* Get input stream. */
s = msg;
@@ -2011,20 +2012,11 @@ static void zread_mpls_labels_add(ZAPI_HANDLER_ARGS)
if (!mpls_enabled)
return;
- for (int i = 0; i < zl.nexthop_num; i++) {
- struct zapi_nexthop *znh;
-
- znh = &zl.nexthops[i];
-
- mpls_lsp_install(zvrf, zl.type, zl.local_label,
- znh->label_num, znh->labels,
- znh->type, &znh->gate, znh->ifindex);
-
- if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
- mpls_ftn_update(1, zvrf, zl.type, &zl.route.prefix,
- znh->type, &znh->gate, znh->ifindex,
- zl.route.type, zl.route.instance,
- znh->labels[0]);
+ ret = mpls_zapi_labels_process(true, zvrf, &zl);
+ if (ret < 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: Error processing zapi request",
+ __func__);
}
}
@@ -2042,6 +2034,7 @@ static void zread_mpls_labels_delete(ZAPI_HANDLER_ARGS)
{
struct stream *s;
struct zapi_labels zl;
+ int ret;
/* Get input stream. */
s = msg;
@@ -2056,21 +2049,11 @@ static void zread_mpls_labels_delete(ZAPI_HANDLER_ARGS)
return;
if (zl.nexthop_num > 0) {
- for (int i = 0; i < zl.nexthop_num; i++) {
- struct zapi_nexthop *znh;
-
- znh = &zl.nexthops[i];
- mpls_lsp_uninstall(zvrf, zl.type, zl.local_label,
- znh->type, &znh->gate,
- znh->ifindex);
-
- if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
- mpls_ftn_update(0, zvrf, zl.type,
- &zl.route.prefix, znh->type,
- &znh->gate, znh->ifindex,
- zl.route.type,
- zl.route.instance,
- znh->labels[0]);
+ ret = mpls_zapi_labels_process(false /*delete*/, zvrf, &zl);
+ if (ret < 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: Error processing zapi request",
+ __func__);
}
} else {
mpls_lsp_uninstall_all_vrf(zvrf, zl.type, zl.local_label);
@@ -2110,26 +2093,16 @@ static void zread_mpls_labels_replace(ZAPI_HANDLER_ARGS)
if (!mpls_enabled)
return;
+	/* This removes everything, then re-adds from the client's
+	 * zapi message. Since the LSP will be processed later, on
+	 * this same pthread, all of the changes will 'appear' at once.
+	 */
mpls_lsp_uninstall_all_vrf(zvrf, zl.type, zl.local_label);
if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
mpls_ftn_uninstall(zvrf, zl.type, &zl.route.prefix,
zl.route.type, zl.route.instance);
- for (int i = 0; i < zl.nexthop_num; i++) {
- struct zapi_nexthop *znh;
-
- znh = &zl.nexthops[i];
- mpls_lsp_install(zvrf, zl.type, zl.local_label,
- znh->label_num, znh->labels, znh->type,
- &znh->gate, znh->ifindex);
-
- if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN)) {
- mpls_ftn_update(1, zvrf, zl.type, &zl.route.prefix,
- znh->type, &znh->gate, znh->ifindex,
- zl.route.type, zl.route.instance,
- znh->labels[0]);
- }
- }
+ mpls_zapi_labels_process(true, zvrf, &zl);
}
/* Send response to a table manager connect request to client */
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 4bd95ba963..2c8ef37cbe 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -33,6 +33,7 @@
#include "zebra/zebra_router.h"
#include "zebra/zebra_dplane.h"
#include "zebra/zebra_vxlan_private.h"
+#include "zebra/zebra_mpls.h"
#include "zebra/rt.h"
#include "zebra/debug.h"
@@ -510,20 +511,28 @@ static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
case DPLANE_OP_LSP_DELETE:
case DPLANE_OP_LSP_NOTIFY:
{
- zebra_nhlfe_t *nhlfe, *next;
+ zebra_nhlfe_t *nhlfe;
- /* Free allocated NHLFEs */
- for (nhlfe = ctx->u.lsp.nhlfe_list; nhlfe; nhlfe = next) {
- next = nhlfe->next;
+ /* Unlink and free allocated NHLFEs */
+ frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
+ nhlfe_list_del(&ctx->u.lsp.nhlfe_list, nhlfe);
+ zebra_mpls_nhlfe_free(nhlfe);
+ }
- zebra_mpls_nhlfe_del(nhlfe);
+ /* Unlink and free allocated backup NHLFEs, if present */
+ frr_each_safe(nhlfe_list,
+ &(ctx->u.lsp.backup_nhlfe_list), nhlfe) {
+ nhlfe_list_del(&ctx->u.lsp.backup_nhlfe_list,
+ nhlfe);
+ zebra_mpls_nhlfe_free(nhlfe);
}
- /* Clear pointers in lsp struct, in case we're cacheing
+ /* Clear pointers in lsp struct, in case we're caching
* free context structs.
*/
- ctx->u.lsp.nhlfe_list = NULL;
+ nhlfe_list_init(&ctx->u.lsp.nhlfe_list);
ctx->u.lsp.best_nhlfe = NULL;
+ nhlfe_list_init(&ctx->u.lsp.backup_nhlfe_list);
break;
}
@@ -1228,11 +1237,18 @@ void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
ctx->u.lsp.flags = flags;
}
-const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx)
+const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
+ const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
+ return &(ctx->u.lsp.nhlfe_list);
+}
- return ctx->u.lsp.nhlfe_list;
+const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return &(ctx->u.lsp.backup_nhlfe_list);
}
zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
@@ -1241,7 +1257,7 @@ zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
union g_addr *gate,
ifindex_t ifindex,
uint8_t num_labels,
- mpls_label_t out_labels[])
+ mpls_label_t *out_labels)
{
zebra_nhlfe_t *nhlfe;
@@ -1254,6 +1270,26 @@ zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
return nhlfe;
}
+zebra_nhlfe_t *dplane_ctx_add_backup_nhlfe(struct zebra_dplane_ctx *ctx,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type,
+ union g_addr *gate,
+ ifindex_t ifindex,
+ uint8_t num_labels,
+ mpls_label_t *out_labels)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ DPLANE_CTX_VALID(ctx);
+
+ nhlfe = zebra_mpls_lsp_add_backup_nhlfe(&(ctx->u.lsp),
+ lsp_type, nh_type, gate,
+ ifindex, num_labels,
+ out_labels);
+
+ return nhlfe;
+}
+
const zebra_nhlfe_t *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
@@ -1747,27 +1783,21 @@ static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
memset(&ctx->u.lsp, 0, sizeof(ctx->u.lsp));
+ nhlfe_list_init(&(ctx->u.lsp.nhlfe_list));
+ nhlfe_list_init(&(ctx->u.lsp.backup_nhlfe_list));
ctx->u.lsp.ile = lsp->ile;
ctx->u.lsp.addr_family = lsp->addr_family;
ctx->u.lsp.num_ecmp = lsp->num_ecmp;
ctx->u.lsp.flags = lsp->flags;
/* Copy source LSP's nhlfes, and capture 'best' nhlfe */
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
/* Not sure if this is meaningful... */
if (nhlfe->nexthop == NULL)
continue;
- new_nhlfe =
- zebra_mpls_lsp_add_nhlfe(
- &(ctx->u.lsp),
- nhlfe->type,
- nhlfe->nexthop->type,
- &(nhlfe->nexthop->gate),
- nhlfe->nexthop->ifindex,
- nhlfe->nexthop->nh_label->num_labels,
- nhlfe->nexthop->nh_label->label);
-
+ new_nhlfe = zebra_mpls_lsp_add_nh(&(ctx->u.lsp), nhlfe->type,
+ nhlfe->nexthop);
if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
ret = ENOMEM;
break;
@@ -1781,9 +1811,32 @@ static int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx,
ctx->u.lsp.best_nhlfe = new_nhlfe;
}
+ if (ret != AOK)
+ goto done;
+
+ /* Capture backup nhlfes/nexthops */
+ frr_each(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
+ /* Not sure if this is meaningful... */
+ if (nhlfe->nexthop == NULL)
+ continue;
+
+ new_nhlfe = zebra_mpls_lsp_add_backup_nh(&(ctx->u.lsp),
+ nhlfe->type,
+ nhlfe->nexthop);
+ if (new_nhlfe == NULL || new_nhlfe->nexthop == NULL) {
+ ret = ENOMEM;
+ break;
+ }
+
+ /* Need to copy flags too */
+ new_nhlfe->flags = nhlfe->flags;
+ new_nhlfe->nexthop->flags = nhlfe->nexthop->flags;
+ }
+
/* On error the ctx will be cleaned-up, so we don't need to
* deal with any allocated nhlfe or nexthop structs here.
*/
+done:
return ret;
}
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index e16e69c03d..7f8049b767 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -311,14 +311,26 @@ void dplane_ctx_set_addr_family(struct zebra_dplane_ctx *ctx,
uint32_t dplane_ctx_get_lsp_flags(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_lsp_flags(struct zebra_dplane_ctx *ctx,
uint32_t flags);
-const zebra_nhlfe_t *dplane_ctx_get_nhlfe(const struct zebra_dplane_ctx *ctx);
+const struct nhlfe_list_head *dplane_ctx_get_nhlfe_list(
+ const struct zebra_dplane_ctx *ctx);
+const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
+ const struct zebra_dplane_ctx *ctx);
+
zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
enum lsp_types_t lsp_type,
enum nexthop_types_t nh_type,
union g_addr *gate,
ifindex_t ifindex,
uint8_t num_labels,
- mpls_label_t out_labels[]);
+ mpls_label_t *out_labels);
+
+zebra_nhlfe_t *dplane_ctx_add_backup_nhlfe(struct zebra_dplane_ctx *ctx,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type,
+ union g_addr *gate,
+ ifindex_t ifindex,
+ uint8_t num_labels,
+ mpls_label_t *out_labels);
const zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(
const struct zebra_dplane_ctx *ctx);
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 0aaede4507..1210430b06 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -93,28 +93,38 @@ static void lsp_processq_del(struct work_queue *wq, void *data);
static void lsp_processq_complete(struct work_queue *wq);
static int lsp_processq_add(zebra_lsp_t *lsp);
static void *lsp_alloc(void *p);
+
+/* Check whether an LSP can be freed (e.g. it has no NHLFEs) and free it */
+static void lsp_check_free(struct hash *lsp_table, zebra_lsp_t **plsp);
+
/* Free lsp; sets caller's pointer to NULL */
static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp);
static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size);
static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex);
-static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
+static zebra_nhlfe_t *nhlfe_find(struct nhlfe_list_head *list,
+ enum lsp_types_t lsp_type,
enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex);
static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex,
- uint8_t num_labels, mpls_label_t *labels);
-static int nhlfe_del(zebra_nhlfe_t *snhlfe);
+ uint8_t num_labels, const mpls_label_t *labels);
+static int nhlfe_del(zebra_nhlfe_t *nhlfe);
+static void nhlfe_free(zebra_nhlfe_t *nhlfe);
static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe,
struct mpls_label_stack *nh_label);
static int mpls_lsp_uninstall_all(struct hash *lsp_table, zebra_lsp_t *lsp,
enum lsp_types_t type);
+static int lsp_backup_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ mpls_label_t in_label,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex);
static int mpls_static_lsp_uninstall_all(struct zebra_vrf *zvrf,
mpls_label_t in_label);
static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty);
-static void lsp_print(zebra_lsp_t *lsp, void *ctxt);
+static void lsp_print(struct vty *vty, zebra_lsp_t *lsp);
static void *slsp_alloc(void *p);
static int snhlfe_match(zebra_snhlfe_t *snhlfe, enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex);
@@ -131,7 +141,13 @@ static char *snhlfe2str(zebra_snhlfe_t *snhlfe, char *buf, int size);
static void mpls_lsp_uninstall_all_type(struct hash_bucket *bucket, void *ctxt);
static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
int afi, enum lsp_types_t lsp_type);
+static int lsp_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+ const struct zapi_nexthop *znh);
+static int lsp_backup_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+ const struct zapi_nexthop *znh);
+/* List implementations - declare internal linkage */
+DECLARE_DLIST(snhlfe_list, struct zebra_snhlfe_t_, list);
/* Static functions */
@@ -143,7 +159,7 @@ static void clear_nhlfe_installed(zebra_lsp_t *lsp)
zebra_nhlfe_t *nhlfe;
struct nexthop *nexthop;
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
@@ -196,7 +212,8 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label,
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
continue;
- nhlfe = nhlfe_find(lsp, lsp_type, nexthop->type, &nexthop->gate,
+ nhlfe = nhlfe_find(&lsp->nhlfe_list, lsp_type,
+ nexthop->type, &nexthop->gate,
nexthop->ifindex);
if (nhlfe) {
/* Clear deleted flag (in case it was set) */
@@ -251,9 +268,9 @@ static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label,
if (added || changed) {
if (lsp_processq_add(lsp))
return -1;
- } else if (!lsp->nhlfe_list
- && !CHECK_FLAG(lsp->flags, LSP_FLAG_SCHEDULED))
- lsp_free(lsp_table, &lsp);
+ } else {
+ lsp_check_free(lsp_table, &lsp);
+ }
return 0;
}
@@ -267,7 +284,7 @@ static int lsp_uninstall(struct zebra_vrf *zvrf, mpls_label_t label)
struct hash *lsp_table;
zebra_ile_t tmp_ile;
zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe, *nhlfe_next;
+ zebra_nhlfe_t *nhlfe;
char buf[BUFSIZ];
/* Lookup table. */
@@ -278,12 +295,11 @@ static int lsp_uninstall(struct zebra_vrf *zvrf, mpls_label_t label)
/* If entry is not present, exit. */
tmp_ile.in_label = label;
lsp = hash_lookup(lsp_table, &tmp_ile);
- if (!lsp || !lsp->nhlfe_list)
+ if (!lsp || (nhlfe_list_first(&lsp->nhlfe_list) == NULL))
return 0;
/* Mark NHLFEs for delete or directly delete, as appropriate. */
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe_next) {
- nhlfe_next = nhlfe->next;
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
/* Skip static NHLFEs */
if (nhlfe->type == ZEBRA_LSP_STATIC)
@@ -308,9 +324,9 @@ static int lsp_uninstall(struct zebra_vrf *zvrf, mpls_label_t label)
if (CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED)) {
if (lsp_processq_add(lsp))
return -1;
- } else if (!lsp->nhlfe_list
- && !CHECK_FLAG(lsp->flags, LSP_FLAG_SCHEDULED))
- lsp_free(lsp_table, &lsp);
+ } else {
+ lsp_check_free(lsp_table, &lsp);
+ }
return 0;
}
@@ -784,7 +800,7 @@ static void lsp_select_best_nhlfe(zebra_lsp_t *lsp)
* only
* concerned with non-deleted NHLFEs.
*/
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
/* Clear selection flags. */
UNSET_FLAG(nhlfe->flags,
(NHLFE_FLAG_SELECTED | NHLFE_FLAG_MULTIPATH));
@@ -809,7 +825,7 @@ static void lsp_select_best_nhlfe(zebra_lsp_t *lsp)
* new (uninstalled) NHLFE has been selected, an installed entry that is
* still selected has a change or an installed entry is to be removed.
*/
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
int nh_chg, nh_sel, nh_inst;
nexthop = nhlfe->nexthop;
@@ -967,8 +983,7 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data)
* Any NHLFE that was installed but is not
* selected now needs to have its flags updated.
*/
- for (nhlfe = lsp->nhlfe_list; nhlfe;
- nhlfe = nhlfe->next) {
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
@@ -1012,7 +1027,7 @@ static void lsp_processq_del(struct work_queue *wq, void *data)
struct zebra_vrf *zvrf;
zebra_lsp_t *lsp;
struct hash *lsp_table;
- zebra_nhlfe_t *nhlfe, *nhlfe_next;
+ zebra_nhlfe_t *nhlfe;
zvrf = vrf_info_lookup(VRF_DEFAULT);
assert(zvrf);
@@ -1031,14 +1046,17 @@ static void lsp_processq_del(struct work_queue *wq, void *data)
*/
UNSET_FLAG(lsp->flags, LSP_FLAG_SCHEDULED);
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe_next) {
- nhlfe_next = nhlfe->next;
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
+ if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED))
+ nhlfe_del(nhlfe);
+ }
+
+ frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED))
nhlfe_del(nhlfe);
}
- if (!lsp->nhlfe_list)
- lsp_free(lsp_table, &lsp);
+ lsp_check_free(lsp_table, &lsp);
}
/*
@@ -1080,6 +1098,8 @@ static void *lsp_alloc(void *p)
lsp = XCALLOC(MTYPE_LSP, sizeof(zebra_lsp_t));
lsp->ile = *ile;
+ nhlfe_list_init(&lsp->nhlfe_list);
+ nhlfe_list_init(&lsp->backup_nhlfe_list);
if (IS_ZEBRA_DEBUG_MPLS)
zlog_debug("Alloc LSP in-label %u", lsp->ile.in_label);
@@ -1088,13 +1108,31 @@ static void *lsp_alloc(void *p)
}
/*
+ * Check whether an LSP can be freed (e.g. it has no NHLFEs) and free it
+ */
+static void lsp_check_free(struct hash *lsp_table, zebra_lsp_t **plsp)
+{
+ zebra_lsp_t *lsp;
+
+ if (plsp == NULL || *plsp == NULL)
+ return;
+
+ lsp = *plsp;
+
+ if ((nhlfe_list_first(&lsp->nhlfe_list) == NULL) &&
+ (nhlfe_list_first(&lsp->backup_nhlfe_list) == NULL) &&
+ !CHECK_FLAG(lsp->flags, LSP_FLAG_SCHEDULED))
+ lsp_free(lsp_table, plsp);
+}
+
+/*
* Dtor for an LSP: remove from ile hash, release any internal allocations,
* free LSP object.
*/
static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp)
{
zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe, *nhlfe_next;
+ zebra_nhlfe_t *nhlfe;
if (plsp == NULL || *plsp == NULL)
return;
@@ -1106,11 +1144,12 @@ static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp)
lsp->ile.in_label, lsp->flags);
/* Free nhlfes, if any. */
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe_next) {
- nhlfe_next = nhlfe->next;
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe)
+ nhlfe_del(nhlfe);
+ /* Free backup nhlfes, if any. */
+ frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe)
nhlfe_del(nhlfe);
- }
hash_release(lsp_table, &lsp->ile);
XFREE(MTYPE_LSP, lsp);
@@ -1190,16 +1229,14 @@ static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
/*
* Locate NHLFE that matches with passed info.
*/
-static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
+static zebra_nhlfe_t *nhlfe_find(struct nhlfe_list_head *list,
+ enum lsp_types_t lsp_type,
enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex)
{
zebra_nhlfe_t *nhlfe;
- if (!lsp)
- return NULL;
-
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ frr_each_safe(nhlfe_list, list, nhlfe) {
if (nhlfe->type != lsp_type)
continue;
if (!nhlfe_nhop_match(nhlfe, gtype, gate, ifindex))
@@ -1210,13 +1247,13 @@ static zebra_nhlfe_t *nhlfe_find(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
}
/*
- * Add NHLFE. Base entry must have been created and duplicate
- * check done.
+ * Allocate and init new NHLFE.
*/
-static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex,
- uint8_t num_labels, mpls_label_t labels[])
+static zebra_nhlfe_t *nhlfe_alloc(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex,
+ uint8_t num_labels,
+ const mpls_label_t *labels)
{
zebra_nhlfe_t *nhlfe;
struct nexthop *nexthop;
@@ -1235,6 +1272,7 @@ static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
XFREE(MTYPE_NHLFE, nhlfe);
return NULL;
}
+
nexthop_add_labels(nexthop, lsp_type, num_labels, labels);
nexthop->vrf_id = VRF_DEFAULT;
@@ -1260,18 +1298,85 @@ static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
XFREE(MTYPE_NHLFE, nhlfe);
return NULL;
}
-
nhlfe->nexthop = nexthop;
- if (lsp->nhlfe_list)
- lsp->nhlfe_list->prev = nhlfe;
- nhlfe->next = lsp->nhlfe_list;
- lsp->nhlfe_list = nhlfe;
return nhlfe;
}
/*
- * Delete NHLFE. Entry must be present on list.
+ * Add NHLFE. Base entry must have been created and duplicate
+ * check done.
+ */
+static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex,
+ uint8_t num_labels, const mpls_label_t *labels)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ if (!lsp)
+ return NULL;
+
+ /* Allocate new object */
+ nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels,
+ labels);
+
+ /* Enqueue to LSP, at head of list. */
+ if (nhlfe)
+ nhlfe_list_add_head(&lsp->nhlfe_list, nhlfe);
+
+ return nhlfe;
+}
+
+/*
+ * Add backup NHLFE. Base entry must have been created and duplicate
+ * check done.
+ */
+static zebra_nhlfe_t *nhlfe_backup_add(zebra_lsp_t *lsp,
+				       enum lsp_types_t lsp_type,
+				       enum nexthop_types_t gtype,
+				       const union g_addr *gate,
+				       ifindex_t ifindex, uint8_t num_labels,
+				       const mpls_label_t *labels)
+{
+	zebra_nhlfe_t *nhlfe;
+
+	if (!lsp)
+		return NULL;
+
+	/* Allocate new object */
+	nhlfe = nhlfe_alloc(lsp, lsp_type, gtype, gate, ifindex, num_labels,
+			    labels);
+	if (nhlfe == NULL)
+		return NULL;
+
+	/* Mark as a backup entry and enqueue to LSP, at tail of list. */
+	SET_FLAG(nhlfe->flags, NHLFE_FLAG_IS_BACKUP);
+	nhlfe_list_add_tail(&lsp->backup_nhlfe_list, nhlfe);
+
+	return nhlfe;
+}
+
+/*
+ * Common delete for NHLFEs.
+ */
+static void nhlfe_free(zebra_nhlfe_t *nhlfe)
+{
+ if (!nhlfe)
+ return;
+
+ /* Free nexthop. */
+ if (nhlfe->nexthop)
+ nexthop_free(nhlfe->nexthop);
+
+ nhlfe->nexthop = NULL;
+
+ XFREE(MTYPE_NHLFE, nhlfe);
+}
+
+
+/*
+ * Disconnect NHLFE from LSP, and free. Entry must be present on LSP's list.
*/
static int nhlfe_del(zebra_nhlfe_t *nhlfe)
{
@@ -1284,22 +1389,18 @@ static int nhlfe_del(zebra_nhlfe_t *nhlfe)
if (!lsp)
return -1;
- /* Free nexthop. */
- if (nhlfe->nexthop)
- nexthop_free(nhlfe->nexthop);
+ if (nhlfe == lsp->best_nhlfe)
+ lsp->best_nhlfe = NULL;
/* Unlink from LSP */
- if (nhlfe->next)
- nhlfe->next->prev = nhlfe->prev;
- if (nhlfe->prev)
- nhlfe->prev->next = nhlfe->next;
+ if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_IS_BACKUP))
+ nhlfe_list_del(&lsp->backup_nhlfe_list, nhlfe);
else
- lsp->nhlfe_list = nhlfe->next;
+ nhlfe_list_del(&lsp->nhlfe_list, nhlfe);
- if (nhlfe == lsp->best_nhlfe)
- lsp->best_nhlfe = NULL;
+ nhlfe->lsp = NULL;
- XFREE(MTYPE_NHLFE, nhlfe);
+ nhlfe_free(nhlfe);
return 0;
}
@@ -1316,14 +1417,12 @@ static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe,
static int mpls_lsp_uninstall_all(struct hash *lsp_table, zebra_lsp_t *lsp,
enum lsp_types_t type)
{
- zebra_nhlfe_t *nhlfe, *nhlfe_next;
+ zebra_nhlfe_t *nhlfe;
int schedule_lsp = 0;
char buf[BUFSIZ];
/* Mark NHLFEs for delete or directly delete, as appropriate. */
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe_next) {
- nhlfe_next = nhlfe->next;
-
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
/* Skip non-static NHLFEs */
if (nhlfe->type != type)
continue;
@@ -1344,13 +1443,34 @@ static int mpls_lsp_uninstall_all(struct hash *lsp_table, zebra_lsp_t *lsp,
}
}
+ frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe) {
+ /* Skip non-static NHLFEs */
+ if (nhlfe->type != type)
+ continue;
+
+ if (IS_ZEBRA_DEBUG_MPLS) {
+ nhlfe2str(nhlfe, buf, BUFSIZ);
+ zlog_debug(
+ "Del backup LSP in-label %u type %d nexthop %s flags 0x%x",
+ lsp->ile.in_label, type, buf, nhlfe->flags);
+ }
+
+ if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED)) {
+ UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED);
+ SET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
+ schedule_lsp = 1;
+ } else {
+ nhlfe_del(nhlfe);
+ }
+ }
+
/* Queue LSP for processing, if needed, else delete. */
if (schedule_lsp) {
if (lsp_processq_add(lsp))
return -1;
- } else if (!lsp->nhlfe_list
- && !CHECK_FLAG(lsp->flags, LSP_FLAG_SCHEDULED))
- lsp_free(lsp_table, &lsp);
+ } else {
+ lsp_check_free(lsp_table, &lsp);
+ }
return 0;
}
@@ -1374,7 +1494,7 @@ static int mpls_static_lsp_uninstall_all(struct zebra_vrf *zvrf,
/* If entry is not present, exit. */
tmp_ile.in_label = in_label;
lsp = hash_lookup(lsp_table, &tmp_ile);
- if (!lsp || !lsp->nhlfe_list)
+ if (!lsp || (nhlfe_list_first(&lsp->nhlfe_list) == NULL))
return 0;
return mpls_lsp_uninstall_all(lsp_table, lsp, ZEBRA_LSP_STATIC);
@@ -1424,7 +1544,7 @@ static json_object *nhlfe_json(zebra_nhlfe_t *nhlfe)
static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty)
{
struct nexthop *nexthop;
- char buf[BUFSIZ];
+ char buf[MPLS_LABEL_STRLEN];
nexthop = nhlfe->nexthop;
if (!nexthop || !nexthop->nh_label) // unexpected
@@ -1432,7 +1552,9 @@ static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty)
vty_out(vty, " type: %s remote label: %s distance: %d\n",
nhlfe_type2str(nhlfe->type),
- label2str(nexthop->nh_label->label[0], buf, BUFSIZ),
+ mpls_label2str(nexthop->nh_label->num_labels,
+ nexthop->nh_label->label,
+ buf, sizeof(buf), 0),
nhlfe->distance);
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
@@ -1464,19 +1586,36 @@ static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty)
/*
* Print an LSP forwarding entry.
*/
-static void lsp_print(zebra_lsp_t *lsp, void *ctxt)
+static void lsp_print(struct vty *vty, zebra_lsp_t *lsp)
{
- zebra_nhlfe_t *nhlfe;
- struct vty *vty;
-
- vty = (struct vty *)ctxt;
+ zebra_nhlfe_t *nhlfe, *backup;
+ int i;
vty_out(vty, "Local label: %u%s\n", lsp->ile.in_label,
CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED) ? " (installed)"
: "");
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next)
+ frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
nhlfe_print(nhlfe, vty);
+
+ if (nhlfe->nexthop &&
+ CHECK_FLAG(nhlfe->nexthop->flags,
+ NEXTHOP_FLAG_HAS_BACKUP)) {
+ /* Find backup in backup list */
+
+ i = 0;
+ frr_each(nhlfe_list, &lsp->backup_nhlfe_list, backup) {
+ if (i == nhlfe->nexthop->backup_idx)
+ break;
+ i++;
+ }
+
+ if (backup) {
+ vty_out(vty, " [backup %d]", i);
+ nhlfe_print(backup, vty);
+ }
+ }
+ }
}
/*
@@ -1493,7 +1632,7 @@ static json_object *lsp_json(zebra_lsp_t *lsp)
if (CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED))
json_object_boolean_true_add(json, "installed");
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next)
+ frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe)
json_object_array_add(json_nhlfe_list, nhlfe_json(nhlfe));
json_object_object_add(json, "nexthops", json_nhlfe_list);
@@ -1541,6 +1680,8 @@ static void *slsp_alloc(void *p)
slsp = XCALLOC(MTYPE_SLSP, sizeof(zebra_slsp_t));
slsp->ile = *ile;
+ snhlfe_list_init(&slsp->snhlfe_list);
+
return ((void *)slsp);
}
@@ -1600,7 +1741,7 @@ static zebra_snhlfe_t *snhlfe_find(zebra_slsp_t *slsp,
if (!slsp)
return NULL;
- for (snhlfe = slsp->snhlfe_list; snhlfe; snhlfe = snhlfe->next) {
+ frr_each_safe(snhlfe_list, &slsp->snhlfe_list, snhlfe) {
if (!snhlfe_match(snhlfe, gtype, gate, ifindex))
break;
}
@@ -1642,10 +1783,7 @@ static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp,
return NULL;
}
- if (slsp->snhlfe_list)
- slsp->snhlfe_list->prev = snhlfe;
- snhlfe->next = slsp->snhlfe_list;
- slsp->snhlfe_list = snhlfe;
+ snhlfe_list_add_head(&slsp->snhlfe_list, snhlfe);
return snhlfe;
}
@@ -1664,14 +1802,8 @@ static int snhlfe_del(zebra_snhlfe_t *snhlfe)
if (!slsp)
return -1;
- if (snhlfe->next)
- snhlfe->next->prev = snhlfe->prev;
- if (snhlfe->prev)
- snhlfe->prev->next = snhlfe->next;
- else
- slsp->snhlfe_list = snhlfe->next;
+ snhlfe_list_del(&slsp->snhlfe_list, snhlfe);
- snhlfe->prev = snhlfe->next = NULL;
XFREE(MTYPE_SNHLFE_IFNAME, snhlfe->ifname);
XFREE(MTYPE_SNHLFE, snhlfe);
@@ -1683,13 +1815,12 @@ static int snhlfe_del(zebra_snhlfe_t *snhlfe)
*/
static int snhlfe_del_all(zebra_slsp_t *slsp)
{
- zebra_snhlfe_t *snhlfe, *snhlfe_next;
+ zebra_snhlfe_t *snhlfe;
if (!slsp)
return -1;
- for (snhlfe = slsp->snhlfe_list; snhlfe; snhlfe = snhlfe_next) {
- snhlfe_next = snhlfe->next;
+ frr_each_safe(snhlfe_list, &slsp->snhlfe_list, snhlfe) {
snhlfe_del(snhlfe);
}
@@ -1793,8 +1924,7 @@ void zebra_mpls_lsp_dplane_result(struct zebra_dplane_ctx *ctx)
if (status == ZEBRA_DPLANE_REQUEST_SUCCESS) {
/* Update zebra object */
SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
- for (nhlfe = lsp->nhlfe_list; nhlfe;
- nhlfe = nhlfe->next) {
+ frr_each(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
@@ -1837,6 +1967,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
struct hash *lsp_table;
zebra_lsp_t *lsp;
zebra_nhlfe_t *nhlfe;
+ const struct nhlfe_list_head *head;
const zebra_nhlfe_t *ctx_nhlfe;
struct nexthop *nexthop;
const struct nexthop *ctx_nexthop;
@@ -1872,7 +2003,8 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
* the existing state of the LSP objects available before making
* any changes.
*/
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ head = dplane_ctx_get_nhlfe_list(ctx);
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
char buf[NEXTHOP_STRLEN];
nexthop = nhlfe->nexthop;
@@ -1883,9 +2015,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
start_count++;
ctx_nexthop = NULL;
- for (ctx_nhlfe = dplane_ctx_get_nhlfe(ctx);
- ctx_nhlfe; ctx_nhlfe = ctx_nhlfe->next) {
-
+ frr_each(nhlfe_list_const, head, ctx_nhlfe) {
ctx_nexthop = ctx_nhlfe->nexthop;
if (!ctx_nexthop)
continue;
@@ -1960,7 +2090,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
* Now we take a second pass and bring the zebra
* nexthop state into sync with the forwarding-plane state.
*/
- for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
char buf[NEXTHOP_STRLEN];
nexthop = nhlfe->nexthop;
@@ -1968,9 +2098,7 @@ void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
continue;
ctx_nexthop = NULL;
- for (ctx_nhlfe = dplane_ctx_get_nhlfe(ctx);
- ctx_nhlfe; ctx_nhlfe = ctx_nhlfe->next) {
-
+ frr_each(nhlfe_list_const, head, ctx_nhlfe) {
ctx_nexthop = ctx_nhlfe->nexthop;
if (!ctx_nexthop)
continue;
@@ -2088,7 +2216,8 @@ int zebra_mpls_lsp_uninstall(struct zebra_vrf *zvrf, struct route_node *rn,
}
/*
- * Add an NHLFE to an LSP, return the newly-added object
+ * Add an NHLFE to an LSP, return the newly-added object. This path only changes
+ * the LSP object - nothing is scheduled for processing, for example.
*/
zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp,
enum lsp_types_t lsp_type,
@@ -2096,7 +2225,7 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp,
union g_addr *gate,
ifindex_t ifindex,
uint8_t num_labels,
- mpls_label_t out_labels[])
+ const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
@@ -2104,12 +2233,68 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp,
}
/*
+ * Add a backup NHLFE to an LSP, return the newly-added object.
+ * This path only changes the LSP object - nothing is scheduled for
+ * processing, for example.
+ */
+zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nhlfe(zebra_lsp_t *lsp,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ union g_addr *gate,
+ ifindex_t ifindex,
+ uint8_t num_labels,
+ const mpls_label_t *out_labels)
+{
+ /* Just a public pass-through to the internal implementation */
+ return nhlfe_backup_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
+ out_labels);
+}
+
+/*
+ * Add an NHLFE to an LSP based on a nexthop; return the newly-added object
+ */
+zebra_nhlfe_t *zebra_mpls_lsp_add_nh(zebra_lsp_t *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
+ return NULL;
+
+ nhlfe = nhlfe_add(lsp, lsp_type, nh->type, &nh->gate, nh->ifindex,
+ nh->nh_label->num_labels, nh->nh_label->label);
+
+ return nhlfe;
+}
+
+/*
+ * Add a backup NHLFE to an LSP based on a nexthop;
+ * return the newly-added object.
+ */
+zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nh(zebra_lsp_t *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
+ return NULL;
+
+ nhlfe = nhlfe_backup_add(lsp, lsp_type, nh->type, &nh->gate,
+ nh->ifindex, nh->nh_label->num_labels,
+ nh->nh_label->label);
+
+ return nhlfe;
+}
+
+/*
* Free an allocated NHLFE
*/
-void zebra_mpls_nhlfe_del(zebra_nhlfe_t *nhlfe)
+void zebra_mpls_nhlfe_free(zebra_nhlfe_t *nhlfe)
{
/* Just a pass-through to the internal implementation */
- nhlfe_del(nhlfe);
+ nhlfe_free(nhlfe);
}
/*
@@ -2579,22 +2764,23 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf,
fec_print(rn->info, vty);
}
-static void mpls_zebra_nhg_update(struct route_entry *re, afi_t afi,
- struct nexthop_group *new_grp)
+static void mpls_zebra_nhe_update(struct route_entry *re, afi_t afi,
+ struct nhg_hash_entry *new_nhe)
{
struct nhg_hash_entry *nhe;
- nhe = zebra_nhg_rib_find(0, new_grp, afi);
+ nhe = zebra_nhg_rib_find_nhe(new_nhe, afi);
route_entry_update_nhe(re, nhe);
}
-static bool mpls_ftn_update_nexthop(int add, struct nexthop *nexthop,
- enum lsp_types_t type, mpls_label_t label)
+static bool ftn_update_nexthop(bool add_p, struct nexthop *nexthop,
+ enum lsp_types_t type,
+ const struct zapi_nexthop *znh)
{
- if (add && nexthop->nh_label_type == ZEBRA_LSP_NONE)
- nexthop_add_labels(nexthop, type, 1, &label);
- else if (!add && nexthop->nh_label_type == type)
+ if (add_p && nexthop->nh_label_type == ZEBRA_LSP_NONE)
+ nexthop_add_labels(nexthop, type, znh->label_num, znh->labels);
+ else if (!add_p && nexthop->nh_label_type == type)
nexthop_del_labels(nexthop);
else
return false;
@@ -2602,20 +2788,15 @@ static bool mpls_ftn_update_nexthop(int add, struct nexthop *nexthop,
return true;
}
-/*
- * Install/uninstall a FEC-To-NHLFE (FTN) binding.
- */
-int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
- struct prefix *prefix, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex, uint8_t route_type,
- unsigned short route_instance, mpls_label_t out_label)
+int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ unsigned short route_instance)
{
struct route_table *table;
struct route_node *rn;
struct route_entry *re;
struct nexthop *nexthop;
- struct nexthop_group new_grp = {};
- bool found;
+ struct nhg_hash_entry *new_nhe;
afi_t afi = family2afi(prefix->family);
/* Lookup table. */
@@ -2631,111 +2812,374 @@ int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
if (re->type == route_type && re->instance == route_instance)
break;
}
-
if (re == NULL)
return -1;
/*
- * Copy over current nexthops into a temporary group.
- * We can't just change the values here since we are hashing
- * on labels. We need to create a whole new group
+ * Nexthops are now shared by multiple routes, so we have to make
+ * a local copy, modify the copy, then update the route.
*/
- nexthop_group_copy(&new_grp, &(re->nhe->nhg));
+ new_nhe = zebra_nhe_copy(re->nhe, 0);
+
+ for (nexthop = new_nhe->nhg.nexthop; nexthop; nexthop = nexthop->next)
+ nexthop_del_labels(nexthop);
+
+ /* Update backup routes/nexthops also, if present. */
+ if (zebra_nhg_get_backup_nhg(new_nhe) != NULL) {
+ for (nexthop = new_nhe->backup_info->nhe->nhg.nexthop; nexthop;
+ nexthop = nexthop->next)
+ nexthop_del_labels(nexthop);
+ }
- found = false;
- for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next) {
+ SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+ SET_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED);
+
+ mpls_zebra_nhe_update(re, afi, new_nhe);
+
+ zebra_nhg_free(new_nhe);
+
+ rib_queue_add(rn);
+
+ return 0;
+}
+
+/*
+ * Iterate through a list of nexthops, for a match for 'znh'. If found,
+ * update its labels according to 'add_p', and return 'true' if successful.
+ */
+static bool ftn_update_znh(bool add_p, enum lsp_types_t type,
+ struct nexthop *head, const struct zapi_nexthop *znh)
+{
+ bool found = false, success = false;
+ struct nexthop *nexthop;
+
+ for (nexthop = head; nexthop; nexthop = nexthop->next) {
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (gtype != NEXTHOP_TYPE_IPV4
- && gtype != NEXTHOP_TYPE_IPV4_IFINDEX)
+ if (znh->type != NEXTHOP_TYPE_IPV4
+ && znh->type != NEXTHOP_TYPE_IPV4_IFINDEX)
continue;
- if (!IPV4_ADDR_SAME(&nexthop->gate.ipv4, &gate->ipv4))
+ if (!IPV4_ADDR_SAME(&nexthop->gate.ipv4,
+ &znh->gate.ipv4))
continue;
if (nexthop->type == NEXTHOP_TYPE_IPV4_IFINDEX
- && nexthop->ifindex != ifindex)
+ && nexthop->ifindex != znh->ifindex)
continue;
- if (!mpls_ftn_update_nexthop(add, nexthop, type,
- out_label))
- break;
+
found = true;
+
+ if (!ftn_update_nexthop(add_p, nexthop, type, znh))
+ break;
+
+ success = true;
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (gtype != NEXTHOP_TYPE_IPV6
- && gtype != NEXTHOP_TYPE_IPV6_IFINDEX)
+ if (znh->type != NEXTHOP_TYPE_IPV6
+ && znh->type != NEXTHOP_TYPE_IPV6_IFINDEX)
continue;
- if (!IPV6_ADDR_SAME(&nexthop->gate.ipv6, &gate->ipv6))
+ if (!IPV6_ADDR_SAME(&nexthop->gate.ipv6,
+ &znh->gate.ipv6))
continue;
if (nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX
- && nexthop->ifindex != ifindex)
+ && nexthop->ifindex != znh->ifindex)
continue;
- if (!mpls_ftn_update_nexthop(add, nexthop, type,
- out_label))
- break;
+
found = true;
+
+ if (!ftn_update_nexthop(add_p, nexthop, type, znh))
+ break;
+ success = true;
break;
default:
break;
}
+
+ if (found)
+ break;
+ }
+
+ return success;
+}
+
+/*
+ * Install/uninstall LSP and (optionally) FEC-To-NHLFE (FTN) bindings,
+ * using zapi message info.
+ * There are several changes that need to be made, in several zebra
+ * data structures, so we want to do all the work required at once.
+ */
+int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl)
+{
+ int i, counter, ret = 0;
+ char buf[NEXTHOP_STRLEN], prefix_buf[PREFIX_STRLEN];
+ const struct zapi_nexthop *znh;
+ struct route_table *table;
+ struct route_node *rn = NULL;
+ struct route_entry *re = NULL;
+ struct nhg_hash_entry *new_nhe = NULL;
+ bool found;
+ afi_t afi = AFI_IP;
+ const struct prefix *prefix = NULL;
+ struct hash *lsp_table;
+ zebra_ile_t tmp_ile;
+ zebra_lsp_t *lsp = NULL;
+
+ /* Prep LSP for add case */
+ if (add_p) {
+ /* Lookup table. */
+ lsp_table = zvrf->lsp_table;
+ if (!lsp_table)
+ return -1;
+
+ /* Find or create LSP object */
+ tmp_ile.in_label = zl->local_label;
+ lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
+ if (!lsp)
+ return -1;
+ }
+
+ /* Prep for route/FEC update if requested */
+ if (CHECK_FLAG(zl->message, ZAPI_LABELS_FTN)) {
+ prefix = &zl->route.prefix;
+
+ afi = family2afi(prefix->family);
+
+ /* Lookup table. */
+ table = zebra_vrf_table(afi, SAFI_UNICAST, zvrf_id(zvrf));
+ if (table) {
+ /* Lookup existing route */
+ rn = route_node_get(table, prefix);
+ RNODE_FOREACH_RE(rn, re) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
+ continue;
+ if (re->type == zl->route.type &&
+ re->instance == zl->route.instance)
+ break;
+ }
+ }
+
+ if (re) {
+ /*
+ * Copy over current nexthops into a temporary group.
+ * We can't just change the values here since the nhgs
+ * are shared and if the labels change, we'll need
+ * to find or create a new nhg. We need to create
+ * a whole temporary group, make changes to it,
+ * then attach that to the route.
+ */
+ new_nhe = zebra_nhe_copy(re->nhe, 0);
+
+ } else {
+ /*
+ * The old version of the zapi code
+ * attempted to manage LSPs before trying to
+ * find a route/FEC, so we'll continue that way.
+ */
+ if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_MPLS) {
+ prefix2str(prefix, prefix_buf,
+ sizeof(prefix_buf));
+ zlog_debug("%s: FTN update requested: no route for prefix %s",
+ __func__, prefix_buf);
+ }
+ }
}
- if (found) {
+ /*
+ * Use info from the zapi nexthops to add/replace/remove LSP/FECs
+ */
+
+ counter = 0;
+ for (i = 0; i < zl->nexthop_num; i++) {
+
+ znh = &zl->nexthops[i];
+
+ /* Attempt LSP update */
+ if (add_p)
+ ret = lsp_znh_install(lsp, zl->type, znh);
+ else
+ ret = mpls_lsp_uninstall(zvrf, zl->type,
+ zl->local_label, znh->type,
+ &znh->gate, znh->ifindex);
+ if (ret < 0) {
+ if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_MPLS) {
+ zapi_nexthop2str(znh, buf, sizeof(buf));
+ zlog_debug("%s: Unable to %sinstall LSP: label %u, znh %s",
+ __func__, (add_p ? "" : "un"),
+ zl->local_label, buf);
+ }
+ continue;
+ }
+
+ /* Attempt route/FEC update if requested */
+ if (re == NULL)
+ continue;
+
+ /* Search the route's nexthops for a match, and update it. */
+ found = ftn_update_znh(add_p, zl->type, new_nhe->nhg.nexthop,
+ znh);
+ if (found) {
+ counter++;
+ } else if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_MPLS) {
+ zapi_nexthop2str(znh, buf, sizeof(buf));
+ prefix2str(prefix, prefix_buf, sizeof(prefix_buf));
+ zlog_debug("%s: Unable to update FEC: prefix %s, label %u, znh %s",
+ __func__, prefix_buf, zl->local_label, buf);
+ }
+ }
+
+ /*
+ * Process backup LSPs/nexthop entries also. We associate backup
+ * LSP info with backup nexthops.
+ */
+ if (!CHECK_FLAG(zl->message, ZAPI_LABELS_HAS_BACKUPS))
+ goto znh_done;
+
+ for (i = 0; i < zl->backup_nexthop_num; i++) {
+
+ znh = &zl->backup_nexthops[i];
+
+ if (add_p)
+ ret = lsp_backup_znh_install(lsp, zl->type, znh);
+ else
+ ret = lsp_backup_uninstall(zvrf, zl->type,
+ zl->local_label,
+ znh->type, &znh->gate,
+ znh->ifindex);
+
+ if (ret < 0) {
+ if (IS_ZEBRA_DEBUG_RECV ||
+ IS_ZEBRA_DEBUG_MPLS) {
+ zapi_nexthop2str(znh, buf, sizeof(buf));
+ zlog_debug("%s: Unable to %sinstall backup LSP: label %u, znh %s",
+ __func__, (add_p ? "" : "un"),
+ zl->local_label, buf);
+ }
+ continue;
+ }
+
+ /* Attempt backup nexthop/FEC update if requested */
+ if (re == NULL || zebra_nhg_get_backup_nhg(new_nhe) == NULL)
+ continue;
+
+ /* Search the route's backup nexthops for a match
+ * and update it.
+ */
+ found = ftn_update_znh(add_p, zl->type,
+ new_nhe->backup_info->nhe->nhg.nexthop,
+ znh);
+ if (found) {
+ counter++;
+ } else if (IS_ZEBRA_DEBUG_RECV || IS_ZEBRA_DEBUG_MPLS) {
+ zapi_nexthop2str(znh, buf, sizeof(buf));
+ prefix2str(prefix, prefix_buf, sizeof(prefix_buf));
+ zlog_debug("%s: Unable to update backup FEC: prefix %s, label %u, znh %s",
+ __func__, prefix_buf, zl->local_label, buf);
+ }
+ }
+
+znh_done:
+
+ /*
+ * If we made changes, update the route, and schedule it
+ * for rib processing
+ */
+ if (re != NULL && counter > 0) {
+ assert(rn != NULL);
+
SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
SET_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED);
- mpls_zebra_nhg_update(re, afi, &new_grp);
+ mpls_zebra_nhe_update(re, afi, new_nhe);
rib_queue_add(rn);
}
- nexthops_free(new_grp.nexthop);
+ if (new_nhe)
+ zebra_nhg_free(new_nhe);
- return found ? 0 : -1;
+ return ret;
}
-int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
- struct prefix *prefix, uint8_t route_type,
- unsigned short route_instance)
+/*
+ * Install/update a NHLFE for an LSP in the forwarding table. This may be
+ * a new LSP entry or a new NHLFE for an existing in-label or an update of
+ * the out-label for an existing NHLFE (update case).
+ */
+static zebra_nhlfe_t *
+lsp_add_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type,
+ uint8_t num_out_labels, const mpls_label_t *out_labels,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex)
{
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *re;
- struct nexthop *nexthop;
- struct nexthop_group new_grp = {};
- afi_t afi = family2afi(prefix->family);
+ zebra_nhlfe_t *nhlfe;
+ char buf[MPLS_LABEL_STRLEN];
- /* Lookup table. */
- table = zebra_vrf_table(afi, SAFI_UNICAST, zvrf_id(zvrf));
- if (!table)
- return -1;
+ nhlfe = nhlfe_find(&lsp->nhlfe_list, type, gtype, gate, ifindex);
+ if (nhlfe) {
+ struct nexthop *nh = nhlfe->nexthop;
- /* Lookup existing route */
- rn = route_node_get(table, prefix);
- RNODE_FOREACH_RE (rn, re) {
- if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
- continue;
- if (re->type == route_type && re->instance == route_instance)
- break;
- }
- if (re == NULL)
- return -1;
+ assert(nh);
+ assert(nh->nh_label);
- nexthop_group_copy(&new_grp, &(re->nhe->nhg));
+ /* Clear deleted flag (in case it was set) */
+ UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
+ if (nh->nh_label->num_labels == num_out_labels
+ && !memcmp(nh->nh_label->label, out_labels,
+ sizeof(mpls_label_t) * num_out_labels))
+ /* No change */
+ return nhlfe;
- for (nexthop = new_grp.nexthop; nexthop; nexthop = nexthop->next)
- nexthop_del_labels(nexthop);
+ if (IS_ZEBRA_DEBUG_MPLS) {
+ char buf2[MPLS_LABEL_STRLEN];
+ char buf3[MPLS_LABEL_STRLEN];
- SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
- SET_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED);
+ nhlfe2str(nhlfe, buf, sizeof(buf));
+ mpls_label2str(num_out_labels, out_labels, buf2,
+ sizeof(buf2), 0);
+ mpls_label2str(nh->nh_label->num_labels,
+ nh->nh_label->label, buf3, sizeof(buf3),
+ 0);
+
+ zlog_debug("LSP in-label %u type %d nexthop %s out-label(s) changed to %s (old %s)",
+ lsp->ile.in_label, type, buf, buf2, buf3);
+ }
+
+ /* Update out label(s), trigger processing. */
+ if (nh->nh_label->num_labels == num_out_labels)
+ memcpy(nh->nh_label->label, out_labels,
+ sizeof(mpls_label_t) * num_out_labels);
+ else {
+ nexthop_del_labels(nh);
+ nexthop_add_labels(nh, type, num_out_labels,
+ out_labels);
+ }
+ } else {
+ /* Add LSP entry to this nexthop */
+ nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex,
+ num_out_labels, out_labels);
+ if (!nhlfe)
+ return NULL;
- mpls_zebra_nhg_update(re, afi, &new_grp);
+ if (IS_ZEBRA_DEBUG_MPLS) {
+ char buf2[MPLS_LABEL_STRLEN];
- nexthops_free(new_grp.nexthop);
+ nhlfe2str(nhlfe, buf, sizeof(buf));
+ mpls_label2str(num_out_labels, out_labels, buf2,
+ sizeof(buf2), 0);
- rib_queue_add(rn);
+ zlog_debug("Add LSP in-label %u type %d nexthop %s out-label(s) %s",
+ lsp->ile.in_label, type, buf, buf2);
+ }
- return 0;
+ lsp->addr_family = NHLFE_FAMILY(nhlfe);
+ }
+
+ /* Mark NHLFE, queue LSP for processing. */
+ SET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED);
+
+ return nhlfe;
}
/*
@@ -2743,29 +3187,16 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
* a new LSP entry or a new NHLFE for an existing in-label or an update of
* the out-label for an existing NHLFE (update case).
*/
-int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
- mpls_label_t in_label, uint8_t num_out_labels,
- mpls_label_t out_labels[], enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex)
+static zebra_nhlfe_t *
+lsp_add_backup_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type,
+ uint8_t num_out_labels, const mpls_label_t *out_labels,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex)
{
- struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
zebra_nhlfe_t *nhlfe;
- char buf[BUFSIZ];
+ char buf[MPLS_LABEL_STRLEN];
- /* Lookup table. */
- lsp_table = zvrf->lsp_table;
- if (!lsp_table)
- return -1;
-
- /* Find or create LSP object */
- tmp_ile.in_label = in_label;
- lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
- if (!lsp)
- return -1;
-
- nhlfe = nhlfe_find(lsp, type, gtype, gate, ifindex);
+ nhlfe = nhlfe_find(&lsp->backup_nhlfe_list, type, gtype, gate, ifindex);
if (nhlfe) {
struct nexthop *nh = nhlfe->nexthop;
@@ -2778,23 +3209,21 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
&& !memcmp(nh->nh_label->label, out_labels,
sizeof(mpls_label_t) * num_out_labels))
/* No change */
- return 0;
+ return nhlfe;
if (IS_ZEBRA_DEBUG_MPLS) {
char buf2[MPLS_LABEL_STRLEN];
char buf3[MPLS_LABEL_STRLEN];
- nhlfe2str(nhlfe, buf, BUFSIZ);
+ nhlfe2str(nhlfe, buf, sizeof(buf));
mpls_label2str(num_out_labels, out_labels, buf2,
sizeof(buf2), 0);
mpls_label2str(nh->nh_label->num_labels,
nh->nh_label->label, buf3, sizeof(buf3),
0);
- zlog_debug(
- "LSP in-label %u type %d nexthop %s "
- "out-label(s) changed to %s (old %s)",
- in_label, type, buf, buf2, buf3);
+ zlog_debug("LSP in-label %u type %d backup nexthop %s out-label(s) changed to %s (old %s)",
+ lsp->ile.in_label, type, buf, buf2, buf3);
}
/* Update out label(s), trigger processing. */
@@ -2808,22 +3237,20 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
}
} else {
/* Add LSP entry to this nexthop */
- nhlfe = nhlfe_add(lsp, type, gtype, gate, ifindex,
- num_out_labels, out_labels);
+ nhlfe = nhlfe_backup_add(lsp, type, gtype, gate, ifindex,
+ num_out_labels, out_labels);
if (!nhlfe)
- return -1;
+ return NULL;
if (IS_ZEBRA_DEBUG_MPLS) {
- char buf2[BUFSIZ];
+ char buf2[MPLS_LABEL_STRLEN];
- nhlfe2str(nhlfe, buf, BUFSIZ);
+ nhlfe2str(nhlfe, buf, sizeof(buf));
mpls_label2str(num_out_labels, out_labels, buf2,
sizeof(buf2), 0);
- zlog_debug(
- "Add LSP in-label %u type %d nexthop %s "
- "out-label(s) %s",
- in_label, type, buf, buf2);
+ zlog_debug("Add LSP in-label %u type %d backup nexthop %s out-label(s) %s",
+ lsp->ile.in_label, type, buf, buf2);
}
lsp->addr_family = NHLFE_FAMILY(nhlfe);
@@ -2831,6 +3258,92 @@ int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
/* Mark NHLFE, queue LSP for processing. */
SET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED);
+
+ return nhlfe;
+}
+
+/*
+ * Install an LSP and forwarding entry; used primarily
+ * from zapi message processing.
+ */
+int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ mpls_label_t in_label, uint8_t num_out_labels,
+ const mpls_label_t *out_labels, enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex)
+{
+ struct hash *lsp_table;
+ zebra_ile_t tmp_ile;
+ zebra_lsp_t *lsp;
+ zebra_nhlfe_t *nhlfe;
+
+ /* Lookup table. */
+ lsp_table = zvrf->lsp_table;
+ if (!lsp_table)
+ return -1;
+
+ /* Find or create LSP object */
+ tmp_ile.in_label = in_label;
+ lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
+ if (!lsp)
+ return -1;
+
+ nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype,
+ gate, ifindex);
+ if (nhlfe == NULL)
+ return -1;
+
+ /* Queue LSP for processing. */
+ if (lsp_processq_add(lsp))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Install or replace NHLFE, using info from zapi nexthop
+ */
+static int lsp_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+ const struct zapi_nexthop *znh)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels,
+ znh->type, &znh->gate, znh->ifindex);
+ if (nhlfe == NULL)
+ return -1;
+
+ /* Update backup info if present */
+ if (CHECK_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_HAS_BACKUP)) {
+ nhlfe->nexthop->backup_idx = znh->backup_idx;
+ SET_FLAG(nhlfe->nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP);
+ }
+
+ /* Queue LSP for processing. */
+ if (lsp_processq_add(lsp))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Install/update backup NHLFE for an LSP, using info from a zapi message.
+ */
+static int lsp_backup_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+ const struct zapi_nexthop *znh)
+{
+ zebra_nhlfe_t *nhlfe;
+
+ nhlfe = lsp_add_backup_nhlfe(lsp, type, znh->label_num,
+ znh->labels, znh->type, &znh->gate,
+ znh->ifindex);
+ if (nhlfe == NULL) {
+ if (IS_ZEBRA_DEBUG_MPLS)
+ zlog_debug("%s: unable to add backup nhlfe, label: %u",
+ __func__, lsp->ile.in_label);
+ return -1;
+ }
+
+ /* Queue LSP for processing. */
if (lsp_processq_add(lsp))
return -1;
@@ -2861,7 +3374,7 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
lsp = hash_lookup(lsp_table, &tmp_ile);
if (!lsp)
return 0;
- nhlfe = nhlfe_find(lsp, type, gtype, gate, ifindex);
+ nhlfe = nhlfe_find(&lsp->nhlfe_list, type, gtype, gate, ifindex);
if (!nhlfe)
return 0;
@@ -2881,10 +3394,58 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
nhlfe_del(nhlfe);
/* Free LSP entry if no other NHLFEs and not scheduled. */
- if (!lsp->nhlfe_list
- && !CHECK_FLAG(lsp->flags, LSP_FLAG_SCHEDULED))
- lsp_free(lsp_table, &lsp);
+ lsp_check_free(lsp_table, &lsp);
+
+ }
+ return 0;
+}
+
+/*
+ * Uninstall a particular NHLFE in the forwarding table. If this is
+ * the only NHLFE, the entire LSP forwarding entry has to be deleted.
+ */
+static int lsp_backup_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ mpls_label_t in_label,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate, ifindex_t ifindex)
+{
+ struct hash *lsp_table;
+ zebra_ile_t tmp_ile;
+ zebra_lsp_t *lsp;
+ zebra_nhlfe_t *nhlfe;
+ char buf[BUFSIZ];
+
+ /* Lookup table. */
+ lsp_table = zvrf->lsp_table;
+ if (!lsp_table)
+ return -1;
+
+ /* If entry is not present, exit. */
+ tmp_ile.in_label = in_label;
+ lsp = hash_lookup(lsp_table, &tmp_ile);
+ if (!lsp)
+ return 0;
+ nhlfe = nhlfe_find(&lsp->backup_nhlfe_list, type, gtype, gate, ifindex);
+ if (!nhlfe)
+ return 0;
+
+ if (IS_ZEBRA_DEBUG_MPLS) {
+ nhlfe2str(nhlfe, buf, BUFSIZ);
+ zlog_debug("Del backup LSP in-label %u type %d nexthop %s flags 0x%x",
+ in_label, type, buf, nhlfe->flags);
+ }
+ /* Mark NHLFE for delete or directly delete, as appropriate. */
+ if (CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_INSTALLED)) {
+ UNSET_FLAG(nhlfe->flags, NHLFE_FLAG_CHANGED);
+ SET_FLAG(nhlfe->flags, NHLFE_FLAG_DELETED);
+ if (lsp_processq_add(lsp))
+ return -1;
+ } else {
+ nhlfe_del(nhlfe);
+
+ /* Free LSP entry if no other NHLFEs and not scheduled. */
+ lsp_check_free(lsp_table, &lsp);
}
return 0;
}
@@ -2921,7 +3482,7 @@ static void mpls_lsp_uninstall_all_type(struct hash_bucket *bucket, void *ctxt)
struct hash *lsp_table;
lsp = (zebra_lsp_t *)bucket->data;
- if (!lsp->nhlfe_list)
+ if (nhlfe_list_first(&lsp->nhlfe_list) == NULL)
return;
lsp_table = args->lsp_table;
@@ -2942,7 +3503,8 @@ static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
struct route_node *rn;
struct route_entry *re;
struct nexthop *nexthop;
- int update;
+ struct nexthop_group *nhg;
+ bool update;
/* Process routes of interested address-families. */
table = zebra_vrf_table(afi, SAFI_UNICAST, zvrf_id(zvrf));
@@ -2950,13 +3512,15 @@ static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
return;
for (rn = route_top(table); rn; rn = route_next(rn)) {
- update = 0;
+ update = false;
+
RNODE_FOREACH_RE (rn, re) {
- struct nexthop_group new_grp = {};
+ struct nhg_hash_entry *new_nhe;
- nexthop_group_copy(&new_grp, &(re->nhe->nhg));
+ new_nhe = zebra_nhe_copy(re->nhe, 0);
- for (nexthop = new_grp.nexthop; nexthop;
+ nhg = &new_nhe->nhg;
+ for (nexthop = nhg->nexthop; nexthop;
nexthop = nexthop->next) {
if (nexthop->nh_label_type != lsp_type)
continue;
@@ -2965,13 +3529,30 @@ static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
SET_FLAG(re->status,
ROUTE_ENTRY_LABELS_CHANGED);
- update = 1;
+ update = true;
+ }
+
+ /* Check for backup info and update that also */
+ nhg = zebra_nhg_get_backup_nhg(new_nhe);
+ if (nhg != NULL) {
+ for (nexthop = nhg->nexthop; nexthop;
+ nexthop = nexthop->next) {
+ if (nexthop->nh_label_type != lsp_type)
+ continue;
+
+ nexthop_del_labels(nexthop);
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_CHANGED);
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_LABELS_CHANGED);
+ update = true;
+ }
}
if (CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED))
- mpls_zebra_nhg_update(re, afi, &new_grp);
+ mpls_zebra_nhe_update(re, afi, new_nhe);
- nexthops_free(new_grp.nexthop);
+ zebra_nhg_free(new_nhe);
}
if (update)
@@ -3014,15 +3595,17 @@ int zebra_mpls_lsp_label_consistent(struct zebra_vrf *zvrf,
return 1;
/* If not only NHLFE, cannot allow label change. */
- if (snhlfe != slsp->snhlfe_list || snhlfe->next)
+ if (snhlfe != snhlfe_list_first(&slsp->snhlfe_list) ||
+ snhlfe_list_next(&slsp->snhlfe_list, snhlfe) != NULL)
return 0;
} else {
/* If other NHLFEs exist, label operation must match. */
- if (slsp->snhlfe_list) {
+ snhlfe = snhlfe_list_first(&slsp->snhlfe_list);
+ if (snhlfe != NULL) {
int cur_op, new_op;
- cur_op = (slsp->snhlfe_list->out_label
- == MPLS_LABEL_IMPLICIT_NULL);
+ cur_op = (snhlfe->out_label ==
+ MPLS_LABEL_IMPLICIT_NULL);
new_op = (out_label == MPLS_LABEL_IMPLICIT_NULL);
if (cur_op != new_op)
return 0;
@@ -3159,7 +3742,7 @@ int zebra_mpls_static_lsp_del(struct zebra_vrf *zvrf, mpls_label_t in_label,
/* Remove entire static LSP entry if no NHLFE - valid in either case
* above. */
- if (!slsp->snhlfe_list) {
+ if (snhlfe_list_first(&slsp->snhlfe_list) == NULL) {
slsp = hash_release(slsp_table, &tmp_ile);
XFREE(MTYPE_SLSP, slsp);
}
@@ -3208,7 +3791,7 @@ void zebra_mpls_print_lsp(struct vty *vty, struct zebra_vrf *zvrf,
json, JSON_C_TO_STRING_PRETTY));
json_object_free(json);
} else
- lsp_print(lsp, (void *)vty);
+ lsp_print(vty, lsp);
}
/*
@@ -3247,8 +3830,7 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf,
ttable_rowseps(tt, 0, BOTTOM, true, '-');
for (ALL_LIST_ELEMENTS_RO(lsp_list, node, lsp)) {
- for (nhlfe = lsp->nhlfe_list; nhlfe;
- nhlfe = nhlfe->next) {
+ frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
struct nexthop *nexthop;
const char *out_label_str;
char nh_buf[NEXTHOP_STRLEN];
@@ -3320,8 +3902,7 @@ int zebra_mpls_write_lsp_config(struct vty *vty, struct zebra_vrf *zvrf)
hash_get_sorted_list(zvrf->slsp_table, slsp_cmp);
for (ALL_LIST_ELEMENTS_RO(slsp_list, node, slsp)) {
- for (snhlfe = slsp->snhlfe_list; snhlfe;
- snhlfe = snhlfe->next) {
+ frr_each(snhlfe_list, &slsp->snhlfe_list, snhlfe) {
char buf[BUFSIZ];
char lstr[30];
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index e468fb9c1b..9b5fb39573 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -55,6 +55,10 @@ typedef struct zebra_nhlfe_t_ zebra_nhlfe_t;
typedef struct zebra_lsp_t_ zebra_lsp_t;
typedef struct zebra_fec_t_ zebra_fec_t;
+/* Declare LSP nexthop list types */
+PREDECL_DLIST(snhlfe_list);
+PREDECL_DLIST(nhlfe_list);
+
/*
* (Outgoing) nexthop label forwarding entry configuration
*/
@@ -71,9 +75,8 @@ struct zebra_snhlfe_t_ {
/* Backpointer to base entry. */
zebra_slsp_t *slsp;
- /* Pointers to more outgoing information for same in-label */
- zebra_snhlfe_t *next;
- zebra_snhlfe_t *prev;
+ /* Linkage for LSPs' lists */
+ struct snhlfe_list_item list;
};
/*
@@ -96,10 +99,12 @@ struct zebra_nhlfe_t_ {
#define NHLFE_FLAG_MULTIPATH (1 << 2)
#define NHLFE_FLAG_DELETED (1 << 3)
#define NHLFE_FLAG_INSTALLED (1 << 4)
+#define NHLFE_FLAG_IS_BACKUP (1 << 5)
- zebra_nhlfe_t *next;
- zebra_nhlfe_t *prev;
uint8_t distance;
+
+ /* Linkage for LSPs' lists */
+ struct nhlfe_list_item list;
};
/*
@@ -117,7 +122,7 @@ struct zebra_slsp_t_ {
zebra_ile_t ile;
/* List of outgoing nexthop static configuration */
- zebra_snhlfe_t *snhlfe_list;
+ struct snhlfe_list_head snhlfe_list;
};
/*
@@ -127,11 +132,18 @@ struct zebra_lsp_t_ {
/* Incoming label */
zebra_ile_t ile;
- /* List of NHLFE, pointer to best and num equal-cost. */
- zebra_nhlfe_t *nhlfe_list;
+ /* List of NHLFEs, pointer to best, and num equal-cost. */
+ struct nhlfe_list_head nhlfe_list;
+
zebra_nhlfe_t *best_nhlfe;
uint32_t num_ecmp;
+ /* Backup nhlfes, if present. The nexthop in a primary/active nhlfe
+ * refers to its backup (if any) by index, so the order of this list
+ * is significant.
+ */
+ struct nhlfe_list_head backup_nhlfe_list;
+
/* Flags */
uint32_t flags;
#define LSP_FLAG_SCHEDULED (1 << 0)
@@ -164,6 +176,9 @@ struct zebra_fec_t_ {
struct list *client_list;
};
+/* Declare typesafe list apis/macros */
+DECLARE_DLIST(nhlfe_list, struct zebra_nhlfe_t_, list);
+
/* Function declarations. */
/*
@@ -201,10 +216,31 @@ zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp,
union g_addr *gate,
ifindex_t ifindex,
uint8_t num_labels,
- mpls_label_t out_labels[]);
+ const mpls_label_t *out_labels);
+
+/* Add or update a backup NHLFE for an LSP; return the object */
+zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nhlfe(zebra_lsp_t *lsp,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ union g_addr *gate,
+ ifindex_t ifindex,
+ uint8_t num_labels,
+ const mpls_label_t *out_labels);
+
+/*
+ * Add NHLFE or backup NHLFE to an LSP based on a nexthop. These just maintain
+ * the LSP and NHLFE objects; nothing is scheduled for processing.
+ * Return: the newly-added object
+ */
+zebra_nhlfe_t *zebra_mpls_lsp_add_nh(zebra_lsp_t *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh);
+zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nh(zebra_lsp_t *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh);
/* Free an allocated NHLFE */
-void zebra_mpls_nhlfe_del(zebra_nhlfe_t *nhlfe);
+void zebra_mpls_nhlfe_free(zebra_nhlfe_t *nhlfe);
int zebra_mpls_fec_register(struct zebra_vrf *zvrf, struct prefix *p,
uint32_t label, uint32_t label_index,
@@ -266,12 +302,11 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf,
struct prefix *p);
/*
- * Install/uninstall a FEC-To-NHLFE (FTN) binding.
+ * Handle zapi request to install/uninstall LSP and
+ * (optionally) FEC-To-NHLFE (FTN) bindings.
*/
-int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
- struct prefix *prefix, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex, uint8_t route_type,
- unsigned short route_instance, mpls_label_t out_label);
+int mpls_zapi_labels_process(bool add_p, struct zebra_vrf *zvrf,
+ const struct zapi_labels *zl);
/*
* Uninstall all NHLFEs bound to a single FEC.
@@ -287,7 +322,7 @@ int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
*/
int mpls_lsp_install(struct zebra_vrf *zvrf, enum lsp_types_t type,
mpls_label_t in_label, uint8_t num_out_labels,
- mpls_label_t out_labels[], enum nexthop_types_t gtype,
+ const mpls_label_t *out_labels, enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex);
/*
diff --git a/zebra/zebra_mpls_openbsd.c b/zebra/zebra_mpls_openbsd.c
index 5e18414985..c8a3cbbbce 100644
--- a/zebra/zebra_mpls_openbsd.c
+++ b/zebra/zebra_mpls_openbsd.c
@@ -239,8 +239,9 @@ static int kernel_send_rtmsg_v6(int action, mpls_label_t in_label,
static int kernel_lsp_cmd(struct zebra_dplane_ctx *ctx)
{
+ const struct nhlfe_list_head *head;
const zebra_nhlfe_t *nhlfe;
- struct nexthop *nexthop = NULL;
+ const struct nexthop *nexthop = NULL;
unsigned int nexthop_num = 0;
int action;
@@ -258,7 +259,8 @@ static int kernel_lsp_cmd(struct zebra_dplane_ctx *ctx)
return -1;
}
- for (nhlfe = dplane_ctx_get_nhlfe(ctx); nhlfe; nhlfe = nhlfe->next) {
+ head = dplane_ctx_get_nhlfe_list(ctx);
+ frr_each(nhlfe_list_const, head, nhlfe) {
nexthop = nhlfe->nexthop;
if (!nexthop)
continue;
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index 1ac18c6fdd..d07ceb652c 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -371,8 +371,12 @@ struct nhg_hash_entry *zebra_nhg_alloc(void)
return nhe;
}
-static struct nhg_hash_entry *zebra_nhg_copy(const struct nhg_hash_entry *orig,
- uint32_t id)
+/*
+ * Allocate new nhe and make shallow copy of 'orig'; no
+ * recursive info is copied.
+ */
+struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig,
+ uint32_t id)
{
struct nhg_hash_entry *nhe;
@@ -401,7 +405,7 @@ static void *zebra_nhg_hash_alloc(void *arg)
struct nhg_hash_entry *nhe = NULL;
struct nhg_hash_entry *copy = arg;
- nhe = zebra_nhg_copy(copy, copy->id);
+ nhe = zebra_nhe_copy(copy, copy->id);
/* Mark duplicate nexthops in a group at creation time. */
nexthop_group_mark_duplicates(&(nhe->nhg));
@@ -1140,7 +1144,7 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx)
* their attributes are unhashable.
*/
- kernel_nhe = zebra_nhg_copy(nhe, id);
+ kernel_nhe = zebra_nhe_copy(nhe, id);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: copying kernel nhe (%u), dup of %u",
@@ -2245,7 +2249,7 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re)
/* Make a local copy of the existing nhe, so we don't work on/modify
* the shared nhe.
*/
- curr_nhe = zebra_nhg_copy(re->nhe, re->nhe->id);
+ curr_nhe = zebra_nhe_copy(re->nhe, re->nhe->id);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: re %p nhe %p (%u), curr_nhe %p",
diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h
index 785ce20b75..de5f097472 100644
--- a/zebra/zebra_nhg.h
+++ b/zebra/zebra_nhg.h
@@ -202,6 +202,12 @@ void zebra_nhg_hash_free(void *p);
void zebra_nhe_init(struct nhg_hash_entry *nhe, afi_t afi,
const struct nexthop *nh);
+/*
+ * Shallow copy of 'orig', into new/allocated nhe.
+ */
+struct nhg_hash_entry *zebra_nhe_copy(const struct nhg_hash_entry *orig,
+ uint32_t id);
+
/* Allocate, free backup nexthop info objects */
struct nhg_backup_info *zebra_nhg_backup_alloc(void);
void zebra_nhg_backup_free(struct nhg_backup_info **p);
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index f52ed471f0..0fc716db17 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -2276,6 +2276,7 @@ rib_dest_t *zebra_rib_create_dest(struct route_node *rn)
dest = XCALLOC(MTYPE_RIB_DEST, sizeof(rib_dest_t));
rnh_list_init(&dest->nht);
+ re_list_init(&dest->routes);
route_lock_node(rn); /* rn route table reference */
rn->info = dest;
dest->rnode = rn;