-rw-r--r--  .clang-format | 2
-rw-r--r--  bfdd/bfd.c | 2
-rw-r--r--  bfdd/bfd_packet.c | 16
-rw-r--r--  bgpd/bgp_advertise.c | 1
-rw-r--r--  bgpd/bgp_advertise.h | 3
-rw-r--r--  bgpd/bgp_bmp.c | 2240
-rw-r--r--  bgpd/bgp_bmp.h | 303
-rw-r--r--  bgpd/bgp_evpn.c | 26
-rw-r--r--  bgpd/bgp_evpn_private.h | 5
-rw-r--r--  bgpd/bgp_evpn_vty.c | 9
-rw-r--r--  bgpd/bgp_fsm.c | 21
-rw-r--r--  bgpd/bgp_io.c | 12
-rw-r--r--  bgpd/bgp_keepalives.c | 12
-rw-r--r--  bgpd/bgp_network.c | 18
-rw-r--r--  bgpd/bgp_open.c | 2
-rw-r--r--  bgpd/bgp_packet.c | 24
-rw-r--r--  bgpd/bgp_route.c | 14
-rw-r--r--  bgpd/bgp_routemap.c | 14
-rw-r--r--  bgpd/bgp_rpki.c | 102
-rw-r--r--  bgpd/bgp_vty.c | 783
-rw-r--r--  bgpd/bgp_vty.h | 5
-rw-r--r--  bgpd/bgpd.c | 8
-rw-r--r--  bgpd/bgpd.h | 4
-rw-r--r--  bgpd/rfapi/rfapi.c | 4
-rw-r--r--  bgpd/rfapi/vnc_zebra.c | 4
-rw-r--r--  bgpd/subdir.am | 11
-rwxr-xr-x  configure.ac | 77
-rw-r--r--  debian/frr.install | 1
-rw-r--r--  doc/developer/library.rst | 1
-rw-r--r--  doc/developer/locking.rst | 73
-rw-r--r--  doc/developer/subdir.am | 1
-rw-r--r--  doc/developer/workflow.rst | 22
-rw-r--r--  doc/user/bgp.rst | 6
-rw-r--r--  doc/user/bmp.rst | 170
-rw-r--r--  doc/user/eigrpd.rst | 26
-rw-r--r--  doc/user/index.rst | 1
-rw-r--r--  doc/user/pbr.rst | 6
-rw-r--r--  doc/user/rpki.rst | 25
-rw-r--r--  doc/user/subdir.am | 1
-rw-r--r--  eigrpd/eigrp_cli.c | 26
-rw-r--r--  eigrpd/eigrp_filter.c | 16
-rw-r--r--  eigrpd/eigrp_filter.h | 10
-rw-r--r--  eigrpd/eigrp_fsm.c | 28
-rw-r--r--  eigrpd/eigrp_hello.c | 32
-rw-r--r--  eigrpd/eigrp_interface.c | 11
-rw-r--r--  eigrpd/eigrp_main.c | 2
-rw-r--r--  eigrpd/eigrp_neighbor.c | 25
-rw-r--r--  eigrpd/eigrp_neighbor.h | 33
-rw-r--r--  eigrpd/eigrp_network.c | 13
-rw-r--r--  eigrpd/eigrp_network.h | 2
-rw-r--r--  eigrpd/eigrp_northbound.c | 20
-rw-r--r--  eigrpd/eigrp_packet.c | 21
-rw-r--r--  eigrpd/eigrp_routemap.c | 26
-rw-r--r--  eigrpd/eigrp_structs.h | 4
-rw-r--r--  eigrpd/eigrp_topology.c | 53
-rw-r--r--  eigrpd/eigrp_topology.h | 43
-rw-r--r--  eigrpd/eigrp_update.c | 21
-rw-r--r--  eigrpd/eigrp_vrf.c | 50
-rw-r--r--  eigrpd/eigrp_vrf.h | 23
-rw-r--r--  eigrpd/eigrp_vty.c | 130
-rw-r--r--  eigrpd/eigrp_zebra.c | 40
-rw-r--r--  eigrpd/eigrp_zebra.h | 7
-rw-r--r--  eigrpd/eigrpd.c | 42
-rw-r--r--  eigrpd/eigrpd.h | 4
-rw-r--r--  eigrpd/subdir.am | 2
-rw-r--r--  isisd/isis_bpf.c | 2
-rw-r--r--  isisd/isis_dlpi.c | 2
-rw-r--r--  isisd/isis_memory.c | 1
-rw-r--r--  isisd/isis_memory.h | 1
-rw-r--r--  isisd/isis_northbound.c | 88
-rw-r--r--  isisd/isis_pfpacket.c | 2
-rw-r--r--  isisd/isis_route.c | 249
-rw-r--r--  isisd/isis_route.h | 12
-rw-r--r--  isisd/isis_tlvs.c | 18
-rw-r--r--  isisd/isis_zebra.c | 60
-rw-r--r--  isisd/isisd.c | 1
-rw-r--r--  isisd/isisd.h | 3
-rw-r--r--  ldpd/socket.c | 8
-rw-r--r--  lib/command.c | 69
-rw-r--r--  lib/command.h | 1
-rw-r--r--  lib/compiler.h | 43
-rw-r--r--  lib/ferr.c | 21
-rw-r--r--  lib/frr_pthread.c | 24
-rw-r--r--  lib/frr_pthread.h | 48
-rw-r--r--  lib/frrcu.c | 5
-rw-r--r--  lib/hash.c | 9
-rw-r--r--  lib/log.c | 155
-rw-r--r--  lib/monotime.h | 14
-rw-r--r--  lib/nexthop.c | 9
-rw-r--r--  lib/nexthop.h | 3
-rw-r--r--  lib/nexthop_group.c | 25
-rw-r--r--  lib/northbound.c | 13
-rw-r--r--  lib/northbound_sysrepo.c | 2
-rw-r--r--  lib/prefix.c | 2
-rw-r--r--  lib/prefix.h | 2
-rw-r--r--  lib/privs.c | 9
-rw-r--r--  lib/privs.h | 8
-rw-r--r--  lib/pullwr.c | 275
-rw-r--r--  lib/pullwr.h | 110
-rw-r--r--  lib/routemap.c | 53
-rw-r--r--  lib/routemap.h | 25
-rw-r--r--  lib/stream.c | 17
-rw-r--r--  lib/subdir.am | 2
-rw-r--r--  lib/thread.c | 120
-rw-r--r--  lib/vrf.c | 2
-rw-r--r--  lib/vty.c | 9
-rw-r--r--  lib/zclient.c | 88
-rw-r--r--  lib/zebra.h | 1
-rw-r--r--  ospf6d/ospf6_asbr.c | 25
-rw-r--r--  ospf6d/ospf6_network.c | 2
-rw-r--r--  ospfd/ospf_interface.c | 46
-rw-r--r--  ospfd/ospf_interface.h | 1
-rw-r--r--  ospfd/ospf_ism.h | 5
-rw-r--r--  ospfd/ospf_network.c | 2
-rw-r--r--  ospfd/ospf_packet.c | 58
-rw-r--r--  ospfd/ospf_packet.h | 6
-rw-r--r--  ospfd/ospfd.c | 2
-rw-r--r--  pbrd/pbr_map.c | 10
-rw-r--r--  pbrd/pbr_map.h | 3
-rw-r--r--  pbrd/pbr_vty.c | 33
-rw-r--r--  pbrd/pbr_zebra.c | 2
-rw-r--r--  pimd/pim_mroute.c | 6
-rw-r--r--  pimd/pim_msdp_socket.c | 2
-rw-r--r--  pimd/pim_sock.c | 6
-rw-r--r--  pimd/pim_vty.c | 4
-rw-r--r--  pimd/pimd.h | 3
-rw-r--r--  python/clidef.py | 1
-rw-r--r--  redhat/frr.spec.in | 1
-rw-r--r--  ripd/ripd.c | 4
-rw-r--r--  ripngd/ripng_interface.c | 2
-rw-r--r--  ripngd/ripngd.c | 5
-rw-r--r--  staticd/static_zebra.c | 8
-rw-r--r--  tests/bgpd/test_peer_attr.c | 6
-rw-r--r--  tests/lib/test_privs.c | 4
-rw-r--r--  tests/topotests/Dockerfile | 8
-rwxr-xr-x  tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py | 2
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json | 664
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json | 674
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py | 817
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py | 813
-rwxr-xr-x  tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py | 6
-rwxr-xr-x  tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py | 34
-rw-r--r--  tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py | 2
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py | 14
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py | 8
-rw-r--r--  tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py | 8
-rwxr-xr-x  tests/topotests/docker/inner/compile_frr.sh | 1
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py | 2
-rw-r--r--  tests/topotests/lib/bgp.py | 559
-rw-r--r--  tests/topotests/lib/common_config.py | 417
-rw-r--r--  tests/topotests/lib/topojson.py | 25
-rw-r--r--  tests/topotests/pytest.ini | 4
-rwxr-xr-x  tools/checkpatch.pl | 29
-rw-r--r--  tools/coccinelle/frr_with_mutex.cocci | 23
-rw-r--r--  tools/coccinelle/zprivs.cocci | 12
-rw-r--r--  vrrpd/vrrp.c | 9
-rw-r--r--  vrrpd/vrrp_arp.c | 2
-rw-r--r--  vrrpd/vrrp_ndisc.c | 3
-rw-r--r--  vtysh/vtysh.c | 107
-rw-r--r--  zebra/connected.c | 2
-rw-r--r--  zebra/if_ioctl_solaris.c | 6
-rw-r--r--  zebra/if_netlink.c | 2
-rw-r--r--  zebra/ioctl.c | 6
-rw-r--r--  zebra/ioctl_solaris.c | 4
-rw-r--r--  zebra/ipforward_proc.c | 8
-rw-r--r--  zebra/ipforward_solaris.c | 2
-rw-r--r--  zebra/ipforward_sysctl.c | 10
-rw-r--r--  zebra/irdp_main.c | 2
-rw-r--r--  zebra/kernel_netlink.c | 12
-rw-r--r--  zebra/kernel_socket.c | 2
-rw-r--r--  zebra/rt.h | 11
-rw-r--r--  zebra/rt_netlink.c | 230
-rw-r--r--  zebra/rt_socket.c | 25
-rw-r--r--  zebra/rtadv.c | 2
-rw-r--r--  zebra/zapi_msg.c | 2
-rw-r--r--  zebra/zebra_dplane.c | 274
-rw-r--r--  zebra/zebra_dplane.h | 57
-rw-r--r--  zebra/zebra_fpm.c | 3
-rw-r--r--  zebra/zebra_mpls_openbsd.c | 4
-rw-r--r--  zebra/zebra_netns_notify.c | 8
-rw-r--r--  zebra/zebra_nhg.c | 35
-rw-r--r--  zebra/zebra_ns.c | 2
-rw-r--r--  zebra/zebra_rib.c | 20
-rw-r--r--  zebra/zebra_rnh.c | 14
-rw-r--r--  zebra/zebra_rnh.h | 14
-rw-r--r--  zebra/zebra_routemap.c | 14
-rw-r--r--  zebra/zebra_vrf.c | 11
-rw-r--r--  zebra/zebra_vrf.h | 3
-rw-r--r--  zebra/zebra_vty.c | 26
-rw-r--r--  zebra/zebra_vxlan.c | 60
-rw-r--r--  zebra/zserv.c | 18
191 files changed, 9568 insertions(+), 2161 deletions(-)
diff --git a/.clang-format b/.clang-format
index 4bd962747f..654577d936 100644
--- a/.clang-format
+++ b/.clang-format
@@ -28,6 +28,8 @@ ForEachMacros:
- frr_each
- frr_each_safe
- frr_each_from
+ - frr_with_mutex
+ - frr_with_privs
- LIST_FOREACH
- LIST_FOREACH_SAFE
- SLIST_FOREACH
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index 5d143d4e5f..1f1568f511 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -1706,6 +1706,8 @@ static int bfd_vrf_disable(struct vrf *vrf)
socket_close(&bvrf->bg_mhop);
socket_close(&bvrf->bg_shop6);
socket_close(&bvrf->bg_mhop6);
+ socket_close(&bvrf->bg_echo);
+ socket_close(&bvrf->bg_echov6);
/* free context */
XFREE(MTYPE_BFDD_VRF, bvrf);
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index d68a1ad5fd..7fbe6db163 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -894,7 +894,7 @@ int bp_udp_shop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -909,7 +909,7 @@ int bp_udp_mhop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -934,7 +934,7 @@ int bp_peer_socket(const struct bfd_session *bs)
&& bs->key.vrfname[0])
device_to_bind = (const char *)bs->key.vrfname;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET, SOCK_DGRAM, PF_UNSPEC,
bs->vrf->vrf_id, device_to_bind);
}
@@ -1001,7 +1001,7 @@ int bp_peer_socketv6(const struct bfd_session *bs)
&& bs->key.vrfname[0])
device_to_bind = (const char *)bs->key.vrfname;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET6, SOCK_DGRAM, PF_UNSPEC,
bs->vrf->vrf_id, device_to_bind);
}
@@ -1121,7 +1121,7 @@ int bp_udp6_shop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET6, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -1137,7 +1137,7 @@ int bp_udp6_mhop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET6, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -1153,7 +1153,7 @@ int bp_echo_socket(vrf_id_t vrf_id)
{
int s;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
s = vrf_socket(AF_INET, SOCK_DGRAM, 0, vrf_id, NULL);
}
if (s == -1)
@@ -1169,7 +1169,7 @@ int bp_echov6_socket(vrf_id_t vrf_id)
{
int s;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
s = vrf_socket(AF_INET6, SOCK_DGRAM, 0, vrf_id, NULL);
}
if (s == -1)
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index 497fb0749e..76a65f7f04 100644
--- a/bgpd/bgp_advertise.c
+++ b/bgpd/bgp_advertise.c
@@ -194,6 +194,7 @@ void bgp_adj_in_set(struct bgp_node *rn, struct peer *peer, struct attr *attr,
adj = XCALLOC(MTYPE_BGP_ADJ_IN, sizeof(struct bgp_adj_in));
adj->peer = peer_lock(peer); /* adj_in peer reference */
adj->attr = bgp_attr_intern(attr);
+ adj->uptime = bgp_clock();
adj->addpath_rx_id = addpath_id;
BGP_ADJ_IN_ADD(rn, adj);
bgp_lock_node(rn);
diff --git a/bgpd/bgp_advertise.h b/bgpd/bgp_advertise.h
index 1b55b6e64b..c983598756 100644
--- a/bgpd/bgp_advertise.h
+++ b/bgpd/bgp_advertise.h
@@ -101,6 +101,9 @@ struct bgp_adj_in {
/* Received attribute. */
struct attr *attr;
+ /* timestamp (monotime) */
+ time_t uptime;
+
/* Addpath identifier */
uint32_t addpath_rx_id;
};
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
new file mode 100644
index 0000000000..8fca202345
--- /dev/null
+++ b/bgpd/bgp_bmp.c
@@ -0,0 +1,2240 @@
+/* BMP support.
+ * Copyright (C) 2018 Yasuhiro Ohara
+ * Copyright (C) 2019 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "stream.h"
+#include "sockunion.h"
+#include "command.h"
+#include "prefix.h"
+#include "thread.h"
+#include "linklist.h"
+#include "queue.h"
+#include "pullwr.h"
+#include "memory.h"
+#include "network.h"
+#include "filter.h"
+#include "lib_errors.h"
+#include "stream.h"
+#include "libfrr.h"
+#include "version.h"
+#include "jhash.h"
+#include "termtable.h"
+
+#include "bgpd/bgp_table.h"
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_errors.h"
+#include "bgpd/bgp_packet.h"
+#include "bgpd/bgp_bmp.h"
+#include "bgpd/bgp_fsm.h"
+#include "bgpd/bgp_updgrp.h"
+#include "bgpd/bgp_vty.h"
+
+static void bmp_close(struct bmp *bmp);
+static struct bmp_bgp *bmp_bgp_find(struct bgp *bgp);
+static void bmp_targets_put(struct bmp_targets *bt);
+static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid);
+static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer);
+static void bmp_active_disconnected(struct bmp_active *ba);
+static void bmp_active_put(struct bmp_active *ba);
+
+DEFINE_MGROUP(BMP, "BMP (BGP Monitoring Protocol)")
+
+DEFINE_MTYPE_STATIC(BMP, BMP_CONN, "BMP connection state")
+DEFINE_MTYPE_STATIC(BMP, BMP_TARGETS, "BMP targets")
+DEFINE_MTYPE_STATIC(BMP, BMP_TARGETSNAME, "BMP targets name")
+DEFINE_MTYPE_STATIC(BMP, BMP_LISTENER, "BMP listener")
+DEFINE_MTYPE_STATIC(BMP, BMP_ACTIVE, "BMP active connection config")
+DEFINE_MTYPE_STATIC(BMP, BMP_ACLNAME, "BMP access-list name")
+DEFINE_MTYPE_STATIC(BMP, BMP_QUEUE, "BMP update queue item")
+DEFINE_MTYPE_STATIC(BMP, BMP, "BMP instance state")
+DEFINE_MTYPE_STATIC(BMP, BMP_MIRRORQ, "BMP route mirroring buffer")
+DEFINE_MTYPE_STATIC(BMP, BMP_PEER, "BMP per BGP peer data")
+DEFINE_MTYPE_STATIC(BMP, BMP_OPEN, "BMP stored BGP OPEN message")
+
+DEFINE_QOBJ_TYPE(bmp_targets)
+
+static int bmp_bgp_cmp(const struct bmp_bgp *a, const struct bmp_bgp *b)
+{
+ if (a->bgp < b->bgp)
+ return -1;
+ if (a->bgp > b->bgp)
+ return 1;
+ return 0;
+}
+
+static uint32_t bmp_bgp_hash(const struct bmp_bgp *e)
+{
+ return jhash(&e->bgp, sizeof(e->bgp), 0x55aa5a5a);
+}
+
+DECLARE_HASH(bmp_bgph, struct bmp_bgp, bbi, bmp_bgp_cmp, bmp_bgp_hash)
+
+struct bmp_bgph_head bmp_bgph;
+
+static int bmp_bgp_peer_cmp(const struct bmp_bgp_peer *a,
+ const struct bmp_bgp_peer *b)
+{
+ if (a->peerid < b->peerid)
+ return -1;
+ if (a->peerid > b->peerid)
+ return 1;
+ return 0;
+}
+
+static uint32_t bmp_bgp_peer_hash(const struct bmp_bgp_peer *e)
+{
+ return e->peerid;
+}
+
+DECLARE_HASH(bmp_peerh, struct bmp_bgp_peer, bpi,
+ bmp_bgp_peer_cmp, bmp_bgp_peer_hash)
+
+struct bmp_peerh_head bmp_peerh;
+
+DECLARE_LIST(bmp_mirrorq, struct bmp_mirrorq, bmi)
+
+/* listener management */
+
+static int bmp_listener_cmp(const struct bmp_listener *a,
+ const struct bmp_listener *b)
+{
+ int c;
+
+ c = sockunion_cmp(&a->addr, &b->addr);
+ if (c)
+ return c;
+ if (a->port < b->port)
+ return -1;
+ if (a->port > b->port)
+ return 1;
+ return 0;
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_listeners, struct bmp_listener, bli, bmp_listener_cmp)
+
+static int bmp_targets_cmp(const struct bmp_targets *a,
+ const struct bmp_targets *b)
+{
+ return strcmp(a->name, b->name);
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_targets, struct bmp_targets, bti, bmp_targets_cmp)
+
+DECLARE_LIST(bmp_session, struct bmp, bsi)
+
+DECLARE_DLIST(bmp_qlist, struct bmp_queue_entry, bli)
+
+static int bmp_qhash_cmp(const struct bmp_queue_entry *a,
+ const struct bmp_queue_entry *b)
+{
+ int ret;
+ ret = prefix_cmp(&a->p, &b->p);
+ if (ret)
+ return ret;
+ ret = memcmp(&a->peerid, &b->peerid,
+ offsetof(struct bmp_queue_entry, refcount) -
+ offsetof(struct bmp_queue_entry, peerid));
+ return ret;
+}
+
+static uint32_t bmp_qhash_hkey(const struct bmp_queue_entry *e)
+{
+ uint32_t key;
+
+ key = prefix_hash_key((void *)&e->p);
+ key = jhash(&e->peerid,
+ offsetof(struct bmp_queue_entry, refcount) -
+ offsetof(struct bmp_queue_entry, peerid),
+ key);
+ return key;
+}
+
+DECLARE_HASH(bmp_qhash, struct bmp_queue_entry, bhi,
+ bmp_qhash_cmp, bmp_qhash_hkey)
+
+static int bmp_active_cmp(const struct bmp_active *a,
+ const struct bmp_active *b)
+{
+ int c;
+
+ c = strcmp(a->hostname, b->hostname);
+ if (c)
+ return c;
+ if (a->port < b->port)
+ return -1;
+ if (a->port > b->port)
+ return 1;
+ return 0;
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_actives, struct bmp_active, bai, bmp_active_cmp)
+
+static struct bmp *bmp_new(struct bmp_targets *bt, int bmp_sock)
+{
+ struct bmp *new = XCALLOC(MTYPE_BMP_CONN, sizeof(struct bmp));
+ afi_t afi;
+ safi_t safi;
+
+ monotime(&new->t_up);
+ new->targets = bt;
+ new->socket = bmp_sock;
+ new->syncafi = AFI_MAX;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ new->afistate[afi][safi] = bt->afimon[afi][safi]
+ ? BMP_AFI_NEEDSYNC : BMP_AFI_INACTIVE;
+ }
+
+ bmp_session_add_tail(&bt->sessions, new);
+ return new;
+}
+
+static void bmp_free(struct bmp *bmp)
+{
+ bmp_session_del(&bmp->targets->sessions, bmp);
+ XFREE(MTYPE_BMP_CONN, bmp);
+}
+
+static void bmp_common_hdr(struct stream *s, uint8_t ver, uint8_t type)
+{
+ stream_putc(s, ver);
+ stream_putl(s, 0); //dummy message length. will be set later.
+ stream_putc(s, type);
+}
+
+static void bmp_per_peer_hdr(struct stream *s, struct peer *peer,
+ uint8_t flags, const struct timeval *tv)
+{
+ char peer_distinguisher[8];
+
+#define BMP_PEER_TYPE_GLOBAL_INSTANCE 0
+#define BMP_PEER_TYPE_RD_INSTANCE 1
+#define BMP_PEER_TYPE_LOCAL_INSTANCE 2
+
+#define BMP_PEER_FLAG_V (1 << 7)
+#define BMP_PEER_FLAG_L (1 << 6)
+#define BMP_PEER_FLAG_A (1 << 5)
+
+ /* Peer Type */
+ stream_putc(s, BMP_PEER_TYPE_GLOBAL_INSTANCE);
+
+ /* Peer Flags */
+ if (peer->su.sa.sa_family == AF_INET6)
+ SET_FLAG(flags, BMP_PEER_FLAG_V);
+ else
+ UNSET_FLAG(flags, BMP_PEER_FLAG_V);
+ stream_putc(s, flags);
+
+ /* Peer Distinguisher */
+ memset (&peer_distinguisher[0], 0, 8);
+ stream_put(s, &peer_distinguisher[0], 8);
+
+ /* Peer Address */
+ if (peer->su.sa.sa_family == AF_INET6)
+ stream_put(s, &peer->su.sin6.sin6_addr, 16);
+ else if (peer->su.sa.sa_family == AF_INET) {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_put_in_addr(s, &peer->su.sin.sin_addr);
+ } else {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ }
+
+ /* Peer AS */
+ stream_putl(s, peer->as);
+
+ /* Peer BGP ID */
+ stream_put_in_addr(s, &peer->remote_id);
+
+ /* Timestamp */
+ if (tv) {
+ stream_putl(s, tv->tv_sec);
+ stream_putl(s, tv->tv_usec);
+ } else {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ }
+}
+
+static void bmp_put_info_tlv(struct stream *s, uint16_t type,
+ const char *string)
+{
+ int len = strlen (string);
+ stream_putw(s, type);
+ stream_putw(s, len);
+ stream_put(s, string, len);
+}
+
+static int bmp_send_initiation(struct bmp *bmp)
+{
+ int len;
+ struct stream *s;
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_INITIATION);
+
+#define BMP_INFO_TYPE_SYSDESCR 1
+#define BMP_INFO_TYPE_SYSNAME 2
+ bmp_put_info_tlv(s, BMP_INFO_TYPE_SYSDESCR,
+ FRR_FULL_NAME " " FRR_VER_SHORT);
+ bmp_put_info_tlv(s, BMP_INFO_TYPE_SYSNAME, cmd_hostname_get());
+
+ len = stream_get_endp(s);
+ stream_putl_at(s, BMP_LENGTH_POS, len); //message length is set.
+
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ return 0;
+}
+
+static void bmp_notify_put(struct stream *s, struct bgp_notify *nfy)
+{
+ size_t len_pos;
+ uint8_t marker[16] = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ };
+
+ stream_put(s, marker, sizeof(marker));
+ len_pos = stream_get_endp(s);
+ stream_putw(s, 0);
+ stream_putc(s, BGP_MSG_NOTIFY);
+ stream_putc(s, nfy->code);
+ stream_putc(s, nfy->subcode);
+ stream_put(s, nfy->data, nfy->length);
+
+ stream_putw_at(s, len_pos, stream_get_endp(s) - len_pos
+ + sizeof(marker));
+}
+
+static struct stream *bmp_peerstate(struct peer *peer, bool down)
+{
+ struct stream *s;
+ size_t len;
+ struct timeval uptime, uptime_real;
+
+ uptime.tv_sec = peer->uptime;
+ uptime.tv_usec = 0;
+ monotime_to_realtime(&uptime, &uptime_real);
+
+#define BGP_BMP_MAX_PACKET_SIZE 1024
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ if (peer->status == Established && !down) {
+ struct bmp_bgp_peer *bbpeer;
+
+ bmp_common_hdr(s, BMP_VERSION_3,
+ BMP_TYPE_PEER_UP_NOTIFICATION);
+ bmp_per_peer_hdr(s, peer, 0, &uptime_real);
+
+ /* Local Address (16 bytes) */
+ if (peer->su_local->sa.sa_family == AF_INET6)
+ stream_put(s, &peer->su_local->sin6.sin6_addr, 16);
+ else if (peer->su_local->sa.sa_family == AF_INET) {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_put_in_addr(s, &peer->su_local->sin.sin_addr);
+ }
+
+ /* Local Port, Remote Port */
+ if (peer->su_local->sa.sa_family == AF_INET6)
+ stream_putw(s, peer->su_local->sin6.sin6_port);
+ else if (peer->su_local->sa.sa_family == AF_INET)
+ stream_putw(s, peer->su_local->sin.sin_port);
+ if (peer->su_remote->sa.sa_family == AF_INET6)
+ stream_putw(s, peer->su_remote->sin6.sin6_port);
+ else if (peer->su_remote->sa.sa_family == AF_INET)
+ stream_putw(s, peer->su_remote->sin.sin_port);
+
+ static const uint8_t dummy_open[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x13, 0x01,
+ };
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+
+ if (bbpeer && bbpeer->open_tx)
+ stream_put(s, bbpeer->open_tx, bbpeer->open_tx_len);
+ else {
+ stream_put(s, dummy_open, sizeof(dummy_open));
+ zlog_warn("bmp: missing TX OPEN message for peer %s\n",
+ peer->host);
+ }
+ if (bbpeer && bbpeer->open_rx)
+ stream_put(s, bbpeer->open_rx, bbpeer->open_rx_len);
+ else {
+ stream_put(s, dummy_open, sizeof(dummy_open));
+ zlog_warn("bmp: missing RX OPEN message for peer %s\n",
+ peer->host);
+ }
+
+ if (peer->desc)
+ bmp_put_info_tlv(s, 0, peer->desc);
+ } else {
+ uint8_t type;
+ size_t type_pos;
+
+ bmp_common_hdr(s, BMP_VERSION_3,
+ BMP_TYPE_PEER_DOWN_NOTIFICATION);
+ bmp_per_peer_hdr(s, peer, 0, &uptime_real);
+
+ type_pos = stream_get_endp(s);
+ stream_putc(s, 0); /* placeholder for down reason */
+
+ switch (peer->last_reset) {
+ case PEER_DOWN_NOTIFY_RECEIVED:
+ type = BMP_PEERDOWN_REMOTE_NOTIFY;
+ bmp_notify_put(s, &peer->notify);
+ break;
+ case PEER_DOWN_CLOSE_SESSION:
+ type = BMP_PEERDOWN_REMOTE_CLOSE;
+ break;
+ default:
+ type = BMP_PEERDOWN_LOCAL_NOTIFY;
+ stream_put(s, peer->last_reset_cause,
+ peer->last_reset_cause_size);
+ break;
+ }
+ stream_putc_at(s, type_pos, type);
+ }
+
+ len = stream_get_endp(s);
+ stream_putl_at(s, BMP_LENGTH_POS, len); //message length is set.
+ return s;
+}
+
+
+static int bmp_send_peerup(struct bmp *bmp)
+{
+ struct peer *peer;
+ struct listnode *node;
+ struct stream *s;
+
+ /* Walk down all peers */
+ for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
+ s = bmp_peerstate(peer, false);
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
+
+ return 0;
+}
+
+/* XXX: kludge - filling the pullwr's buffer */
+static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
+{
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt)
+ frr_each(bmp_session, &bt->sessions, bmp)
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+}
+
+/*
+ * Route Mirroring
+ */
+
+#define BMP_MIRROR_TLV_TYPE_BGP_MESSAGE 0
+#define BMP_MIRROR_TLV_TYPE_INFO 1
+
+#define BMP_MIRROR_INFO_CODE_ERRORPDU 0
+#define BMP_MIRROR_INFO_CODE_LOSTMSGS 1
+
+static struct bmp_mirrorq *bmp_pull_mirror(struct bmp *bmp)
+{
+ struct bmp_mirrorq *bmq;
+
+ bmq = bmp->mirrorpos;
+ if (!bmq)
+ return NULL;
+
+ bmp->mirrorpos = bmp_mirrorq_next(&bmp->targets->bmpbgp->mirrorq, bmq);
+
+ bmq->refcount--;
+ if (!bmq->refcount) {
+ bmp->targets->bmpbgp->mirror_qsize -= sizeof(*bmq) + bmq->len;
+ bmp_mirrorq_del(&bmp->targets->bmpbgp->mirrorq, bmq);
+ }
+ return bmq;
+}
+
+static void bmp_mirror_cull(struct bmp_bgp *bmpbgp)
+{
+ while (bmpbgp->mirror_qsize > bmpbgp->mirror_qsizelimit) {
+ struct bmp_mirrorq *bmq, *inner;
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ bmq = bmp_mirrorq_first(&bmpbgp->mirrorq);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->mirror)
+ continue;
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ if (bmp->mirrorpos != bmq)
+ continue;
+
+ while ((inner = bmp_pull_mirror(bmp))) {
+ if (!inner->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ,
+ inner);
+ }
+
+ zlog_warn("bmp[%s] lost mirror messages due to buffer size limit",
+ bmp->remote);
+ bmp->mirror_lost = true;
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ }
+}
+
+static int bmp_mirror_packet(struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *packet)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct timeval tv;
+ struct bmp_mirrorq *qitem;
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ gettimeofday(&tv, NULL);
+
+ if (type == BGP_MSG_OPEN) {
+ struct bmp_bgp_peer *bbpeer = bmp_bgp_peer_get(peer);
+
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
+
+ bbpeer->open_rx_len = size;
+ bbpeer->open_rx = XMALLOC(MTYPE_BMP_OPEN, size);
+ memcpy(bbpeer->open_rx, packet->data, size);
+ }
+
+ if (!bmpbgp)
+ return 0;
+
+ qitem = XCALLOC(MTYPE_BMP_MIRRORQ, sizeof(*qitem) + size);
+ qitem->peerid = peer->qobj_node.nid;
+ qitem->tv = tv;
+ qitem->len = size;
+ memcpy(qitem->data, packet->data, size);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->mirror)
+ continue;
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ qitem->refcount++;
+ if (!bmp->mirrorpos)
+ bmp->mirrorpos = qitem;
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ if (qitem->refcount == 0)
+ XFREE(MTYPE_BMP_MIRRORQ, qitem);
+ else {
+ bmpbgp->mirror_qsize += sizeof(*qitem) + size;
+ bmp_mirrorq_add_tail(&bmpbgp->mirrorq, qitem);
+
+ bmp_mirror_cull(bmpbgp);
+
+ bmpbgp->mirror_qsizemax = MAX(bmpbgp->mirror_qsizemax,
+ bmpbgp->mirror_qsize);
+ }
+ return 0;
+}
+
+static void bmp_wrmirror_lost(struct bmp *bmp, struct pullwr *pullwr)
+{
+ struct stream *s;
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
+ bmp_per_peer_hdr(s, bmp->targets->bgp->peer_self, 0, &tv);
+
+ stream_putw(s, BMP_MIRROR_TLV_TYPE_INFO);
+ stream_putw(s, 2);
+ stream_putw(s, BMP_MIRROR_INFO_CODE_LOSTMSGS);
+ stream_putl_at(s, BMP_LENGTH_POS, stream_get_endp(s));
+
+ bmp->cnt_mirror_overruns++;
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+}
+
+static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
+{
+ struct bmp_mirrorq *bmq;
+ struct peer *peer;
+ bool written = false;
+
+ if (bmp->mirror_lost) {
+ bmp_wrmirror_lost(bmp, pullwr);
+ bmp->mirror_lost = false;
+ return true;
+ }
+
+ bmq = bmp_pull_mirror(bmp);
+ if (!bmq)
+ return false;
+
+ peer = QOBJ_GET_TYPESAFE(bmq->peerid, peer);
+ if (!peer) {
+ zlog_info("bmp: skipping mirror message for deleted peer");
+ goto out;
+ }
+
+ struct stream *s;
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
+ bmp_per_peer_hdr(s, peer, 0, &bmq->tv);
+
+ /* BMP Mirror TLV. */
+ stream_putw(s, BMP_MIRROR_TLV_TYPE_BGP_MESSAGE);
+ stream_putw(s, bmq->len);
+ stream_putl_at(s, BMP_LENGTH_POS, stream_get_endp(s) + bmq->len);
+
+ bmp->cnt_mirror++;
+ pullwr_write_stream(bmp->pullwr, s);
+ pullwr_write(bmp->pullwr, bmq->data, bmq->len);
+
+ stream_free(s);
+ written = true;
+
+out:
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ return written;
+}
+
+static int bmp_outgoing_packet(struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *packet)
+{
+ if (type == BGP_MSG_OPEN) {
+ struct bmp_bgp_peer *bbpeer = bmp_bgp_peer_get(peer);
+
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+
+ bbpeer->open_tx_len = size;
+ bbpeer->open_tx = XMALLOC(MTYPE_BMP_OPEN, size);
+ memcpy(bbpeer->open_tx, packet->data, size);
+ }
+ return 0;
+}
+
+static int bmp_peer_established(struct peer *peer)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+
+ if (!bmpbgp)
+ return 0;
+
+ if (peer->doppelganger && (peer->doppelganger->status != Deleted)) {
+ struct bmp_bgp_peer *bbpeer, *bbdopp;
+
+ bbpeer = bmp_bgp_peer_get(peer);
+ bbdopp = bmp_bgp_peer_find(peer->doppelganger->qobj_node.nid);
+ if (bbdopp) {
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
+
+ bbpeer->open_tx = bbdopp->open_tx;
+ bbpeer->open_tx_len = bbdopp->open_tx_len;
+ bbpeer->open_rx = bbdopp->open_rx;
+ bbpeer->open_rx_len = bbdopp->open_rx_len;
+
+ bmp_peerh_del(&bmp_peerh, bbdopp);
+ XFREE(MTYPE_BMP_PEER, bbdopp);
+ }
+ }
+
+ bmp_send_all(bmpbgp, bmp_peerstate(peer, false));
+ return 0;
+}
+
+static int bmp_peer_backward(struct peer *peer)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct bmp_bgp_peer *bbpeer;
+
+ if (!bmpbgp)
+ return 0;
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+ if (bbpeer) {
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+ bbpeer->open_tx_len = 0;
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
+ bbpeer->open_rx_len = 0;
+ }
+
+ bmp_send_all(bmpbgp, bmp_peerstate(peer, true));
+ return 0;
+}
+
+static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags)
+{
+ struct peer *peer;
+ struct listnode *node;
+ struct stream *s, *s2;
+ iana_afi_t pkt_afi;
+ iana_safi_t pkt_safi;
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ /* Make BGP update packet. */
+ bgp_packet_set_marker(s, BGP_MSG_UPDATE);
+
+ /* Unfeasible Routes Length */
+ stream_putw(s, 0);
+
+ if (afi == AFI_IP && safi == SAFI_UNICAST) {
+ /* Total Path Attribute Length */
+ stream_putw(s, 0);
+ } else {
+ /* Convert AFI, SAFI to values for packet. */
+ bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi);
+
+ /* Total Path Attribute Length */
+ stream_putw(s, 6);
+ stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
+ stream_putc(s, BGP_ATTR_MP_UNREACH_NLRI);
+ stream_putc(s, 3);
+ stream_putw(s, pkt_afi);
+ stream_putc(s, pkt_safi);
+ }
+
+ bgp_packet_set_size(s);
+
+ for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
+ if (!peer->afc_nego[afi][safi])
+ continue;
+
+ s2 = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bmp_common_hdr(s2, BMP_VERSION_3,
+ BMP_TYPE_ROUTE_MONITORING);
+ bmp_per_peer_hdr(s2, peer, flags, NULL);
+
+ stream_putl_at(s2, BMP_LENGTH_POS,
+ stream_get_endp(s) + stream_get_endp(s2));
+
+ bmp->cnt_update++;
+ pullwr_write_stream(bmp->pullwr, s2);
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s2);
+ }
+ stream_free(s);
+}
+
+static struct stream *bmp_update(struct prefix *p, struct peer *peer,
+ struct attr *attr, afi_t afi, safi_t safi)
+{
+ struct bpacket_attr_vec_arr vecarr;
+ struct stream *s;
+ size_t attrlen_pos = 0, mpattrlen_pos = 0;
+ bgp_size_t total_attr_len = 0;
+
+ bpacket_attr_vec_arr_reset(&vecarr);
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+ bgp_packet_set_marker(s, BGP_MSG_UPDATE);
+
+ /* 2: withdrawn routes length */
+ stream_putw(s, 0);
+
+ /* 3: total attributes length - attrlen_pos stores the position */
+ attrlen_pos = stream_get_endp(s);
+ stream_putw(s, 0);
+
+ /* 5: Encode all the attributes, except MP_REACH_NLRI attr. */
+ total_attr_len = bgp_packet_attribute(NULL, peer, s, attr,
+ &vecarr, NULL, afi, safi, peer, NULL, NULL, 0, 0, 0);
+
+ /* space check? */
+
+ /* peer_cap_enhe & add-path removed */
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ stream_put_prefix(s, p);
+ else {
+ size_t p1 = stream_get_endp(s);
+
+ /* MPLS removed for now */
+
+ mpattrlen_pos = bgp_packet_mpattr_start(s, peer, afi, safi,
+ &vecarr, attr);
+ bgp_packet_mpattr_prefix(s, afi, safi, p, NULL, NULL, 0,
+ 0, 0, attr);
+ bgp_packet_mpattr_end(s, mpattrlen_pos);
+ total_attr_len += stream_get_endp(s) - p1;
+ }
+
+ /* set the total attribute length correctly */
+ stream_putw_at(s, attrlen_pos, total_attr_len);
+ bgp_packet_set_size(s);
+ return s;
+}
+
+static struct stream *bmp_withdraw(struct prefix *p, afi_t afi, safi_t safi)
+{
+ struct stream *s;
+ size_t attrlen_pos = 0, mp_start, mplen_pos;
+ bgp_size_t total_attr_len = 0;
+ bgp_size_t unfeasible_len;
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bgp_packet_set_marker(s, BGP_MSG_UPDATE);
+ stream_putw(s, 0);
+
+ if (afi == AFI_IP && safi == SAFI_UNICAST) {
+ stream_put_prefix(s, p);
+ unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
+ - BGP_UNFEASIBLE_LEN;
+ stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
+ stream_putw(s, 0);
+ } else {
+ attrlen_pos = stream_get_endp(s);
+ /* total attr length = 0 for now. reevaluate later */
+ stream_putw(s, 0);
+ mp_start = stream_get_endp(s);
+ mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);
+
+ bgp_packet_mpunreach_prefix(s, p, afi, safi, NULL, NULL, 0,
+ 0, 0, NULL);
+ /* Set the mp_unreach attr's length */
+ bgp_packet_mpunreach_end(s, mplen_pos);
+
+ /* Set total path attribute length. */
+ total_attr_len = stream_get_endp(s) - mp_start;
+ stream_putw_at(s, attrlen_pos, total_attr_len);
+ }
+
+ bgp_packet_set_size(s);
+ return s;
+}
+
+static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags,
+ struct prefix *p, struct attr *attr, afi_t afi,
+ safi_t safi, time_t uptime)
+{
+ struct stream *hdr, *msg;
+ struct timeval tv = { .tv_sec = uptime, .tv_usec = 0 };
+
+ if (attr)
+ msg = bmp_update(p, peer, attr, afi, safi);
+ else
+ msg = bmp_withdraw(p, afi, safi);
+
+ hdr = stream_new(BGP_MAX_PACKET_SIZE);
+ bmp_common_hdr(hdr, BMP_VERSION_3, BMP_TYPE_ROUTE_MONITORING);
+ bmp_per_peer_hdr(hdr, peer, flags, &tv);
+
+ stream_putl_at(hdr, BMP_LENGTH_POS,
+ stream_get_endp(hdr) + stream_get_endp(msg));
+
+ bmp->cnt_update++;
+ pullwr_write_stream(bmp->pullwr, hdr);
+ pullwr_write_stream(bmp->pullwr, msg);
+ stream_free(hdr);
+ stream_free(msg);
+}
+
+static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr)
+{
+ afi_t afi;
+ safi_t safi;
+
+ if (bmp->syncafi == AFI_MAX) {
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (bmp->afistate[afi][safi] != BMP_AFI_NEEDSYNC)
+ continue;
+
+ bmp->afistate[afi][safi] = BMP_AFI_SYNC;
+
+ bmp->syncafi = afi;
+ bmp->syncsafi = safi;
+ bmp->syncpeerid = 0;
+ memset(&bmp->syncpos, 0, sizeof(bmp->syncpos));
+ bmp->syncpos.family = afi2family(afi);
+ zlog_info("bmp[%s] %s %s sending table",
+ bmp->remote,
+ afi2str(bmp->syncafi),
+ safi2str(bmp->syncsafi));
+ /* break does not work here, 2 loops... */
+ goto afibreak;
+ }
+ if (bmp->syncafi == AFI_MAX)
+ return false;
+ }
+
+afibreak:
+ afi = bmp->syncafi;
+ safi = bmp->syncsafi;
+
+ if (!bmp->targets->afimon[afi][safi]) {
+ /* shouldn't happen */
+ bmp->afistate[afi][safi] = BMP_AFI_INACTIVE;
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ return true;
+ }
+
+ struct bgp_table *table = bmp->targets->bgp->rib[afi][safi];
+ struct bgp_node *bn;
+ struct bgp_path_info *bpi = NULL, *bpiter;
+ struct bgp_adj_in *adjin = NULL, *adjiter;
+
+ bn = bgp_node_lookup(table, &bmp->syncpos);
+ do {
+ if (!bn) {
+ bn = bgp_table_get_next(table, &bmp->syncpos);
+ if (!bn) {
+ zlog_info("bmp[%s] %s %s table completed (EoR)",
+ bmp->remote, afi2str(afi),
+ safi2str(safi));
+ bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L);
+ bmp_eor(bmp, afi, safi, 0);
+
+ bmp->afistate[afi][safi] = BMP_AFI_LIVE;
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ return true;
+ }
+ bmp->syncpeerid = 0;
+ prefix_copy(&bmp->syncpos, &bn->p);
+ }
+
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
+ for (bpiter = bn->info; bpiter; bpiter = bpiter->next) {
+ if (!CHECK_FLAG(bpiter->flags, BGP_PATH_VALID))
+ continue;
+ if (bpiter->peer->qobj_node.nid
+ <= bmp->syncpeerid)
+ continue;
+ if (bpi && bpiter->peer->qobj_node.nid
+ > bpi->peer->qobj_node.nid)
+ continue;
+ bpi = bpiter;
+ }
+ }
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_PREPOLICY) {
+ for (adjiter = bn->adj_in; adjiter;
+ adjiter = adjiter->next) {
+ if (adjiter->peer->qobj_node.nid
+ <= bmp->syncpeerid)
+ continue;
+ if (adjin && adjiter->peer->qobj_node.nid
+ > adjin->peer->qobj_node.nid)
+ continue;
+ adjin = adjiter;
+ }
+ }
+ if (bpi || adjin)
+ break;
+
+ bn = NULL;
+ } while (1);
+
+ if (adjin && bpi
+ && adjin->peer->qobj_node.nid < bpi->peer->qobj_node.nid) {
+ bpi = NULL;
+ bmp->syncpeerid = adjin->peer->qobj_node.nid;
+ } else if (adjin && bpi
+ && adjin->peer->qobj_node.nid > bpi->peer->qobj_node.nid) {
+ adjin = NULL;
+ bmp->syncpeerid = bpi->peer->qobj_node.nid;
+ } else if (bpi) {
+ bmp->syncpeerid = bpi->peer->qobj_node.nid;
+ } else if (adjin) {
+ bmp->syncpeerid = adjin->peer->qobj_node.nid;
+ }
+
+ if (bpi)
+ bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, &bn->p, bpi->attr,
+ afi, safi, bpi->uptime);
+ if (adjin)
+ bmp_monitor(bmp, adjin->peer, 0, &bn->p, adjin->attr,
+ afi, safi, adjin->uptime);
+
+ return true;
+}
+
+static struct bmp_queue_entry *bmp_pull(struct bmp *bmp)
+{
+ struct bmp_queue_entry *bqe;
+
+ bqe = bmp->queuepos;
+ if (!bqe)
+ return NULL;
+
+ bmp->queuepos = bmp_qlist_next(&bmp->targets->updlist, bqe);
+
+ bqe->refcount--;
+ if (!bqe->refcount) {
+ bmp_qhash_del(&bmp->targets->updhash, bqe);
+ bmp_qlist_del(&bmp->targets->updlist, bqe);
+ }
+ return bqe;
+}
+
+static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
+{
+ struct bmp_queue_entry *bqe;
+ struct peer *peer;
+ struct bgp_node *bn;
+ bool written = false;
+
+ bqe = bmp_pull(bmp);
+ if (!bqe)
+ return false;
+
+ afi_t afi = bqe->afi;
+ safi_t safi = bqe->safi;
+
+ switch (bmp->afistate[afi][safi]) {
+ case BMP_AFI_INACTIVE:
+ case BMP_AFI_NEEDSYNC:
+ goto out;
+ case BMP_AFI_SYNC:
+ if (prefix_cmp(&bqe->p, &bmp->syncpos) <= 0)
+ /* currently syncing but have already passed this
+ * prefix => send it. */
+ break;
+
+ /* currently syncing & haven't reached this prefix yet
+ * => it'll be sent as part of the table sync, no need here */
+ goto out;
+ case BMP_AFI_LIVE:
+ break;
+ }
+
+ peer = QOBJ_GET_TYPESAFE(bqe->peerid, peer);
+ if (!peer) {
+ zlog_info("bmp: skipping queued item for deleted peer");
+ goto out;
+ }
+ if (peer->status != Established)
+ goto out;
+
+ bn = bgp_node_lookup(bmp->targets->bgp->rib[afi][safi], &bqe->p);
+
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
+ struct bgp_path_info *bpi;
+
+ for (bpi = bn ? bn->info : NULL; bpi; bpi = bpi->next) {
+ if (!CHECK_FLAG(bpi->flags, BGP_PATH_VALID))
+ continue;
+ if (bpi->peer == peer)
+ break;
+ }
+
+ bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p,
+ bpi ? bpi->attr : NULL, afi, safi,
+ bpi ? bpi->uptime : monotime(NULL));
+ written = true;
+ }
+
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_PREPOLICY) {
+ struct bgp_adj_in *adjin;
+
+ for (adjin = bn ? bn->adj_in : NULL; adjin;
+ adjin = adjin->next) {
+ if (adjin->peer == peer)
+ break;
+ }
+ bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p,
+ adjin ? adjin->attr : NULL, afi, safi,
+ adjin ? adjin->uptime : monotime(NULL));
+ written = true;
+ }
+
+out:
+ if (!bqe->refcount)
+ XFREE(MTYPE_BMP_QUEUE, bqe);
+ return written;
+}
+
+static void bmp_wrfill(struct bmp *bmp, struct pullwr *pullwr)
+{
+ switch(bmp->state) {
+ case BMP_PeerUp:
+ bmp_send_peerup(bmp);
+ bmp->state = BMP_Run;
+ break;
+
+ case BMP_Run:
+ if (bmp_wrmirror(bmp, pullwr))
+ break;
+ if (bmp_wrqueue(bmp, pullwr))
+ break;
+ if (bmp_wrsync(bmp, pullwr))
+ break;
+ break;
+ }
+}
+
+static void bmp_wrerr(struct bmp *bmp, struct pullwr *pullwr, bool eof)
+{
+ if (eof)
+ zlog_info("bmp[%s] disconnected", bmp->remote);
+ else
+ flog_warn(EC_LIB_SYSTEM_CALL, "bmp[%s] connection error: %s",
+ bmp->remote, strerror(errno));
+
+ bmp_close(bmp);
+ bmp_free(bmp);
+}
+
+static void bmp_process_one(struct bmp_targets *bt, struct bgp *bgp,
+ afi_t afi, safi_t safi, struct bgp_node *bn, struct peer *peer)
+{
+ struct bmp *bmp;
+ struct bmp_queue_entry *bqe, bqeref;
+ size_t refcount;
+ char buf[256];
+
+ prefix2str(&bn->p, buf, sizeof(buf));
+
+ refcount = bmp_session_count(&bt->sessions);
+ if (refcount == 0)
+ return;
+
+ memset(&bqeref, 0, sizeof(bqeref));
+ prefix_copy(&bqeref.p, &bn->p);
+ bqeref.peerid = peer->qobj_node.nid;
+ bqeref.afi = afi;
+ bqeref.safi = safi;
+
+ bqe = bmp_qhash_find(&bt->updhash, &bqeref);
+ if (bqe) {
+ if (bqe->refcount >= refcount)
+ /* nothing to do here */
+ return;
+
+ bmp_qlist_del(&bt->updlist, bqe);
+ } else {
+ bqe = XMALLOC(MTYPE_BMP_QUEUE, sizeof(*bqe));
+ memcpy(bqe, &bqeref, sizeof(*bqe));
+
+ bmp_qhash_add(&bt->updhash, bqe);
+ }
+
+ bqe->refcount = refcount;
+ bmp_qlist_add_tail(&bt->updlist, bqe);
+
+ frr_each (bmp_session, &bt->sessions, bmp)
+ if (!bmp->queuepos)
+ bmp->queuepos = bqe;
+}
+
+static int bmp_process(struct bgp *bgp, afi_t afi, safi_t safi,
+ struct bgp_node *bn, struct peer *peer, bool withdraw)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ if (!bmpbgp)
+ return 0;
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->afimon[afi][safi])
+ continue;
+
+ bmp_process_one(bt, bgp, afi, safi, bn, peer);
+
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ return 0;
+}
+
+static void bmp_stat_put_u32(struct stream *s, size_t *cnt, uint16_t type,
+ uint32_t value)
+{
+ stream_putw(s, type);
+ stream_putw(s, 4);
+ stream_putl(s, value);
+ (*cnt)++;
+}
+
+static int bmp_stats(struct thread *thread)
+{
+ struct bmp_targets *bt = THREAD_ARG(thread);
+ struct stream *s;
+ struct peer *peer;
+ struct listnode *node;
+ struct timeval tv;
+
+ if (bt->stat_msec)
+ thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
+
+ gettimeofday(&tv, NULL);
+
+ /* Walk down all peers */
+ for (ALL_LIST_ELEMENTS_RO(bt->bgp->peer, node, peer)) {
+ size_t count = 0, count_pos, len;
+
+ if (peer->status != Established)
+ continue;
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_STATISTICS_REPORT);
+ bmp_per_peer_hdr(s, peer, 0, &tv);
+
+ count_pos = stream_get_endp(s);
+ stream_putl(s, 0);
+
+ bmp_stat_put_u32(s, &count, BMP_STATS_PFX_REJECTED,
+ peer->stat_pfx_filter);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_ASPATH,
+ peer->stat_pfx_aspath_loop);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_ORIGINATOR,
+ peer->stat_pfx_originator_loop);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_CLUSTER,
+ peer->stat_pfx_cluster_loop);
+ bmp_stat_put_u32(s, &count, BMP_STATS_PFX_DUP_WITHDRAW,
+ peer->stat_pfx_dup_withdraw);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW,
+ peer->stat_upd_7606);
+ bmp_stat_put_u32(s, &count, BMP_STATS_FRR_NH_INVALID,
+ peer->stat_pfx_nh_invalid);
+
+ stream_putl_at(s, count_pos, count);
+
+ len = stream_get_endp(s);
+ stream_putl_at(s, BMP_LENGTH_POS, len);
+
+ bmp_send_all(bt->bmpbgp, s);
+ }
+ return 0;
+}
+
+static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
+{
+ union sockunion su, *sumem;
+ struct prefix p;
+ int on = 1;
+ struct access_list *acl = NULL;
+ enum filter_type ret;
+ char buf[SU_ADDRSTRLEN];
+ struct bmp *bmp;
+
+ sumem = sockunion_getpeername(bmp_sock);
+ if (!sumem) {
+ close(bmp_sock);
+ return NULL;
+ }
+ memcpy(&su, sumem, sizeof(su));
+ sockunion_free(sumem);
+
+ set_nonblocking(bmp_sock);
+ set_cloexec(bmp_sock);
+ shutdown(bmp_sock, SHUT_RD);
+
+ sockunion2hostprefix(&su, &p);
+
+ acl = NULL;
+ switch (p.family) {
+ case AF_INET:
+ acl = access_list_lookup(AFI_IP, bt->acl_name);
+ break;
+ case AF_INET6:
+ acl = access_list_lookup(AFI_IP6, bt->acl6_name);
+ break;
+ default:
+ break;
+ }
+
+ ret = FILTER_PERMIT;
+ if (acl) {
+ ret = access_list_apply(acl, &p);
+ }
+
+ sockunion2str(&su, buf, SU_ADDRSTRLEN);
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ":%u",
+ su.sa.sa_family == AF_INET
+ ? ntohs(su.sin.sin_port)
+ : ntohs(su.sin6.sin6_port));
+
+ if (ret == FILTER_DENY) {
+ bt->cnt_aclrefused++;
+ zlog_info("bmp[%s] connection refused by access-list", buf);
+ close(bmp_sock);
+ return NULL;
+ }
+ bt->cnt_accept++;
+
+ setsockopt(bmp_sock, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
+ setsockopt(bmp_sock, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+
+ zlog_info("bmp[%s] connection established", buf);
+
+ /* Allocate new BMP structure and set up default values. */
+ bmp = bmp_new(bt, bmp_sock);
+ strlcpy(bmp->remote, buf, sizeof(bmp->remote));
+
+ bmp->state = BMP_PeerUp;
+ bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
+ bmp_wrerr);
+ bmp_send_initiation(bmp);
+
+ return bmp;
+}
+
+/* Accept BMP connection. */
+static int bmp_accept(struct thread *thread)
+{
+ union sockunion su;
+ struct bmp_listener *bl = THREAD_ARG(thread);
+ int bmp_sock;
+
+ /* We continue hearing BMP socket. */
+ thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
+
+ memset(&su, 0, sizeof(union sockunion));
+
+ /* We can handle IPv4 or IPv6 socket. */
+ bmp_sock = sockunion_accept(bl->sock, &su);
+ if (bmp_sock < 0) {
+ zlog_info("bmp: accept_sock failed: %s\n",
+ safe_strerror (errno));
+ return -1;
+ }
+ bmp_open(bl->targets, bmp_sock);
+ return 0;
+}
+
+static void bmp_close(struct bmp *bmp)
+{
+ struct bmp_queue_entry *bqe;
+ struct bmp_mirrorq *bmq;
+
+ if (bmp->active)
+ bmp_active_disconnected(bmp->active);
+
+ while ((bmq = bmp_pull_mirror(bmp)))
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ while ((bqe = bmp_pull(bmp)))
+ if (!bqe->refcount)
+ XFREE(MTYPE_BMP_QUEUE, bqe);
+
+ THREAD_OFF(bmp->t_read);
+ pullwr_del(bmp->pullwr);
+ close(bmp->socket);
+}
+
+static struct bmp_bgp *bmp_bgp_find(struct bgp *bgp)
+{
+ struct bmp_bgp dummy = { .bgp = bgp };
+ return bmp_bgph_find(&bmp_bgph, &dummy);
+}
+
+static struct bmp_bgp *bmp_bgp_get(struct bgp *bgp)
+{
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_find(bgp);
+ if (bmpbgp)
+ return bmpbgp;
+
+ bmpbgp = XCALLOC(MTYPE_BMP, sizeof(*bmpbgp));
+ bmpbgp->bgp = bgp;
+ bmpbgp->mirror_qsizelimit = ~0UL;
+ bmp_mirrorq_init(&bmpbgp->mirrorq);
+ bmp_bgph_add(&bmp_bgph, bmpbgp);
+
+ return bmpbgp;
+}
+
+static void bmp_bgp_put(struct bmp_bgp *bmpbgp)
+{
+ struct bmp_targets *bt;
+
+ bmp_bgph_del(&bmp_bgph, bmpbgp);
+
+ frr_each_safe(bmp_targets, &bmpbgp->targets, bt)
+ bmp_targets_put(bt);
+
+ bmp_mirrorq_fini(&bmpbgp->mirrorq);
+ XFREE(MTYPE_BMP, bmpbgp);
+}
+
+static int bmp_bgp_del(struct bgp *bgp)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+
+ if (bmpbgp)
+ bmp_bgp_put(bmpbgp);
+ return 0;
+}
+
+static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid)
+{
+ struct bmp_bgp_peer dummy = { .peerid = peerid };
+ return bmp_peerh_find(&bmp_peerh, &dummy);
+}
+
+static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer)
+{
+ struct bmp_bgp_peer *bbpeer;
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+ if (bbpeer)
+ return bbpeer;
+
+ bbpeer = XCALLOC(MTYPE_BMP_PEER, sizeof(*bbpeer));
+ bbpeer->peerid = peer->qobj_node.nid;
+ bmp_peerh_add(&bmp_peerh, bbpeer);
+
+ return bbpeer;
+}
+
+static struct bmp_targets *bmp_targets_find1(struct bgp *bgp, const char *name)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+ struct bmp_targets dummy;
+
+ if (!bmpbgp)
+ return NULL;
+ dummy.name = (char *)name;
+ return bmp_targets_find(&bmpbgp->targets, &dummy);
+}
+
+static struct bmp_targets *bmp_targets_get(struct bgp *bgp, const char *name)
+{
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_find1(bgp, name);
+ if (bt)
+ return bt;
+
+ bt = XCALLOC(MTYPE_BMP_TARGETS, sizeof(*bt));
+ bt->name = XSTRDUP(MTYPE_BMP_TARGETSNAME, name);
+ bt->bgp = bgp;
+ bt->bmpbgp = bmp_bgp_get(bgp);
+ bmp_session_init(&bt->sessions);
+ bmp_qhash_init(&bt->updhash);
+ bmp_qlist_init(&bt->updlist);
+ bmp_actives_init(&bt->actives);
+ bmp_listeners_init(&bt->listeners);
+
+ QOBJ_REG(bt, bmp_targets);
+ bmp_targets_add(&bt->bmpbgp->targets, bt);
+ return bt;
+}
+
+static void bmp_targets_put(struct bmp_targets *bt)
+{
+ struct bmp *bmp;
+ struct bmp_active *ba;
+
+ frr_each_safe (bmp_actives, &bt->actives, ba)
+ bmp_active_put(ba);
+
+ frr_each_safe(bmp_session, &bt->sessions, bmp) {
+ bmp_close(bmp);
+ bmp_free(bmp);
+ }
+
+ bmp_targets_del(&bt->bmpbgp->targets, bt);
+ QOBJ_UNREG(bt);
+
+ bmp_listeners_fini(&bt->listeners);
+ bmp_actives_fini(&bt->actives);
+ bmp_qhash_fini(&bt->updhash);
+ bmp_qlist_fini(&bt->updlist);
+
+ XFREE(MTYPE_BMP_ACLNAME, bt->acl_name);
+ XFREE(MTYPE_BMP_ACLNAME, bt->acl6_name);
+ bmp_session_fini(&bt->sessions);
+
+ XFREE(MTYPE_BMP_TARGETSNAME, bt->name);
+ XFREE(MTYPE_BMP_TARGETS, bt);
+}
+
+static struct bmp_listener *bmp_listener_find(struct bmp_targets *bt,
+ const union sockunion *su,
+ int port)
+{
+ struct bmp_listener dummy;
+ dummy.addr = *su;
+ dummy.port = port;
+ return bmp_listeners_find(&bt->listeners, &dummy);
+}
+
+static struct bmp_listener *bmp_listener_get(struct bmp_targets *bt,
+ const union sockunion *su,
+ int port)
+{
+ struct bmp_listener *bl = bmp_listener_find(bt, su, port);
+
+ if (bl)
+ return bl;
+
+ bl = XCALLOC(MTYPE_BMP_LISTENER, sizeof(*bl));
+ bl->targets = bt;
+ bl->addr = *su;
+ bl->port = port;
+ bl->sock = -1;
+
+ bmp_listeners_add(&bt->listeners, bl);
+ return bl;
+}
+
+static void bmp_listener_put(struct bmp_listener *bl)
+{
+ bmp_listeners_del(&bl->targets->listeners, bl);
+ XFREE(MTYPE_BMP_LISTENER, bl);
+}
+
+static void bmp_listener_start(struct bmp_listener *bl)
+{
+ int sock, ret;
+
+ sock = socket(bl->addr.sa.sa_family, SOCK_STREAM, 0);
+ if (sock < 0)
+ return;
+
+ sockopt_reuseaddr(sock);
+ sockopt_reuseport(sock);
+ sockopt_v6only(bl->addr.sa.sa_family, sock);
+ set_cloexec(sock);
+
+ ret = sockunion_bind(sock, &bl->addr, bl->port, &bl->addr);
+ if (ret < 0)
+ goto out_sock;
+
+ ret = listen(sock, 3);
+ if (ret < 0)
+ goto out_sock;
+
+ bl->sock = sock;
+ thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
+ return;
+out_sock:
+ close(sock);
+}
+
+static void bmp_listener_stop(struct bmp_listener *bl)
+{
+ THREAD_OFF(bl->t_accept);
+
+ if (bl->sock != -1)
+ close(bl->sock);
+ bl->sock = -1;
+}
+
+static struct bmp_active *bmp_active_find(struct bmp_targets *bt,
+ const char *hostname, int port)
+{
+ struct bmp_active dummy;
+ dummy.hostname = (char *)hostname;
+ dummy.port = port;
+ return bmp_actives_find(&bt->actives, &dummy);
+}
+
+static struct bmp_active *bmp_active_get(struct bmp_targets *bt,
+ const char *hostname, int port)
+{
+ struct bmp_active *ba;
+
+ ba = bmp_active_find(bt, hostname, port);
+ if (ba)
+ return ba;
+
+ ba = XCALLOC(MTYPE_BMP_ACTIVE, sizeof(*ba));
+ ba->targets = bt;
+ ba->hostname = XSTRDUP(MTYPE_TMP, hostname);
+ ba->port = port;
+ ba->minretry = BMP_DFLT_MINRETRY;
+ ba->maxretry = BMP_DFLT_MAXRETRY;
+ ba->socket = -1;
+
+ bmp_actives_add(&bt->actives, ba);
+ return ba;
+}
+
+static void bmp_active_put(struct bmp_active *ba)
+{
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ bmp_actives_del(&ba->targets->actives, ba);
+
+ if (ba->bmp) {
+ ba->bmp->active = NULL;
+ bmp_close(ba->bmp);
+ bmp_free(ba->bmp);
+ }
+ if (ba->socket != -1)
+ close(ba->socket);
+
+ XFREE(MTYPE_TMP, ba->hostname);
+ XFREE(MTYPE_BMP_ACTIVE, ba);
+}
+
+static void bmp_active_setup(struct bmp_active *ba);
+
+static void bmp_active_connect(struct bmp_active *ba)
+{
+ enum connect_result res;
+ char buf[SU_ADDRSTRLEN];
+
+ for (; ba->addrpos < ba->addrtotal; ba->addrpos++) {
+ ba->socket = sockunion_socket(&ba->addrs[ba->addrpos]);
+ if (ba->socket < 0) {
+ zlog_warn("bmp[%s]: failed to create socket",
+ ba->hostname);
+ continue;
+ }
+
+ set_nonblocking(ba->socket);
+ res = sockunion_connect(ba->socket, &ba->addrs[ba->addrpos],
+ htons(ba->port), 0);
+ switch (res) {
+ case connect_error:
+ sockunion2str(&ba->addrs[ba->addrpos], buf,
+ sizeof(buf));
+ zlog_warn("bmp[%s]: failed to connect to %s:%d",
+ ba->hostname, buf, ba->port);
+ close(ba->socket);
+ ba->socket = -1;
+ continue;
+ case connect_success:
+ break;
+ case connect_in_progress:
+ bmp_active_setup(ba);
+ return;
+ }
+ }
+
+ /* exhausted all addresses */
+ ba->curretry += ba->curretry / 2;
+ bmp_active_setup(ba);
+}
+
+static void bmp_active_resolved(struct resolver_query *resq, int numaddrs,
+ union sockunion *addr)
+{
+ struct bmp_active *ba = container_of(resq, struct bmp_active, resq);
+ unsigned i;
+
+ if (numaddrs <= 0) {
+ zlog_warn("bmp[%s]: hostname resolution failed", ba->hostname);
+ ba->curretry += ba->curretry / 2;
+ bmp_active_setup(ba);
+ return;
+ }
+ if (numaddrs > (int)array_size(ba->addrs))
+ numaddrs = array_size(ba->addrs);
+
+ ba->addrpos = 0;
+ ba->addrtotal = numaddrs;
+ for (i = 0; i < ba->addrtotal; i++)
+ memcpy(&ba->addrs[i], &addr[i], sizeof(ba->addrs[0]));
+
+ bmp_active_connect(ba);
+}
+
+static int bmp_active_thread(struct thread *t)
+{
+ struct bmp_active *ba = THREAD_ARG(t);
+ socklen_t slen;
+ int status, ret;
+ char buf[SU_ADDRSTRLEN];
+
+ /* all 3 end up here, though only timer or read+write are active
+ * at a time */
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ if (ba->socket == -1) {
+ resolver_resolve(&ba->resq, AF_UNSPEC, ba->hostname,
+ bmp_active_resolved);
+ return 0;
+ }
+
+ slen = sizeof(status);
+ ret = getsockopt(ba->socket, SOL_SOCKET, SO_ERROR, (void *)&status,
+ &slen);
+
+ sockunion2str(&ba->addrs[ba->addrpos], buf, sizeof(buf));
+ if (ret < 0 || status != 0) {
+ zlog_warn("bmp[%s]: failed to connect to %s:%d",
+ ba->hostname, buf, ba->port);
+ goto out_next;
+ }
+
+ zlog_warn("bmp[%s]: outbound connection to %s:%d",
+ ba->hostname, buf, ba->port);
+
+ ba->bmp = bmp_open(ba->targets, ba->socket);
+ if (!ba->bmp)
+ goto out_next;
+
+ ba->bmp->active = ba;
+ ba->socket = -1;
+ ba->curretry = ba->minretry;
+ return 0;
+
+out_next:
+ close(ba->socket);
+ ba->socket = -1;
+ ba->addrpos++;
+ bmp_active_connect(ba);
+ return 0;
+}
+
+static void bmp_active_disconnected(struct bmp_active *ba)
+{
+ ba->bmp = NULL;
+ bmp_active_setup(ba);
+}
+
+static void bmp_active_setup(struct bmp_active *ba)
+{
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ if (ba->bmp)
+ return;
+ if (ba->resq.callback)
+ return;
+
+ if (ba->curretry > ba->maxretry)
+ ba->curretry = ba->maxretry;
+
+ if (ba->socket == -1)
+ thread_add_timer_msec(bm->master, bmp_active_thread, ba,
+ ba->curretry, &ba->t_timer);
+ else {
+ thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_read);
+ thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_write);
+ }
+}
+
+static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "};
+
+#define BMP_STR "BGP Monitoring Protocol\n"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "bgp_bmp_clippy.c"
+#endif
+
+DEFPY_NOSH(bmp_targets_main,
+ bmp_targets_cmd,
+ "bmp targets BMPTARGETS",
+ BMP_STR
+ "Create BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_get(bgp, bmptargets);
+
+ VTY_PUSH_CONTEXT_SUB(BMP_NODE, bt);
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_bmp_targets_main,
+ no_bmp_targets_cmd,
+ "no bmp targets BMPTARGETS",
+ NO_STR
+ BMP_STR
+ "Delete BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_find1(bgp, bmptargets);
+ if (!bt) {
+ vty_out(vty, "%% BMP target group not found\n");
+ return CMD_WARNING;
+ }
+ bmp_targets_put(bt);
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_listener_main,
+ bmp_listener_cmd,
+ "bmp listener <X:X::X:X|A.B.C.D> port (1-65535)",
+ BMP_STR
+ "Listen for inbound BMP connections\n"
+ "IPv6 address to listen on\n"
+ "IPv4 address to listen on\n"
+ "TCP Port number\n"
+ "TCP Port number\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_listener *bl;
+
+ bl = bmp_listener_get(bt, listener, port);
+ if (bl->sock == -1)
+ bmp_listener_start(bl);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_bmp_listener_main,
+ no_bmp_listener_cmd,
+ "no bmp listener <X:X::X:X|A.B.C.D> port (1-65535)",
+ NO_STR
+ BMP_STR
+ "Create BMP listener\n"
+ "IPv6 address to listen on\n"
+ "IPv4 address to listen on\n"
+ "TCP Port number\n"
+ "TCP Port number\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_listener *bl;
+
+ bl = bmp_listener_find(bt, listener, port);
+ if (!bl) {
+ vty_out(vty, "%% BMP listener not found\n");
+ return CMD_WARNING;
+ }
+ bmp_listener_stop(bl);
+ bmp_listener_put(bl);
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_connect,
+ bmp_connect_cmd,
+ "[no] bmp connect HOSTNAME port (1-65535) "
+ "{min-retry (100-86400000)"
+ "|max-retry (100-86400000)}",
+ NO_STR
+ BMP_STR
+ "Actively establish connection to monitoring station\n"
+ "Monitoring station hostname or address\n"
+ "TCP port\n"
+ "TCP port\n"
+ "Minimum connection retry interval\n"
+ "Minimum connection retry interval (milliseconds)\n"
+ "Maximum connection retry interval\n"
+ "Maximum connection retry interval (milliseconds)\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_active *ba;
+
+ if (no) {
+ ba = bmp_active_find(bt, hostname, port);
+ if (!ba) {
+ vty_out(vty, "%% No such active connection found\n");
+ return CMD_WARNING;
+ }
+ bmp_active_put(ba);
+ return CMD_SUCCESS;
+ }
+
+ ba = bmp_active_get(bt, hostname, port);
+ if (min_retry_str)
+ ba->minretry = min_retry;
+ if (max_retry_str)
+ ba->maxretry = max_retry;
+ ba->curretry = ba->minretry;
+ bmp_active_setup(ba);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_acl,
+ bmp_acl_cmd,
+ "[no] <ip|ipv6>$af access-list WORD",
+ NO_STR
+ IP_STR
+ IPV6_STR
+ "Access list to restrict BMP sessions\n"
+ "Access list name\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ char **what;
+
+ if (no)
+ access_list = NULL;
+ if (!strcmp(af, "ipv6"))
+ what = &bt->acl6_name;
+ else
+ what = &bt->acl_name;
+
+ XFREE(MTYPE_BMP_ACLNAME, *what);
+ if (access_list)
+ *what = XSTRDUP(MTYPE_BMP_ACLNAME, access_list);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_stats_cfg,
+ bmp_stats_cmd,
+ "[no] bmp stats [interval (100-86400000)]",
+ NO_STR
+ BMP_STR
+ "Send BMP statistics messages\n"
+ "Specify BMP stats interval\n"
+ "Interval (milliseconds) to send BMP Stats in\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+
+ THREAD_OFF(bt->t_stats);
+ if (no)
+ bt->stat_msec = 0;
+ else if (interval_str)
+ bt->stat_msec = interval;
+ else
+ bt->stat_msec = BMP_STAT_DEFAULT_TIMER;
+
+ if (bt->stat_msec)
+ thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_monitor_cfg,
+ bmp_monitor_cmd,
+ "[no] bmp monitor "BGP_AFI_CMD_STR" <unicast|multicast> <pre-policy|post-policy>$policy",
+ NO_STR
+ BMP_STR
+ "Send BMP route monitoring messages\n"
+ BGP_AFI_HELP_STR
+ "Address family modifier\n"
+ "Address family modifier\n"
+ "Send state before policy and filter processing\n"
+ "Send state with policy and filters applied\n")
+{
+ int index = 0;
+ uint8_t flag, prev;
+ afi_t afi;
+ safi_t safi;
+
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp *bmp;
+
+ argv_find_and_parse_afi(argv, argc, &index, &afi);
+ argv_find_and_parse_safi(argv, argc, &index, &safi);
+
+ if (policy[1] == 'r')
+ flag = BMP_MON_PREPOLICY;
+ else
+ flag = BMP_MON_POSTPOLICY;
+
+ prev = bt->afimon[afi][safi];
+ if (no)
+ bt->afimon[afi][safi] &= ~flag;
+ else
+ bt->afimon[afi][safi] |= flag;
+
+ if (prev == bt->afimon[afi][safi])
+ return CMD_SUCCESS;
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ if (bmp->syncafi == afi && bmp->syncsafi == safi) {
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ }
+
+ if (!bt->afimon[afi][safi]) {
+ bmp->afistate[afi][safi] = BMP_AFI_INACTIVE;
+ continue;
+ }
+
+ bmp->afistate[afi][safi] = BMP_AFI_NEEDSYNC;
+ }
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_mirror_cfg,
+ bmp_mirror_cmd,
+ "[no] bmp mirror",
+ NO_STR
+ BMP_STR
+ "Send BMP route mirroring messages\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp *bmp;
+
+ if (bt->mirror == !no)
+ return CMD_SUCCESS;
+
+ bt->mirror = !no;
+ if (bt->mirror)
+ return CMD_SUCCESS;
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ struct bmp_mirrorq *bmq;
+
+ while ((bmq = bmp_pull_mirror(bmp)))
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_mirror_limit_cfg,
+ bmp_mirror_limit_cmd,
+ "bmp mirror buffer-limit (0-4294967294)",
+ BMP_STR
+ "Route Mirroring settings\n"
+ "Configure maximum memory used for buffered mirroring messages\n"
+ "Limit in bytes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_get(bgp);
+ bmpbgp->mirror_qsizelimit = buffer_limit;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_bmp_mirror_limit_cfg,
+ no_bmp_mirror_limit_cmd,
+ "no bmp mirror buffer-limit [(0-4294967294)]",
+ NO_STR
+ BMP_STR
+ "Route Mirroring settings\n"
+ "Configure maximum memory used for buffered mirroring messages\n"
+ "Limit in bytes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_get(bgp);
+ bmpbgp->mirror_qsizelimit = ~0UL;
+
+ return CMD_SUCCESS;
+}
+
+
+DEFPY(show_bmp,
+ show_bmp_cmd,
+ "show bmp",
+ SHOW_STR
+ BMP_STR)
+{
+ struct bmp_bgp *bmpbgp;
+ struct bmp_targets *bt;
+ struct bmp_listener *bl;
+ struct bmp *bmp;
+ struct ttable *tt;
+ char buf[SU_ADDRSTRLEN];
+
+ frr_each(bmp_bgph, &bmp_bgph, bmpbgp) {
+ vty_out(vty, "BMP state for BGP %s:\n\n",
+ bmpbgp->bgp->name_pretty);
+ vty_out(vty, " Route Mirroring %9zu bytes (%zu messages) pending\n",
+ bmpbgp->mirror_qsize,
+ bmp_mirrorq_count(&bmpbgp->mirrorq));
+ vty_out(vty, " %9zu bytes maximum buffer used\n",
+ bmpbgp->mirror_qsizemax);
+ if (bmpbgp->mirror_qsizelimit != ~0UL)
+ vty_out(vty, " %9zu bytes buffer size limit\n",
+ bmpbgp->mirror_qsizelimit);
+ vty_out(vty, "\n");
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ vty_out(vty, " Targets \"%s\":\n", bt->name);
+ vty_out(vty, " Route Mirroring %sabled\n",
+ bt->mirror ? "en" : "dis");
+
+ afi_t afi;
+ safi_t safi;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ const char *str = NULL;
+
+ switch (bt->afimon[afi][safi]) {
+ case BMP_MON_PREPOLICY:
+ str = "pre-policy";
+ break;
+ case BMP_MON_POSTPOLICY:
+ str = "post-policy";
+ break;
+ case BMP_MON_PREPOLICY | BMP_MON_POSTPOLICY:
+ str = "pre-policy and post-policy";
+ break;
+ }
+ if (!str)
+ continue;
+ vty_out(vty, " Route Monitoring %s %s %s\n",
+ afi2str(afi), safi2str(safi), str);
+ }
+
+ vty_out(vty, " Listeners:\n");
+ frr_each (bmp_listeners, &bt->listeners, bl)
+ vty_out(vty, " %s:%d\n",
+ sockunion2str(&bl->addr, buf,
+ SU_ADDRSTRLEN), bl->port);
+
+ vty_out(vty, "\n %zu connected clients:\n",
+ bmp_session_count(&bt->sessions));
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "remote|uptime|MonSent|MirrSent|MirrLost|ByteSent|ByteQ|ByteQKernel");
+ ttable_rowseps(tt, 0, BOTTOM, true, '-');
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ uint64_t total;
+ size_t q, kq;
+
+ pullwr_stats(bmp->pullwr, &total, &q, &kq);
+
+ ttable_add_row(tt, "%s|-|%Lu|%Lu|%Lu|%Lu|%zu|%zu",
+ bmp->remote,
+ bmp->cnt_update,
+ bmp->cnt_mirror,
+ bmp->cnt_mirror_overruns,
+ total, q, kq);
+ }
+ char *out = ttable_dump(tt, "\n");
+ vty_out(vty, "%s", out);
+ XFREE(MTYPE_TMP, out);
+ ttable_del(tt);
+ vty_out(vty, "\n");
+ }
+ }
+
+ return CMD_SUCCESS;
+}
+
+static int bmp_config_write(struct bgp *bgp, struct vty *vty)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+ struct bmp_targets *bt;
+ struct bmp_listener *bl;
+ struct bmp_active *ba;
+ char buf[SU_ADDRSTRLEN];
+ afi_t afi;
+ safi_t safi;
+
+ if (!bmpbgp)
+ return 0;
+
+ if (bmpbgp->mirror_qsizelimit != ~0UL)
+ vty_out(vty, " !\n bmp mirror buffer-limit %zu\n",
+ bmpbgp->mirror_qsizelimit);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ vty_out(vty, " !\n bmp targets %s\n", bt->name);
+
+ if (bt->acl6_name)
+ vty_out(vty, " ipv6 access-list %s\n", bt->acl6_name);
+ if (bt->acl_name)
+ vty_out(vty, " ip access-list %s\n", bt->acl_name);
+
+ if (bt->stat_msec)
+ vty_out(vty, " bmp stats interval %d\n",
+ bt->stat_msec);
+
+ if (bt->mirror)
+ vty_out(vty, " bmp mirror\n");
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ const char *afi_str = (afi == AFI_IP) ? "ipv4" : "ipv6";
+
+ if (bt->afimon[afi][safi] & BMP_MON_PREPOLICY)
+ vty_out(vty, " bmp monitor %s %s pre-policy\n",
+ afi_str, safi2str(safi));
+ if (bt->afimon[afi][safi] & BMP_MON_POSTPOLICY)
+ vty_out(vty, " bmp monitor %s %s post-policy\n",
+ afi_str, safi2str(safi));
+ }
+ frr_each (bmp_listeners, &bt->listeners, bl)
+ vty_out(vty, " \n bmp listener %s port %d\n",
+ sockunion2str(&bl->addr, buf, SU_ADDRSTRLEN),
+ bl->port);
+
+ frr_each (bmp_actives, &bt->actives, ba)
+ vty_out(vty, " bmp connect %s port %u min-retry %u max-retry %u\n",
+ ba->hostname, ba->port, ba->minretry, ba->maxretry);
+ }
+
+ return 0;
+}
+
+static int bgp_bmp_init(struct thread_master *tm)
+{
+ install_node(&bmp_node, NULL);
+ install_default(BMP_NODE);
+ install_element(BGP_NODE, &bmp_targets_cmd);
+ install_element(BGP_NODE, &no_bmp_targets_cmd);
+
+ install_element(BMP_NODE, &bmp_listener_cmd);
+ install_element(BMP_NODE, &no_bmp_listener_cmd);
+ install_element(BMP_NODE, &bmp_connect_cmd);
+ install_element(BMP_NODE, &bmp_acl_cmd);
+ install_element(BMP_NODE, &bmp_stats_cmd);
+ install_element(BMP_NODE, &bmp_monitor_cmd);
+ install_element(BMP_NODE, &bmp_mirror_cmd);
+
+ install_element(BGP_NODE, &bmp_mirror_limit_cmd);
+ install_element(BGP_NODE, &no_bmp_mirror_limit_cmd);
+
+ install_element(VIEW_NODE, &show_bmp_cmd);
+
+ resolver_init(tm);
+ return 0;
+}
+
+static int bgp_bmp_module_init(void)
+{
+ hook_register(bgp_packet_dump, bmp_mirror_packet);
+ hook_register(bgp_packet_send, bmp_outgoing_packet);
+ hook_register(peer_established, bmp_peer_established);
+ hook_register(peer_backward_transition, bmp_peer_backward);
+ hook_register(bgp_process, bmp_process);
+ hook_register(bgp_inst_config_write, bmp_config_write);
+ hook_register(bgp_inst_delete, bmp_bgp_del);
+ hook_register(frr_late_init, bgp_bmp_init);
+ return 0;
+}
+
+FRR_MODULE_SETUP(.name = "bgpd_bmp", .version = FRR_VERSION,
+ .description = "bgpd BMP module",
+ .init = bgp_bmp_module_init)
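
The commands above all attach to the "router bgp" node, with "bmp targets" opening its own sub-node (BMP_NODE). As an illustration only, a configuration of the kind bmp_config_write() above writes back out could look like the following; the AS number, group name, access-list name, addresses and hostname are made-up example values:

    router bgp 65001
     bmp mirror buffer-limit 4194304
     !
     bmp targets collector-group
      ip access-list bmp-clients
      bmp stats interval 60000
      bmp mirror
      bmp monitor ipv4 unicast pre-policy
      bmp monitor ipv6 unicast post-policy
      bmp listener 192.0.2.10 port 5000
      bmp connect bmp.example.net port 5000 min-retry 30000 max-retry 720000
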
diff --git a/bgpd/bgp_bmp.h b/bgpd/bgp_bmp.h
new file mode 100644
index 0000000000..9d270e808c
--- /dev/null
+++ b/bgpd/bgp_bmp.h
@@ -0,0 +1,303 @@
+/* BMP support.
+ * Copyright (C) 2018 Yasuhiro Ohara
+ * Copyright (C) 2019 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _BGP_BMP_H_
+#define _BGP_BMP_H_
+
+#include "zebra.h"
+#include "typesafe.h"
+#include "pullwr.h"
+#include "qobj.h"
+#include "resolver.h"
+
+#define BMP_VERSION_3 3
+
+#define BMP_LENGTH_POS 1
+
+/* BMP message types */
+#define BMP_TYPE_ROUTE_MONITORING 0
+#define BMP_TYPE_STATISTICS_REPORT 1
+#define BMP_TYPE_PEER_DOWN_NOTIFICATION 2
+#define BMP_TYPE_PEER_UP_NOTIFICATION 3
+#define BMP_TYPE_INITIATION 4
+#define BMP_TYPE_TERMINATION 5
+#define BMP_TYPE_ROUTE_MIRRORING 6
+
+#define BMP_READ_BUFSIZ 1024
+
+/* bmp->state */
+#define BMP_None 0
+#define BMP_PeerUp 2
+#define BMP_Run 3
+
+/* This one is for BMP Route Monitoring messages, i.e. delivering updates
+ * in somewhat processed (as opposed to fully raw, see mirroring below) form.
+ * RFC explicitly says that we can skip old updates if we haven't sent them out
+ * yet and another newer update for the same prefix arrives.
+ *
+ * So, at most one of these can exist for each (bgp, afi, safi, prefix, peerid)
+ * tuple; if some prefix is "re-added" to the queue, the existing entry is
+ * instead moved to the end of the queue. This ensures that the queue size is
+ * bounded by the BGP table size.
+ *
+ * bmp_qlist is the queue itself while bmp_qhash is used to efficiently check
+ * whether a tuple is already on the list. The queue is maintained per
+ * bmp_target.
+ *
+ * refcount = number of "struct bmp *" whose queue position is before this
+ * entry, i.e. number of BMP sessions where we still want to send this out.
+ * Decremented on send so we know when we're done with an entry (i.e. this
+ * always happens from the front of the queue.)
+ */
+
+PREDECL_DLIST(bmp_qlist)
+PREDECL_HASH(bmp_qhash)
+
+struct bmp_queue_entry {
+ struct bmp_qlist_item bli;
+ struct bmp_qhash_item bhi;
+
+ struct prefix p;
+ uint64_t peerid;
+ afi_t afi;
+ safi_t safi;
+
+ size_t refcount;
+};
+
+/* This is for BMP Route Mirroring, which feeds fully raw BGP PDUs out to BMP
+ * receivers. So, this goes directly off packet RX/TX handling instead of
+ * grabbing bits from tables.
+ *
+ * There is *one* queue for each "struct bgp *" where we throw everything on,
+ * with a size limit. Refcount works the same as for monitoring above.
+ */
+
+PREDECL_LIST(bmp_mirrorq)
+
+struct bmp_mirrorq {
+ struct bmp_mirrorq_item bmi;
+
+ size_t refcount;
+ uint64_t peerid;
+ struct timeval tv;
+
+ size_t len;
+ uint8_t data[0];
+};
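
Both queue flavours above rely on the same refcount fan-out idea: every queued item starts with one reference per attached session, each session walks the queue at its own pace, and the item is freed once the last session has sent it. A minimal, self-contained sketch of just that mechanism follows; it is not FRR code, and it deliberately leaves out the per-prefix dedup and the mirror size limit:

    /* Not FRR code: a minimal model of the refcounted fan-out queue. */
    #include <stdio.h>
    #include <stdlib.h>

    #define NSESSIONS 2

    struct qitem {
        struct qitem *next;
        int value;          /* stand-in for a queued route update */
        size_t refcount;    /* sessions that still need to send this */
    };

    struct session {
        struct qitem *pos;  /* next item to send; NULL = caught up */
    };

    static struct qitem *head, *tail;
    static struct session sessions[NSESSIONS];

    static void enqueue(int value)
    {
        struct qitem *it = calloc(1, sizeof(*it));

        it->value = value;
        it->refcount = NSESSIONS;
        if (tail)
            tail->next = it;
        else
            head = it;
        tail = it;

        /* sessions that had caught up resume at the new item */
        for (int i = 0; i < NSESSIONS; i++)
            if (!sessions[i].pos)
                sessions[i].pos = it;
    }

    static int session_send(int i, int *value)
    {
        struct qitem *it = sessions[i].pos;

        if (!it)
            return 0;       /* nothing pending for this session */
        *value = it->value;
        sessions[i].pos = it->next;

        if (--it->refcount == 0) {
            /* last session done with it; it is necessarily the head */
            head = it->next;
            if (!head)
                tail = NULL;
            free(it);
        }
        return 1;
    }

    int main(void)
    {
        int v;

        enqueue(10);
        enqueue(20);
        while (session_send(0, &v))
            printf("session 0 sends %d\n", v);
        enqueue(30);
        while (session_send(1, &v))
            printf("session 1 sends %d\n", v);
        while (session_send(0, &v))
            printf("session 0 sends %d\n", v);
        return 0;
    }
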
+
+enum {
+ BMP_AFI_INACTIVE = 0,
+ BMP_AFI_NEEDSYNC,
+ BMP_AFI_SYNC,
+ BMP_AFI_LIVE,
+};
+
+PREDECL_LIST(bmp_session)
+
+struct bmp_active;
+struct bmp_targets;
+
+/* an established BMP session to a peer */
+struct bmp {
+ struct bmp_session_item bsi;
+ struct bmp_targets *targets;
+ struct bmp_active *active;
+
+ int socket;
+ char remote[SU_ADDRSTRLEN + 6];
+ struct thread *t_read;
+
+ struct pullwr *pullwr;
+
+ int state;
+
+ /* queue positions must remain synced with refcounts in the items.
+ * Whenever appending a queue item, we need to know the correct number
+ * of "struct bmp *" that want it, and when moving these positions
+ * ahead we need to make sure that refcount is decremented. Also, on
+ * disconnects we need to walk the queue and drop our reference.
+ */
+ struct bmp_queue_entry *queuepos;
+ struct bmp_mirrorq *mirrorpos;
+ bool mirror_lost;
+
+ /* enum BMP_AFI_* */
+ uint8_t afistate[AFI_MAX][SAFI_MAX];
+
+ /* counters for the various BMP packet types */
+ uint64_t cnt_update, cnt_mirror;
+ /* number of times this peer wasn't fast enough in consuming the
+ * mirror queue
+ */
+ uint64_t cnt_mirror_overruns;
+ struct timeval t_up;
+
+ /* synchronization / startup works by repeatedly finding the next
+ * table entry, the sync* fields note down what we sent last
+ */
+ struct prefix syncpos;
+ uint64_t syncpeerid;
+ afi_t syncafi;
+ safi_t syncsafi;
+};
+
+/* config & state for an active outbound connection. When the connection
+ * succeeds, "bmp" is set up.
+ */
+
+PREDECL_SORTLIST_UNIQ(bmp_actives)
+
+#define BMP_DFLT_MINRETRY 30000
+#define BMP_DFLT_MAXRETRY 720000
+
+struct bmp_active {
+ struct bmp_actives_item bai;
+ struct bmp_targets *targets;
+ struct bmp *bmp;
+
+ char *hostname;
+ int port;
+ unsigned minretry, maxretry;
+
+ struct resolver_query resq;
+
+ unsigned curretry;
+ unsigned addrpos, addrtotal;
+ union sockunion addrs[8];
+ int socket;
+ struct thread *t_timer, *t_read, *t_write;
+};
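
curretry is the reconnect interval currently in use: bmp_connect above resets it to minretry when the peer is (re)configured, and it is intended to back off towards maxretry while the connection keeps failing. The exact growth step lives in bgp_bmp.c and is not part of this excerpt, so the sketch below assumes a simple doubling clamped to maxretry:

    /* Not FRR code: assumed backoff behaviour for curretry (doubling is
     * an assumption; the real step is implemented in bgp_bmp.c). */
    #include <stdio.h>

    #define MINRETRY 30000      /* matches BMP_DFLT_MINRETRY, msec */
    #define MAXRETRY 720000     /* matches BMP_DFLT_MAXRETRY, msec */

    struct retry {
        unsigned minretry, maxretry, curretry;
    };

    static void retry_reset(struct retry *r)
    {
        /* connection established (or reconfigured): start from minretry */
        r->curretry = r->minretry;
    }

    static unsigned retry_fail(struct retry *r)
    {
        unsigned wait = r->curretry;

        /* connection failed: back off, but never beyond maxretry */
        r->curretry *= 2;
        if (r->curretry > r->maxretry)
            r->curretry = r->maxretry;
        return wait;
    }

    int main(void)
    {
        struct retry r = { .minretry = MINRETRY, .maxretry = MAXRETRY };

        retry_reset(&r);
        for (int attempt = 1; attempt <= 7; attempt++)
            printf("attempt %d: retry in %u ms\n", attempt, retry_fail(&r));
        return 0;
    }
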
+
+/* config & state for passive / listening sockets */
+PREDECL_SORTLIST_UNIQ(bmp_listeners)
+
+struct bmp_listener {
+ struct bmp_listeners_item bli;
+
+ struct bmp_targets *targets;
+
+ union sockunion addr;
+ int port;
+
+ struct thread *t_accept;
+ int sock;
+};
+
+/* bmp_targets - plural since it may contain multiple bmp_listener &
+ * bmp_active items. If they have the same config, BMP sessions should be
+ * put in the same targets since that's a bit more efficient.
+ */
+PREDECL_SORTLIST_UNIQ(bmp_targets)
+
+struct bmp_targets {
+ struct bmp_targets_item bti;
+
+ struct bmp_bgp *bmpbgp;
+ struct bgp *bgp;
+ char *name;
+
+ struct bmp_listeners_head listeners;
+
+ char *acl_name;
+ char *acl6_name;
+#define BMP_STAT_DEFAULT_TIMER 60000
+ int stat_msec;
+
+ /* only IPv4 & IPv6 / unicast & multicast supported for now */
+#define BMP_MON_PREPOLICY (1 << 0)
+#define BMP_MON_POSTPOLICY (1 << 1)
+ uint8_t afimon[AFI_MAX][SAFI_MAX];
+ bool mirror;
+
+ struct bmp_actives_head actives;
+
+ struct thread *t_stats;
+ struct bmp_session_head sessions;
+
+ struct bmp_qhash_head updhash;
+ struct bmp_qlist_head updlist;
+
+ uint64_t cnt_accept, cnt_aclrefused;
+
+ QOBJ_FIELDS
+};
+DECLARE_QOBJ_TYPE(bmp_targets)
+
+/* per struct peer * data. Lookup by peer->qobj_node.nid, created on demand,
+ * deleted in peer_backward hook. */
+PREDECL_HASH(bmp_peerh)
+
+struct bmp_bgp_peer {
+ struct bmp_peerh_item bpi;
+
+ uint64_t peerid;
+ /* struct peer *peer; */
+
+ uint8_t *open_rx;
+ size_t open_rx_len;
+
+ uint8_t *open_tx;
+ size_t open_tx_len;
+};
+
+/* per struct bgp * data */
+PREDECL_HASH(bmp_bgph)
+
+struct bmp_bgp {
+ struct bmp_bgph_item bbi;
+
+ struct bgp *bgp;
+ struct bmp_targets_head targets;
+
+ struct bmp_mirrorq_head mirrorq;
+ size_t mirror_qsize, mirror_qsizemax;
+
+ size_t mirror_qsizelimit;
+};
+
+enum {
+ BMP_PEERDOWN_LOCAL_NOTIFY = 1,
+ BMP_PEERDOWN_LOCAL_FSM = 2,
+ BMP_PEERDOWN_REMOTE_NOTIFY = 3,
+ BMP_PEERDOWN_REMOTE_CLOSE = 4,
+ BMP_PEERDOWN_ENDMONITOR = 5,
+};
+
+enum {
+ BMP_STATS_PFX_REJECTED = 0,
+ BMP_STATS_PFX_DUP_ADV = 1,
+ BMP_STATS_PFX_DUP_WITHDRAW = 2,
+ BMP_STATS_UPD_LOOP_CLUSTER = 3,
+ BMP_STATS_UPD_LOOP_ASPATH = 4,
+ BMP_STATS_UPD_LOOP_ORIGINATOR = 5,
+ BMP_STATS_UPD_LOOP_CONFED = 6,
+ BMP_STATS_SIZE_ADJ_RIB_IN = 7,
+ BMP_STATS_SIZE_LOC_RIB = 8,
+ BMP_STATS_SIZE_ADJ_RIB_IN_SAFI = 9,
+ BMP_STATS_SIZE_LOC_RIB_IN_SAFI = 10,
+ BMP_STATS_UPD_7606_WITHDRAW = 11,
+ BMP_STATS_PFX_7606_WITHDRAW = 12,
+ BMP_STATS_UPD_DUP = 13,
+ BMP_STATS_FRR_NH_INVALID = 65531,
+};
+
+DECLARE_MGROUP(BMP)
+
+#endif /*_BGP_BMP_H_*/
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 4949519a9b..6c77f18f33 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -4466,7 +4466,8 @@ void bgp_evpn_configure_import_rt_for_vrf(struct bgp *bgp_vrf,
struct ecommunity *ecomadd)
{
/* uninstall routes from vrf */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (is_l3vni_live(bgp_vrf))
+ uninstall_routes_for_vrf(bgp_vrf);
/* Cleanup the RT to VRF mapping */
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
@@ -4478,11 +4479,11 @@ void bgp_evpn_configure_import_rt_for_vrf(struct bgp *bgp_vrf,
listnode_add_sort(bgp_vrf->vrf_import_rtl, ecomadd);
SET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_IMPORT_RT_CFGD);
- /* map VRF to its RTs */
- bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
-
- /* install routes matching the new VRF */
- install_routes_for_vrf(bgp_vrf);
+ /* map VRF to its RTs and install routes matching the new RTs */
+ if (is_l3vni_live(bgp_vrf)) {
+ bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
+ install_routes_for_vrf(bgp_vrf);
+ }
}
void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
@@ -4492,7 +4493,8 @@ void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
struct ecommunity *ecom = NULL;
/* uninstall routes from vrf */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (is_l3vni_live(bgp_vrf))
+ uninstall_routes_for_vrf(bgp_vrf);
/* Cleanup the RT to VRF mapping */
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
@@ -4516,11 +4518,11 @@ void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
evpn_auto_rt_import_add_for_vrf(bgp_vrf);
}
- /* map VRFs to its RTs */
- bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
-
- /* install routes matching this new RT */
- install_routes_for_vrf(bgp_vrf);
+ /* map VRFs to its RTs and install routes matching this new RT */
+ if (is_l3vni_live(bgp_vrf)) {
+ bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
+ install_routes_for_vrf(bgp_vrf);
+ }
}
void bgp_evpn_configure_export_rt_for_vrf(struct bgp *bgp_vrf,
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index a5a091242f..f6bde2e9fa 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -275,6 +275,11 @@ static inline int is_vni_live(struct bgpevpn *vpn)
return (CHECK_FLAG(vpn->flags, VNI_FLAG_LIVE));
}
+static inline int is_l3vni_live(struct bgp *bgp_vrf)
+{
+ return (bgp_vrf->l3vni && bgp_vrf->l3vni_svi_ifindex);
+}
+
static inline int is_rd_configured(struct bgpevpn *vpn)
{
return (CHECK_FLAG(vpn->flags, VNI_FLAG_RD_CFGD));
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index e6d81e54c4..30380f6add 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -3696,7 +3696,7 @@ DEFUN(show_bgp_l2vpn_evpn_es,
*/
DEFUN(show_bgp_l2vpn_evpn_summary,
show_bgp_l2vpn_evpn_summary_cmd,
- "show bgp [vrf VRFNAME] l2vpn evpn summary [json]",
+ "show bgp [vrf VRFNAME] l2vpn evpn summary [failed] [json]",
SHOW_STR
BGP_STR
"bgp vrf\n"
@@ -3704,15 +3704,20 @@ DEFUN(show_bgp_l2vpn_evpn_summary,
L2VPN_HELP_STR
EVPN_HELP_STR
"Summary of BGP neighbor status\n"
+ "Show only sessions not in Established state\n"
JSON_STR)
{
int idx_vrf = 0;
bool uj = use_json(argc, argv);
char *vrf = NULL;
+ bool show_failed = false;
if (argv_find(argv, argc, "vrf", &idx_vrf))
vrf = argv[++idx_vrf]->arg;
- return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, uj);
+ if (argv_find(argv, argc, "failed", &idx_vrf))
+ show_failed = true;
+ return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN,
+ show_failed, uj);
}
/*
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index c9c6fc157e..cc7f81d9ff 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -179,9 +179,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
* on various buffers. Those need to be transferred or dropped,
* otherwise we'll get spurious failures during session establishment.
*/
- pthread_mutex_lock(&peer->io_mtx);
- pthread_mutex_lock(&from_peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
fd = peer->fd;
peer->fd = from_peer->fd;
from_peer->fd = fd;
@@ -222,8 +220,6 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
ringbuf_copy(peer->ibuf_work, from_peer->ibuf_work,
ringbuf_remain(from_peer->ibuf_work));
}
- pthread_mutex_unlock(&from_peer->io_mtx);
- pthread_mutex_unlock(&peer->io_mtx);
peer->as = from_peer->as;
peer->v_holdtime = from_peer->v_holdtime;
@@ -244,6 +240,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
from_peer->last_event = last_evt;
from_peer->last_major_event = last_maj_evt;
peer->remote_id = from_peer->remote_id;
+ peer->last_reset = from_peer->last_reset;
if (from_peer->hostname != NULL) {
if (peer->hostname) {
@@ -551,7 +548,11 @@ const char *peer_down_str[] = {"",
"Intf peering v6only config change",
"BFD down received",
"Interface down",
- "Neighbor address lost"};
+ "Neighbor address lost",
+ "Waiting for NHT",
+ "Waiting for Peer IPv6 LLA",
+ "Waiting for VRF to be initialized",
+ "No AFI/SAFI activated for peer"};
static int bgp_graceful_restart_timer_expire(struct thread *thread)
{
@@ -1162,8 +1163,7 @@ int bgp_stop(struct peer *peer)
BGP_TIMER_OFF(peer->t_routeadv);
/* Clear input and output buffer. */
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
if (peer->ibuf)
stream_fifo_clean(peer->ibuf);
if (peer->obuf)
@@ -1179,7 +1179,6 @@ int bgp_stop(struct peer *peer)
peer->curr = NULL;
}
}
- pthread_mutex_unlock(&peer->io_mtx);
/* Close of file descriptor. */
if (peer->fd >= 0) {
@@ -1410,6 +1409,7 @@ int bgp_start(struct peer *peer)
zlog_debug(
"%s [FSM] Unable to get neighbor's IP address, waiting...",
peer->host);
+ peer->last_reset = PEER_DOWN_NBR_ADDR;
return -1;
}
@@ -1455,6 +1455,7 @@ int bgp_start(struct peer *peer)
EC_BGP_FSM,
"%s [FSM] In a VRF that is not initialised yet",
peer->host);
+ peer->last_reset = PEER_DOWN_VRF_UNINIT;
return -1;
}
@@ -1466,7 +1467,7 @@ int bgp_start(struct peer *peer)
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Waiting for NHT",
peer->host);
-
+ peer->last_reset = PEER_DOWN_WAITING_NHT;
BGP_EVENT_ADD(peer, TCP_connection_open_failed);
return 0;
}
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index f111822e53..c2d06a4b5a 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -132,12 +132,10 @@ static int bgp_process_writes(struct thread *thread)
struct frr_pthread *fpt = bgp_pth_io;
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
status = bgp_write(peer);
reschedule = (stream_fifo_head(peer->obuf) != NULL);
}
- pthread_mutex_unlock(&peer->io_mtx);
/* no problem */
if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -184,11 +182,9 @@ static int bgp_process_reads(struct thread *thread)
struct frr_pthread *fpt = bgp_pth_io;
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
status = bgp_read(peer);
}
- pthread_mutex_unlock(&peer->io_mtx);
/* error checking phase */
if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -237,11 +233,9 @@ static int bgp_process_reads(struct thread *thread)
assert(ringbuf_get(ibw, pktbuf, pktsize) == pktsize);
stream_put(pkt, pktbuf, pktsize);
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
stream_fifo_push(peer->ibuf, pkt);
}
- pthread_mutex_unlock(&peer->io_mtx);
added_pkt = true;
} else
diff --git a/bgpd/bgp_keepalives.c b/bgpd/bgp_keepalives.c
index bec3bdcb8d..6de1c216a6 100644
--- a/bgpd/bgp_keepalives.c
+++ b/bgpd/bgp_keepalives.c
@@ -245,8 +245,7 @@ void bgp_keepalives_on(struct peer *peer)
*/
assert(peerhash_mtx);
- pthread_mutex_lock(peerhash_mtx);
- {
+ frr_with_mutex(peerhash_mtx) {
holder.peer = peer;
if (!hash_lookup(peerhash, &holder)) {
struct pkat *pkat = pkat_new(peer);
@@ -255,7 +254,6 @@ void bgp_keepalives_on(struct peer *peer)
}
SET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
}
- pthread_mutex_unlock(peerhash_mtx);
bgp_keepalives_wake();
}
@@ -275,8 +273,7 @@ void bgp_keepalives_off(struct peer *peer)
*/
assert(peerhash_mtx);
- pthread_mutex_lock(peerhash_mtx);
- {
+ frr_with_mutex(peerhash_mtx) {
holder.peer = peer;
struct pkat *res = hash_release(peerhash, &holder);
if (res) {
@@ -285,16 +282,13 @@ void bgp_keepalives_off(struct peer *peer)
}
UNSET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
}
- pthread_mutex_unlock(peerhash_mtx);
}
void bgp_keepalives_wake(void)
{
- pthread_mutex_lock(peerhash_mtx);
- {
+ frr_with_mutex(peerhash_mtx) {
pthread_cond_signal(peerhash_cond);
}
- pthread_mutex_unlock(peerhash_mtx);
}
int bgp_keepalives_stop(struct frr_pthread *fpt, void **result)
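
The bgp_fsm.c, bgp_io.c and bgp_keepalives.c hunks above all make the same mechanical change: an explicit pthread_mutex_lock()/pthread_mutex_unlock() pair around a block becomes frr_with_mutex(...) { ... }, which acquires the mutex when the block is entered and releases it when the block is left. The real macro comes from lib/frr_pthread.h (extended by this commit); the stand-in below is only a rough illustration of that block-scoped locking shape:

    /* Not the FRR macro: a rough stand-in showing block-scoped locking. */
    #include <pthread.h>
    #include <stdio.h>

    /* lock on entering the block, unlock when the block is left normally
     * (this toy version does not survive break/return out of the block) */
    #define with_mutex(m)                                                 \
        for (int once_ = (pthread_mutex_lock(m), 1); once_;               \
             once_ = (pthread_mutex_unlock(m), 0))

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static int counter;

    int main(void)
    {
        with_mutex(&mtx) {
            /* critical section: mtx is held for exactly this block */
            counter++;
            printf("counter = %d\n", counter);
        }
        return 0;
    }

The frr_with_privs() conversions in bgp_network.c below apply the same pattern to privilege elevation.
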
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index 1dadf00e8f..887caee95e 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -122,7 +122,7 @@ static int bgp_md5_set_connect(int socket, union sockunion *su,
int ret = -1;
#if HAVE_DECL_TCP_MD5SIG
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
ret = bgp_md5_set_socket(socket, su, prefixlen, password);
}
#endif /* HAVE_TCP_MD5SIG */
@@ -140,8 +140,7 @@ static int bgp_md5_set_password(struct peer *peer, const char *password)
* Set or unset the password on the listen socket(s). Outbound
* connections are taken care of in bgp_connect() below.
*/
- frr_elevate_privs(&bgpd_privs)
- {
+ frr_with_privs(&bgpd_privs) {
for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener))
if (listener->su.sa.sa_family
== peer->su.sa.sa_family) {
@@ -167,8 +166,7 @@ int bgp_md5_set_prefix(struct prefix *p, const char *password)
struct bgp_listener *listener;
/* Set or unset the password on the listen socket(s). */
- frr_elevate_privs(&bgpd_privs)
- {
+ frr_with_privs(&bgpd_privs) {
for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener))
if (listener->su.sa.sa_family == p->family) {
prefix2sockunion(p, &su);
@@ -610,7 +608,7 @@ int bgp_connect(struct peer *peer)
zlog_debug("Peer address not learnt: Returning from connect");
return 0;
}
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
/* Make socket for the peer. */
peer->fd = vrf_sockunion_socket(&peer->su, peer->bgp->vrf_id,
bgp_get_bound_name(peer));
@@ -630,7 +628,7 @@ int bgp_connect(struct peer *peer)
sockopt_reuseport(peer->fd);
#ifdef IPTOS_PREC_INTERNETCONTROL
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
if (sockunion_family(&peer->su) == AF_INET)
setsockopt_ipv4_tos(peer->fd,
IPTOS_PREC_INTERNETCONTROL);
@@ -708,7 +706,7 @@ static int bgp_listener(int sock, struct sockaddr *sa, socklen_t salen,
sockopt_reuseaddr(sock);
sockopt_reuseport(sock);
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
#ifdef IPTOS_PREC_INTERNETCONTROL
if (sa->sa_family == AF_INET)
@@ -767,7 +765,7 @@ int bgp_socket(struct bgp *bgp, unsigned short port, const char *address)
snprintf(port_str, sizeof(port_str), "%d", port);
port_str[sizeof(port_str) - 1] = '\0';
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
ret = vrf_getaddrinfo(address, port_str, &req, &ainfo_save,
bgp->vrf_id);
}
@@ -788,7 +786,7 @@ int bgp_socket(struct bgp *bgp, unsigned short port, const char *address)
if (ainfo->ai_family != AF_INET && ainfo->ai_family != AF_INET6)
continue;
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
sock = vrf_socket(ainfo->ai_family,
ainfo->ai_socktype,
ainfo->ai_protocol, bgp->vrf_id,
diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c
index 64529f6ef3..7e5e07099d 100644
--- a/bgpd/bgp_open.c
+++ b/bgpd/bgp_open.c
@@ -503,7 +503,7 @@ static int bgp_capability_restart(struct peer *peer,
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%s Address family %s is%spreserved",
- peer->host, afi_safi_print(afi, safi),
+ peer->host, get_afi_safi_str(afi, safi, false),
CHECK_FLAG(
peer->af_cap[afi][safi],
PEER_CAP_RESTART_AF_PRESERVE_RCV)
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 99522a6522..c7c1780c21 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -41,6 +41,7 @@
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_bmp.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
@@ -123,9 +124,9 @@ int bgp_packet_set_size(struct stream *s)
*/
static void bgp_packet_add(struct peer *peer, struct stream *s)
{
- pthread_mutex_lock(&peer->io_mtx);
- stream_fifo_push(peer->obuf, s);
- pthread_mutex_unlock(&peer->io_mtx);
+ frr_with_mutex(&peer->io_mtx) {
+ stream_fifo_push(peer->obuf, s);
+ }
}
static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,
@@ -140,7 +141,7 @@ static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,
if (bgp_debug_neighbor_events(peer))
zlog_debug("send End-of-RIB for %s to %s",
- afi_safi_print(afi, safi), peer->host);
+ get_afi_safi_str(afi, safi, false), peer->host);
s = stream_new(BGP_MAX_PACKET_SIZE);
@@ -664,7 +665,7 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
struct stream *s;
/* Lock I/O mutex to prevent other threads from pushing packets */
- pthread_mutex_lock(&peer->io_mtx);
+ frr_mutex_lock_autounlock(&peer->io_mtx);
/* ============================================== */
/* Allocate new stream. */
@@ -755,9 +756,6 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
stream_fifo_push(peer->obuf, s);
bgp_write_notify(peer);
-
- /* ============================================== */
- pthread_mutex_unlock(&peer->io_mtx);
}
/*
@@ -1660,7 +1658,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
bgp_clear_stale_route(peer, afi, safi);
zlog_info("%%NOTIFICATION: rcvd End-of-RIB for %s from %s in vrf %s",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
vrf ? vrf->name : VRF_DEFAULT_NAME);
}
}
@@ -2236,11 +2234,9 @@ int bgp_process_packet(struct thread *thread)
bgp_size_t size;
char notify_data_length[2];
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
peer->curr = stream_fifo_pop(peer->ibuf);
}
- pthread_mutex_unlock(&peer->io_mtx);
if (peer->curr == NULL) // no packets to process, hmm...
return 0;
@@ -2359,15 +2355,13 @@ int bgp_process_packet(struct thread *thread)
if (fsm_update_result != FSM_PEER_TRANSFERRED
&& fsm_update_result != FSM_PEER_STOPPED) {
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
// more work to do, come back later
if (peer->ibuf->count > 0)
thread_add_timer_msec(
bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
}
- pthread_mutex_unlock(&peer->io_mtx);
}
return 0;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 183debddba..32c9fb16f3 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2750,7 +2750,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,
zlog_info(
"%%MAXPFXEXCEED: No. of %s prefix received from %s %ld exceed, "
"limit %ld",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
peer->pcount[afi][safi], peer->pmax[afi][safi]);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_LIMIT);
@@ -2811,7 +2811,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,
zlog_info(
"%%MAXPFX: No. of %s prefix received from %s reaches %ld, max %ld",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
peer->pcount[afi][safi], peer->pmax[afi][safi]);
SET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_PREFIX_THRESHOLD);
@@ -10696,7 +10696,7 @@ static int bgp_table_stats(struct vty *vty, struct bgp *bgp, afi_t afi,
return CMD_WARNING;
}
- vty_out(vty, "BGP %s RIB statistics\n", afi_safi_print(afi, safi));
+ vty_out(vty, "BGP %s RIB statistics\n", get_afi_safi_str(afi, safi, false));
/* labeled-unicast routes live in the unicast table */
if (safi == SAFI_LABELED_UNICAST)
@@ -10895,7 +10895,7 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,
if (use_json) {
json_object_string_add(json, "prefixCountsFor", peer->host);
json_object_string_add(json, "multiProtocol",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, true));
json_object_int_add(json, "pfxCounter",
peer->pcount[afi][safi]);
@@ -10921,10 +10921,10 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,
&& bgp_flag_check(peer->bgp, BGP_FLAG_SHOW_HOSTNAME)) {
vty_out(vty, "Prefix counts for %s/%s, %s\n",
peer->hostname, peer->host,
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
} else {
vty_out(vty, "Prefix counts for %s, %s\n", peer->host,
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
}
vty_out(vty, "PfxCt: %ld\n", peer->pcount[afi][safi]);
@@ -11552,7 +11552,7 @@ DEFUN (show_ip_bgp_neighbor_received_prefix_filter,
if (count) {
if (!uj)
vty_out(vty, "Address Family: %s\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
prefix_bgp_show_prefix_list(vty, afi, name, uj);
} else {
if (uj)
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index aaae9e380e..545ca19762 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -3057,7 +3057,7 @@ static int bgp_route_match_add(struct vty *vty, const char *command,
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
int retval = CMD_SUCCESS;
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
@@ -3074,6 +3074,11 @@ static int bgp_route_match_add(struct vty *vty, const char *command,
route_map_upd8_dependency(type, arg, index->map->name);
}
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally doing nothing here.
+ */
+ break;
}
return retval;
@@ -3084,7 +3089,7 @@ static int bgp_route_match_delete(struct vty *vty, const char *command,
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
@@ -3117,6 +3122,11 @@ static int bgp_route_match_delete(struct vty *vty, const char *command,
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 22840d54c6..2cfd65896c 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -51,12 +51,6 @@
#include "lib/thread.h"
#ifndef VTYSH_EXTRACT_PL
#include "rtrlib/rtrlib.h"
-#include "rtrlib/rtr_mgr.h"
-#include "rtrlib/lib/ip.h"
-#include "rtrlib/transport/tcp/tcp_transport.h"
-#if defined(FOUND_SSH)
-#include "rtrlib/transport/ssh/ssh_transport.h"
-#endif
#endif
#include "hook.h"
#include "libfrr.h"
@@ -76,8 +70,6 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group")
#define POLLING_PERIOD_DEFAULT 3600
#define EXPIRE_INTERVAL_DEFAULT 7200
#define RETRY_INTERVAL_DEFAULT 600
-#define TIMEOUT_DEFAULT 600
-#define INITIAL_SYNCHRONISATION_TIMEOUT_DEFAULT 30
#define RPKI_DEBUG(...) \
if (rpki_debug) { \
@@ -147,8 +139,6 @@ static int rpki_debug;
static unsigned int polling_period;
static unsigned int expire_interval;
static unsigned int retry_interval;
-static unsigned int timeout;
-static unsigned int initial_synchronisation_timeout;
static int rpki_sync_socket_rtr;
static int rpki_sync_socket_bgpd;
@@ -538,9 +528,6 @@ static int bgp_rpki_init(struct thread_master *master)
polling_period = POLLING_PERIOD_DEFAULT;
expire_interval = EXPIRE_INTERVAL_DEFAULT;
retry_interval = RETRY_INTERVAL_DEFAULT;
- timeout = TIMEOUT_DEFAULT;
- initial_synchronisation_timeout =
- INITIAL_SYNCHRONISATION_TIMEOUT_DEFAULT;
install_cli_commands();
rpki_init_sync_socket();
return 0;
@@ -756,8 +743,6 @@ static int add_cache(struct cache *cache)
group.sockets_len = 1;
group.sockets = &cache->rtr_socket;
- listnode_add(cache_list, cache);
-
if (rtr_is_running) {
init_tr_socket(cache);
@@ -767,6 +752,8 @@ static int add_cache(struct cache *cache)
}
}
+ listnode_add(cache_list, cache);
+
return SUCCESS;
}
@@ -793,7 +780,12 @@ static int add_tcp_cache(const char *host, const char *port,
cache->rtr_socket = rtr_socket;
cache->preference = preference;
- return add_cache(cache);
+ int ret = add_cache(cache);
+ if (ret != SUCCESS) {
+ free_cache(cache);
+ }
+
+ return ret;
}
#if defined(FOUND_SSH)
@@ -829,7 +821,12 @@ static int add_ssh_cache(const char *host, const unsigned int port,
cache->rtr_socket = rtr_socket;
cache->preference = preference;
- return add_cache(cache);
+ int ret = add_cache(cache);
+ if (ret != SUCCESS) {
+ free_cache(cache);
+ }
+
+ return ret;
}
#endif
@@ -869,9 +866,6 @@ static int config_write(struct vty *vty)
vty_out(vty, "!\n");
vty_out(vty, "rpki\n");
vty_out(vty, " rpki polling_period %d\n", polling_period);
- vty_out(vty, " rpki timeout %d\n", timeout);
- vty_out(vty, " rpki initial-synchronisation-timeout %d\n",
- initial_synchronisation_timeout);
for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) {
switch (cache->type) {
struct tr_tcp_config *tcp_config;
@@ -1020,48 +1014,64 @@ DEFUN (no_rpki_retry_interval,
return CMD_SUCCESS;
}
-DEFPY (rpki_timeout,
+#if (CONFDATE > 20200901)
+CPP_NOTICE("bgpd: time to remove rpki timeout")
+CPP_NOTICE("bgpd: this includes rpki_timeout and rpki_synchronisation_timeout")
+#endif
+
+DEFPY_HIDDEN (rpki_timeout,
rpki_timeout_cmd,
"rpki timeout (1-4294967295)$to_arg",
RPKI_OUTPUT_STRING
"Set timeout\n"
"Timeout value\n")
{
- timeout = to_arg;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
-DEFUN (no_rpki_timeout,
+DEFUN_HIDDEN (no_rpki_timeout,
no_rpki_timeout_cmd,
"no rpki timeout",
NO_STR
RPKI_OUTPUT_STRING
"Set timeout back to default\n")
{
- timeout = TIMEOUT_DEFAULT;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
-DEFPY (rpki_synchronisation_timeout,
+DEFPY_HIDDEN (rpki_synchronisation_timeout,
rpki_synchronisation_timeout_cmd,
"rpki initial-synchronisation-timeout (1-4294967295)$ito_arg",
RPKI_OUTPUT_STRING
"Set a timeout for the initial synchronisation of prefix validation data\n"
"Timeout value\n")
{
- initial_synchronisation_timeout = ito_arg;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
-DEFUN (no_rpki_synchronisation_timeout,
+DEFUN_HIDDEN (no_rpki_synchronisation_timeout,
no_rpki_synchronisation_timeout_cmd,
"no rpki initial-synchronisation-timeout",
NO_STR
RPKI_OUTPUT_STRING
"Set the initial synchronisation timeout back to default (30 sec.)\n")
{
- initial_synchronisation_timeout =
- INITIAL_SYNCHRONISATION_TIMEOUT_DEFAULT;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
@@ -1083,6 +1093,18 @@ DEFPY (rpki_cache,
"Preference value\n")
{
int return_value;
+ struct listnode *cache_node;
+ struct cache *current_cache;
+
+ for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, current_cache)) {
+ if (current_cache->preference == preference) {
+ vty_out(vty,
+ "Cache with preference %ld is already configured\n",
+ preference);
+ return CMD_WARNING;
+ }
+ }
+
// use ssh connection
if (ssh_uname) {
@@ -1128,11 +1150,11 @@ DEFPY (no_rpki_cache,
return CMD_WARNING;
}
- if (rtr_is_running) {
+ if (rtr_is_running && listcount(cache_list) == 1) {
+ stop();
+ } else if (rtr_is_running) {
if (rtr_mgr_remove_group(rtr_config, preference) == RTR_ERROR) {
vty_out(vty, "Could not remove cache %ld", preference);
- if (listcount(cache_list) == 1)
- vty_out(vty, " because it is the last cache");
vty_out(vty, "\n");
return CMD_WARNING;
@@ -1385,7 +1407,7 @@ DEFUN (match_rpki,
"Prefix not found\n")
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, "rpki", argv[2]->arg,
RMAP_EVENT_MATCH_ADDED);
@@ -1397,6 +1419,12 @@ DEFUN (match_rpki,
case RMAP_COMPILE_ERROR:
vty_out(vty, "%% BGP Argument is malformed.\n");
return CMD_WARNING_CONFIG_FAILED;
+ case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally doing nothing here
+ */
+ break;
}
}
return CMD_SUCCESS;
@@ -1413,7 +1441,7 @@ DEFUN (no_match_rpki,
"Prefix not found\n")
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_match(index, "rpki", argv[3]->arg);
if (ret) {
@@ -1424,6 +1452,12 @@ DEFUN (no_match_rpki,
case RMAP_COMPILE_ERROR:
vty_out(vty, "%% BGP Argument is malformed.\n");
break;
+ case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
return CMD_WARNING_CONFIG_FAILED;
}
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 0d0e433980..17bc83ed2e 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -22,6 +22,7 @@
#include "command.h"
#include "lib/json.h"
+#include "lib_errors.h"
#include "lib/zclient.h"
#include "prefix.h"
#include "plist.h"
@@ -130,6 +131,80 @@ static enum node_type bgp_node_type(afi_t afi, safi_t safi)
return BGP_IPV4_NODE;
}
+static const char *get_afi_safi_vty_str(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "IPv4 Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "IPv4 Multicast";
+ else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
+ return "IPv4 Labeled Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
+ return "IPv4 VPN";
+ else if (afi == AFI_IP && safi == SAFI_ENCAP)
+ return "IPv4 Encap";
+ else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
+ return "IPv4 Flowspec";
+ else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "IPv6 Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "IPv6 Multicast";
+ else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
+ return "IPv6 Labeled Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
+ return "IPv6 VPN";
+ else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
+ return "IPv6 Encap";
+ else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
+ return "IPv6 Flowspec";
+ else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+ return "L2VPN EVPN";
+ else {
+ flog_err(EC_LIB_DEVELOPMENT, "New afi/safi that needs to be taken care of?");
+ return "Unknown";
+ }
+}
+
+/*
+ * Please note that we have intentionally camelCased
+ * the return strings here. So if you want
+ * to use this function, please ensure you
+ * are doing this within json output
+ */
+static const char *get_afi_safi_json_str(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "ipv4Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "ipv4Multicast";
+ else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
+ return "ipv4LabeledUnicast";
+ else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
+ return "ipv4Vpn";
+ else if (afi == AFI_IP && safi == SAFI_ENCAP)
+ return "ipv4Encap";
+ else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
+ return "ipv4Flowspec";
+ else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "ipv6Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "ipv6Multicast";
+ else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
+ return "ipv6LabeledUnicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
+ return "ipv6Vpn";
+ else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
+ return "ipv6Encap";
+ else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
+ return "ipv6Flowspec";
+ else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+ return "l2VpnEvpn";
+ else {
+ flog_err(EC_LIB_DEVELOPMENT, "New afi/safi that needs to be taken care of?");
+ return "Unknown";
+ }
+}
+
/* Utility function to get address family from current node. */
afi_t bgp_node_afi(struct vty *vty)
{
@@ -584,7 +659,7 @@ static void bgp_clear_vty_error(struct vty *vty, struct peer *peer, afi_t afi,
case BGP_ERR_AF_UNCONFIGURED:
vty_out(vty,
"%%BGP: Enable %s address family for the neighbor %s\n",
- afi_safi_print(afi, safi), peer->host);
+ get_afi_safi_str(afi, safi, false), peer->host);
break;
case BGP_ERR_SOFT_RECONFIG_UNCONFIGURED:
vty_out(vty,
@@ -738,7 +813,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (!found)
vty_out(vty,
"%%BGP: No %s peer belonging to peer-group %s is configured\n",
- afi_safi_print(afi, safi), arg);
+ get_afi_safi_str(afi, safi, false), arg);
return CMD_SUCCESS;
}
@@ -760,7 +835,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (!found)
vty_out(vty,
"%%BGP: No external %s peer is configured\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
return CMD_SUCCESS;
}
@@ -784,7 +859,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (!found)
vty_out(vty,
"%%BGP: No %s peer is configured with AS %s\n",
- afi_safi_print(afi, safi), arg);
+ get_afi_safi_str(afi, safi, false), arg);
return CMD_SUCCESS;
}
@@ -7794,9 +7869,145 @@ static void bgp_show_bestpath_json(struct bgp *bgp, json_object *json)
json_object_object_add(json, "bestPath", bestpath);
}
+/* Print the error code/subcode for why the peer is down */
+static void bgp_show_peer_reset(struct vty *vty, struct peer *peer,
+ json_object *json_peer, bool use_json)
+{
+ const char *code_str;
+ const char *subcode_str;
+
+ if (use_json) {
+ if (peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ || peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
+ char errorcodesubcode_hexstr[5];
+ char errorcodesubcode_str[256];
+
+ code_str = bgp_notify_code_str(peer->notify.code);
+ subcode_str = bgp_notify_subcode_str(
+ peer->notify.code,
+ peer->notify.subcode);
+
+ sprintf(errorcodesubcode_hexstr, "%02X%02X",
+ peer->notify.code, peer->notify.subcode);
+ json_object_string_add(json_peer,
+ "lastErrorCodeSubcode",
+ errorcodesubcode_hexstr);
+ snprintf(errorcodesubcode_str, 255, "%s%s",
+ code_str, subcode_str);
+ json_object_string_add(json_peer,
+ "lastNotificationReason",
+ errorcodesubcode_str);
+ if (peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED
+ && peer->notify.code == BGP_NOTIFY_CEASE
+ && (peer->notify.subcode
+ == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
+ || peer->notify.subcode
+ == BGP_NOTIFY_CEASE_ADMIN_RESET)
+ && peer->notify.length) {
+ char msgbuf[1024];
+ const char *msg_str;
+
+ msg_str = bgp_notify_admin_message(
+ msgbuf, sizeof(msgbuf),
+ (uint8_t *)peer->notify.data,
+ peer->notify.length);
+ if (msg_str)
+ json_object_string_add(
+ json_peer,
+ "lastShutdownDescription",
+ msg_str);
+ }
+
+ }
+ json_object_string_add(json_peer, "lastResetDueTo",
+ peer_down_str[(int)peer->last_reset]);
+ json_object_int_add(json_peer, "lastResetCode",
+ peer->last_reset);
+ } else {
+ if (peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ || peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
+ code_str = bgp_notify_code_str(peer->notify.code);
+ subcode_str =
+ bgp_notify_subcode_str(peer->notify.code,
+ peer->notify.subcode);
+ vty_out(vty, " Notification %s (%s%s)\n",
+ peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ ? "sent"
+ : "received",
+ code_str, subcode_str);
+ } else {
+ vty_out(vty, " %s\n",
+ peer_down_str[(int)peer->last_reset]);
+ }
+ }
+}
+
+static inline bool bgp_has_peer_failed(struct peer *peer, afi_t afi,
+ safi_t safi)
+{
+ return ((peer->status != Established) ||
+ !peer->afc_recv[afi][safi]);
+}
+
+static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp,
+ struct peer *peer, json_object *json_peer,
+ int max_neighbor_width, bool use_json)
+{
+ char timebuf[BGP_UPTIME_LEN], dn_flag[2];
+ int len;
+
+ if (use_json) {
+ if (peer_dynamic_neighbor(peer))
+ json_object_boolean_true_add(json_peer,
+ "dynamicPeer");
+ if (peer->hostname)
+ json_object_string_add(json_peer, "hostname",
+ peer->hostname);
+
+ if (peer->domainname)
+ json_object_string_add(json_peer, "domainname",
+ peer->domainname);
+ json_object_int_add(json_peer, "connectionsEstablished",
+ peer->established);
+ json_object_int_add(json_peer, "connectionsDropped",
+ peer->dropped);
+ peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
+ use_json, json_peer);
+ if (peer->status == Established)
+ json_object_string_add(json_peer, "lastResetDueTo",
+ "AFI/SAFI Not Negotiated");
+ else
+ bgp_show_peer_reset(NULL, peer, json_peer, true);
+ } else {
+ dn_flag[1] = '\0';
+ dn_flag[0] = peer_dynamic_neighbor(peer) ? '*' : '\0';
+ if (peer->hostname
+ && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
+ len = vty_out(vty, "%s%s(%s)", dn_flag,
+ peer->hostname, peer->host);
+ else
+ len = vty_out(vty, "%s%s", dn_flag, peer->host);
+
+ /* pad the neighbor column with spaces */
+ if (len < max_neighbor_width)
+ vty_out(vty, "%*s", max_neighbor_width - len,
+ " ");
+ vty_out(vty, "%7d %7d %8s", peer->established,
+ peer->dropped,
+ peer_uptime(peer->uptime, timebuf,
+ BGP_UPTIME_LEN, 0, NULL));
+ if (peer->status == Established)
+ vty_out(vty, " AFI/SAFI Not Negotiated\n");
+ else
+ bgp_show_peer_reset(vty, peer, NULL,
+ false);
+ }
+}
+
+
/* Show BGP peer's summary information. */
static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
- bool use_json)
+ bool show_failed, bool use_json)
{
struct peer *peer;
struct listnode *node, *nnode;
@@ -7804,7 +8015,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
char timebuf[BGP_UPTIME_LEN], dn_flag[2];
char neighbor_buf[VTY_BUFSIZ];
int neighbor_col_default_width = 16;
- int len;
+ int len, failed_count = 0;
int max_neighbor_width = 0;
int pfx_rcd_safi;
json_object *json = NULL;
@@ -7816,6 +8027,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
* to
* display the correct PfxRcd value we must look at SAFI_UNICAST
*/
+
if (safi == SAFI_LABELED_UNICAST)
pfx_rcd_safi = SAFI_UNICAST;
else
@@ -7824,6 +8036,20 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
if (use_json) {
json = json_object_new_object();
json_peers = json_object_new_object();
+ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
+ if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
+ continue;
+
+ if (peer->afc[afi][safi]) {
+ /* See if we have at least a single failed peer */
+ if (bgp_has_peer_failed(peer, afi, safi))
+ failed_count++;
+ count++;
+ }
+ if (peer_dynamic_neighbor(peer))
+ dn_count++;
+ }
+
} else {
/* Loop over all neighbors that will be displayed to determine
* how many
@@ -7852,6 +8078,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
if (len > max_neighbor_width)
max_neighbor_width = len;
+
+ /* See if we have at least a single failed peer */
+ if (bgp_has_peer_failed(peer, afi, safi))
+ failed_count++;
+ count++;
}
}
@@ -7862,6 +8093,23 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
max_neighbor_width = neighbor_col_default_width;
}
+ if (show_failed && !failed_count) {
+ if (use_json) {
+ json_object_int_add(json, "failedPeersCount", 0);
+ json_object_int_add(json, "dynamicPeers", dn_count);
+ json_object_int_add(json, "totalPeers", count);
+
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ } else {
+ vty_out(vty, "%% No failed BGP neighbors found\n");
+ vty_out(vty, "\nTotal number of neighbors %d\n", count);
+ }
+ return CMD_SUCCESS;
+ }
+
+ count = 0; /* Reset the value as it's used again */
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
continue;
@@ -8063,78 +8311,97 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
vty_out(vty, "Neighbor");
vty_out(vty, "%*s", max_neighbor_width - 8,
" ");
- vty_out(vty,
+ if (show_failed)
+ vty_out(vty, "EstdCnt DropCnt ResetTime Reason\n");
+ else
+ vty_out(vty,
"V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd\n");
}
}
count++;
+ /* Works for both failed & successful cases */
+ if (peer_dynamic_neighbor(peer))
+ dn_count++;
if (use_json) {
- json_peer = json_object_new_object();
+ json_peer = NULL;
+
+ if (show_failed &&
+ bgp_has_peer_failed(peer, afi, safi)) {
+ json_peer = json_object_new_object();
+ bgp_show_failed_summary(vty, bgp, peer,
+ json_peer, 0, use_json);
+ } else if (!show_failed) {
+ json_peer = json_object_new_object();
+ if (peer_dynamic_neighbor(peer)) {
+ json_object_boolean_true_add(json_peer,
+ "dynamicPeer");
+ }
- if (peer_dynamic_neighbor(peer)) {
- dn_count++;
- json_object_boolean_true_add(json_peer,
- "dynamicPeer");
+ if (peer->hostname)
+ json_object_string_add(json_peer, "hostname",
+ peer->hostname);
+
+ if (peer->domainname)
+ json_object_string_add(json_peer, "domainname",
+ peer->domainname);
+
+ json_object_int_add(json_peer, "remoteAs", peer->as);
+ json_object_int_add(json_peer, "version", 4);
+ json_object_int_add(json_peer, "msgRcvd",
+ PEER_TOTAL_RX(peer));
+ json_object_int_add(json_peer, "msgSent",
+ PEER_TOTAL_TX(peer));
+
+ json_object_int_add(json_peer, "tableVersion",
+ peer->version[afi][safi]);
+ json_object_int_add(json_peer, "outq",
+ peer->obuf->count);
+ json_object_int_add(json_peer, "inq", 0);
+ peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
+ use_json, json_peer);
+
+ /*
+ * Adding "pfxRcd" field to match with the corresponding
+ * CLI. "prefixReceivedCount" will be deprecated in
+ * future.
+ */
+ json_object_int_add(json_peer, "prefixReceivedCount",
+ peer->pcount[afi][pfx_rcd_safi]);
+ json_object_int_add(json_peer, "pfxRcd",
+ peer->pcount[afi][pfx_rcd_safi]);
+
+ paf = peer_af_find(peer, afi, pfx_rcd_safi);
+ if (paf && PAF_SUBGRP(paf))
+ json_object_int_add(json_peer,
+ "pfxSnt",
+ (PAF_SUBGRP(paf))->scount);
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
+ json_object_string_add(json_peer, "state",
+ "Idle (Admin)");
+ else if (peer->afc_recv[afi][safi])
+ json_object_string_add(
+ json_peer, "state",
+ lookup_msg(bgp_status_msg, peer->status,
+ NULL));
+ else if (CHECK_FLAG(peer->sflags,
+ PEER_STATUS_PREFIX_OVERFLOW))
+ json_object_string_add(json_peer, "state",
+ "Idle (PfxCt)");
+ else
+ json_object_string_add(
+ json_peer, "state",
+ lookup_msg(bgp_status_msg, peer->status,
+ NULL));
+ json_object_int_add(json_peer, "connectionsEstablished",
+ peer->established);
+ json_object_int_add(json_peer, "connectionsDropped",
+ peer->dropped);
}
-
- if (peer->hostname)
- json_object_string_add(json_peer, "hostname",
- peer->hostname);
-
- if (peer->domainname)
- json_object_string_add(json_peer, "domainname",
- peer->domainname);
-
- json_object_int_add(json_peer, "remoteAs", peer->as);
- json_object_int_add(json_peer, "version", 4);
- json_object_int_add(json_peer, "msgRcvd",
- PEER_TOTAL_RX(peer));
- json_object_int_add(json_peer, "msgSent",
- PEER_TOTAL_TX(peer));
-
- json_object_int_add(json_peer, "tableVersion",
- peer->version[afi][safi]);
- json_object_int_add(json_peer, "outq",
- peer->obuf->count);
- json_object_int_add(json_peer, "inq", 0);
- peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
- use_json, json_peer);
-
- /*
- * Adding "pfxRcd" field to match with the corresponding
- * CLI. "prefixReceivedCount" will be deprecated in
- * future.
- */
- json_object_int_add(json_peer, "prefixReceivedCount",
- peer->pcount[afi][pfx_rcd_safi]);
- json_object_int_add(json_peer, "pfxRcd",
- peer->pcount[afi][pfx_rcd_safi]);
-
- paf = peer_af_find(peer, afi, pfx_rcd_safi);
- if (paf && PAF_SUBGRP(paf))
- json_object_int_add(json_peer,
- "pfxSnt",
- (PAF_SUBGRP(paf))->scount);
-
- if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
- json_object_string_add(json_peer, "state",
- "Idle (Admin)");
- else if (peer->afc_recv[afi][safi])
- json_object_string_add(
- json_peer, "state",
- lookup_msg(bgp_status_msg, peer->status,
- NULL));
- else if (CHECK_FLAG(peer->sflags,
- PEER_STATUS_PREFIX_OVERFLOW))
- json_object_string_add(json_peer, "state",
- "Idle (PfxCt)");
- else
- json_object_string_add(
- json_peer, "state",
- lookup_msg(bgp_status_msg, peer->status,
- NULL));
+ /* Avoid creating empty peer dicts in JSON */
+ if (json_peer == NULL)
+ continue;
if (peer->conf_if)
json_object_string_add(json_peer, "idType",
@@ -8145,65 +8412,72 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
else if (peer->su.sa.sa_family == AF_INET6)
json_object_string_add(json_peer, "idType",
"ipv6");
-
json_object_object_add(json_peers, peer->host,
json_peer);
} else {
- memset(dn_flag, '\0', sizeof(dn_flag));
- if (peer_dynamic_neighbor(peer)) {
- dn_count++;
- dn_flag[0] = '*';
- }
-
- if (peer->hostname
- && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
- len = vty_out(vty, "%s%s(%s)", dn_flag,
- peer->hostname, peer->host);
- else
- len = vty_out(vty, "%s%s", dn_flag, peer->host);
-
- /* pad the neighbor column with spaces */
- if (len < max_neighbor_width)
- vty_out(vty, "%*s", max_neighbor_width - len,
- " ");
-
- vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
- peer->as, PEER_TOTAL_RX(peer),
- PEER_TOTAL_TX(peer), peer->version[afi][safi],
- 0, peer->obuf->count,
- peer_uptime(peer->uptime, timebuf,
- BGP_UPTIME_LEN, 0, NULL));
+ if (show_failed &&
+ bgp_has_peer_failed(peer, afi, safi)) {
+ bgp_show_failed_summary(vty, bgp, peer, NULL,
+ max_neighbor_width,
+ use_json);
+ } else if (!show_failed) {
+ memset(dn_flag, '\0', sizeof(dn_flag));
+ if (peer_dynamic_neighbor(peer)) {
+ dn_flag[0] = '*';
+ }
- if (peer->status == Established)
- if (peer->afc_recv[afi][safi])
- vty_out(vty, " %12ld",
- peer->pcount[afi]
- [pfx_rcd_safi]);
- else
- vty_out(vty, " NoNeg");
- else {
- if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
- vty_out(vty, " Idle (Admin)");
- else if (CHECK_FLAG(
- peer->sflags,
- PEER_STATUS_PREFIX_OVERFLOW))
- vty_out(vty, " Idle (PfxCt)");
+ if (peer->hostname
+ && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
+ len = vty_out(vty, "%s%s(%s)", dn_flag,
+ peer->hostname, peer->host);
else
- vty_out(vty, " %12s",
- lookup_msg(bgp_status_msg,
- peer->status, NULL));
+ len = vty_out(vty, "%s%s", dn_flag, peer->host);
+
+ /* pad the neighbor column with spaces */
+ if (len < max_neighbor_width)
+ vty_out(vty, "%*s", max_neighbor_width - len,
+ " ");
+
+ vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
+ peer->as, PEER_TOTAL_RX(peer),
+ PEER_TOTAL_TX(peer), peer->version[afi][safi],
+ 0, peer->obuf->count,
+ peer_uptime(peer->uptime, timebuf,
+ BGP_UPTIME_LEN, 0, NULL));
+
+ if (peer->status == Established)
+ if (peer->afc_recv[afi][safi])
+ vty_out(vty, " %12ld",
+ peer->pcount[afi]
+ [pfx_rcd_safi]);
+ else
+ vty_out(vty, " NoNeg");
+ else {
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
+ vty_out(vty, " Idle (Admin)");
+ else if (CHECK_FLAG(
+ peer->sflags,
+ PEER_STATUS_PREFIX_OVERFLOW))
+ vty_out(vty, " Idle (PfxCt)");
+ else
+ vty_out(vty, " %12s",
+ lookup_msg(bgp_status_msg,
+ peer->status, NULL));
+ }
+ vty_out(vty, "\n");
}
- vty_out(vty, "\n");
+
}
}
if (use_json) {
json_object_object_add(json, "peers", json_peers);
-
+ json_object_int_add(json, "failedPeers", failed_count);
json_object_int_add(json, "totalPeers", count);
json_object_int_add(json, "dynamicPeers", dn_count);
- bgp_show_bestpath_json(bgp, json);
+ if (!show_failed)
+ bgp_show_bestpath_json(bgp, json);
vty_out(vty, "%s\n", json_object_to_json_string_ext(
json, JSON_C_TO_STRING_PRETTY));
@@ -8213,7 +8487,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
vty_out(vty, "\nTotal number of neighbors %d\n", count);
else {
vty_out(vty, "No %s neighbor is configured\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
}
if (dn_count) {
@@ -8227,7 +8501,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
}
static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
- int safi, bool use_json)
+ int safi, bool show_failed, bool use_json)
{
int is_first = 1;
int afi_wildcard = (afi == AFI_MAX);
@@ -8260,15 +8534,18 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
is_first = 0;
vty_out(vty, "\"%s\":",
- afi_safi_json(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
} else {
vty_out(vty, "\n%s Summary:\n",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
}
}
- bgp_show_summary(vty, bgp, afi, safi, use_json);
+ bgp_show_summary(vty, bgp, afi, safi, show_failed,
+ use_json);
}
safi++;
if (!safi_wildcard)
@@ -8290,7 +8567,8 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
}
static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
- safi_t safi, bool use_json)
+ safi_t safi, bool show_failed,
+ bool use_json)
{
struct listnode *node, *nnode;
struct bgp *bgp;
@@ -8318,7 +8596,8 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
? VRF_DEFAULT_NAME
: bgp->name);
}
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
+ use_json);
}
if (use_json)
@@ -8328,13 +8607,14 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
}
int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool use_json)
+ safi_t safi, bool show_failed, bool use_json)
{
struct bgp *bgp;
if (name) {
if (strmatch(name, "all")) {
bgp_show_all_instances_summary_vty(vty, afi, safi,
+ show_failed,
use_json);
return CMD_SUCCESS;
} else {
@@ -8350,7 +8630,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
}
bgp_show_summary_afi_safi(vty, bgp, afi, safi,
- use_json);
+ show_failed, use_json);
return CMD_SUCCESS;
}
}
@@ -8358,7 +8638,8 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
bgp = bgp_get_default();
if (bgp)
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
+ use_json);
else {
if (use_json)
vty_out(vty, "{}\n");
@@ -8373,7 +8654,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
/* `show [ip] bgp summary' commands. */
DEFUN (show_ip_bgp_summary,
show_ip_bgp_summary_cmd,
- "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [json]",
+ "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [failed] [json]",
SHOW_STR
IP_STR
BGP_STR
@@ -8381,11 +8662,13 @@ DEFUN (show_ip_bgp_summary,
BGP_AFI_HELP_STR
BGP_SAFI_WITH_LABEL_HELP_STR
"Summary of BGP neighbor status\n"
+ "Show only sessions not in Established state\n"
JSON_STR)
{
char *vrf = NULL;
afi_t afi = AFI_MAX;
safi_t safi = SAFI_MAX;
+ bool show_failed = false;
int idx = 0;
@@ -8405,79 +8688,20 @@ DEFUN (show_ip_bgp_summary,
argv_find_and_parse_safi(argv, argc, &idx, &safi);
}
+ if (argv_find(argv, argc, "failed", &idx))
+ show_failed = true;
+
bool uj = use_json(argc, argv);
- return bgp_show_summary_vty(vty, vrf, afi, safi, uj);
+ return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed, uj);
}
-const char *afi_safi_print(afi_t afi, safi_t safi)
+const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json)
{
- if (afi == AFI_IP && safi == SAFI_UNICAST)
- return "IPv4 Unicast";
- else if (afi == AFI_IP && safi == SAFI_MULTICAST)
- return "IPv4 Multicast";
- else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
- return "IPv4 Labeled Unicast";
- else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
- return "IPv4 VPN";
- else if (afi == AFI_IP && safi == SAFI_ENCAP)
- return "IPv4 Encap";
- else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
- return "IPv4 Flowspec";
- else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
- return "IPv6 Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
- return "IPv6 Multicast";
- else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
- return "IPv6 Labeled Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
- return "IPv6 VPN";
- else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
- return "IPv6 Encap";
- else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
- return "IPv6 Flowspec";
- else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
- return "L2VPN EVPN";
- else
- return "Unknown";
-}
-
-/*
- * Please note that we have intentionally camelCased
- * the return strings here. So if you want
- * to use this function, please ensure you
- * are doing this within json output
- */
-const char *afi_safi_json(afi_t afi, safi_t safi)
-{
- if (afi == AFI_IP && safi == SAFI_UNICAST)
- return "ipv4Unicast";
- else if (afi == AFI_IP && safi == SAFI_MULTICAST)
- return "ipv4Multicast";
- else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
- return "ipv4LabeledUnicast";
- else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
- return "ipv4Vpn";
- else if (afi == AFI_IP && safi == SAFI_ENCAP)
- return "ipv4Encap";
- else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
- return "ipv4Flowspec";
- else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
- return "ipv6Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
- return "ipv6Multicast";
- else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
- return "ipv6LabeledUnicast";
- else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
- return "ipv6Vpn";
- else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
- return "ipv6Encap";
- else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
- return "ipv6Flowspec";
- else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
- return "l2VpnEvpn";
+ if (for_json)
+ return get_afi_safi_json_str(afi, safi);
else
- return "Unknown";
+ return get_afi_safi_vty_str(afi, safi);
}
/* Show BGP peer's information. */
@@ -8855,14 +9079,14 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
"prefixAllowedRestartIntervalMsecs",
p->pmax_restart[afi][safi] * 60000);
}
- json_object_object_add(json_neigh, afi_safi_print(afi, safi),
+ json_object_object_add(json_neigh, get_afi_safi_str(afi, safi, true),
json_addr);
} else {
filter = &p->filter[afi][safi];
vty_out(vty, " For address family: %s\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
if (peer_group_active(p))
vty_out(vty, " %s peer-group member\n",
@@ -9150,8 +9374,6 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
char buf1[PREFIX2STR_BUFFER], buf[SU_ADDRSTRLEN];
char timebuf[BGP_UPTIME_LEN];
char dn_flag[2];
- const char *subcode_str;
- const char *code_str;
afi_t afi;
safi_t safi;
uint16_t i;
@@ -9569,8 +9791,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object *json_sub = NULL;
json_sub =
json_object_new_object();
- print_store = afi_safi_print(
- afi, safi);
+ print_store = get_afi_safi_str(
+ afi, safi, true);
if (CHECK_FLAG(
p->af_cap[afi]
@@ -9748,9 +9970,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
[AFI_IP]
[safi],
PEER_CAP_ENHE_AF_RCV)) {
- print_store = afi_safi_print(
+ print_store = get_afi_safi_str(
AFI_IP,
- safi);
+ safi, true);
json_object_string_add(
json_nxt,
print_store,
@@ -9850,8 +10072,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object_object_add(
json_multi,
- afi_safi_print(afi,
- safi),
+ get_afi_safi_str(afi,
+ safi,
+ true),
json_exten);
}
}
@@ -9957,9 +10180,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
restart_af_count++;
json_object_object_add(
json_restart,
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi),
+ safi,
+ true),
json_sub);
}
}
@@ -10018,9 +10242,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_TX_RCV)) {
vty_out(vty,
" %s: TX ",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10029,9 +10254,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_TX_ADV))
vty_out(vty,
"advertised %s",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10061,9 +10287,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_RX_RCV)) {
vty_out(vty,
" %s: RX ",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10072,9 +10299,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_RX_ADV))
vty_out(vty,
"advertised %s",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10145,9 +10373,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ENHE_AF_RCV))
vty_out(vty,
" %s\n",
- afi_safi_print(
+ get_afi_safi_str(
AFI_IP,
- safi));
+ safi,
+ false));
}
}
@@ -10194,8 +10423,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|| p->afc_recv[afi][safi]) {
vty_out(vty,
" Address Family %s:",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(
+ afi,
+ safi,
+ false));
if (p->afc_adv[afi][safi])
vty_out(vty,
" advertised");
@@ -10280,9 +10511,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
restart_af_count
? ", "
: "",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi),
+ safi,
+ false),
CHECK_FLAG(
p->af_cap
[afi]
@@ -10321,8 +10553,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_STATUS_EOR_SEND)) {
json_object_boolean_true_add(
json_grace_send,
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
eor_send_af_count++;
}
}
@@ -10332,8 +10565,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_STATUS_EOR_RECEIVED)) {
json_object_boolean_true_add(
json_grace_recv,
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
eor_receive_af_count++;
}
}
@@ -10371,8 +10605,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
vty_out(vty, "%s%s",
eor_send_af_count ? ", "
: "",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
eor_send_af_count++;
}
}
@@ -10386,8 +10621,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
eor_receive_af_count
? ", "
: "",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
eor_receive_af_count++;
}
}
@@ -10573,88 +10809,13 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
(tm->tm_sec * 1000)
+ (tm->tm_min * 60000)
+ (tm->tm_hour * 3600000));
- json_object_string_add(
- json_neigh, "lastResetDueTo",
- peer_down_str[(int)p->last_reset]);
- if (p->last_reset == PEER_DOWN_NOTIFY_SEND
- || p->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
- char errorcodesubcode_hexstr[5];
- char errorcodesubcode_str[256];
-
- code_str = bgp_notify_code_str(p->notify.code);
- subcode_str = bgp_notify_subcode_str(
- p->notify.code, p->notify.subcode);
-
- sprintf(errorcodesubcode_hexstr, "%02X%02X",
- p->notify.code, p->notify.subcode);
- json_object_string_add(json_neigh,
- "lastErrorCodeSubcode",
- errorcodesubcode_hexstr);
- snprintf(errorcodesubcode_str, 255, "%s%s",
- code_str, subcode_str);
- json_object_string_add(json_neigh,
- "lastNotificationReason",
- errorcodesubcode_str);
- if (p->last_reset == PEER_DOWN_NOTIFY_RECEIVED
- && p->notify.code == BGP_NOTIFY_CEASE
- && (p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
- || p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_RESET)
- && p->notify.length) {
- char msgbuf[1024];
- const char *msg_str;
-
- msg_str = bgp_notify_admin_message(
- msgbuf, sizeof(msgbuf),
- (uint8_t *)p->notify.data,
- p->notify.length);
- if (msg_str)
- json_object_string_add(
- json_neigh,
- "lastShutdownDescription",
- msg_str);
- }
- }
+ bgp_show_peer_reset(NULL, p, json_neigh, true);
} else {
vty_out(vty, " Last reset %s, ",
peer_uptime(p->resettime, timebuf,
BGP_UPTIME_LEN, 0, NULL));
- if (p->last_reset == PEER_DOWN_NOTIFY_SEND
- || p->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
- code_str = bgp_notify_code_str(p->notify.code);
- subcode_str = bgp_notify_subcode_str(
- p->notify.code, p->notify.subcode);
- vty_out(vty, "due to NOTIFICATION %s (%s%s)\n",
- p->last_reset == PEER_DOWN_NOTIFY_SEND
- ? "sent"
- : "received",
- code_str, subcode_str);
- if (p->last_reset == PEER_DOWN_NOTIFY_RECEIVED
- && p->notify.code == BGP_NOTIFY_CEASE
- && (p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
- || p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_RESET)
- && p->notify.length) {
- char msgbuf[1024];
- const char *msg_str;
-
- msg_str = bgp_notify_admin_message(
- msgbuf, sizeof(msgbuf),
- (uint8_t *)p->notify.data,
- p->notify.length);
- if (msg_str)
- vty_out(vty,
- " Message: \"%s\"\n",
- msg_str);
- }
- } else {
- vty_out(vty, "due to %s\n",
- peer_down_str[(int)p->last_reset]);
- }
-
+ bgp_show_peer_reset(vty, p, NULL, false);
if (p->last_reset_cause_size) {
msg = p->last_reset_cause;
vty_out(vty,
@@ -11319,7 +11480,7 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
/* Provide context for the block */
json_object_string_add(json, "vrf", name ? name : "default");
json_object_string_add(json, "afiSafi",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, true));
if (!CHECK_FLAG(bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
@@ -11400,11 +11561,11 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
BGP_CONFIG_VRF_TO_VRF_IMPORT))
vty_out(vty,
"This VRF is not importing %s routes from any other VRF\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
else {
vty_out(vty,
"This VRF is importing %s routes from the following VRFs:\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
for (ALL_LIST_ELEMENTS_RO(
bgp->vpn_policy[afi].import_vrf,
@@ -11428,11 +11589,11 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
BGP_CONFIG_VRF_TO_VRF_EXPORT))
vty_out(vty,
"This VRF is not exporting %s routes to any other VRF\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
else {
vty_out(vty,
"This VRF is exporting %s routes to the following VRFs:\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
for (ALL_LIST_ELEMENTS_RO(
bgp->vpn_policy[afi].export_vrf,
@@ -11809,7 +11970,7 @@ static int bgp_show_one_peer_group(struct vty *vty, struct peer_group *group)
FOREACH_AFI_SAFI (afi, safi) {
if (conf->afc[afi][safi]) {
af_cfgd = 1;
- vty_out(vty, " %s;", afi_safi_print(afi, safi));
+ vty_out(vty, " %s;", get_afi_safi_str(afi, safi, false));
}
}
if (!af_cfgd)
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index d9df2b4cfe..27b5ea47b2 100644
--- a/bgpd/bgp_vty.h
+++ b/bgpd/bgp_vty.h
@@ -45,8 +45,7 @@ struct bgp;
"Address Family modifier\n"
extern void bgp_vty_init(void);
-extern const char *afi_safi_print(afi_t afi, safi_t safi);
-extern const char *afi_safi_json(afi_t afi, safi_t safi);
+extern const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json);
extern void bgp_config_write_update_delay(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_wpkt_quanta(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_rpkt_quanta(struct vty *vty, struct bgp *bgp);
@@ -72,7 +71,7 @@ extern int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty,
safi_t *safi, struct bgp **bgp,
bool use_json);
extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool use_json);
+ safi_t safi, bool show_failed, bool use_json);
extern void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
afi_t afi);
#endif /* _QUAGGA_BGP_VTY_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index b5f267cc38..5b31fbb3a8 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1582,6 +1582,12 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
}
active = peer_active(peer);
+ if (!active) {
+ if (peer->su.sa.sa_family == AF_UNSPEC)
+ peer->last_reset = PEER_DOWN_NBR_ADDR;
+ else
+ peer->last_reset = PEER_DOWN_NOAFI_ACTIVATED;
+ }
/* Last read and reset time set */
peer->readtime = peer->resettime = bgp_clock();
@@ -2221,10 +2227,12 @@ int peer_delete(struct peer *peer)
bgp = peer->bgp;
accept_peer = CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
+ bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
+ assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT))
peer_nsf_stop(peer);
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 9e05fd5629..477c036009 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -1194,6 +1194,10 @@ struct peer {
#define PEER_DOWN_BFD_DOWN 24 /* BFD down */
#define PEER_DOWN_IF_DOWN 25 /* Interface down */
#define PEER_DOWN_NBR_ADDR_DEL 26 /* Peer address lost */
+#define PEER_DOWN_WAITING_NHT 27 /* Waiting for NHT to resolve */
+#define PEER_DOWN_NBR_ADDR 28 /* Waiting for peer IPv6 IP Addr */
+#define PEER_DOWN_VRF_UNINIT 29 /* Associated VRF is not init yet */
+#define PEER_DOWN_NOAFI_ACTIVATED 30 /* No AFI/SAFI activated for peer */
size_t last_reset_cause_size;
uint8_t last_reset_cause[BGP_MAX_PACKET_SIZE];
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index e7905e5622..9b8f64ee67 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -1282,8 +1282,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
* since this peer is not on the I/O thread, this lock is not strictly
* necessary, but serves as a reminder to those who may meddle...
*/
- pthread_mutex_lock(&rfd->peer->io_mtx);
- {
+ frr_with_mutex(&rfd->peer->io_mtx) {
// we don't need any I/O related facilities
if (rfd->peer->ibuf)
stream_fifo_free(rfd->peer->ibuf);
@@ -1300,7 +1299,6 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
rfd->peer->obuf_work = NULL;
rfd->peer->ibuf_work = NULL;
}
- pthread_mutex_unlock(&rfd->peer->io_mtx);
{ /* base code assumes have valid host pointer */
char buf[BUFSIZ];
diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c
index 481500dfb4..80a590f56a 100644
--- a/bgpd/rfapi/vnc_zebra.c
+++ b/bgpd/rfapi/vnc_zebra.c
@@ -191,8 +191,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
* is not strictly necessary, but serves as a reminder
* to those who may meddle...
*/
- pthread_mutex_lock(&vncHD1VR.peer->io_mtx);
- {
+ frr_with_mutex(&vncHD1VR.peer->io_mtx) {
// we don't need any I/O related facilities
if (vncHD1VR.peer->ibuf)
stream_fifo_free(vncHD1VR.peer->ibuf);
@@ -209,7 +208,6 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
vncHD1VR.peer->obuf_work = NULL;
vncHD1VR.peer->ibuf_work = NULL;
}
- pthread_mutex_unlock(&vncHD1VR.peer->io_mtx);
/* base code assumes have valid host pointer */
vncHD1VR.peer->host =
diff --git a/bgpd/subdir.am b/bgpd/subdir.am
index d281fe4e59..b338fd4f3d 100644
--- a/bgpd/subdir.am
+++ b/bgpd/subdir.am
@@ -27,6 +27,7 @@ vtysh_scan += \
# can be loaded as DSO - always include for vtysh
vtysh_scan += $(top_srcdir)/bgpd/bgp_rpki.c
+vtysh_scan += $(top_srcdir)/bgpd/bgp_bmp.c
if ENABLE_BGP_VNC
vtysh_scan += \
@@ -42,6 +43,9 @@ endif
if RPKI
module_LTLIBRARIES += bgpd/bgpd_rpki.la
endif
+if BGP_BMP
+module_LTLIBRARIES += bgpd/bgpd_bmp.la
+endif
man8 += $(MANBUILD)/bgpd.8
endif
@@ -129,6 +133,7 @@ noinst_HEADERS += \
bgpd/bgp_damp.h \
bgpd/bgp_debug.h \
bgpd/bgp_dump.h \
+ bgpd/bgp_bmp.h \
bgpd/bgp_ecommunity.h \
bgpd/bgp_encap_tlv.h \
bgpd/bgp_encap_types.h \
@@ -216,6 +221,10 @@ bgpd_bgpd_rpki_la_CFLAGS = $(WERROR) $(RTRLIB_CFLAGS)
bgpd_bgpd_rpki_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
bgpd_bgpd_rpki_la_LIBADD = $(RTRLIB_LIBS)
+bgpd_bgpd_bmp_la_SOURCES = bgpd/bgp_bmp.c
+bgpd_bgpd_bmp_la_LIBADD = lib/libfrrcares.la
+bgpd_bgpd_bmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
bgpd/bgp_evpn_vty_clippy.c: $(CLIPPY_DEPS)
bgpd/bgp_evpn_vty.$(OBJEXT): bgpd/bgp_evpn_vty_clippy.c
bgpd/bgp_vty_clippy.c: $(CLIPPY_DEPS)
@@ -229,3 +238,5 @@ bgpd/bgp_routemap.$(OBJEXT): bgpd/bgp_routemap_clippy.c
bgpd/bgp_rpki_clippy.c: $(CLIPPY_DEPS)
$(AUTOMAKE_DUMMY)bgpd/bgpd_bgpd_rpki_la-bgp_rpki.lo: bgpd/bgp_rpki_clippy.c
$(AUTOMAKE_DUMMY)bgpd/bgpd_rpki_la-bgp_rpki.lo: bgpd/bgp_rpki_clippy.c
+bgpd/bgp_bmp_clippy.c: $(CLIPPY_DEPS)
+bgpd/bgp_bmp.lo: bgpd/bgp_bmp_clippy.c
diff --git a/configure.ac b/configure.ac
index 134c8692d4..6c1b35b5f2 100755
--- a/configure.ac
+++ b/configure.ac
@@ -479,12 +479,14 @@ AC_ARG_ENABLE([staticd],
AS_HELP_STRING([--disable-staticd], [do not build staticd]))
AC_ARG_ENABLE([fabricd],
AS_HELP_STRING([--disable-fabricd], [do not build fabricd]))
-AC_ARG_ENABLE([bgp-announce],
- AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement]))
AC_ARG_ENABLE([vrrpd],
AS_HELP_STRING([--disable-vrrpd], [do not build vrrpd]))
+AC_ARG_ENABLE([bgp-announce],
+ AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement]))
AC_ARG_ENABLE([bgp-vnc],
AS_HELP_STRING([--disable-bgp-vnc],[turn off BGP VNC support]))
+AC_ARG_ENABLE([bgp-bmp],
+ AS_HELP_STRING([--disable-bgp-bmp],[turn off BGP BMP support]))
AC_ARG_ENABLE([snmp],
AS_HELP_STRING([--enable-snmp], [enable SNMP support for agentx]))
AC_ARG_ENABLE([config_rollbacks],
@@ -1061,6 +1063,7 @@ case "$host_os" in
AC_CHECK_LIB([nsl], [main])
AC_CHECK_LIB([umem], [main])
SOLARIS="solaris"
+ AC_MSG_WARN([--Solaris support is being considered for deprecation, please let us know if you are still using this--])
;;
linux*)
AC_MSG_RESULT([Linux])
@@ -1449,6 +1452,16 @@ if test "x$enable_pcreposix" = "xyes"; then
fi
AC_SUBST([HAVE_LIBPCREPOSIX])
+dnl ------------------
+dnl check C-Ares library
+dnl ------------------
+PKG_CHECK_MODULES([CARES], [libcares], [
+ c_ares_found=true
+],[
+ c_ares_found=false
+])
+AM_CONDITIONAL([CARES], [$c_ares_found])
+
dnl ##########################################################################
dnl test "${enable_clippy_only}" != "yes"
fi
@@ -1518,9 +1531,21 @@ fi
NHRPD=""
case "$host_os" in
linux*)
- if test "${enable_nhrpd}" != "no"; then
- NHRPD="nhrpd"
- fi
+ case "${enable_nhrpd}" in
+ no)
+ ;;
+ yes)
+ if test "$c_ares_found" != "true" ; then
+ AC_MSG_ERROR([nhrpd requires libcares. Please install c-ares and its -dev headers.])
+ fi
+ NHRPD="nhrpd"
+ ;;
+ *)
+ if test "$c_ares_found" = "true" ; then
+ NHRPD="nhrpd"
+ fi
+ ;;
+ esac
;;
*)
if test "${enable_nhrpd}" = "yes"; then
@@ -1554,22 +1579,29 @@ if test "${enable_bgp_vnc}" != "no";then
AC_DEFINE([ENABLE_BGP_VNC], [1], [Enable BGP VNC support])
fi
+bgpd_bmp=false
+case "${enable_bmp}" in
+ no)
+ ;;
+ yes)
+ if test "$c_ares_found" != "true" ; then
+ AC_MSG_ERROR([BMP support requires libcares. Please install c-ares and its -dev headers.])
+ fi
+ bgpd_bmp=true
+ ;;
+ *)
+ if test "$c_ares_found" = "true" ; then
+ bgpd_bmp=true
+ fi
+ ;;
+esac
+
dnl ##########################################################################
dnl LARGE if block
if test "${enable_clippy_only}" != "yes"; then
dnl ##########################################################################
dnl ------------------
-dnl check C-Ares library
-dnl ------------------
-if test "${NHRPD}" != ""; then
- PKG_CHECK_MODULES([CARES], [libcares], ,[
- AC_MSG_ERROR([trying to build nhrpd, but libcares not found. install c-ares and its -dev headers.])
- ])
-fi
-AM_CONDITIONAL([CARES], [test "${NHRPD}" != ""])
-
-dnl ------------------
dnl check Net-SNMP library
dnl ------------------
if test "${enable_snmp}" != "" -a "${enable_snmp}" != "no"; then
@@ -2006,6 +2038,20 @@ if test "${enable_capabilities}" != "no"; then
-o x"${frr_ac_lcaps}" = x"yes"; then
AC_DEFINE([HAVE_CAPABILITIES], [1], [capabilities])
fi
+
+ case "$host_os" in
+ linux*)
+ if test "$frr_ac_lcaps" != "yes"; then
+ AC_MSG_ERROR([libcap and/or its headers were not found. Running FRR without libcap support built in causes a huge performance penalty.])
+ fi
+ ;;
+ esac
+else
+ case "$host_os" in
+ linux*)
+ AC_MSG_WARN([Running FRR without libcap support built in causes a huge performance penalty.])
+ ;;
+ esac
fi
AC_SUBST([LIBCAP])
@@ -2192,6 +2238,7 @@ AC_DEFINE_UNQUOTED([WATCHFRR_SH_PATH], ["${CFG_SBIN%/}/watchfrr.sh"], [path to w
dnl various features
AM_CONDITIONAL([SUPPORT_REALMS], [test "${enable_realms}" = "yes"])
AM_CONDITIONAL([ENABLE_BGP_VNC], [test x${enable_bgp_vnc} != xno])
+AM_CONDITIONAL([BGP_BMP], [$bgpd_bmp])
dnl northbound
AM_CONDITIONAL([SQLITE3], [$SQLITE3])
AM_CONDITIONAL([CONFD], [test "x$enable_confd" != "x"])
diff --git a/debian/frr.install b/debian/frr.install
index fe34b23d02..09bddf0fc6 100644
--- a/debian/frr.install
+++ b/debian/frr.install
@@ -10,6 +10,7 @@ usr/lib/frr/watchfrr
usr/lib/frr/zebra
usr/lib/*/frr/modules/zebra_irdp.so
usr/lib/*/frr/modules/zebra_fpm.so
+usr/lib/*/frr/modules/bgpd_bmp.so
usr/share/doc/frr/examples
usr/share/man/
usr/share/yang/
diff --git a/doc/developer/library.rst b/doc/developer/library.rst
index 7cd493ccc4..a904a4e778 100644
--- a/doc/developer/library.rst
+++ b/doc/developer/library.rst
@@ -11,6 +11,7 @@ Library Facilities (libfrr)
rcu
lists
logging
+ locking
hooks
cli
modules
diff --git a/doc/developer/locking.rst b/doc/developer/locking.rst
new file mode 100644
index 0000000000..aee05aae06
--- /dev/null
+++ b/doc/developer/locking.rst
@@ -0,0 +1,73 @@
+Locking
+=======
+
+FRR ships two small wrappers around ``pthread_mutex_lock()`` /
+``pthread_mutex_unlock``. Use ``#include "frr_pthread.h"`` to get these
+macros.
+
+.. c:function:: frr_with_mutex(pthread_mutex_t *mutex)
+
+ Begin a C statement block that is executed with the mutex locked. Any
+ exit from the block (``break``, ``return``, ``goto``, end of block) will
+ cause the mutex to be unlocked::
+
+ int somefunction(int option)
+ {
+ frr_with_mutex(&my_mutex) {
+ /* mutex will be locked */
+
+ if (!option)
+ /* mutex will be unlocked before return */
+ return -1;
+
+ if (something(option))
+ /* mutex will be unlocked before goto */
+ goto out_err;
+
+ somethingelse();
+
+ /* mutex will be unlocked at end of block */
+ }
+
+ return 0;
+
+ out_err:
+ somecleanup();
+ return -1;
+ }
+
+ This is a macro that internally uses a ``for`` loop. It is explicitly
+ acceptable to use ``break`` to get out of the block. Even though a single
+ statement works correctly, FRR coding style requires that this macro always
+ be used with a ``{ ... }`` block.
+
+.. c:function:: frr_mutex_lock_autounlock(pthread_mutex_t *mutex)
+
+ Lock mutex and unlock at the end of the current C statement block::
+
+ int somefunction(int option)
+ {
+ frr_mutex_lock_autounlock(&my_mutex);
+ /* mutex will be locked */
+
+ ...
+ if (error)
+ /* mutex will be unlocked before return */
+ return -1;
+ ...
+
+ /* mutex will be unlocked before return */
+ return 0;
+ }
+
+ This is a macro that internally creates a variable with a destructor.
+ When the variable goes out of scope (i.e. the block ends), the mutex is
+ released.
+
+ .. warning::
+
+ This macro should only be used when :c:func:`frr_with_mutex` would
+ result in excessively/weirdly nested code. This is generally an
+ indicator that the code might be trying to do too many things with
+ the lock held. Try all possible avenues to reduce the amount of
+ code covered by the lock and move to :c:func:`frr_with_mutex`.
diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am
index 1fc593e566..557a41c51f 100644
--- a/doc/developer/subdir.am
+++ b/doc/developer/subdir.am
@@ -31,6 +31,7 @@ dev_RSTFILES = \
doc/developer/index.rst \
doc/developer/library.rst \
doc/developer/lists.rst \
+ doc/developer/locking.rst \
doc/developer/logging.rst \
doc/developer/maintainer-release-build.rst \
doc/developer/memtypes.rst \
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 07c43ac2de..3c6887fbac 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -767,6 +767,28 @@ ways that can be unexpected for the original implementor. As such debugs
ability to turn on/off debugs from the CLI and it is expected that the
developer will use this convention to allow control of their debugs.
+Custom syntax-like block macros
+-------------------------------
+
+FRR uses some macros that behave like the ``for`` or ``if`` C keywords. These
+macros follow these patterns:
+
+- loop-style macros are named ``frr_each_*`` (and ``frr_each``)
+- single run macros are named ``frr_with_*``
+- to avoid confusion, ``frr_with_*`` macros must always use a ``{ ... }``
+ block even if the block only contains one statement. The ``frr_each``
+ constructs are assumed to be well-known enough to use normal ``for`` rules.
+- ``break``, ``return`` and ``goto`` all work correctly. For loop-style
+ macros, ``continue`` works correctly too.
+
+Both the ``each`` and ``with`` keywords are inspired by other, higher-level
+programming languages that provide these constructs.
+
+There are also some older iteration macros, e.g. ``ALL_LIST_ELEMENTS`` and
+``FOREACH_AFI_SAFI``. These macros in some cases do **not** fulfill the above
+pattern (e.g. ``break`` does not work in ``FOREACH_AFI_SAFI`` because it
+expands to 2 nested loops.)
+
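+A minimal sketch of these conventions (the ``item_list`` list, its fields and
+the ``process()``/``items_mtx`` names are hypothetical, purely for
+illustration)::
+
+   void walk_items(void)
+   {
+           struct item *item;
+
+           /* loop-style macro: ordinary for-loop rules apply, so break and
+            * continue behave as usual */
+           frr_each (item_list, &items, item) {
+                   if (item->skip)
+                           continue;
+                   process(item);
+           }
+
+           /* single-run macro: always written with a { ... } block, even
+            * for a single statement; any exit path releases the mutex */
+           frr_with_mutex(&items_mtx) {
+                   item_count++;
+           }
+   }
+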
Static Analysis and Sanitizers
------------------------------
Clang/LLVM and GCC come with a variety of tools that can be used to help find
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 92d4126cec..da339b4409 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2251,6 +2251,12 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
Show a bgp peer summary for the specified address family, and subsequent
address-family.
+.. index:: show bgp [afi] [safi] summary failed [json]
+.. clicmd:: show bgp [afi] [safi] summary failed [json]
+
+ Show a bgp peer summary for peers that are not successfully exchanging routes
+ for the specified address family, and subsequent address-family.
+
.. index:: show bgp [afi] [safi] neighbor [PEER]
.. clicmd:: show bgp [afi] [safi] neighbor [PEER]
diff --git a/doc/user/bmp.rst b/doc/user/bmp.rst
new file mode 100644
index 0000000000..061800c14e
--- /dev/null
+++ b/doc/user/bmp.rst
@@ -0,0 +1,170 @@
+.. _bmp:
+
+***
+BMP
+***
+
+:abbr:`BMP` (BGP Monitoring Protocol, :rfc:`7854`) is used to send monitoring
+data from BGP routers to network management entities.
+
+Implementation characteristics
+==============================
+
+The `BMP` implementation in FRR has the following properties:
+
+- only the :rfc:`7854` features are currently implemented. This means protocol
+ version 3 without any extensions. It is not possible to use an older draft
+ protocol version of BMP.
+
+- the following statistics codes are implemented:
+
+ - 0: count of prefixes rejected
+ - 2: count of duplicate prefix withdrawals
+ - 3: count of **prefixes** with loop in cluster id
+ - 4: count of **prefixes** with loop in AS-path
+ - 5: count of **prefixes** with loop in originator
+ - 11: count of updates subjected to :rfc:`7607` "treat as withdrawal"
+ handling due to errors
+ - 65531: *experimental* count of prefixes rejected due to invalid next-hop
+
+ Note that stat items 3, 4 and 5 are specified to count updates, but FRR
+ implements them as prefix-based counters.
+
+- **route mirroring** is fully implemented; however, BGP OPEN messages are not
+ currently included in route mirroring messages. Their contents can be
+ extracted from the "peer up" notification for sessions that established
+ successfully. OPEN messages for failed sessions cannot currently be
+ mirrored.
+
+- **route monitoring** is available for IPv4 and IPv6 AFIs, unicast and
+ multicast SAFIs. Other SAFIs (VPN, Labeled-Unicast, Flowspec, etc.) are not
+ currently supported.
+
+- monitoring peers that have BGP **add-path** enabled on the session will
+ result in somewhat unpredictable behaviour. Currently, the outcome is:
+
+ - route mirroring functions as intended, messages are copied verbatim
+ - the add-path ID is never included in route monitoring messages
+ - if multiple paths were received from a peer, an unpredictable path is
+ picked and sent on the BMP session. The selection will differ for
+ pre-policy and post-policy monitoring sessions.
+ - as long as any path is present, something will be advertised on BMP
+ sessions. Only after the last path is gone will a withdrawal be sent on
+ BMP sessions.
+ - updates to additional paths will trigger BMP route monitoring messages.
+ There is no guarantee on consistency regarding which path is sent in these
+ messages.
+
+- monitoring peers with :rfc:`5549` extended next-hops has not been tested.
+
+Starting BMP
+============
+
+BMP is implemented as a loadable module. This means that to use BMP, ``bgpd``
+must be started with the ``-M bmp`` option. It is not possible to enable BMP
+if ``bgpd`` was started without this option.
+
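+For example (the install path and the use of the ``daemons`` file depend on
+how FRR was built and packaged)::
+
+   /usr/lib/frr/bgpd -d -M bmp
+
+When FRR is started through its service scripts, the equivalent is adding
+``-M bmp`` to ``bgpd_options`` in ``/etc/frr/daemons``.
+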
+Configuring BMP
+===============
+
+All of FRR's BMP configuration options are located inside the
+:clicmd:`router bgp ASN` block. Configure BGP first before proceeding to BMP
+setup.
+
+There is one option that applies to the BGP instance as a whole:
+
+.. index:: bmp mirror buffer-limit(0-4294967294)
+.. clicmd:: [no] bmp mirror buffer-limit(0-4294967294)
+
+ This sets the maximum amount of memory used for buffering BGP messages
+ (updates, keepalives, ...) for sending in BMP Route Mirroring.
+
+ The buffer is for the entire BGP instance; if multiple BMP targets are
+ configured they reference the same buffer and do not consume additional
+ memory. Queue overhead is included in this memory accounting, so the
+ actual space available for BGP messages is slightly less than the value
+ configured here.
+
+ If the buffer fills up, the oldest messages are removed from the buffer and
+ any BMP sessions where the now-removed messages were still pending have
+ their **entire** queue flushed and a "Mirroring Messages Lost" BMP message
+ is sent.
+
+ BMP Route Monitoring is not affected by this option.
+
+All other configuration is managed per targets:
+
+.. index:: bmp targets NAME
+.. clicmd:: [no] bmp targets NAME
+
+ Create/delete a targets group. As implied by the plural name, targets may
+ cover multiple outbound active BMP sessions as well as inbound passive
+ listeners.
+
+ If BMP sessions have the same configuration, putting them in the same
+ ``bmp targets`` will reduce overhead.
+
+BMP session configuration
+-------------------------
+
+Inside a ``bmp targets`` block, the following commands control session
+establishment:
+
+.. index:: bmp connect HOSTNAME port (1-65535) {min-retry MSEC|max-retry MSEC}
+.. clicmd:: [no] bmp connect HOSTNAME port (1-65535) {min-retry MSEC|max-retry MSEC}
+
+ Add/remove an active outbound BMP session. HOSTNAME is resolved via DNS;
+ if multiple addresses are returned, they are tried in nondeterministic
+ order. Only one connection will be established even if multiple addresses
+ are returned. ``min-retry`` and ``max-retry`` specify (in milliseconds)
+ bounds for exponential backoff.
+
+.. warning::
+
+ ``ip access-list`` and ``ipv6 access-list`` are checked for outbound
+ connections resulting from ``bmp connect`` statements.
+
+.. index:: bmp listener <X:X::X:X|A.B.C.D> port (1-65535)
+.. clicmd:: [no] bmp listener <X:X::X:X|A.B.C.D> port (1-65535)
+
+ Accept incoming BMP sessions on the specified address and port. You can
+ use ``0.0.0.0`` and ``::`` to listen on all IPv4/IPv6 addresses.
+
+.. clicmd:: [no] ip access-list NAME
+.. clicmd:: [no] ipv6 access-list NAME
+
+ Restrict BMP sessions to the addresses allowed by the respective access
+ lists. The access lists are checked for both passive and active BMP
+ sessions. Changes do not affect currently established sessions.
+
+BMP data feed configuration
+---------------------------
+
+The following commands configure what BMP messages are sent on sessions
+associated with a particular ``bmp targets``:
+
+.. index:: bmp stats [interval (100-86400000)]
+.. clicmd:: [no] bmp stats [interval (100-86400000)]
+
+ Send BMP Statistics (counter) messages at the specified interval (in
+ milliseconds.)
+
+.. index:: bmp monitor AFI SAFI <pre-policy|post-policy>
+.. clicmd:: [no] bmp monitor AFI SAFI <pre-policy|post-policy>
+
+ Perform Route Monitoring for the specified AFI and SAFI. Only IPv4 and
+ IPv6 are currently valid for AFI, and only unicast and multicast are valid
+ for SAFI. Other AFI/SAFI combinations may be added in the future.
+
+ All BGP neighbors are included in Route Monitoring. Options to select
+ a subset of BGP sessions may be added in the future.
+
+.. index:: bmp mirror
+.. clicmd:: [no] bmp mirror
+
+ Perform Route Mirroring for all BGP neighbors. Since this provides a
+ direct feed of BGP messages, there are no AFI/SAFI options to be
+ configured.
+
+ All BGP neighbors are included in Route Mirroring. Options to select
+ a subset of BGP sessions may be added in the future.
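+
+Putting the pieces together, a configuration along these lines (AS number,
+targets name and collector address/port are placeholders) sends statistics,
+post-policy IPv4 unicast Route Monitoring and Route Mirroring to a single
+collector::
+
+   router bgp 65001
+    bmp targets collector1
+     bmp stats interval 60000
+     bmp monitor ipv4 unicast post-policy
+     bmp mirror
+     bmp connect 192.0.2.10 port 5000 min-retry 1000 max-retry 10000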
diff --git a/doc/user/eigrpd.rst b/doc/user/eigrpd.rst
index 330267a8ee..915270a562 100644
--- a/doc/user/eigrpd.rst
+++ b/doc/user/eigrpd.rst
@@ -65,15 +65,16 @@ Certain signals have special meanings to *eigrpd*.
EIGRP Configuration
===================
-.. index:: router eigrp (1-65535)
-.. clicmd:: router eigrp (1-65535)
+.. index:: router eigrp (1-65535) [vrf NAME]
+.. clicmd:: router eigrp (1-65535) [vrf NAME]
The `router eigrp` command is necessary to enable EIGRP. To disable EIGRP,
use the `no router eigrp (1-65535)` command. EIGRP must be enabled before
- carrying out any of the EIGRP commands.
+ carrying out any of the EIGRP commands. Specify ``vrf NAME`` if you want
+ EIGRP to run within the specified VRF.
-.. index:: no router eigrp (1-65535)
-.. clicmd:: no router eigrp (1-65535)
+.. index:: no router eigrp (1-65535) [vrf NAME]
+.. clicmd:: no router eigrp (1-65535) [vrf NAME]
Disable EIGRP.
@@ -189,8 +190,8 @@ How to Announce EIGRP route
Show EIGRP Information
======================
-.. index:: show ip eigrp topology
-.. clicmd:: show ip eigrp topology
+.. index:: show ip eigrp [vrf NAME] topology
+.. clicmd:: show ip eigrp [vrf NAME] topology
Display current EIGRP status.
@@ -207,6 +208,17 @@ Show EIGRP Information
P 10.0.2.0/24, 1 successors, FD is 256256, serno: 0
via Connected, enp0s3
+.. index:: show ip eigrp [vrf NAME] interface
+.. clicmd:: show ip eigrp [vrf NAME] interface
+
+ Display the list of interfaces associated with a particular EIGRP
+ instance.
+
+.. index:: show ip eigrp [vrf NAME] neighbor
+.. clicmd:: show ip eigrp [vrf NAME] neighbor
+
+ Display the list of neighbors that have been established within
+ a particular EIGRP instance.
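+
+For example, to inspect an instance running inside a VRF (the VRF name is a
+placeholder)::
+
+   show ip eigrp vrf BLUE topology
+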
EIGRP Debug Commands
====================
diff --git a/doc/user/index.rst b/doc/user/index.rst
index 4e14de6737..6c3b14e062 100644
--- a/doc/user/index.rst
+++ b/doc/user/index.rst
@@ -57,6 +57,7 @@ Protocols
static
vnc
vrrp
+ bmp
########
Appendix
diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst
index 6230bf777a..fab4343f50 100644
--- a/doc/user/pbr.rst
+++ b/doc/user/pbr.rst
@@ -91,6 +91,12 @@ end destination.
both v4 and v6 prefixes. This command is used in conjunction of the
:clicmd:`match src-ip PREFIX` command for matching.
+.. clicmd:: match mark (1-4294967295)
+
+ Select the mark to match. This is a Linux-only command; if attempted
+ on another platform it will be denied. The mark translates to the
+ underlying `ip rule .... fwmark XXXX` command.
+
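+ For example, a sketch that steers fwmark ``10`` traffic to a (hypothetical)
+ nexthop-group::
+
+    pbr-map FWMARK-MAP seq 10
+     match mark 10
+     set nexthop-group FWMARK-NHG
+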
.. clicmd:: set nexthop-group NAME
Use the nexthop-group NAME as the place to forward packets when the match
diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst
index ca6b46d3cf..dfac10b4f2 100644
--- a/doc/user/rpki.rst
+++ b/doc/user/rpki.rst
@@ -112,31 +112,6 @@ The following commands are independent of a specific cache server.
The default value is 300 seconds.
-.. index:: rpki timeout <1-4,294,967,296>
-.. clicmd:: rpki timeout <1-4,294,967,296>
-
-.. index:: no rpki timeout
-.. clicmd:: no rpki timeout
-
- Set the number of seconds the router waits for the cache reply. If the cache
- server is not replying within this time period, the router deletes all
- received prefix records from the prefix table.
-
- The default value is 600 seconds.
-
-.. index:: rpki initial-synchronisation-timeout <1-4,294,967,296>
-.. clicmd:: rpki initial-synchronisation-timeout <1-4,294,967,296>
-
-.. index:: no rpki initial-synchronisation-timeout
-.. clicmd:: no rpki initial-synchronisation-timeout
-
- Set the number of seconds until the first synchronization with the cache
- server needs to be completed. If the timeout expires, BGP routing is started
- without RPKI. The router will try to establish the cache server connection in
- the background.
-
- The default value is 30 seconds.
-
The following commands configure one or multiple cache servers.
.. index:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] PREFERENCE
diff --git a/doc/user/subdir.am b/doc/user/subdir.am
index 1e4d86c722..0f0a8a0774 100644
--- a/doc/user/subdir.am
+++ b/doc/user/subdir.am
@@ -7,6 +7,7 @@ user_RSTFILES = \
doc/user/ldpd.rst \
doc/user/basic.rst \
doc/user/bgp.rst \
+ doc/user/bmp.rst \
doc/user/bugs.rst \
doc/user/conf.py \
doc/user/eigrpd.rst \
diff --git a/eigrpd/eigrp_cli.c b/eigrpd/eigrp_cli.c
index ba657a7d5d..a93d4c8280 100644
--- a/eigrpd/eigrp_cli.c
+++ b/eigrpd/eigrp_cli.c
@@ -40,17 +40,18 @@
DEFPY_NOSH(
router_eigrp,
router_eigrp_cmd,
- "router eigrp (1-65535)$as",
+ "router eigrp (1-65535)$as [vrf NAME]",
ROUTER_STR
EIGRP_STR
- AS_STR)
+ AS_STR
+ VRF_CMD_HELP_STR)
{
char xpath[XPATH_MAXLEN];
int rv;
snprintf(xpath, sizeof(xpath),
- "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='']",
- as_str);
+ "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='%s']",
+ as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
rv = nb_cli_apply_changes(vty, NULL);
@@ -60,20 +61,21 @@ DEFPY_NOSH(
return rv;
}
-DEFPY_NOSH(
+DEFPY(
no_router_eigrp,
no_router_eigrp_cmd,
- "no router eigrp (1-65535)$as",
+ "no router eigrp (1-65535)$as [vrf NAME]",
NO_STR
ROUTER_STR
EIGRP_STR
- AS_STR)
+ AS_STR
+ VRF_CMD_HELP_STR)
{
char xpath[XPATH_MAXLEN];
snprintf(xpath, sizeof(xpath),
- "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='']",
- as_str);
+ "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='%s']",
+ as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
return nb_cli_apply_changes(vty, NULL);
@@ -83,8 +85,12 @@ void eigrp_cli_show_header(struct vty *vty, struct lyd_node *dnode,
bool show_defaults)
{
const char *asn = yang_dnode_get_string(dnode, "./asn");
+ const char *vrf = yang_dnode_get_string(dnode, "./vrf");
- vty_out(vty, "router eigrp %s\n", asn);
+ vty_out(vty, "router eigrp %s", asn);
+ if (strcmp(vrf, VRF_DEFAULT_NAME))
+ vty_out(vty, " vrf %s", vrf);
+ vty_out(vty, "\n");
}
void eigrp_cli_show_end_header(struct vty *vty, struct lyd_node *dnode)
diff --git a/eigrpd/eigrp_filter.c b/eigrpd/eigrp_filter.c
index 93eed9452c..9d5d45ca50 100644
--- a/eigrpd/eigrp_filter.c
+++ b/eigrpd/eigrp_filter.c
@@ -65,17 +65,15 @@
void eigrp_distribute_update(struct distribute_ctx *ctx,
struct distribute *dist)
{
+ struct eigrp *e = eigrp_lookup(ctx->vrf->vrf_id);
struct interface *ifp;
struct eigrp_interface *ei = NULL;
struct access_list *alist;
struct prefix_list *plist;
// struct route_map *routemap;
- struct eigrp *e;
/* if no interface address is present, set list to eigrp process struct
*/
- e = eigrp_lookup();
- assert(e != NULL);
/* Check if distribute-list was set for process or interface */
if (!dist->ifname) {
@@ -174,7 +172,7 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
return;
}
- ifp = if_lookup_by_name(dist->ifname, VRF_DEFAULT);
+ ifp = if_lookup_by_name(dist->ifname, e->vrf_id);
if (ifp == NULL)
return;
@@ -288,7 +286,7 @@ void eigrp_distribute_update_interface(struct interface *ifp)
struct distribute *dist;
struct eigrp *eigrp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(ifp->vrf_id);
if (!eigrp)
return;
dist = distribute_lookup(eigrp->distribute_ctx, ifp->name);
@@ -302,11 +300,13 @@ void eigrp_distribute_update_interface(struct interface *ifp)
*/
void eigrp_distribute_update_all(struct prefix_list *notused)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf;
struct interface *ifp;
- FOR_ALL_INTERFACES (vrf, ifp)
- eigrp_distribute_update_interface(ifp);
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ FOR_ALL_INTERFACES (vrf, ifp)
+ eigrp_distribute_update_interface(ifp);
+ }
}
/*
diff --git a/eigrpd/eigrp_filter.h b/eigrpd/eigrp_filter.h
index 34d00ecc13..03f45cedbf 100644
--- a/eigrpd/eigrp_filter.h
+++ b/eigrpd/eigrp_filter.h
@@ -35,10 +35,10 @@
extern void eigrp_distribute_update(struct distribute_ctx *ctx,
struct distribute *dist);
-extern void eigrp_distribute_update_interface(struct interface *);
-extern void eigrp_distribute_update_all(struct prefix_list *);
-extern void eigrp_distribute_update_all_wrapper(struct access_list *);
-extern int eigrp_distribute_timer_process(struct thread *);
-extern int eigrp_distribute_timer_interface(struct thread *);
+extern void eigrp_distribute_update_interface(struct interface *ifp);
+extern void eigrp_distribute_update_all(struct prefix_list *plist);
+extern void eigrp_distribute_update_all_wrapper(struct access_list *alist);
+extern int eigrp_distribute_timer_process(struct thread *thread);
+extern int eigrp_distribute_timer_interface(struct thread *thread);
#endif /* EIGRPD_EIGRP_FILTER_H_ */
diff --git a/eigrpd/eigrp_fsm.c b/eigrpd/eigrp_fsm.c
index 4d6d73e202..cc6d47f488 100644
--- a/eigrpd/eigrp_fsm.c
+++ b/eigrpd/eigrp_fsm.c
@@ -445,7 +445,7 @@ int eigrp_fsm_event_nq_fcn(struct eigrp_fsm_action_message *msg)
prefix->rdistance = prefix->distance = prefix->fdistance = ne->distance;
prefix->reported_metric = ne->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
@@ -471,7 +471,7 @@ int eigrp_fsm_event_q_fcn(struct eigrp_fsm_action_message *msg)
prefix->state = EIGRP_FSM_STATE_ACTIVE_3;
prefix->rdistance = prefix->distance = prefix->fdistance = ne->distance;
prefix->reported_metric = ne->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
@@ -486,7 +486,7 @@ int eigrp_fsm_event_q_fcn(struct eigrp_fsm_action_message *msg)
int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg)
{
- struct eigrp *eigrp;
+ struct eigrp *eigrp = msg->eigrp;
struct eigrp_prefix_entry *prefix = msg->prefix;
struct eigrp_nexthop_entry *ne = listnode_head(prefix->entries);
@@ -499,13 +499,11 @@ int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg)
if (msg->packet_type == EIGRP_OPC_QUERY)
eigrp_send_reply(msg->adv_router, prefix);
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
- eigrp = eigrp_lookup();
- assert(eigrp);
listnode_add(eigrp->topology_changes_internalIPV4,
prefix);
}
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
}
if (msg->packet_type == EIGRP_OPC_QUERY)
@@ -536,9 +534,10 @@ int eigrp_fsm_event_lr(struct eigrp_fsm_action_message *msg)
prefix->state = EIGRP_FSM_STATE_PASSIVE;
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
- eigrp_update_topology_table_prefix(eigrp->topology_table, prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
+ eigrp_update_topology_table_prefix(eigrp, eigrp->topology_table,
+ prefix);
return 1;
}
@@ -588,9 +587,10 @@ int eigrp_fsm_event_lr_fcs(struct eigrp_fsm_action_message *msg)
}
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
- eigrp_update_topology_table_prefix(eigrp->topology_table, prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
+ eigrp_update_topology_table_prefix(eigrp, eigrp->topology_table,
+ prefix);
return 1;
}
@@ -612,7 +612,7 @@ int eigrp_fsm_event_lr_fcn(struct eigrp_fsm_action_message *msg)
prefix->rdistance = prefix->distance = best_successor->distance;
prefix->reported_metric = best_successor->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
diff --git a/eigrpd/eigrp_hello.c b/eigrpd/eigrp_hello.c
index dacd5caeb5..6f93cd7b3e 100644
--- a/eigrpd/eigrp_hello.c
+++ b/eigrpd/eigrp_hello.c
@@ -147,7 +147,7 @@ eigrp_hello_parameter_decode(struct eigrp_neighbor *nbr,
zlog_info("Neighbor %s (%s) is pending: new adjacency",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* Expedited hello sent */
eigrp_hello_send(nbr->ei, EIGRP_HELLO_NORMAL, NULL);
@@ -167,7 +167,7 @@ eigrp_hello_parameter_decode(struct eigrp_neighbor *nbr,
"Neighbor %s (%s) is down: Interface PEER-TERMINATION received",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_delete(nbr);
return NULL;
} else {
@@ -175,7 +175,7 @@ eigrp_hello_parameter_decode(struct eigrp_neighbor *nbr,
"Neighbor %s (%s) going down: Kvalue mismatch",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_state_set(nbr, EIGRP_NEIGHBOR_DOWN);
}
}
@@ -245,6 +245,7 @@ static void eigrp_sw_version_decode(struct eigrp_neighbor *nbr,
static void eigrp_peer_termination_decode(struct eigrp_neighbor *nbr,
struct eigrp_tlv_hdr_type *tlv)
{
+ struct eigrp *eigrp = nbr->ei->eigrp;
struct TLV_Peer_Termination_type *param =
(struct TLV_Peer_Termination_type *)tlv;
@@ -254,7 +255,7 @@ static void eigrp_peer_termination_decode(struct eigrp_neighbor *nbr,
if (my_ip == received_ip) {
zlog_info("Neighbor %s (%s) is down: Peer Termination received",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
/* delete neighbor */
@@ -330,7 +331,7 @@ void eigrp_hello_receive(struct eigrp *eigrp, struct ip *iph,
if (IS_DEBUG_EIGRP_PACKET(eigrph->opcode - 1, RECV))
zlog_debug("Processing Hello size[%u] int(%s) nbr(%s)", size,
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT),
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id),
inet_ntoa(nbr->src));
size -= EIGRP_HEADER_LEN;
@@ -484,21 +485,15 @@ static uint16_t eigrp_tidlist_encode(struct stream *s)
* Part of conditional receive process
*
*/
-static uint16_t eigrp_sequence_encode(struct stream *s)
+static uint16_t eigrp_sequence_encode(struct eigrp *eigrp, struct stream *s)
{
uint16_t length = EIGRP_TLV_SEQ_BASE_LEN;
- struct eigrp *eigrp;
struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
size_t backup_end, size_end;
int found;
- eigrp = eigrp_lookup();
- if (eigrp == NULL) {
- return 0;
- }
-
// add in the parameters TLV
backup_end = stream_get_endp(s);
stream_putw(s, EIGRP_TLV_SEQ);
@@ -541,15 +536,10 @@ static uint16_t eigrp_sequence_encode(struct stream *s)
* Part of conditional receive process
*
*/
-static uint16_t eigrp_next_sequence_encode(struct stream *s)
+static uint16_t eigrp_next_sequence_encode(struct eigrp *eigrp,
+ struct stream *s)
{
uint16_t length = EIGRP_NEXT_SEQUENCE_TLV_SIZE;
- struct eigrp *eigrp;
-
- eigrp = eigrp_lookup();
- if (eigrp == NULL) {
- return 0;
- }
// add in the parameters TLV
stream_putw(s, EIGRP_TLV_NEXT_MCAST_SEQ);
@@ -659,8 +649,8 @@ static struct eigrp_packet *eigrp_hello_encode(struct eigrp_interface *ei,
length += eigrp_sw_version_encode(ep->s);
if (flags & EIGRP_HELLO_ADD_SEQUENCE) {
- length += eigrp_sequence_encode(ep->s);
- length += eigrp_next_sequence_encode(ep->s);
+ length += eigrp_sequence_encode(ei->eigrp, ep->s);
+ length += eigrp_next_sequence_encode(ei->eigrp, ep->s);
}
// add in the TID list if doing multi-topology
diff --git a/eigrpd/eigrp_interface.c b/eigrpd/eigrp_interface.c
index c52a98ee25..fd1d3f5cb9 100644
--- a/eigrpd/eigrp_interface.c
+++ b/eigrpd/eigrp_interface.c
@@ -206,7 +206,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
eigrp_prefix_entry_add(eigrp->topology_table, pe);
listnode_add(eigrp->topology_changes_internalIPV4, pe);
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
for (ALL_LIST_ELEMENTS(eigrp->eiflist, node, nnode, ei2)) {
eigrp_update_send(ei2);
@@ -218,7 +218,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
struct eigrp_fsm_action_message msg;
ne->prefix = pe;
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
msg.packet_type = EIGRP_OPC_UPDATE;
msg.eigrp = eigrp;
@@ -329,10 +329,7 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
{
struct prefix dest_addr;
struct eigrp_prefix_entry *pe;
- struct eigrp *eigrp = eigrp_lookup();
-
- if (!eigrp)
- return;
+ struct eigrp *eigrp = ei->eigrp;
if (source == INTERFACE_DOWN_BY_VTY) {
THREAD_OFF(ei->t_hello);
@@ -344,7 +341,7 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
pe = eigrp_topology_table_lookup_ipv4(eigrp->topology_table,
&dest_addr);
if (pe)
- eigrp_prefix_entry_delete(eigrp->topology_table, pe);
+ eigrp_prefix_entry_delete(eigrp, eigrp->topology_table, pe);
eigrp_if_down(ei);
diff --git a/eigrpd/eigrp_main.c b/eigrpd/eigrp_main.c
index 1781a88173..299825dd1b 100644
--- a/eigrpd/eigrp_main.c
+++ b/eigrpd/eigrp_main.c
@@ -65,6 +65,7 @@
#include "eigrpd/eigrp_snmp.h"
#include "eigrpd/eigrp_filter.h"
#include "eigrpd/eigrp_errors.h"
+#include "eigrpd/eigrp_vrf.h"
//#include "eigrpd/eigrp_routemap.h"
/* eigprd privileges */
@@ -182,6 +183,7 @@ int main(int argc, char **argv, char **envp)
master = eigrp_om->master;
eigrp_error_init();
+ eigrp_vrf_init();
vrf_init(NULL, NULL, NULL, NULL, NULL);
/*EIGRPd init*/
diff --git a/eigrpd/eigrp_neighbor.c b/eigrpd/eigrp_neighbor.c
index 66dd5f3419..2ae3997fae 100644
--- a/eigrpd/eigrp_neighbor.c
+++ b/eigrpd/eigrp_neighbor.c
@@ -194,13 +194,12 @@ void eigrp_nbr_delete(struct eigrp_neighbor *nbr)
int holddown_timer_expired(struct thread *thread)
{
- struct eigrp_neighbor *nbr;
-
- nbr = THREAD_ARG(thread);
+ struct eigrp_neighbor *nbr = THREAD_ARG(thread);
+ struct eigrp *eigrp = nbr->ei->eigrp;
zlog_info("Neighbor %s (%s) is down: holding time expired",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
nbr->state = EIGRP_NEIGHBOR_DOWN;
eigrp_nbr_delete(nbr);
@@ -297,19 +296,13 @@ void eigrp_nbr_state_update(struct eigrp_neighbor *nbr)
}
}
-int eigrp_nbr_count_get(void)
+int eigrp_nbr_count_get(struct eigrp *eigrp)
{
struct eigrp_interface *iface;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
- struct eigrp *eigrp = eigrp_lookup();
uint32_t counter;
- if (eigrp == NULL) {
- zlog_debug("EIGRP Routing Process not enabled");
- return 0;
- }
-
counter = 0;
for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, iface)) {
for (ALL_LIST_ELEMENTS(iface->nbrs, node2, nnode2, nbr)) {
@@ -335,20 +328,16 @@ int eigrp_nbr_count_get(void)
*/
void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty)
{
- if (nbr == NULL) {
- flog_err(EC_EIGRP_CONFIG,
- "Nbr Hard restart: Neighbor not specified.");
- return;
- }
+ struct eigrp *eigrp = nbr->ei->eigrp;
zlog_debug("Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
if (vty != NULL) {
vty_time_print(vty, 0);
vty_out(vty, "Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
}
/* send Hello with Peer Termination TLV */
diff --git a/eigrpd/eigrp_neighbor.h b/eigrpd/eigrp_neighbor.h
index 21a8c6a004..1c308fa981 100644
--- a/eigrpd/eigrp_neighbor.h
+++ b/eigrpd/eigrp_neighbor.h
@@ -33,24 +33,25 @@
#define _ZEBRA_EIGRP_NEIGHBOR_H
/* Prototypes */
-extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *,
- struct eigrp_header *, struct ip *);
-extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *);
-extern void eigrp_nbr_delete(struct eigrp_neighbor *);
+extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *ei,
+ struct eigrp_header *,
+ struct ip *addr);
+extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei);
+extern void eigrp_nbr_delete(struct eigrp_neighbor *neigh);
-extern int holddown_timer_expired(struct thread *);
+extern int holddown_timer_expired(struct thread *thread);
-extern int eigrp_neighborship_check(struct eigrp_neighbor *,
- struct TLV_Parameter_Type *);
-extern void eigrp_nbr_state_update(struct eigrp_neighbor *);
-extern void eigrp_nbr_state_set(struct eigrp_neighbor *, uint8_t state);
-extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *);
-extern int eigrp_nbr_count_get(void);
-extern const char *eigrp_nbr_state_str(struct eigrp_neighbor *);
-extern struct eigrp_neighbor *eigrp_nbr_lookup_by_addr(struct eigrp_interface *,
- struct in_addr *);
-extern struct eigrp_neighbor *eigrp_nbr_lookup_by_addr_process(struct eigrp *,
- struct in_addr);
+extern int eigrp_neighborship_check(struct eigrp_neighbor *neigh,
+ struct TLV_Parameter_Type *tlv);
+extern void eigrp_nbr_state_update(struct eigrp_neighbor *neigh);
+extern void eigrp_nbr_state_set(struct eigrp_neighbor *neigh, uint8_t state);
+extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *neigh);
+extern int eigrp_nbr_count_get(struct eigrp *eigrp);
+extern const char *eigrp_nbr_state_str(struct eigrp_neighbor *neigh);
+extern struct eigrp_neighbor *
+eigrp_nbr_lookup_by_addr(struct eigrp_interface *ei, struct in_addr *addr);
+extern struct eigrp_neighbor *
+eigrp_nbr_lookup_by_addr_process(struct eigrp *eigrp, struct in_addr addr);
extern void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty);
extern int eigrp_nbr_split_horizon_check(struct eigrp_nexthop_entry *ne,
diff --git a/eigrpd/eigrp_network.c b/eigrpd/eigrp_network.c
index bbb9487b4d..c7ffbf9f0e 100644
--- a/eigrpd/eigrp_network.c
+++ b/eigrpd/eigrp_network.c
@@ -53,7 +53,7 @@ static int eigrp_network_match_iface(const struct prefix *connected_prefix,
static void eigrp_network_run_interface(struct eigrp *, struct prefix *,
struct interface *);
-int eigrp_sock_init(void)
+int eigrp_sock_init(struct vrf *vrf)
{
int eigrp_sock;
int ret;
@@ -61,8 +61,10 @@ int eigrp_sock_init(void)
int hincl = 1;
#endif
- frr_elevate_privs(&eigrpd_privs) {
- eigrp_sock = socket(AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP);
+ frr_with_privs(&eigrpd_privs) {
+ eigrp_sock = vrf_socket(
+ AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP, vrf->vrf_id,
+ vrf->vrf_id != VRF_DEFAULT ? vrf->name : NULL);
if (eigrp_sock < 0) {
zlog_err("eigrp_read_sock_init: socket: %s",
safe_strerror(errno));
@@ -209,7 +211,7 @@ int eigrp_if_drop_allspfrouters(struct eigrp *top, struct prefix *p,
int eigrp_network_set(struct eigrp *eigrp, struct prefix *p)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf = vrf_lookup_by_id(eigrp->vrf_id);
struct route_node *rn;
struct interface *ifp;
@@ -290,6 +292,9 @@ void eigrp_if_update(struct interface *ifp)
* we need to check each one and add the interface as appropriate
*/
for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp)) {
+ if (ifp->vrf_id != eigrp->vrf_id)
+ continue;
+
/* EIGRP must be on and Router-ID must be configured. */
if (eigrp->router_id.s_addr == 0)
continue;
diff --git a/eigrpd/eigrp_network.h b/eigrpd/eigrp_network.h
index b3c76bbecc..7839fc946b 100644
--- a/eigrpd/eigrp_network.h
+++ b/eigrpd/eigrp_network.h
@@ -30,7 +30,7 @@
/* Prototypes */
-extern int eigrp_sock_init(void);
+extern int eigrp_sock_init(struct vrf *vrf);
extern int eigrp_if_ipmulticast(struct eigrp *, struct prefix *, unsigned int);
extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p);
extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p);
diff --git a/eigrpd/eigrp_northbound.c b/eigrpd/eigrp_northbound.c
index 5f0c91809e..4ccce2ebb8 100644
--- a/eigrpd/eigrp_northbound.c
+++ b/eigrpd/eigrp_northbound.c
@@ -79,13 +79,18 @@ static int eigrpd_instance_create(enum nb_event event,
union nb_resource *resource)
{
struct eigrp *eigrp;
+ const char *vrf;
+ vrf_id_t vrfid;
switch (event) {
case NB_EV_VALIDATE:
/* NOTHING */
break;
case NB_EV_PREPARE:
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ vrf = yang_dnode_get_string(dnode, "./vrf");
+ vrfid = vrf_name_to_id(vrf);
+
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"), vrfid);
resource->ptr = eigrp;
break;
case NB_EV_ABORT:
@@ -745,14 +750,17 @@ static int eigrpd_instance_redistribute_create(enum nb_event event,
union nb_resource *resource)
{
struct eigrp_metrics metrics;
+ const char *vrfname;
struct eigrp *eigrp;
uint32_t proto;
+ vrf_id_t vrfid;
switch (event) {
case NB_EV_VALIDATE:
proto = yang_dnode_get_enum(dnode, "./protocol");
- if (vrf_bitmap_check(zclient->redist[AFI_IP][proto],
- VRF_DEFAULT))
+ vrfname = yang_dnode_get_string(dnode, "../vrf");
+ vrfid = vrf_name_to_id(vrfname);
+ if (vrf_bitmap_check(zclient->redist[AFI_IP][proto], vrfid))
return NB_ERR_INCONSISTENCY;
break;
case NB_EV_PREPARE:
@@ -1181,7 +1189,8 @@ static int lib_interface_eigrp_instance_create(enum nb_event event,
break;
}
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"),
+ ifp->vrf_id);
eif = eigrp_interface_lookup(eigrp, ifp->name);
if (eif == NULL)
return NB_ERR_INCONSISTENCY;
@@ -1192,7 +1201,8 @@ static int lib_interface_eigrp_instance_create(enum nb_event event,
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(dnode, NULL, true);
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"),
+ ifp->vrf_id);
eif = eigrp_interface_lookup(eigrp, ifp->name);
if (eif == NULL)
return NB_ERR_INCONSISTENCY;
diff --git a/eigrpd/eigrp_packet.c b/eigrpd/eigrp_packet.c
index 4efb91e4a0..ba8271d46e 100644
--- a/eigrpd/eigrp_packet.c
+++ b/eigrpd/eigrp_packet.c
@@ -77,11 +77,13 @@ const struct message eigrp_packet_type_str[] = {
static unsigned char zeropad[16] = {0};
/* Forward function reference*/
-static struct stream *eigrp_recv_packet(int, struct interface **,
- struct stream *);
-static int eigrp_verify_header(struct stream *, struct eigrp_interface *,
- struct ip *, struct eigrp_header *);
-static int eigrp_check_network_mask(struct eigrp_interface *, struct in_addr);
+static struct stream *eigrp_recv_packet(struct eigrp *eigrp, int fd,
+ struct interface **ifp,
+ struct stream *s);
+static int eigrp_verify_header(struct stream *s, struct eigrp_interface *ei,
+ struct ip *addr, struct eigrp_header *header);
+static int eigrp_check_network_mask(struct eigrp_interface *ei,
+ struct in_addr mask);
static int eigrp_retrans_count_exceeded(struct eigrp_packet *ep,
struct eigrp_neighbor *nbr)
@@ -495,7 +497,7 @@ int eigrp_read(struct thread *thread)
thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
stream_reset(eigrp->ibuf);
- if (!(ibuf = eigrp_recv_packet(eigrp->fd, &ifp, eigrp->ibuf))) {
+ if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {
/* This raw packet is known to be at least as big as its IP
* header. */
return -1;
@@ -525,7 +527,7 @@ int eigrp_read(struct thread *thread)
ifindex
retrieval but do not. */
c = if_lookup_address((void *)&iph->ip_src, AF_INET,
- VRF_DEFAULT);
+ eigrp->vrf_id);
if (c == NULL)
return 0;
@@ -706,7 +708,8 @@ int eigrp_read(struct thread *thread)
return 0;
}
-static struct stream *eigrp_recv_packet(int fd, struct interface **ifp,
+static struct stream *eigrp_recv_packet(struct eigrp *eigrp,
+ int fd, struct interface **ifp,
struct stream *ibuf)
{
int ret;
@@ -774,7 +777,7 @@ static struct stream *eigrp_recv_packet(int fd, struct interface **ifp,
ifindex = getsockopt_ifindex(AF_INET, &msgh);
- *ifp = if_lookup_by_index(ifindex, VRF_DEFAULT);
+ *ifp = if_lookup_by_index(ifindex, eigrp->vrf_id);
if (ret != ip_len) {
zlog_warn(
diff --git a/eigrpd/eigrp_routemap.c b/eigrpd/eigrp_routemap.c
index bac7494774..d78588644f 100644
--- a/eigrpd/eigrp_routemap.c
+++ b/eigrpd/eigrp_routemap.c
@@ -135,7 +135,8 @@ void eigrp_rmap_update(const char *notused)
static int eigrp_route_match_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
+
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
case RMAP_RULE_MISSING:
@@ -147,6 +148,10 @@ static int eigrp_route_match_add(struct vty *vty, struct route_map_index *index,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally not handling these cases
+ */
break;
}
@@ -158,7 +163,8 @@ static int eigrp_route_match_delete(struct vty *vty,
struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
+
ret = route_map_delete_match(index, command, arg);
switch (ret) {
case RMAP_RULE_MISSING:
@@ -170,6 +176,10 @@ static int eigrp_route_match_delete(struct vty *vty,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally ignored
+ */
break;
}
@@ -180,7 +190,7 @@ static int eigrp_route_match_delete(struct vty *vty,
static int eigrp_route_set_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_set(index, command, arg);
switch (ret) {
@@ -201,6 +211,10 @@ static int eigrp_route_set_add(struct vty *vty, struct route_map_index *index,
}
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally left blank here
+ */
break;
}
@@ -212,7 +226,7 @@ static int eigrp_route_set_delete(struct vty *vty,
struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_set(index, command, arg);
switch (ret) {
@@ -225,6 +239,10 @@ static int eigrp_route_set_delete(struct vty *vty,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally not handled
+ */
break;
}
diff --git a/eigrpd/eigrp_structs.h b/eigrpd/eigrp_structs.h
index 1b9186f011..e50858f071 100644
--- a/eigrpd/eigrp_structs.h
+++ b/eigrpd/eigrp_structs.h
@@ -69,6 +69,8 @@ struct eigrp_metrics {
};
struct eigrp {
+ vrf_id_t vrf_id;
+
uint16_t AS; /* Autonomous system number */
uint16_t vrid; /* Virtual Router ID */
uint8_t k_values[6]; /*Array for K values configuration*/
@@ -85,7 +87,7 @@ struct eigrp {
struct list *eiflist; /* eigrp interfaces */
uint8_t passive_interface_default; /* passive-interface default */
- unsigned int fd;
+ int fd;
unsigned int maxsndbuflen;
uint32_t sequence_number; /*Global EIGRP sequence number*/
diff --git a/eigrpd/eigrp_topology.c b/eigrpd/eigrp_topology.c
index e861cdb333..9cc612eaf1 100644
--- a/eigrpd/eigrp_topology.c
+++ b/eigrpd/eigrp_topology.c
@@ -117,9 +117,9 @@ struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void)
/*
* Freeing topology table list
*/
-void eigrp_topology_free(struct route_table *table)
+void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table)
{
- eigrp_topology_delete_all(table);
+ eigrp_topology_delete_all(eigrp, table);
route_table_finish(table);
}
@@ -150,7 +150,8 @@ void eigrp_prefix_entry_add(struct route_table *topology,
/*
* Adding topology entry to topology node
*/
-void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
+void eigrp_nexthop_entry_add(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *node,
struct eigrp_nexthop_entry *entry)
{
struct list *l = list_new();
@@ -161,7 +162,8 @@ void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
listnode_add_sort(node->entries, entry);
entry->prefix = node;
- eigrp_zebra_route_add(node->destination, l, node->fdistance);
+ eigrp_zebra_route_add(eigrp, node->destination,
+ l, node->fdistance);
}
list_delete(&l);
@@ -170,10 +172,9 @@ void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
/*
* Deleting topology node from topology table
*/
-void eigrp_prefix_entry_delete(struct route_table *table,
+void eigrp_prefix_entry_delete(struct eigrp *eigrp, struct route_table *table,
struct eigrp_prefix_entry *pe)
{
- struct eigrp *eigrp = eigrp_lookup();
struct eigrp_nexthop_entry *ne;
struct listnode *node, *nnode;
struct route_node *rn;
@@ -192,10 +193,10 @@ void eigrp_prefix_entry_delete(struct route_table *table,
listnode_delete(eigrp->topology_changes_internalIPV4, pe);
for (ALL_LIST_ELEMENTS(pe->entries, node, nnode, ne))
- eigrp_nexthop_entry_delete(pe, ne);
+ eigrp_nexthop_entry_delete(eigrp, pe, ne);
list_delete(&pe->entries);
list_delete(&pe->rij);
- eigrp_zebra_route_delete(pe->destination);
+ eigrp_zebra_route_delete(eigrp, pe->destination);
prefix_free(pe->destination);
rn->info = NULL;
@@ -207,12 +208,13 @@ void eigrp_prefix_entry_delete(struct route_table *table,
/*
* Deleting topology entry from topology node
*/
-void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *node,
+void eigrp_nexthop_entry_delete(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *node,
struct eigrp_nexthop_entry *entry)
{
if (listnode_lookup(node->entries, entry) != NULL) {
listnode_delete(node->entries, entry);
- eigrp_zebra_route_delete(node->destination);
+ eigrp_zebra_route_delete(eigrp, node->destination);
XFREE(MTYPE_EIGRP_NEXTHOP_ENTRY, entry);
}
}
@@ -220,7 +222,8 @@ void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *node,
/*
* Deleting all nodes from topology table
*/
-void eigrp_topology_delete_all(struct route_table *topology)
+void eigrp_topology_delete_all(struct eigrp *eigrp,
+ struct route_table *topology)
{
struct route_node *rn;
struct eigrp_prefix_entry *pe;
@@ -231,7 +234,7 @@ void eigrp_topology_delete_all(struct route_table *topology)
if (!pe)
continue;
- eigrp_prefix_entry_delete(topology, pe);
+ eigrp_prefix_entry_delete(eigrp, topology, pe);
}
}
@@ -426,17 +429,15 @@ void eigrp_topology_update_all_node_flags(struct eigrp *eigrp)
if (!pe)
continue;
- eigrp_topology_update_node_flags(pe);
+ eigrp_topology_update_node_flags(eigrp, pe);
}
}
-void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *dest)
+void eigrp_topology_update_node_flags(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *dest)
{
struct listnode *node;
struct eigrp_nexthop_entry *entry;
- struct eigrp *eigrp = eigrp_lookup();
-
- assert(eigrp);
for (ALL_LIST_ELEMENTS_RO(dest->entries, node, entry)) {
if (entry->reported_distance < dest->fdistance) {
@@ -464,27 +465,24 @@ void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *dest)
}
}
-void eigrp_update_routing_table(struct eigrp_prefix_entry *prefix)
+void eigrp_update_routing_table(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *prefix)
{
- struct eigrp *eigrp = eigrp_lookup();
struct list *successors;
struct listnode *node;
struct eigrp_nexthop_entry *entry;
- if (!eigrp)
- return;
-
successors = eigrp_topology_get_successor_max(prefix, eigrp->max_paths);
if (successors) {
- eigrp_zebra_route_add(prefix->destination, successors,
+ eigrp_zebra_route_add(eigrp, prefix->destination, successors,
prefix->fdistance);
for (ALL_LIST_ELEMENTS_RO(successors, node, entry))
entry->flags |= EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG;
list_delete(&successors);
} else {
- eigrp_zebra_route_delete(prefix->destination);
+ eigrp_zebra_route_delete(eigrp, prefix->destination);
for (ALL_LIST_ELEMENTS_RO(prefix->entries, node, entry))
entry->flags &= ~EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG;
}
@@ -525,7 +523,8 @@ void eigrp_topology_neighbor_down(struct eigrp *eigrp,
eigrp_update_send_all(eigrp, nbr->ei);
}
-void eigrp_update_topology_table_prefix(struct route_table *table,
+void eigrp_update_topology_table_prefix(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *prefix)
{
struct listnode *node1, *node2;
@@ -533,11 +532,11 @@ void eigrp_update_topology_table_prefix(struct route_table *table,
struct eigrp_nexthop_entry *entry;
for (ALL_LIST_ELEMENTS(prefix->entries, node1, node2, entry)) {
if (entry->distance == EIGRP_MAX_METRIC) {
- eigrp_nexthop_entry_delete(prefix, entry);
+ eigrp_nexthop_entry_delete(eigrp, prefix, entry);
}
}
if (prefix->distance == EIGRP_MAX_METRIC
&& prefix->nt != EIGRP_TOPOLOGY_TYPE_CONNECTED) {
- eigrp_prefix_entry_delete(table, prefix);
+ eigrp_prefix_entry_delete(eigrp, table, prefix);
}
}
diff --git a/eigrpd/eigrp_topology.h b/eigrpd/eigrp_topology.h
index 16bf2261cc..718cece403 100644
--- a/eigrpd/eigrp_topology.h
+++ b/eigrpd/eigrp_topology.h
@@ -37,34 +37,41 @@ extern struct route_table *eigrp_topology_new(void);
extern void eigrp_topology_init(struct route_table *table);
extern struct eigrp_prefix_entry *eigrp_prefix_entry_new(void);
extern struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void);
-extern void eigrp_topology_free(struct route_table *table);
+extern void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table);
extern void eigrp_prefix_entry_add(struct route_table *table,
struct eigrp_prefix_entry *pe);
-extern void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *,
- struct eigrp_nexthop_entry *);
-extern void eigrp_prefix_entry_delete(struct route_table *table,
+extern void eigrp_nexthop_entry_add(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe,
+ struct eigrp_nexthop_entry *ne);
+extern void eigrp_prefix_entry_delete(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *pe);
-extern void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *,
- struct eigrp_nexthop_entry *);
-extern void eigrp_topology_delete_all(struct route_table *table);
+extern void eigrp_nexthop_entry_delete(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe,
+ struct eigrp_nexthop_entry *ne);
+extern void eigrp_topology_delete_all(struct eigrp *eigrp,
+ struct route_table *table);
extern struct eigrp_prefix_entry *
eigrp_topology_table_lookup_ipv4(struct route_table *table, struct prefix *p);
-extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *);
+extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *pe);
extern struct list *
eigrp_topology_get_successor_max(struct eigrp_prefix_entry *pe,
unsigned int maxpaths);
extern struct eigrp_nexthop_entry *
-eigrp_prefix_entry_lookup(struct list *, struct eigrp_neighbor *);
-extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *,
- struct eigrp_neighbor *);
-extern void eigrp_topology_update_all_node_flags(struct eigrp *);
-extern void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *);
+eigrp_prefix_entry_lookup(struct list *entries, struct eigrp_neighbor *neigh);
+extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *eigrp,
+ struct eigrp_neighbor *n);
+extern void eigrp_topology_update_all_node_flags(struct eigrp *eigrp);
+extern void eigrp_topology_update_node_flags(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe);
extern enum metric_change
-eigrp_topology_update_distance(struct eigrp_fsm_action_message *);
-extern void eigrp_update_routing_table(struct eigrp_prefix_entry *);
-extern void eigrp_topology_neighbor_down(struct eigrp *,
- struct eigrp_neighbor *);
-extern void eigrp_update_topology_table_prefix(struct route_table *table,
+eigrp_topology_update_distance(struct eigrp_fsm_action_message *msg);
+extern void eigrp_update_routing_table(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe);
+extern void eigrp_topology_neighbor_down(struct eigrp *eigrp,
+ struct eigrp_neighbor *neigh);
+extern void eigrp_update_topology_table_prefix(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *pe);
#endif
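
A minimal usage sketch (not part of the diff): with the explicit instance
argument in the topology API, a caller withdraws a connected prefix through
its own struct eigrp rather than through a global lookup, mirroring the
updated call site in eigrp_if_free() above. dest_addr is assumed to be
filled in by the caller.

	struct eigrp_prefix_entry *pe;

	pe = eigrp_topology_table_lookup_ipv4(eigrp->topology_table,
					      &dest_addr);
	if (pe)
		eigrp_prefix_entry_delete(eigrp, eigrp->topology_table, pe);
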
diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c
index 8db4903077..6e2a81e32a 100644
--- a/eigrpd/eigrp_update.c
+++ b/eigrpd/eigrp_update.c
@@ -211,7 +211,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_debug(
"Processing Update size[%u] int(%s) nbr(%s) seq [%u] flags [%0x]",
size,
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT),
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id),
inet_ntoa(nbr->src), nbr->recv_sequence_number, flags);
@@ -221,7 +221,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_info("Neighbor %s (%s) is resync: peer graceful-restart",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* get all prefixes from neighbor from topology table */
nbr_prefixes = eigrp_neighbor_prefixes_lookup(eigrp, nbr);
@@ -233,7 +233,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_info("Neighbor %s (%s) is resync: peer graceful-restart",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* get all prefixes from neighbor from topology table */
nbr_prefixes = eigrp_neighbor_prefixes_lookup(eigrp, nbr);
@@ -282,12 +282,12 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_info("Neighbor %s (%s) is down: peer restarted",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_state_set(nbr, EIGRP_NEIGHBOR_PENDING);
zlog_info("Neighbor %s (%s) is pending: new adjacency",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_update_send_init(nbr);
}
}
@@ -366,11 +366,11 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
eigrp_prefix_entry_add(eigrp->topology_table,
pe);
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
pe->distance = pe->fdistance = pe->rdistance =
ne->distance;
pe->reported_metric = ne->total_metric;
- eigrp_topology_update_node_flags(pe);
+ eigrp_topology_update_node_flags(eigrp, pe);
pe->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(
@@ -965,19 +965,20 @@ void eigrp_update_send_GR(struct eigrp_neighbor *nbr, enum GR_type gr_type,
zlog_info(
"Neighbor %s (%s) is resync: route configuration changed",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex, eigrp->vrf_id));
} else if (gr_type == EIGRP_GR_MANUAL) {
/* Graceful restart was called manually */
zlog_info("Neighbor %s (%s) is resync: manually cleared",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex, eigrp->vrf_id));
if (vty != NULL) {
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is resync: manually cleared\n",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex,
+ eigrp->vrf_id));
}
}
diff --git a/eigrpd/eigrp_vrf.c b/eigrpd/eigrp_vrf.c
new file mode 100644
index 0000000000..c8c8491056
--- /dev/null
+++ b/eigrpd/eigrp_vrf.c
@@ -0,0 +1,50 @@
+/*
+ * eigrp - vrf code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <zebra.h>
+
+#include "vrf.h"
+
+#include "eigrpd/eigrp_vrf.h"
+
+static int eigrp_vrf_new(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_enable(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_disable(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_delete(struct vrf *vrf)
+{
+ return 0;
+}
+
+void eigrp_vrf_init(void)
+{
+ vrf_init(eigrp_vrf_new, eigrp_vrf_enable,
+ eigrp_vrf_disable, eigrp_vrf_delete, NULL);
+}
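
The hooks registered above are deliberately empty stubs; the point of the new
file is that eigrpd now participates in the lib/vrf.c lifecycle callbacks. A
hedged sketch of what a non-trivial hook could eventually look like
(hypothetical, not in this change; it would also need eigrpd/eigrpd.h):

	static int eigrp_vrf_delete(struct vrf *vrf)
	{
		/* Hypothetical body: tear down the instance bound to
		 * the VRF that is going away.
		 */
		struct eigrp *eigrp = eigrp_lookup(vrf->vrf_id);

		if (eigrp)
			eigrp_finish_final(eigrp);

		return 0;
	}
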
diff --git a/eigrpd/eigrp_vrf.h b/eigrpd/eigrp_vrf.h
new file mode 100644
index 0000000000..423a4be551
--- /dev/null
+++ b/eigrpd/eigrp_vrf.h
@@ -0,0 +1,24 @@
+/*
+ * eigrp - vrf code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __EIGRP_VRF_H__
+#define __EIGRP_VRF_H__
+
+extern void eigrp_vrf_init(void);
+#endif
diff --git a/eigrpd/eigrp_vty.c b/eigrpd/eigrp_vty.c
index 207622e659..e4a499425b 100644
--- a/eigrpd/eigrp_vty.c
+++ b/eigrpd/eigrp_vty.c
@@ -83,12 +83,31 @@ static void eigrp_vty_display_prefix_entry(struct vty *vty,
}
}
+static struct eigrp *eigrp_vty_get_eigrp(struct vty *vty, const char *vrf_name)
+{
+ struct vrf *vrf;
+
+ if (vrf_name)
+ vrf = vrf_lookup_by_name(vrf_name);
+ else
+ vrf = vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (!vrf) {
+		vty_out(vty, "VRF %s specified does not exist\n",
+ vrf_name ? vrf_name : VRF_DEFAULT_NAME);
+ return NULL;
+ }
+
+ return eigrp_lookup(vrf->vrf_id);
+}
+
DEFPY (show_ip_eigrp_topology_all,
show_ip_eigrp_topology_all_cmd,
- "show ip eigrp topology [all-links$all]",
+ "show ip eigrp [vrf NAME] topology [all-links$all]",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP topology\n"
"Show all links in topology table\n")
{
@@ -96,7 +115,7 @@ DEFPY (show_ip_eigrp_topology_all,
struct eigrp_prefix_entry *tn;
struct route_node *rn;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -119,10 +138,11 @@ DEFPY (show_ip_eigrp_topology_all,
DEFPY (show_ip_eigrp_topology,
show_ip_eigrp_topology_cmd,
- "show ip eigrp topology <A.B.C.D$address|A.B.C.D/M$prefix>",
+ "show ip eigrp [vrf NAME] topology <A.B.C.D$address|A.B.C.D/M$prefix>",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP topology\n"
"For a specific address\n"
"For a specific prefix\n")
@@ -132,7 +152,7 @@ DEFPY (show_ip_eigrp_topology,
struct route_node *rn;
struct prefix cmp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -167,12 +187,13 @@ DEFPY (show_ip_eigrp_topology,
return CMD_SUCCESS;
}
-DEFUN (show_ip_eigrp_interfaces,
+DEFPY (show_ip_eigrp_interfaces,
show_ip_eigrp_interfaces_cmd,
- "show ip eigrp interfaces [IFNAME] [detail]",
+ "show ip eigrp [vrf NAME] interfaces [IFNAME] [detail]$detail",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP interfaces\n"
"Interface name to look at\n"
"Detailed information\n")
@@ -180,22 +201,13 @@ DEFUN (show_ip_eigrp_interfaces,
struct eigrp_interface *ei;
struct eigrp *eigrp;
struct listnode *node;
- int idx = 0;
- bool detail = false;
- const char *ifname = NULL;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, "EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
- if (argv_find(argv, argc, "IFNAME", &idx))
- ifname = argv[idx]->arg;
-
- if (argv_find(argv, argc, "detail", &idx))
- detail = true;
-
if (!ifname)
show_ip_eigrp_interface_header(vty, eigrp);
@@ -210,12 +222,13 @@ DEFUN (show_ip_eigrp_interfaces,
return CMD_SUCCESS;
}
-DEFUN (show_ip_eigrp_neighbors,
+DEFPY (show_ip_eigrp_neighbors,
show_ip_eigrp_neighbors_cmd,
- "show ip eigrp neighbors [IFNAME] [detail]",
+ "show ip eigrp [vrf NAME] neighbors [IFNAME] [detail]$detail",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP neighbors\n"
"Interface to show on\n"
"Detailed Information\n")
@@ -224,21 +237,13 @@ DEFUN (show_ip_eigrp_neighbors,
struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
- bool detail = false;
- int idx = 0;
- const char *ifname = NULL;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
- if (argv_find(argv, argc, "IFNAME", &idx))
- ifname = argv[idx]->arg;
-
- detail = (argv_find(argv, argc, "detail", &idx));
-
show_ip_eigrp_neighbor_header(vty, eigrp);
for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
@@ -246,7 +251,7 @@ DEFUN (show_ip_eigrp_neighbors,
for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
if (detail || (nbr->state == EIGRP_NEIGHBOR_UP))
show_ip_eigrp_neighbor_sub(vty, nbr,
- detail);
+ !!detail);
}
}
}
@@ -257,12 +262,13 @@ DEFUN (show_ip_eigrp_neighbors,
/*
* Execute hard restart for all neighbors
*/
-DEFUN (clear_ip_eigrp_neighbors,
+DEFPY (clear_ip_eigrp_neighbors,
clear_ip_eigrp_neighbors_cmd,
- "clear ip eigrp neighbors",
+ "clear ip eigrp [vrf NAME] neighbors",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n")
{
struct eigrp *eigrp;
@@ -271,7 +277,7 @@ DEFUN (clear_ip_eigrp_neighbors,
struct eigrp_neighbor *nbr;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -289,13 +295,13 @@ DEFUN (clear_ip_eigrp_neighbors,
"Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
@@ -311,12 +317,13 @@ DEFUN (clear_ip_eigrp_neighbors,
/*
* Execute hard restart for all neighbors on interface
*/
-DEFUN (clear_ip_eigrp_neighbors_int,
+DEFPY (clear_ip_eigrp_neighbors_int,
clear_ip_eigrp_neighbors_int_cmd,
- "clear ip eigrp neighbors IFNAME",
+ "clear ip eigrp [vrf NAME] neighbors IFNAME",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Interface's name\n")
{
@@ -324,20 +331,18 @@ DEFUN (clear_ip_eigrp_neighbors_int,
struct eigrp_interface *ei;
struct listnode *node2, *nnode2;
struct eigrp_neighbor *nbr;
- int idx = 0;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
/* lookup interface by specified name */
- argv_find(argv, argc, "IFNAME", &idx);
- ei = eigrp_if_lookup_by_name(eigrp, argv[idx]->arg);
+ ei = eigrp_if_lookup_by_name(eigrp, ifname);
if (ei == NULL) {
- vty_out(vty, " Interface (%s) doesn't exist\n", argv[idx]->arg);
+ vty_out(vty, " Interface (%s) doesn't exist\n", ifname);
return CMD_WARNING;
}
@@ -350,13 +355,13 @@ DEFUN (clear_ip_eigrp_neighbors_int,
zlog_debug("Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
@@ -371,26 +376,21 @@ DEFUN (clear_ip_eigrp_neighbors_int,
/*
* Execute hard restart for neighbor specified by IP
*/
-DEFUN (clear_ip_eigrp_neighbors_IP,
+DEFPY (clear_ip_eigrp_neighbors_IP,
clear_ip_eigrp_neighbors_IP_cmd,
- "clear ip eigrp neighbors A.B.C.D",
+ "clear ip eigrp [vrf NAME] neighbors A.B.C.D$nbr_addr",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"IP-EIGRP neighbor address\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
- struct in_addr nbr_addr;
-
- if (!inet_aton(argv[4]->arg, &nbr_addr)) {
- vty_out(vty, "Unable to parse %s", argv[4]->arg);
- return CMD_WARNING;
- }
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -414,19 +414,20 @@ DEFUN (clear_ip_eigrp_neighbors_IP,
/*
* Execute graceful restart for all neighbors
*/
-DEFUN (clear_ip_eigrp_neighbors_soft,
+DEFPY (clear_ip_eigrp_neighbors_soft,
clear_ip_eigrp_neighbors_soft_cmd,
- "clear ip eigrp neighbors soft",
+ "clear ip eigrp [vrf NAME] neighbors soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Resync with peers without adjacency reset\n")
{
struct eigrp *eigrp;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -441,12 +442,13 @@ DEFUN (clear_ip_eigrp_neighbors_soft,
/*
* Execute graceful restart for all neighbors on interface
*/
-DEFUN (clear_ip_eigrp_neighbors_int_soft,
+DEFPY (clear_ip_eigrp_neighbors_int_soft,
clear_ip_eigrp_neighbors_int_soft_cmd,
- "clear ip eigrp neighbors IFNAME soft",
+ "clear ip eigrp [vrf NAME] neighbors IFNAME soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Interface's name\n"
"Resync with peer without adjacency reset\n")
@@ -455,14 +457,14 @@ DEFUN (clear_ip_eigrp_neighbors_int_soft,
struct eigrp_interface *ei;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
/* lookup interface by specified name */
- ei = eigrp_if_lookup_by_name(eigrp, argv[4]->arg);
+ ei = eigrp_if_lookup_by_name(eigrp, ifname);
if (ei == NULL) {
vty_out(vty, " Interface (%s) doesn't exist\n", argv[4]->arg);
return CMD_WARNING;
@@ -476,27 +478,23 @@ DEFUN (clear_ip_eigrp_neighbors_int_soft,
/*
* Execute graceful restart for neighbor specified by IP
*/
-DEFUN (clear_ip_eigrp_neighbors_IP_soft,
+DEFPY (clear_ip_eigrp_neighbors_IP_soft,
clear_ip_eigrp_neighbors_IP_soft_cmd,
- "clear ip eigrp neighbors A.B.C.D soft",
+ "clear ip eigrp [vrf NAME] neighbors A.B.C.D$nbr_addr soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"IP-EIGRP neighbor address\n"
"Resync with peer without adjacency reset\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
- struct in_addr nbr_addr;
- if (!inet_aton(argv[4]->arg, &nbr_addr)) {
- vty_out(vty, "Unable to parse: %s", argv[4]->arg);
- return CMD_WARNING;
- }
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
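
With the DEFPY conversion, every show/clear command above accepts an optional
"vrf NAME" token and falls back to the default VRF when it is omitted, for
example (BLUE is an arbitrary example VRF name):

	show ip eigrp topology
	show ip eigrp vrf BLUE topology all-links
	show ip eigrp vrf BLUE neighbors detail
	clear ip eigrp vrf BLUE neighbors soft
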
diff --git a/eigrpd/eigrp_zebra.c b/eigrpd/eigrp_zebra.c
index 0a74e86263..63cbe84ef2 100644
--- a/eigrpd/eigrp_zebra.c
+++ b/eigrpd/eigrp_zebra.c
@@ -59,7 +59,8 @@ static int eigrp_interface_address_add(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_address_delete(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_state_up(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_state_down(ZAPI_CALLBACK_ARGS);
-static struct interface *zebra_interface_if_lookup(struct stream *);
+static struct interface *zebra_interface_if_lookup(struct stream *,
+ vrf_id_t vrf_id);
static int eigrp_zebra_read_route(ZAPI_CALLBACK_ARGS);
@@ -79,7 +80,7 @@ static int eigrp_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
router_id_zebra = router_id.u.prefix4;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp != NULL)
eigrp_router_id_update(eigrp);
@@ -137,7 +138,7 @@ static int eigrp_zebra_read_route(ZAPI_CALLBACK_ARGS)
if (IPV4_NET127(ntohl(api.prefix.u.prefix4.s_addr)))
return 0;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp == NULL)
return 0;
@@ -257,7 +258,7 @@ static int eigrp_interface_state_up(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp;
- ifp = zebra_interface_if_lookup(zclient->ibuf);
+ ifp = zebra_interface_if_lookup(zclient->ibuf, vrf_id);
if (ifp == NULL)
return 0;
@@ -328,7 +329,8 @@ static int eigrp_interface_state_down(ZAPI_CALLBACK_ARGS)
return 0;
}
-static struct interface *zebra_interface_if_lookup(struct stream *s)
+static struct interface *zebra_interface_if_lookup(struct stream *s,
+ vrf_id_t vrf_id)
{
char ifname_tmp[INTERFACE_NAMSIZ];
@@ -336,11 +338,11 @@ static struct interface *zebra_interface_if_lookup(struct stream *s)
stream_get(ifname_tmp, s, INTERFACE_NAMSIZ);
/* And look it up. */
- return if_lookup_by_name(ifname_tmp, VRF_DEFAULT);
+ return if_lookup_by_name(ifname_tmp, vrf_id);
}
-void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
- uint32_t distance)
+void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p,
+ struct list *successors, uint32_t distance)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
@@ -352,7 +354,7 @@ void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
return;
memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
+ api.vrf_id = eigrp->vrf_id;
api.type = ZEBRA_ROUTE_EIGRP;
api.safi = SAFI_UNICAST;
api.metric = distance;
@@ -366,7 +368,7 @@ void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
if (count >= MULTIPATH_NUM)
break;
api_nh = &api.nexthops[count];
- api_nh->vrf_id = VRF_DEFAULT;
+ api_nh->vrf_id = eigrp->vrf_id;
if (te->adv_router->src.s_addr) {
api_nh->gate.ipv4 = te->adv_router->src;
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
@@ -388,7 +390,7 @@ void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
-void eigrp_zebra_route_delete(struct prefix *p)
+void eigrp_zebra_route_delete(struct eigrp *eigrp, struct prefix *p)
{
struct zapi_route api;
@@ -396,7 +398,7 @@ void eigrp_zebra_route_delete(struct prefix *p)
return;
memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
+ api.vrf_id = eigrp->vrf_id;
api.type = ZEBRA_ROUTE_EIGRP;
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, p, sizeof(*p));
@@ -411,20 +413,20 @@ void eigrp_zebra_route_delete(struct prefix *p)
return;
}
-int eigrp_is_type_redistributed(int type)
+static int eigrp_is_type_redistributed(int type, vrf_id_t vrf_id)
{
return ((DEFAULT_ROUTE_TYPE(type))
? vrf_bitmap_check(zclient->default_information[AFI_IP],
- VRF_DEFAULT)
+ vrf_id)
: vrf_bitmap_check(zclient->redist[AFI_IP][type],
- VRF_DEFAULT));
+ vrf_id));
}
int eigrp_redistribute_set(struct eigrp *eigrp, int type,
struct eigrp_metrics metric)
{
- if (eigrp_is_type_redistributed(type)) {
+ if (eigrp_is_type_redistributed(type, eigrp->vrf_id)) {
if (eigrp_metrics_is_same(metric, eigrp->dmetric[type])) {
eigrp->dmetric[type] = metric;
}
@@ -443,7 +445,7 @@ int eigrp_redistribute_set(struct eigrp *eigrp, int type,
eigrp->dmetric[type] = metric;
zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, AFI_IP, type, 0,
- VRF_DEFAULT);
+ eigrp->vrf_id);
++eigrp->redistribute;
@@ -453,10 +455,10 @@ int eigrp_redistribute_set(struct eigrp *eigrp, int type,
int eigrp_redistribute_unset(struct eigrp *eigrp, int type)
{
- if (eigrp_is_type_redistributed(type)) {
+ if (eigrp_is_type_redistributed(type, eigrp->vrf_id)) {
memset(&eigrp->dmetric[type], 0, sizeof(struct eigrp_metrics));
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP,
- type, 0, VRF_DEFAULT);
+ type, 0, eigrp->vrf_id);
--eigrp->redistribute;
}
diff --git a/eigrpd/eigrp_zebra.h b/eigrpd/eigrp_zebra.h
index 86b337cfe6..112f2584ce 100644
--- a/eigrpd/eigrp_zebra.h
+++ b/eigrpd/eigrp_zebra.h
@@ -33,11 +33,10 @@
extern void eigrp_zebra_init(void);
-extern void eigrp_zebra_route_add(struct prefix *, struct list *,
- uint32_t distance);
-extern void eigrp_zebra_route_delete(struct prefix *);
+extern void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p,
+ struct list *successors, uint32_t distance);
+extern void eigrp_zebra_route_delete(struct eigrp *eigrp, struct prefix *);
extern int eigrp_redistribute_set(struct eigrp *, int, struct eigrp_metrics);
extern int eigrp_redistribute_unset(struct eigrp *, int);
-extern int eigrp_is_type_redistributed(int);
#endif /* _ZEBRA_EIGRP_ZEBRA_H_ */
diff --git a/eigrpd/eigrpd.c b/eigrpd/eigrpd.c
index 93f8b6f90e..e93dc0cbfa 100644
--- a/eigrpd/eigrpd.c
+++ b/eigrpd/eigrpd.c
@@ -64,8 +64,6 @@ static struct eigrp_master eigrp_master;
struct eigrp_master *eigrp_om;
-static struct eigrp *eigrp_new(const char *);
-
extern struct zclient *zclient;
extern struct in_addr router_id_zebra;
@@ -95,7 +93,7 @@ extern struct in_addr router_id_zebra;
*/
void eigrp_router_id_update(struct eigrp *eigrp)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf = vrf_lookup_by_id(eigrp->vrf_id);
struct interface *ifp;
struct in_addr router_id, router_id_old;
@@ -136,14 +134,14 @@ void eigrp_master_init(void)
}
/* Allocate new eigrp structure. */
-static struct eigrp *eigrp_new(const char *AS)
+static struct eigrp *eigrp_new(uint16_t as, vrf_id_t vrf_id)
{
struct eigrp *eigrp = XCALLOC(MTYPE_EIGRP_TOP, sizeof(struct eigrp));
- int eigrp_socket;
/* init information relevant to peers */
+ eigrp->vrf_id = vrf_id;
eigrp->vrid = 0;
- eigrp->AS = atoi(AS);
+ eigrp->AS = as;
eigrp->router_id.s_addr = 0;
eigrp->router_id_static.s_addr = 0;
eigrp->sequence_number = 1;
@@ -161,14 +159,15 @@ static struct eigrp *eigrp_new(const char *AS)
eigrp->passive_interface_default = EIGRP_IF_ACTIVE;
eigrp->networks = eigrp_topology_new();
- if ((eigrp_socket = eigrp_sock_init()) < 0) {
+ eigrp->fd = eigrp_sock_init(vrf_lookup_by_id(vrf_id));
+
+ if (eigrp->fd < 0) {
flog_err_sys(
EC_LIB_SOCKET,
"eigrp_new: fatal error: eigrp_sock_init was unable to open a socket");
exit(1);
}
- eigrp->fd = eigrp_socket;
eigrp->maxsndbuflen = getsockopt_so_sendbuf(eigrp->fd);
eigrp->ibuf = stream_new(EIGRP_PACKET_MAX_LEN + 1);
@@ -200,16 +199,15 @@ static struct eigrp *eigrp_new(const char *AS)
eigrp->routemap[EIGRP_FILTER_OUT] = NULL;
/* Distribute list install. */
- eigrp->distribute_ctx = distribute_list_ctx_create(
- vrf_lookup_by_id(VRF_DEFAULT));
+ eigrp->distribute_ctx =
+ distribute_list_ctx_create(vrf_lookup_by_id(eigrp->vrf_id));
distribute_list_add_hook(eigrp->distribute_ctx,
eigrp_distribute_update);
distribute_list_delete_hook(eigrp->distribute_ctx,
eigrp_distribute_update);
/*
- eigrp->if_rmap_ctx = if_rmap_ctx_create(
- VRF_DEFAULT_NAME);
+ eigrp->if_rmap_ctx = if_rmap_ctx_create(eigrp->vrf_id);
if_rmap_hook_add (eigrp_if_rmap_update);
if_rmap_hook_delete (eigrp_if_rmap_update);
*/
@@ -217,13 +215,13 @@ static struct eigrp *eigrp_new(const char *AS)
return eigrp;
}
-struct eigrp *eigrp_get(const char *AS)
+struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id)
{
struct eigrp *eigrp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp == NULL) {
- eigrp = eigrp_new(AS);
+ eigrp = eigrp_new(as, vrf_id);
listnode_add(eigrp_om->eigrp, eigrp);
}
@@ -285,7 +283,7 @@ void eigrp_finish_final(struct eigrp *eigrp)
list_delete(&eigrp->eiflist);
list_delete(&eigrp->oi_write_q);
- eigrp_topology_free(eigrp->topology_table);
+ eigrp_topology_free(eigrp, eigrp->topology_table);
eigrp_nbr_delete(eigrp->neighbor_self);
@@ -300,10 +298,14 @@ void eigrp_finish_final(struct eigrp *eigrp)
}
/*Look for existing eigrp process*/
-struct eigrp *eigrp_lookup(void)
+struct eigrp *eigrp_lookup(vrf_id_t vrf_id)
{
- if (listcount(eigrp_om->eigrp) == 0)
- return NULL;
+ struct eigrp *eigrp;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp))
+ if (eigrp->vrf_id == vrf_id)
+ return eigrp;
- return listgetdata(listhead(eigrp_om->eigrp));
+ return NULL;
}
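
A minimal caller-side sketch of the new per-VRF lookup, as used by the zebra
callbacks elsewhere in this series (vrf_id is assumed to come from the
callback arguments):

	struct eigrp *eigrp;

	eigrp = eigrp_lookup(vrf_id);
	if (!eigrp)
		return 0;	/* no EIGRP process configured in this VRF */
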
diff --git a/eigrpd/eigrpd.h b/eigrpd/eigrpd.h
index 3ef3a9c0cc..6b4d45d1fc 100644
--- a/eigrpd/eigrpd.h
+++ b/eigrpd/eigrpd.h
@@ -48,8 +48,8 @@ extern void eigrp_master_init(void);
extern void eigrp_terminate(void);
extern void eigrp_finish_final(struct eigrp *);
extern void eigrp_finish(struct eigrp *);
-extern struct eigrp *eigrp_get(const char *);
-extern struct eigrp *eigrp_lookup(void);
+extern struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id);
+extern struct eigrp *eigrp_lookup(vrf_id_t vrf_id);
extern void eigrp_router_id_update(struct eigrp *);
/* eigrp_cli.c */
diff --git a/eigrpd/subdir.am b/eigrpd/subdir.am
index cc46766586..5a65c654e9 100644
--- a/eigrpd/subdir.am
+++ b/eigrpd/subdir.am
@@ -35,6 +35,7 @@ eigrpd_libeigrp_a_SOURCES = \
eigrpd/eigrp_snmp.c \
eigrpd/eigrp_topology.c \
eigrpd/eigrp_update.c \
+ eigrpd/eigrp_vrf.c \
eigrpd/eigrp_vty.c \
eigrpd/eigrp_zebra.c \
eigrpd/eigrpd.c \
@@ -66,6 +67,7 @@ noinst_HEADERS += \
eigrpd/eigrp_packet.h \
eigrpd/eigrp_snmp.h \
eigrpd/eigrp_structs.h \
+ eigrpd/eigrp_vrf.h \
eigrpd/eigrp_vty.h \
eigrpd/eigrp_zebra.h \
# end
diff --git a/isisd/isis_bpf.c b/isisd/isis_bpf.c
index 4e9aef47ad..d6b85b2fa3 100644
--- a/isisd/isis_bpf.c
+++ b/isisd/isis_bpf.c
@@ -187,7 +187,7 @@ int isis_sock_init(struct isis_circuit *circuit)
{
int retval = ISIS_OK;
- frr_elevate_privs(&isisd_privs) {
+ frr_with_privs(&isisd_privs) {
retval = open_bpf_dev(circuit);
diff --git a/isisd/isis_dlpi.c b/isisd/isis_dlpi.c
index a96dd93804..7d3dfcb01e 100644
--- a/isisd/isis_dlpi.c
+++ b/isisd/isis_dlpi.c
@@ -467,7 +467,7 @@ int isis_sock_init(struct isis_circuit *circuit)
{
int retval = ISIS_OK;
- frr_elevate_privs(&isisd_privs) {
+ frr_with_privs(&isisd_privs) {
retval = open_dlpi_dev(circuit);
diff --git a/isisd/isis_memory.c b/isisd/isis_memory.c
index 7d1ad6b049..2725459767 100644
--- a/isisd/isis_memory.c
+++ b/isisd/isis_memory.c
@@ -39,7 +39,6 @@ DEFINE_MTYPE(ISISD, ISIS_SPFTREE, "ISIS SPFtree")
DEFINE_MTYPE(ISISD, ISIS_VERTEX, "ISIS vertex")
DEFINE_MTYPE(ISISD, ISIS_ROUTE_INFO, "ISIS route info")
DEFINE_MTYPE(ISISD, ISIS_NEXTHOP, "ISIS nexthop")
-DEFINE_MTYPE(ISISD, ISIS_NEXTHOP6, "ISIS nexthop6")
DEFINE_MTYPE(ISISD, ISIS_DICT, "ISIS dictionary")
DEFINE_MTYPE(ISISD, ISIS_DICT_NODE, "ISIS dictionary node")
DEFINE_MTYPE(ISISD, ISIS_EXT_ROUTE, "ISIS redistributed route")
diff --git a/isisd/isis_memory.h b/isisd/isis_memory.h
index 4078c7a671..e672340e84 100644
--- a/isisd/isis_memory.h
+++ b/isisd/isis_memory.h
@@ -38,7 +38,6 @@ DECLARE_MTYPE(ISIS_SPFTREE)
DECLARE_MTYPE(ISIS_VERTEX)
DECLARE_MTYPE(ISIS_ROUTE_INFO)
DECLARE_MTYPE(ISIS_NEXTHOP)
-DECLARE_MTYPE(ISIS_NEXTHOP6)
DECLARE_MTYPE(ISIS_DICT)
DECLARE_MTYPE(ISIS_DICT_NODE)
DECLARE_MTYPE(ISIS_EXT_ROUTE)
diff --git a/isisd/isis_northbound.c b/isisd/isis_northbound.c
index 0982a468a6..c1b630eb2d 100644
--- a/isisd/isis_northbound.c
+++ b/isisd/isis_northbound.c
@@ -1524,29 +1524,64 @@ static int lib_interface_isis_create(enum nb_event event,
struct interface *ifp;
struct isis_circuit *circuit;
const char *area_tag = yang_dnode_get_string(dnode, "./area-tag");
+ uint32_t min_mtu, actual_mtu;
- if (event != NB_EV_APPLY)
- return NB_OK;
+ switch (event) {
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_VALIDATE:
+ /* check if interface mtu is sufficient. If the area has not
+ * been created yet, assume default MTU for the area
+ */
+ ifp = nb_running_get_entry(dnode, NULL, true);
+ /* zebra might not know yet about the MTU - nothing we can do */
+ if (ifp->mtu == 0)
+ break;
+ actual_mtu =
+ if_is_broadcast(ifp) ? ifp->mtu - LLC_LEN : ifp->mtu;
+ area = isis_area_lookup(area_tag);
+ if (area)
+ min_mtu = area->lsp_mtu;
+ else
+#ifndef FABRICD
+ min_mtu = yang_get_default_uint16(
+ "/frr-isisd:isis/instance/lsp/mtu");
+#else
+ min_mtu = DEFAULT_LSP_MTU;
+#endif /* ifndef FABRICD */
+ if (actual_mtu < min_mtu) {
+ flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE,
+ "Interface %s has MTU %" PRIu32
+ ", minimum MTU for the area is %" PRIu32 "",
+ ifp->name, actual_mtu, min_mtu);
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_APPLY:
+ area = isis_area_lookup(area_tag);
+		/* The area should have already been created. We are
+ * setting the priority of the global isis area creation
+ * slightly lower, so it should be executed first, but I
+ * cannot rely on that so here I have to check.
+ */
+ if (!area) {
+ flog_err(
+ EC_LIB_NB_CB_CONFIG_APPLY,
+ "%s: attempt to create circuit for area %s before the area has been created",
+ __func__, area_tag);
+ abort();
+ }
- area = isis_area_lookup(area_tag);
- /* The area should have already be created. We are
- * setting the priority of the global isis area creation
- * slightly lower, so it should be executed first, but I
- * cannot rely on that so here I have to check.
- */
- if (!area) {
- flog_err(
- EC_LIB_NB_CB_CONFIG_APPLY,
- "%s: attempt to create circuit for area %s before the area has been created",
- __func__, area_tag);
- abort();
+ ifp = nb_running_get_entry(dnode, NULL, true);
+ circuit = isis_circuit_create(area, ifp);
+ assert(circuit
+ && (circuit->state == C_STATE_CONF
+ || circuit->state == C_STATE_UP));
+ nb_running_set_entry(dnode, circuit);
+ break;
}
- ifp = nb_running_get_entry(dnode, NULL, true);
- circuit = isis_circuit_create(area, ifp);
- assert(circuit->state == C_STATE_CONF || circuit->state == C_STATE_UP);
- nb_running_set_entry(dnode, circuit);
-
return NB_OK;
}
@@ -1561,21 +1596,8 @@ static int lib_interface_isis_destroy(enum nb_event event,
circuit = nb_running_unset_entry(dnode);
if (!circuit)
return NB_ERR_INCONSISTENCY;
- /* delete circuit through csm changes */
- switch (circuit->state) {
- case C_STATE_UP:
- isis_csm_state_change(IF_DOWN_FROM_Z, circuit,
- circuit->interface);
- isis_csm_state_change(ISIS_DISABLE, circuit, circuit->area);
- break;
- case C_STATE_CONF:
+ if (circuit->state == C_STATE_UP || circuit->state == C_STATE_CONF)
isis_csm_state_change(ISIS_DISABLE, circuit, circuit->area);
- break;
- case C_STATE_INIT:
- isis_csm_state_change(IF_DOWN_FROM_Z, circuit,
- circuit->interface);
- break;
- }
return NB_OK;
}
diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c
index ea66e6950e..69ac3fc555 100644
--- a/isisd/isis_pfpacket.c
+++ b/isisd/isis_pfpacket.c
@@ -183,7 +183,7 @@ int isis_sock_init(struct isis_circuit *circuit)
{
int retval = ISIS_OK;
- frr_elevate_privs(&isisd_privs) {
+ frr_with_privs(&isisd_privs) {
retval = open_packet_socket(circuit);
diff --git a/isisd/isis_route.c b/isisd/isis_route.c
index 281eaf11bc..636a63e290 100644
--- a/isisd/isis_route.c
+++ b/isisd/isis_route.c
@@ -28,6 +28,7 @@
#include "linklist.h"
#include "vty.h"
#include "log.h"
+#include "lib_errors.h"
#include "memory.h"
#include "prefix.h"
#include "hash.h"
@@ -48,26 +49,25 @@
#include "isis_route.h"
#include "isis_zebra.h"
-static struct isis_nexthop *isis_nexthop_create(struct in_addr *ip,
+static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
+ union g_addr *ip, ifindex_t ifindex);
+
+static struct isis_nexthop *isis_nexthop_create(int family, union g_addr *ip,
ifindex_t ifindex)
{
- struct listnode *node;
struct isis_nexthop *nexthop;
- for (ALL_LIST_ELEMENTS_RO(isis->nexthops, node, nexthop)) {
- if (nexthop->ifindex != ifindex)
- continue;
- if (ip && memcmp(&nexthop->ip, ip, sizeof(struct in_addr)) != 0)
- continue;
-
+ nexthop = nexthoplookup(isis->nexthops, family, ip, ifindex);
+ if (nexthop) {
nexthop->lock++;
return nexthop;
}
nexthop = XCALLOC(MTYPE_ISIS_NEXTHOP, sizeof(struct isis_nexthop));
+ nexthop->family = family;
nexthop->ifindex = ifindex;
- memcpy(&nexthop->ip, ip, sizeof(struct in_addr));
+ nexthop->ip = *ip;
listnode_add(isis->nexthops, nexthop);
nexthop->lock++;
@@ -85,116 +85,79 @@ static void isis_nexthop_delete(struct isis_nexthop *nexthop)
return;
}
-static int nexthoplookup(struct list *nexthops, struct in_addr *ip,
- ifindex_t ifindex)
+static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
+ union g_addr *ip, ifindex_t ifindex)
{
struct listnode *node;
struct isis_nexthop *nh;
for (ALL_LIST_ELEMENTS_RO(nexthops, node, nh)) {
- if (!(memcmp(ip, &nh->ip, sizeof(struct in_addr)))
- && ifindex == nh->ifindex)
- return 1;
- }
-
- return 0;
-}
-
-static struct isis_nexthop6 *isis_nexthop6_new(struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct isis_nexthop6 *nexthop6;
-
- nexthop6 = XCALLOC(MTYPE_ISIS_NEXTHOP6, sizeof(struct isis_nexthop6));
-
- nexthop6->ifindex = ifindex;
- memcpy(&nexthop6->ip6, ip6, sizeof(struct in6_addr));
- nexthop6->lock++;
-
- return nexthop6;
-}
-
-static struct isis_nexthop6 *isis_nexthop6_create(struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct listnode *node;
- struct isis_nexthop6 *nexthop6;
-
- for (ALL_LIST_ELEMENTS_RO(isis->nexthops6, node, nexthop6)) {
- if (nexthop6->ifindex != ifindex)
+ if (nh->family != family)
continue;
- if (ip6
- && memcmp(&nexthop6->ip6, ip6, sizeof(struct in6_addr))
- != 0)
+ if (nh->ifindex != ifindex)
continue;
- nexthop6->lock++;
- return nexthop6;
- }
-
- nexthop6 = isis_nexthop6_new(ip6, ifindex);
-
- return nexthop6;
-}
-
-static void isis_nexthop6_delete(struct isis_nexthop6 *nexthop6)
-{
-
- nexthop6->lock--;
- if (nexthop6->lock == 0) {
- listnode_delete(isis->nexthops6, nexthop6);
- XFREE(MTYPE_ISIS_NEXTHOP6, nexthop6);
- }
-
- return;
-}
-
-static int nexthop6lookup(struct list *nexthops6, struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct listnode *node;
- struct isis_nexthop6 *nh6;
+ switch (family) {
+ case AF_INET:
+ if (IPV4_ADDR_CMP(&nh->ip.ipv4, &ip->ipv4))
+ continue;
+ break;
+ case AF_INET6:
+ if (IPV6_ADDR_CMP(&nh->ip.ipv6, &ip->ipv6))
+ continue;
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ family);
+ exit(1);
+ }
- for (ALL_LIST_ELEMENTS_RO(nexthops6, node, nh6)) {
- if (!(memcmp(ip6, &nh6->ip6, sizeof(struct in6_addr)))
- && ifindex == nh6->ifindex)
- return 1;
+ return nh;
}
- return 0;
+ return NULL;
}
-static void adjinfo2nexthop(struct list *nexthops, struct isis_adjacency *adj)
+static void adjinfo2nexthop(int family, struct list *nexthops,
+ struct isis_adjacency *adj)
{
struct isis_nexthop *nh;
-
- for (unsigned int i = 0; i < adj->ipv4_address_count; i++) {
- struct in_addr *ipv4_addr = &adj->ipv4_addresses[i];
- if (!nexthoplookup(nexthops, ipv4_addr,
- adj->circuit->interface->ifindex)) {
- nh = isis_nexthop_create(
- ipv4_addr, adj->circuit->interface->ifindex);
- nh->router_address = adj->router_address;
- listnode_add(nexthops, nh);
- return;
+ union g_addr ip = {};
+
+ switch (family) {
+ case AF_INET:
+ for (unsigned int i = 0; i < adj->ipv4_address_count; i++) {
+ ip.ipv4 = adj->ipv4_addresses[i];
+
+ if (!nexthoplookup(nexthops, AF_INET, &ip,
+ adj->circuit->interface->ifindex)) {
+ nh = isis_nexthop_create(
+ AF_INET, &ip,
+ adj->circuit->interface->ifindex);
+ listnode_add(nexthops, nh);
+ break;
+ }
}
- }
-}
-
-static void adjinfo2nexthop6(struct list *nexthops6, struct isis_adjacency *adj)
-{
- struct isis_nexthop6 *nh6;
-
- for (unsigned int i = 0; i < adj->ipv6_address_count; i++) {
- struct in6_addr *ipv6_addr = &adj->ipv6_addresses[i];
- if (!nexthop6lookup(nexthops6, ipv6_addr,
- adj->circuit->interface->ifindex)) {
- nh6 = isis_nexthop6_create(
- ipv6_addr, adj->circuit->interface->ifindex);
- nh6->router_address6 = adj->router_address6;
- listnode_add(nexthops6, nh6);
- return;
+ break;
+ case AF_INET6:
+ for (unsigned int i = 0; i < adj->ipv6_address_count; i++) {
+ ip.ipv6 = adj->ipv6_addresses[i];
+
+ if (!nexthoplookup(nexthops, AF_INET6, &ip,
+ adj->circuit->interface->ifindex)) {
+ nh = isis_nexthop_create(
+ AF_INET6, &ip,
+ adj->circuit->interface->ifindex);
+ listnode_add(nexthops, nh);
+ break;
+ }
}
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT, "%s: unknown address family [%d]",
+ __func__, family);
+ exit(1);
}
}
@@ -210,35 +173,32 @@ static struct isis_route_info *isis_route_info_new(struct prefix *prefix,
rinfo = XCALLOC(MTYPE_ISIS_ROUTE_INFO, sizeof(struct isis_route_info));
- if (prefix->family == AF_INET) {
- rinfo->nexthops = list_new();
- for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
- /* check for force resync this route */
- if (CHECK_FLAG(adj->circuit->flags,
- ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
- SET_FLAG(rinfo->flag,
- ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
- /* update neighbor router address */
+ rinfo->nexthops = list_new();
+ for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
+ /* check for force resync this route */
+ if (CHECK_FLAG(adj->circuit->flags,
+ ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
+ SET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
+
+ /* update neighbor router address */
+ switch (prefix->family) {
+ case AF_INET:
if (depth == 2 && prefix->prefixlen == 32)
adj->router_address = prefix->u.prefix4;
- adjinfo2nexthop(rinfo->nexthops, adj);
- }
- }
- if (prefix->family == AF_INET6) {
- rinfo->nexthops6 = list_new();
- for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
- /* check for force resync this route */
- if (CHECK_FLAG(adj->circuit->flags,
- ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
- SET_FLAG(rinfo->flag,
- ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
- /* update neighbor router address */
+ break;
+ case AF_INET6:
if (depth == 2 && prefix->prefixlen == 128
&& (!src_p || !src_p->prefixlen)) {
adj->router_address6 = prefix->u.prefix6;
}
- adjinfo2nexthop6(rinfo->nexthops6, adj);
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ prefix->family);
+ exit(1);
}
+ adjinfo2nexthop(prefix->family, rinfo->nexthops, adj);
}
rinfo->cost = cost;
@@ -255,12 +215,6 @@ static void isis_route_info_delete(struct isis_route_info *route_info)
list_delete(&route_info->nexthops);
}
- if (route_info->nexthops6) {
- route_info->nexthops6->del =
- (void (*)(void *))isis_nexthop6_delete;
- list_delete(&route_info->nexthops6);
- }
-
XFREE(MTYPE_ISIS_ROUTE_INFO, route_info);
}
@@ -280,7 +234,6 @@ static int isis_route_info_same(struct isis_route_info *new,
{
struct listnode *node;
struct isis_nexthop *nexthop;
- struct isis_nexthop6 *nexthop6;
if (!CHECK_FLAG(old->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED))
return 0;
@@ -291,31 +244,15 @@ static int isis_route_info_same(struct isis_route_info *new,
if (!isis_route_info_same_attrib(new, old))
return 0;
- if (family == AF_INET) {
- for (ALL_LIST_ELEMENTS_RO(new->nexthops, node, nexthop))
- if (nexthoplookup(old->nexthops, &nexthop->ip,
- nexthop->ifindex)
- == 0)
- return 0;
-
- for (ALL_LIST_ELEMENTS_RO(old->nexthops, node, nexthop))
- if (nexthoplookup(new->nexthops, &nexthop->ip,
- nexthop->ifindex)
- == 0)
- return 0;
- } else if (family == AF_INET6) {
- for (ALL_LIST_ELEMENTS_RO(new->nexthops6, node, nexthop6))
- if (nexthop6lookup(old->nexthops6, &nexthop6->ip6,
- nexthop6->ifindex)
- == 0)
- return 0;
-
- for (ALL_LIST_ELEMENTS_RO(old->nexthops6, node, nexthop6))
- if (nexthop6lookup(new->nexthops6, &nexthop6->ip6,
- nexthop6->ifindex)
- == 0)
- return 0;
- }
+ for (ALL_LIST_ELEMENTS_RO(new->nexthops, node, nexthop))
+ if (!nexthoplookup(old->nexthops, nexthop->family, &nexthop->ip,
+ nexthop->ifindex))
+ return 0;
+
+ for (ALL_LIST_ELEMENTS_RO(old->nexthops, node, nexthop))
+ if (!nexthoplookup(new->nexthops, nexthop->family, &nexthop->ip,
+ nexthop->ifindex))
+ return 0;
return 1;
}
diff --git a/isisd/isis_route.h b/isisd/isis_route.h
index 9d6858586b..a20a7e038f 100644
--- a/isisd/isis_route.h
+++ b/isisd/isis_route.h
@@ -25,17 +25,12 @@
#ifndef _ZEBRA_ISIS_ROUTE_H
#define _ZEBRA_ISIS_ROUTE_H
-struct isis_nexthop6 {
- ifindex_t ifindex;
- struct in6_addr ip6;
- struct in6_addr router_address6;
- unsigned int lock;
-};
+#include "lib/nexthop.h"
struct isis_nexthop {
ifindex_t ifindex;
- struct in_addr ip;
- struct in_addr router_address;
+ int family;
+ union g_addr ip;
unsigned int lock;
};
@@ -47,7 +42,6 @@ struct isis_route_info {
uint32_t cost;
uint32_t depth;
struct list *nexthops;
- struct list *nexthops6;
};
struct isis_route_info *isis_route_create(struct prefix *prefix,
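A minimal sketch of what the unified structure buys (illustrative only; isis_nexthop_create() is static to isis_route.c and the variables here are hypothetical): a single per-route list now carries both address families, discriminated by the new family member, instead of the old parallel nexthops/nexthops6 lists.

	union g_addr gw = {};
	struct isis_nexthop *nh;

	inet_pton(AF_INET, "192.0.2.1", &gw.ipv4);
	nh = isis_nexthop_create(AF_INET, &gw, circuit->interface->ifindex);
	listnode_add(rinfo->nexthops, nh);

	inet_pton(AF_INET6, "2001:db8::1", &gw.ipv6);
	nh = isis_nexthop_create(AF_INET6, &gw, circuit->interface->ifindex);
	listnode_add(rinfo->nexthops, nh);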
diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c
index 488dfedae4..ee253c7a31 100644
--- a/isisd/isis_tlvs.c
+++ b/isisd/isis_tlvs.c
@@ -22,7 +22,9 @@
*/
#include <zebra.h>
+#ifdef CRYPTO_INTERNAL
#include "md5.h"
+#endif
#include "memory.h"
#include "stream.h"
#include "sbuf.h"
@@ -2770,8 +2772,16 @@ static void update_auth_hmac_md5(struct isis_auth *auth, struct stream *s,
safe_auth_md5(s, &checksum, &rem_lifetime);
memset(STREAM_DATA(s) + auth->offset, 0, 16);
+#ifdef CRYPTO_OPENSSL
+ uint8_t *result = (uint8_t *)HMAC(EVP_md5(), auth->passwd,
+ auth->plength, STREAM_DATA(s),
+ stream_get_endp(s), NULL, NULL);
+
+ memcpy(digest, result, 16);
+#elif CRYPTO_INTERNAL
hmac_md5(STREAM_DATA(s), stream_get_endp(s), auth->passwd,
auth->plength, digest);
+#endif
memcpy(auth->value, digest, 16);
memcpy(STREAM_DATA(s) + auth->offset, digest, 16);
@@ -3310,8 +3320,16 @@ static bool auth_validator_hmac_md5(struct isis_passwd *passwd,
safe_auth_md5(stream, &checksum, &rem_lifetime);
memset(STREAM_DATA(stream) + auth->offset, 0, 16);
+#ifdef CRYPTO_OPENSSL
+ uint8_t *result = (uint8_t *)HMAC(EVP_md5(), passwd->passwd,
+ passwd->len, STREAM_DATA(stream),
+ stream_get_endp(stream), NULL, NULL);
+
+ memcpy(digest, result, 16);
+#elif CRYPTO_INTERNAL
hmac_md5(STREAM_DATA(stream), stream_get_endp(stream), passwd->passwd,
passwd->len, digest);
+#endif
memcpy(STREAM_DATA(stream) + auth->offset, auth->value, 16);
bool rv = !memcmp(digest, auth->value, 16);
diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c
index e2ef934696..e8481a558b 100644
--- a/isisd/isis_zebra.c
+++ b/isisd/isis_zebra.c
@@ -27,6 +27,7 @@
#include "command.h"
#include "memory.h"
#include "log.h"
+#include "lib_errors.h"
#include "if.h"
#include "network.h"
#include "prefix.h"
@@ -225,7 +226,6 @@ static void isis_zebra_route_add_route(struct prefix *prefix,
struct zapi_route api;
struct zapi_nexthop *api_nh;
struct isis_nexthop *nexthop;
- struct isis_nexthop6 *nexthop6;
struct listnode *node;
int count = 0;
@@ -250,47 +250,41 @@ static void isis_zebra_route_add_route(struct prefix *prefix,
#endif
/* Nexthops */
- switch (prefix->family) {
- case AF_INET:
- for (ALL_LIST_ELEMENTS_RO(route_info->nexthops, node,
- nexthop)) {
- if (count >= MULTIPATH_NUM)
- break;
- api_nh = &api.nexthops[count];
- if (fabricd)
- api_nh->onlink = true;
- api_nh->vrf_id = VRF_DEFAULT;
+ for (ALL_LIST_ELEMENTS_RO(route_info->nexthops, node, nexthop)) {
+ if (count >= MULTIPATH_NUM)
+ break;
+ api_nh = &api.nexthops[count];
+ if (fabricd)
+ api_nh->onlink = true;
+ api_nh->vrf_id = VRF_DEFAULT;
+
+ switch (nexthop->family) {
+ case AF_INET:
/* FIXME: can it be ? */
- if (nexthop->ip.s_addr != INADDR_ANY) {
+ if (nexthop->ip.ipv4.s_addr != INADDR_ANY) {
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- api_nh->gate.ipv4 = nexthop->ip;
+ api_nh->gate.ipv4 = nexthop->ip.ipv4;
} else {
api_nh->type = NEXTHOP_TYPE_IFINDEX;
}
- api_nh->ifindex = nexthop->ifindex;
- count++;
- }
- break;
- case AF_INET6:
- for (ALL_LIST_ELEMENTS_RO(route_info->nexthops6, node,
- nexthop6)) {
- if (count >= MULTIPATH_NUM)
- break;
- if (!IN6_IS_ADDR_LINKLOCAL(&nexthop6->ip6)
- && !IN6_IS_ADDR_UNSPECIFIED(&nexthop6->ip6)) {
+ break;
+ case AF_INET6:
+ if (!IN6_IS_ADDR_LINKLOCAL(&nexthop->ip.ipv6)
+ && !IN6_IS_ADDR_UNSPECIFIED(&nexthop->ip.ipv6)) {
continue;
}
-
- api_nh = &api.nexthops[count];
- if (fabricd)
- api_nh->onlink = true;
- api_nh->vrf_id = VRF_DEFAULT;
- api_nh->gate.ipv6 = nexthop6->ip6;
- api_nh->ifindex = nexthop6->ifindex;
+ api_nh->gate.ipv6 = nexthop->ip.ipv6;
api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- count++;
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ nexthop->family);
+ exit(1);
}
- break;
+
+ api_nh->ifindex = nexthop->ifindex;
+ count++;
}
if (!count)
return;
diff --git a/isisd/isisd.c b/isisd/isisd.c
index bee3b6deb5..67f557ab50 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -88,7 +88,6 @@ void isis_new(unsigned long process_id)
isis->init_circ_list = list_new();
isis->uptime = time(NULL);
isis->nexthops = list_new();
- isis->nexthops6 = list_new();
dyn_cache_init();
/*
* uncomment the next line for full debugs
diff --git a/isisd/isisd.h b/isisd/isisd.h
index f8486ae0d6..393b1d67c7 100644
--- a/isisd/isisd.h
+++ b/isisd/isisd.h
@@ -69,8 +69,7 @@ struct isis {
uint32_t router_id; /* Router ID from zebra */
struct list *area_list; /* list of IS-IS areas */
struct list *init_circ_list;
- struct list *nexthops; /* IPv4 next hops from this IS */
- struct list *nexthops6; /* IPv6 next hops from this IS */
+ struct list *nexthops; /* IP next hops from this IS */
uint8_t max_area_addrs; /* maximumAreaAdresses */
struct area_addr *man_area_addrs; /* manualAreaAddresses */
uint32_t debugs; /* bitmap for debug */
diff --git a/ldpd/socket.c b/ldpd/socket.c
index b31db2c7bc..8706d03c6f 100644
--- a/ldpd/socket.c
+++ b/ldpd/socket.c
@@ -79,7 +79,7 @@ ldp_create_socket(int af, enum socket_type type)
sock_set_bindany(fd, 1);
break;
}
- frr_elevate_privs(&ldpd_privs) {
+ frr_with_privs(&ldpd_privs) {
if (sock_set_reuse(fd, 1) == -1) {
close(fd);
return (-1);
@@ -254,7 +254,7 @@ int
sock_set_bindany(int fd, int enable)
{
#ifdef HAVE_SO_BINDANY
- frr_elevate_privs(&ldpd_privs) {
+ frr_with_privs(&ldpd_privs) {
if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable,
sizeof(int)) < 0) {
log_warn("%s: error setting SO_BINDANY", __func__);
@@ -269,7 +269,7 @@ sock_set_bindany(int fd, int enable)
}
return (0);
#elif defined(IP_BINDANY)
- frr_elevate_privs(&ldpd_privs) {
+ frr_with_privs(&ldpd_privs) {
if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int))
< 0) {
log_warn("%s: error setting IP_BINDANY", __func__);
@@ -304,7 +304,7 @@ sock_set_md5sig(int fd, int af, union ldpd_addr *addr, const char *password)
#if HAVE_DECL_TCP_MD5SIG
addr2sa(af, addr, 0, &su);
- frr_elevate_privs(&ldpe_privs) {
+ frr_with_privs(&ldpe_privs) {
ret = sockopt_tcp_signature(fd, &su, password);
save_errno = errno;
}
diff --git a/lib/command.c b/lib/command.c
index 9dabc2af7e..de28a726b0 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -151,6 +151,7 @@ const char *node_names[] = {
"bfd peer", /* BFD_PEER_NODE */
"openfabric", // OPENFABRIC_NODE
"vrrp", /* VRRP_NODE */
+ "bmp", /* BMP_NODE */
};
/* clang-format on */
@@ -975,6 +976,7 @@ enum node_type node_parent(enum node_type node)
case BGP_IPV6M_NODE:
case BGP_EVPN_NODE:
case BGP_IPV6L_NODE:
+ case BMP_NODE:
ret = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
@@ -1491,6 +1493,7 @@ void cmd_exit(struct vty *vty)
case BGP_IPV6M_NODE:
case BGP_EVPN_NODE:
case BGP_IPV6L_NODE:
+ case BMP_NODE:
vty->node = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
@@ -2708,15 +2711,66 @@ DEFUN (no_banner_motd,
DEFUN(find,
find_cmd,
- "find COMMAND...",
- "Find CLI command containing text\n"
- "Text to search for\n")
+ "find REGEX",
+ "Find CLI command matching a regular expression\n"
+ "Search pattern (POSIX regex)\n")
{
- char *text = argv_concat(argv, argc, 1);
+ char *pattern = argv[1]->arg;
const struct cmd_node *node;
const struct cmd_element *cli;
vector clis;
+ regex_t exp = {};
+
+ int cr = regcomp(&exp, pattern, REG_NOSUB | REG_EXTENDED);
+
+ if (cr != 0) {
+ switch (cr) {
+ case REG_BADBR:
+ vty_out(vty, "%% Invalid {...} expression\n");
+ break;
+ case REG_BADRPT:
+ vty_out(vty, "%% Bad repetition operator\n");
+ break;
+ case REG_BADPAT:
+ vty_out(vty, "%% Regex syntax error\n");
+ break;
+ case REG_ECOLLATE:
+ vty_out(vty, "%% Invalid collating element\n");
+ break;
+ case REG_ECTYPE:
+ vty_out(vty, "%% Invalid character class name\n");
+ break;
+ case REG_EESCAPE:
+ vty_out(vty,
+ "%% Regex ended with escape character (\\)\n");
+ break;
+ case REG_ESUBREG:
+ vty_out(vty,
+ "%% Invalid number in \\digit construction\n");
+ break;
+ case REG_EBRACK:
+ vty_out(vty, "%% Unbalanced square brackets\n");
+ break;
+ case REG_EPAREN:
+ vty_out(vty, "%% Unbalanced parentheses\n");
+ break;
+ case REG_EBRACE:
+ vty_out(vty, "%% Unbalanced braces\n");
+ break;
+ case REG_ERANGE:
+ vty_out(vty,
+ "%% Invalid endpoint in range expression\n");
+ break;
+ case REG_ESPACE:
+ vty_out(vty, "%% Failed to compile (out of memory)\n");
+ break;
+ }
+
+ goto done;
+ }
+
+
for (unsigned int i = 0; i < vector_active(cmdvec); i++) {
node = vector_slot(cmdvec, i);
if (!node)
@@ -2724,14 +2778,15 @@ DEFUN(find,
clis = node->cmd_vector;
for (unsigned int j = 0; j < vector_active(clis); j++) {
cli = vector_slot(clis, j);
- if (strcasestr(cli->string, text))
+
+ if (regexec(&exp, cli->string, 0, NULL, 0) == 0)
vty_out(vty, " (%s) %s\n",
node_names[node->node], cli->string);
}
}
- XFREE(MTYPE_TMP, text);
-
+done:
+ regfree(&exp);
return CMD_SUCCESS;
}
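Hypothetical usage of the reworked command (prompt and patterns are illustrative, output omitted): the argument is compiled with REG_EXTENDED | REG_NOSUB and matched against every installed command definition, so anchors and alternation work where the old substring search did not. Matches are printed as "(<node name>) <command definition>", per the vty_out() format above.

	frr# find ^(show|clear) .*bgp
	frr# find neighbor .*(password|timers)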
diff --git a/lib/command.h b/lib/command.h
index 8dc35a0fdc..137d3748ae 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -159,6 +159,7 @@ enum node_type {
BFD_PEER_NODE, /* BFD peer configuration mode. */
OPENFABRIC_NODE, /* OpenFabric router configuration node */
VRRP_NODE, /* VRRP node */
+ BMP_NODE, /* BMP config under router bgp */
NODE_TYPE_MAX, /* maximum */
};
diff --git a/lib/compiler.h b/lib/compiler.h
index 6700ca9e8b..e430925e69 100644
--- a/lib/compiler.h
+++ b/lib/compiler.h
@@ -121,6 +121,49 @@ extern "C" {
#define macro_inline static inline __attribute__((unused))
#define macro_pure static inline __attribute__((unused, pure))
+
+/* variadic macros, use like:
+ * #define V_0() ...
+ * #define V_1(x) ...
+ * #define V(...) MACRO_VARIANT(V, ##__VA_ARGS__)(__VA_ARGS__)
+ */
+#define _MACRO_VARIANT(A0,A1,A2,A3,A4,A5,A6,A7,A8,A9,A10, N, ...) N
+
+#define _CONCAT2(a, b) a ## b
+#define _CONCAT(a, b) _CONCAT2(a,b)
+
+#define MACRO_VARIANT(NAME, ...) \
+ _CONCAT(NAME, _MACRO_VARIANT(0, ##__VA_ARGS__, \
+ _10, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0))
+
+#define NAMECTR(name) _CONCAT(name, __COUNTER__)
+
+/* per-arg repeat macros, use like:
+ * #define PERARG(n) ...n...
+ * #define FOO(...) MACRO_REPEAT(PERARG, ##__VA_ARGS__)
+ */
+
+#define _MACRO_REPEAT_0(NAME)
+#define _MACRO_REPEAT_1(NAME, A1) \
+ NAME(A1)
+#define _MACRO_REPEAT_2(NAME, A1, A2) \
+ NAME(A1) NAME(A2)
+#define _MACRO_REPEAT_3(NAME, A1, A2, A3) \
+ NAME(A1) NAME(A2) NAME(A3)
+#define _MACRO_REPEAT_4(NAME, A1, A2, A3, A4) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4)
+#define _MACRO_REPEAT_5(NAME, A1, A2, A3, A4, A5) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5)
+#define _MACRO_REPEAT_6(NAME, A1, A2, A3, A4, A5, A6) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5) NAME(A6)
+#define _MACRO_REPEAT_7(NAME, A1, A2, A3, A4, A5, A6, A7) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5) NAME(A6) NAME(A7)
+#define _MACRO_REPEAT_8(NAME, A1, A2, A3, A4, A5, A6, A7, A8) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5) NAME(A6) NAME(A7) NAME(A8)
+
+#define MACRO_REPEAT(NAME, ...) \
+ MACRO_VARIANT(_MACRO_REPEAT, ##__VA_ARGS__)(NAME, ##__VA_ARGS__)
+
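For illustration only (the SET_FLAGS/_SETFLAG names below are hypothetical, not part of this patch): MACRO_VARIANT() dispatches on the number of arguments, and MACRO_REPEAT() stamps out one copy of a helper macro per argument.

	#define _SETFLAG(f)	flags |= (f);
	#define SET_FLAGS(...)	MACRO_REPEAT(_SETFLAG, ##__VA_ARGS__)

	/* SET_FLAGS(F_A, F_B, F_C) selects _MACRO_REPEAT_3() and expands to:
	 *	flags |= (F_A); flags |= (F_B); flags |= (F_C);
	 * SET_FLAGS() selects _MACRO_REPEAT_0() and expands to nothing.
	 */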
/*
* for warnings on macros, put in the macro content like this:
* #define MACRO BLA CPP_WARN("MACRO has been deprecated")
diff --git a/lib/ferr.c b/lib/ferr.c
index fd5fb50172..ccf63dea17 100644
--- a/lib/ferr.c
+++ b/lib/ferr.c
@@ -33,6 +33,7 @@
#include "command.h"
#include "json.h"
#include "linklist.h"
+#include "frr_pthread.h"
DEFINE_MTYPE_STATIC(LIB, ERRINFO, "error information")
@@ -83,14 +84,12 @@ void log_ref_add(struct log_ref *ref)
{
uint32_t i = 0;
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
while (ref[i].code != END_FERR) {
hash_get(refs, &ref[i], hash_alloc_intern);
i++;
}
}
- pthread_mutex_unlock(&refs_mtx);
}
struct log_ref *log_ref_get(uint32_t code)
@@ -99,11 +98,9 @@ struct log_ref *log_ref_get(uint32_t code)
struct log_ref *ref;
holder.code = code;
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
ref = hash_lookup(refs, &holder);
}
- pthread_mutex_unlock(&refs_mtx);
return ref;
}
@@ -118,11 +115,9 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
if (json)
top = json_object_new_object();
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
errlist = code ? list_new() : hash_to_list(refs);
}
- pthread_mutex_unlock(&refs_mtx);
if (code) {
ref = log_ref_get(code);
@@ -189,23 +184,19 @@ DEFUN_NOSH(show_error_code,
void log_ref_init(void)
{
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
refs = hash_create(ferr_hash_key, ferr_hash_cmp,
"Error Reference Texts");
}
- pthread_mutex_unlock(&refs_mtx);
}
void log_ref_fini(void)
{
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
hash_clean(refs, NULL);
hash_free(refs);
refs = NULL;
}
- pthread_mutex_unlock(&refs_mtx);
}
void log_ref_vty_init(void)
diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c
index bdb6c2a397..21dfc9256f 100644
--- a/lib/frr_pthread.c
+++ b/lib/frr_pthread.c
@@ -49,21 +49,17 @@ static struct list *frr_pthread_list;
void frr_pthread_init(void)
{
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
frr_pthread_list = list_new();
frr_pthread_list->del = (void (*)(void *))&frr_pthread_destroy;
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
}
void frr_pthread_finish(void)
{
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
list_delete(&frr_pthread_list);
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
}
struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
@@ -94,11 +90,9 @@ struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
pthread_mutex_init(fpt->running_cond_mtx, NULL);
pthread_cond_init(fpt->running_cond, NULL);
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
listnode_add(frr_pthread_list, fpt);
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
return fpt;
}
@@ -162,23 +156,19 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)
void frr_pthread_wait_running(struct frr_pthread *fpt)
{
- pthread_mutex_lock(fpt->running_cond_mtx);
- {
+ frr_with_mutex(fpt->running_cond_mtx) {
while (!fpt->running)
pthread_cond_wait(fpt->running_cond,
fpt->running_cond_mtx);
}
- pthread_mutex_unlock(fpt->running_cond_mtx);
}
void frr_pthread_notify_running(struct frr_pthread *fpt)
{
- pthread_mutex_lock(fpt->running_cond_mtx);
- {
+ frr_with_mutex(fpt->running_cond_mtx) {
fpt->running = true;
pthread_cond_signal(fpt->running_cond);
}
- pthread_mutex_unlock(fpt->running_cond_mtx);
}
int frr_pthread_stop(struct frr_pthread *fpt, void **result)
@@ -190,14 +180,12 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)
void frr_pthread_stop_all(void)
{
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
struct listnode *n;
struct frr_pthread *fpt;
for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt))
frr_pthread_stop(fpt, NULL);
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
}
/*
diff --git a/lib/frr_pthread.h b/lib/frr_pthread.h
index 6096a50370..f70c8a0db4 100644
--- a/lib/frr_pthread.h
+++ b/lib/frr_pthread.h
@@ -215,6 +215,54 @@ void frr_pthread_stop_all(void);
#define pthread_condattr_setclock(A, B)
#endif
+/* mutex auto-lock/unlock */
+
+/* variant 1:
+ * (for short blocks, multiple mutexes supported)
+ * break & return can be used for aborting the block
+ *
+ * frr_with_mutex(&mtx, &mtx2) {
+ * if (error)
+ * break;
+ * ...
+ * }
+ */
+#define _frr_with_mutex(mutex) \
+ *NAMECTR(_mtx_) __attribute__(( \
+ unused, cleanup(_frr_mtx_unlock))) = _frr_mtx_lock(mutex), \
+ /* end */
+
+#define frr_with_mutex(...) \
+ for (pthread_mutex_t MACRO_REPEAT(_frr_with_mutex, ##__VA_ARGS__) \
+ *_once = NULL; _once == NULL; _once = (void *)1) \
+ /* end */
+
+/* variant 2:
+ * (more suitable for long blocks, no extra indentation)
+ *
+ * frr_mutex_lock_autounlock(&mtx);
+ * ...
+ */
+#define frr_mutex_lock_autounlock(mutex) \
+ pthread_mutex_t *NAMECTR(_mtx_) \
+ __attribute__((unused, cleanup(_frr_mtx_unlock))) = \
+ _frr_mtx_lock(mutex) \
+ /* end */
+
+static inline pthread_mutex_t *_frr_mtx_lock(pthread_mutex_t *mutex)
+{
+ pthread_mutex_lock(mutex);
+ return mutex;
+}
+
+static inline void _frr_mtx_unlock(pthread_mutex_t **mutex)
+{
+ if (!*mutex)
+ return;
+ pthread_mutex_unlock(*mutex);
+ *mutex = NULL;
+}
+
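A minimal usage sketch (struct counter and counter_bump() are hypothetical; frr_pthread.h is assumed included): because unlocking happens through the cleanup attribute, an early return inside the block releases the mutex as well, which is how the converted lib/log.c functions later in this patch use it.

	struct counter {
		pthread_mutex_t mtx;
		bool frozen;
		unsigned int value;
	};

	static int counter_bump(struct counter *c)
	{
		frr_with_mutex(&c->mtx) {
			if (c->frozen)
				return -1;	/* mtx is unlocked on this path too */
			c->value++;
		}
		/* mtx is already unlocked here */
		return 0;
	}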
#ifdef __cplusplus
}
#endif
diff --git a/lib/frrcu.c b/lib/frrcu.c
index 7e6475b648..54626f909d 100644
--- a/lib/frrcu.c
+++ b/lib/frrcu.c
@@ -55,7 +55,6 @@
#include "atomlist.h"
DEFINE_MTYPE_STATIC(LIB, RCU_THREAD, "RCU thread")
-DEFINE_MTYPE_STATIC(LIB, RCU_NEXT, "RCU sequence barrier")
DECLARE_ATOMLIST(rcu_heads, struct rcu_head, head)
@@ -226,7 +225,7 @@ static void rcu_bump(void)
{
struct rcu_next *rn;
- rn = XMALLOC(MTYPE_RCU_NEXT, sizeof(*rn));
+ rn = XMALLOC(MTYPE_RCU_THREAD, sizeof(*rn));
/* note: each RCUA_NEXT item corresponds to exactly one seqno bump.
* This means we don't need to communicate which seqno is which
@@ -269,7 +268,7 @@ static void rcu_bump(void)
* "last item is being deleted - start over" case, and then we may end
* up accessing old RCU queue items that are already free'd.
*/
- rcu_free_internal(MTYPE_RCU_NEXT, rn, head_free);
+ rcu_free_internal(MTYPE_RCU_THREAD, rn, head_free);
/* Only allow the RCU sweeper to run after these 2 items are queued.
*
diff --git a/lib/hash.c b/lib/hash.c
index 9d9d39702e..7f8a237047 100644
--- a/lib/hash.c
+++ b/lib/hash.c
@@ -28,6 +28,7 @@
#include "vty.h"
#include "command.h"
#include "libfrr.h"
+#include "frr_pthread.h"
DEFINE_MTYPE_STATIC(LIB, HASH, "Hash")
DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket")
@@ -54,14 +55,12 @@ struct hash *hash_create_size(unsigned int size,
hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
hash->stats.empty = hash->size;
- pthread_mutex_lock(&_hashes_mtx);
- {
+ frr_with_mutex(&_hashes_mtx) {
if (!_hashes)
_hashes = list_new();
listnode_add(_hashes, hash);
}
- pthread_mutex_unlock(&_hashes_mtx);
return hash;
}
@@ -311,8 +310,7 @@ struct list *hash_to_list(struct hash *hash)
void hash_free(struct hash *hash)
{
- pthread_mutex_lock(&_hashes_mtx);
- {
+ frr_with_mutex(&_hashes_mtx) {
if (_hashes) {
listnode_delete(_hashes, hash);
if (_hashes->count == 0) {
@@ -320,7 +318,6 @@ void hash_free(struct hash *hash)
}
}
}
- pthread_mutex_unlock(&_hashes_mtx);
XFREE(MTYPE_HASH, hash->name);
diff --git a/lib/log.c b/lib/log.c
index 51a0ddd6b7..f1c0fabfba 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -31,6 +31,7 @@
#include "lib_errors.h"
#include "lib/hook.h"
#include "printfrr.h"
+#include "frr_pthread.h"
#ifndef SUNOS_5
#include <sys/un.h>
@@ -83,89 +84,70 @@ static int zlog_filter_lookup(const char *lookup)
void zlog_filter_clear(void)
{
- pthread_mutex_lock(&loglock);
- zlog_filter_count = 0;
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zlog_filter_count = 0;
+ }
}
int zlog_filter_add(const char *filter)
{
- pthread_mutex_lock(&loglock);
+ frr_with_mutex(&loglock) {
+ if (zlog_filter_count >= ZLOG_FILTERS_MAX)
+ return 1;
- int ret = 0;
+ if (zlog_filter_lookup(filter) != -1)
+ /* Filter already present */
+ return -1;
- if (zlog_filter_count >= ZLOG_FILTERS_MAX) {
- ret = 1;
- goto done;
- }
+ strlcpy(zlog_filters[zlog_filter_count], filter,
+ sizeof(zlog_filters[0]));
- if (zlog_filter_lookup(filter) != -1) {
- /* Filter already present */
- ret = -1;
- goto done;
- }
+ if (zlog_filters[zlog_filter_count][0] == '\0')
+ /* Filter was either empty or didn't get copied
+ * correctly
+ */
+ return -1;
- strlcpy(zlog_filters[zlog_filter_count], filter,
- sizeof(zlog_filters[0]));
-
- if (zlog_filters[zlog_filter_count][0] == '\0') {
- /* Filter was either empty or didn't get copied correctly */
- ret = -1;
- goto done;
+ zlog_filter_count++;
}
-
- zlog_filter_count++;
-
-done:
- pthread_mutex_unlock(&loglock);
- return ret;
+ return 0;
}
int zlog_filter_del(const char *filter)
{
- pthread_mutex_lock(&loglock);
-
- int found_idx = zlog_filter_lookup(filter);
- int last_idx = zlog_filter_count - 1;
- int ret = 0;
+ frr_with_mutex(&loglock) {
+ int found_idx = zlog_filter_lookup(filter);
+ int last_idx = zlog_filter_count - 1;
- if (found_idx == -1) {
- /* Didn't find the filter to delete */
- ret = -1;
- goto done;
- }
-
- /* Adjust the filter array */
- memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
- (last_idx - found_idx) * sizeof(zlog_filters[0]));
+ if (found_idx == -1)
+ /* Didn't find the filter to delete */
+ return -1;
- zlog_filter_count--;
+ /* Adjust the filter array */
+ memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
+ (last_idx - found_idx) * sizeof(zlog_filters[0]));
-done:
- pthread_mutex_unlock(&loglock);
- return ret;
+ zlog_filter_count--;
+ }
+ return 0;
}
/* Dump all filters to buffer, delimited by new line */
int zlog_filter_dump(char *buf, size_t max_size)
{
- pthread_mutex_lock(&loglock);
-
- int ret = 0;
int len = 0;
- for (int i = 0; i < zlog_filter_count; i++) {
- ret = snprintf(buf + len, max_size - len, " %s\n",
- zlog_filters[i]);
- len += ret;
- if ((ret < 0) || ((size_t)len >= max_size)) {
- len = -1;
- goto done;
+ frr_with_mutex(&loglock) {
+ for (int i = 0; i < zlog_filter_count; i++) {
+ int ret;
+ ret = snprintf(buf + len, max_size - len, " %s\n",
+ zlog_filters[i]);
+ len += ret;
+ if ((ret < 0) || ((size_t)len >= max_size))
+ return -1;
}
}
-done:
- pthread_mutex_unlock(&loglock);
return len;
}
@@ -363,7 +345,7 @@ search:
/* va_list version of zlog. */
void vzlog(int priority, const char *format, va_list args)
{
- pthread_mutex_lock(&loglock);
+ frr_mutex_lock_autounlock(&loglock);
char proto_str[32] = "";
int original_errno = errno;
@@ -430,36 +412,31 @@ out:
if (msg != buf)
XFREE(MTYPE_TMP, msg);
errno = original_errno;
- pthread_mutex_unlock(&loglock);
}
int vzlog_test(int priority)
{
- pthread_mutex_lock(&loglock);
-
- int ret = 0;
+ frr_mutex_lock_autounlock(&loglock);
struct zlog *zl = zlog_default;
/* When zlog_default is also NULL, use stderr for logging. */
if (zl == NULL)
- ret = 1;
+ return 1;
/* Syslog output */
else if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG])
- ret = 1;
+ return 1;
/* File output. */
else if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp)
- ret = 1;
+ return 1;
/* stdout output. */
else if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT])
- ret = 1;
+ return 1;
/* Terminal monitor. */
else if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR])
- ret = 1;
-
- pthread_mutex_unlock(&loglock);
+ return 1;
- return ret;
+ return 0;
}
/*
@@ -870,9 +847,9 @@ void openzlog(const char *progname, const char *protoname,
openlog(progname, syslog_flags, zl->facility);
- pthread_mutex_lock(&loglock);
- zlog_default = zl;
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zlog_default = zl;
+ }
#ifdef HAVE_GLIBC_BACKTRACE
/* work around backtrace() using lazily resolved dynamically linked
@@ -889,7 +866,8 @@ void openzlog(const char *progname, const char *protoname,
void closezlog(void)
{
- pthread_mutex_lock(&loglock);
+ frr_mutex_lock_autounlock(&loglock);
+
struct zlog *zl = zlog_default;
closelog();
@@ -901,15 +879,14 @@ void closezlog(void)
XFREE(MTYPE_ZLOG, zl);
zlog_default = NULL;
- pthread_mutex_unlock(&loglock);
}
/* Called from command.c. */
void zlog_set_level(zlog_dest_t dest, int log_level)
{
- pthread_mutex_lock(&loglock);
- zlog_default->maxlvl[dest] = log_level;
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zlog_default->maxlvl[dest] = log_level;
+ }
}
int zlog_set_file(const char *filename, int log_level)
@@ -929,15 +906,15 @@ int zlog_set_file(const char *filename, int log_level)
if (fp == NULL) {
ret = 0;
} else {
- pthread_mutex_lock(&loglock);
- zl = zlog_default;
-
- /* Set flags. */
- zl->filename = XSTRDUP(MTYPE_ZLOG, filename);
- zl->maxlvl[ZLOG_DEST_FILE] = log_level;
- zl->fp = fp;
- logfile_fd = fileno(fp);
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zl = zlog_default;
+
+ /* Set flags. */
+ zl->filename = XSTRDUP(MTYPE_ZLOG, filename);
+ zl->maxlvl[ZLOG_DEST_FILE] = log_level;
+ zl->fp = fp;
+ logfile_fd = fileno(fp);
+ }
}
return ret;
@@ -946,7 +923,7 @@ int zlog_set_file(const char *filename, int log_level)
/* Reset opend file. */
int zlog_reset_file(void)
{
- pthread_mutex_lock(&loglock);
+ frr_mutex_lock_autounlock(&loglock);
struct zlog *zl = zlog_default;
@@ -959,8 +936,6 @@ int zlog_reset_file(void)
XFREE(MTYPE_ZLOG, zl->filename);
zl->filename = NULL;
- pthread_mutex_unlock(&loglock);
-
return 1;
}
diff --git a/lib/monotime.h b/lib/monotime.h
index ca27c45dc6..e246f177de 100644
--- a/lib/monotime.h
+++ b/lib/monotime.h
@@ -84,6 +84,20 @@ static inline int64_t monotime_until(const struct timeval *ref,
return (int64_t)tv.tv_sec * 1000000LL + tv.tv_usec;
}
+static inline time_t monotime_to_realtime(const struct timeval *mono,
+ struct timeval *realout)
+{
+ struct timeval delta, real;
+
+ monotime_since(mono, &delta);
+ gettimeofday(&real, NULL);
+
+ timersub(&real, &delta, &real);
+ if (realout)
+ *realout = real;
+ return real.tv_sec;
+}
+
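A small usage sketch (variable names are hypothetical): a timestamp recorded earlier with monotime() can be converted back to wall-clock time for display.

	struct timeval event_mono, event_real;	/* event_mono filled via monotime() earlier */
	char buf[32];
	time_t when;

	when = monotime_to_realtime(&event_mono, &event_real);
	strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&when));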
/* Char buffer size for time-to-string api */
#define MONOTIME_STRLEN 32
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 0984c1a168..cf5bed3d62 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -65,9 +65,8 @@ static int _nexthop_labels_cmp(const struct nexthop *nh1,
return memcmp(nhl1->label, nhl2->label, nhl1->num_labels);
}
-static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
- const union g_addr *addr1,
- const union g_addr *addr2)
+int nexthop_g_addr_cmp(enum nexthop_types_t type, const union g_addr *addr1,
+ const union g_addr *addr2)
{
int ret = 0;
@@ -92,13 +91,13 @@ static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
static int _nexthop_gateway_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
{
- return _nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
+ return nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
}
static int _nexthop_source_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
{
- return _nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
+ return nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
}
static int _nexthop_cmp_no_labels(const struct nexthop *next1,
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 20401cd581..9dd5fc6fd3 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -142,6 +142,9 @@ extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
extern bool nexthop_same_no_labels(const struct nexthop *nh1,
const struct nexthop *nh2);
extern int nexthop_cmp(const struct nexthop *nh1, const struct nexthop *nh2);
+extern int nexthop_g_addr_cmp(enum nexthop_types_t type,
+ const union g_addr *addr1,
+ const union g_addr *addr2);
extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type);
extern bool nexthop_labels_match(const struct nexthop *nh1,
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index 5602018b30..abe2096cec 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -60,6 +60,16 @@ nexthop_group_cmd_compare(const struct nexthop_group_cmd *nhgc1,
return strcmp(nhgc1->name, nhgc2->name);
}
+static struct nexthop *nexthop_group_tail(const struct nexthop_group *nhg)
+{
+ struct nexthop *nexthop = nhg->nexthop;
+
+ while (nexthop && nexthop->next)
+ nexthop = nexthop->next;
+
+ return nexthop;
+}
+
uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg)
{
struct nexthop *nhop;
@@ -129,7 +139,20 @@ void _nexthop_add(struct nexthop **target, struct nexthop *nexthop)
void _nexthop_group_add_sorted(struct nexthop_group *nhg,
struct nexthop *nexthop)
{
- struct nexthop *position, *prev;
+ struct nexthop *position, *prev, *tail;
+
+ /* Try to just append to the end first
+ * This trusts that the list is already sorted
+ */
+
+ tail = nexthop_group_tail(nhg);
+
+ if (tail && (nexthop_cmp(tail, nexthop) < 0)) {
+ tail->next = nexthop;
+ nexthop->prev = tail;
+
+ return;
+ }
for (position = nhg->nexthop, prev = NULL; position;
prev = position, position = position->next) {
diff --git a/lib/northbound.c b/lib/northbound.c
index 48b450e969..a814f23e14 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -26,6 +26,7 @@
#include "command.h"
#include "debug.h"
#include "db.h"
+#include "frr_pthread.h"
#include "northbound.h"
#include "northbound_cli.h"
#include "northbound_db.h"
@@ -723,8 +724,7 @@ int nb_running_lock(enum nb_client client, const void *user)
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (!running_config_mgmt_lock.locked) {
running_config_mgmt_lock.locked = true;
running_config_mgmt_lock.owner_client = client;
@@ -732,7 +732,6 @@ int nb_running_lock(enum nb_client client, const void *user)
ret = 0;
}
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
@@ -741,8 +740,7 @@ int nb_running_unlock(enum nb_client client, const void *user)
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (running_config_mgmt_lock.locked
&& running_config_mgmt_lock.owner_client == client
&& running_config_mgmt_lock.owner_user == user) {
@@ -752,7 +750,6 @@ int nb_running_unlock(enum nb_client client, const void *user)
ret = 0;
}
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
@@ -761,14 +758,12 @@ int nb_running_lock_check(enum nb_client client, const void *user)
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (!running_config_mgmt_lock.locked
|| (running_config_mgmt_lock.owner_client == client
&& running_config_mgmt_lock.owner_user == user))
ret = 0;
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
diff --git a/lib/northbound_sysrepo.c b/lib/northbound_sysrepo.c
index 44a55137f8..77183282ba 100644
--- a/lib/northbound_sysrepo.c
+++ b/lib/northbound_sysrepo.c
@@ -425,7 +425,7 @@ static int frr_sr_state_cb(const char *xpath, sr_val_t **values,
exit:
list_delete(&elements);
*values = NULL;
- values_cnt = 0;
+ *values_cnt = 0;
return SR_ERR_OK;
}
diff --git a/lib/prefix.c b/lib/prefix.c
index ad8dea273e..35b679ab90 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -441,7 +441,7 @@ void prefix_hexdump(const struct prefix *p)
zlog_hexdump(p, sizeof(struct prefix));
}
-int is_zero_mac(struct ethaddr *mac)
+int is_zero_mac(const struct ethaddr *mac)
{
int i = 0;
diff --git a/lib/prefix.h b/lib/prefix.h
index e338140f1a..24c146e022 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -470,7 +470,7 @@ extern void masklen2ip6(const int, struct in6_addr *);
extern const char *inet6_ntoa(struct in6_addr);
-extern int is_zero_mac(struct ethaddr *mac);
+extern int is_zero_mac(const struct ethaddr *mac);
extern int prefix_str2mac(const char *str, struct ethaddr *mac);
extern char *prefix_mac2str(const struct ethaddr *mac, char *buf, int size);
diff --git a/lib/privs.c b/lib/privs.c
index a3314c6c3c..09efedf684 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -24,6 +24,7 @@
#include "log.h"
#include "privs.h"
#include "memory.h"
+#include "frr_pthread.h"
#include "lib_errors.h"
#include "lib/queue.h"
@@ -760,8 +761,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
* Serialize 'raise' operations; particularly important for
* OSes where privs are process-wide.
*/
- pthread_mutex_lock(&(privs->mutex));
- {
+ frr_with_mutex(&(privs->mutex)) {
/* Locate ref-counting object to use */
refs = get_privs_refs(privs);
@@ -775,7 +775,6 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
refs->raised_in_funcname = funcname;
}
}
- pthread_mutex_unlock(&(privs->mutex));
return privs;
}
@@ -791,8 +790,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
/* Serialize 'lower privs' operation - particularly important
* when OS privs are process-wide.
*/
- pthread_mutex_lock(&(*privs)->mutex);
- {
+ frr_with_mutex(&(*privs)->mutex) {
refs = get_privs_refs(*privs);
if (--(refs->refcount) == 0) {
@@ -806,7 +804,6 @@ void _zprivs_lower(struct zebra_privs_t **privs)
refs->raised_in_funcname = NULL;
}
}
- pthread_mutex_unlock(&(*privs)->mutex);
*privs = NULL;
}
diff --git a/lib/privs.h b/lib/privs.h
index 2b0b44b3f2..db5707d675 100644
--- a/lib/privs.h
+++ b/lib/privs.h
@@ -109,16 +109,16 @@ extern void zprivs_get_ids(struct zprivs_ids_t *);
/*
* Wrapper around zprivs, to be used as:
- * frr_elevate_privs(&privs) {
+ * frr_with_privs(&privs) {
* ... code ...
* if (error)
* break; -- break can be used to get out of the block
* ... code ...
* }
*
- * The argument to frr_elevate_privs() can be NULL to leave privileges as-is
+ * The argument to frr_with_privs() can be NULL to leave privileges as-is
* (mostly useful for conditional privilege-raising, i.e.:)
- * frr_elevate_privs(cond ? &privs : NULL) {}
+ * frr_with_privs(cond ? &privs : NULL) {}
*
* NB: The code block is always executed, regardless of whether privileges
* could be raised or not, or whether NULL was given or not. This is fully
@@ -138,7 +138,7 @@ extern struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
const char *funcname);
extern void _zprivs_lower(struct zebra_privs_t **privs);
-#define frr_elevate_privs(privs) \
+#define frr_with_privs(privs) \
for (struct zebra_privs_t *_once = NULL, \
*_privs __attribute__( \
(unused, cleanup(_zprivs_lower))) = \
diff --git a/lib/pullwr.c b/lib/pullwr.c
new file mode 100644
index 0000000000..0c326f29d4
--- /dev/null
+++ b/lib/pullwr.c
@@ -0,0 +1,275 @@
+/*
+ * Pull-driven write event handler
+ * Copyright (C) 2019 David Lamparter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "zebra.h"
+
+#include "pullwr.h"
+#include "memory.h"
+#include "monotime.h"
+
+/* defaults */
+#define PULLWR_THRESH 16384 /* size at which we start to call write() */
+#define PULLWR_MAXSPIN 2500 /* max µs to spend grabbing more data */
+
+struct pullwr {
+ int fd;
+ struct thread_master *tm;
+ /* writer == NULL <=> we're idle */
+ struct thread *writer;
+
+ void *arg;
+ void (*fill)(void *, struct pullwr *);
+ void (*err)(void *, struct pullwr *, bool);
+
+ /* ring buffer (although it's "un-ringed" on resizing, it WILL wrap
+ * around if data is trickling in while keeping it at a constant size)
+ */
+ size_t bufsz, valid, pos;
+ uint64_t total_written;
+ char *buffer;
+
+ size_t thresh; /* PULLWR_THRESH */
+ int64_t maxspin; /* PULLWR_MAXSPIN */
+};
+
+DEFINE_MTYPE_STATIC(LIB, PULLWR_HEAD, "pull-driven write controller")
+DEFINE_MTYPE_STATIC(LIB, PULLWR_BUF, "pull-driven write buffer")
+
+static int pullwr_run(struct thread *t);
+
+struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
+ void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool))
+{
+ struct pullwr *pullwr;
+
+ pullwr = XCALLOC(MTYPE_PULLWR_HEAD, sizeof(*pullwr));
+ pullwr->fd = fd;
+ pullwr->tm = tm;
+ pullwr->arg = arg;
+ pullwr->fill = fill;
+ pullwr->err = err;
+
+ pullwr->thresh = PULLWR_THRESH;
+ pullwr->maxspin = PULLWR_MAXSPIN;
+
+ return pullwr;
+}
+
+void pullwr_del(struct pullwr *pullwr)
+{
+ THREAD_OFF(pullwr->writer);
+
+ XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
+ XFREE(MTYPE_PULLWR_HEAD, pullwr);
+}
+
+void pullwr_cfg(struct pullwr *pullwr, int64_t max_spin_usec,
+ size_t write_threshold)
+{
+ pullwr->maxspin = max_spin_usec ?: PULLWR_MAXSPIN;
+ pullwr->thresh = write_threshold ?: PULLWR_THRESH;
+}
+
+void pullwr_bump(struct pullwr *pullwr)
+{
+ if (pullwr->writer)
+ return;
+
+ thread_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
+}
+
+static size_t pullwr_iov(struct pullwr *pullwr, struct iovec *iov)
+{
+ size_t len1;
+
+ if (pullwr->valid == 0)
+ return 0;
+
+ if (pullwr->pos + pullwr->valid <= pullwr->bufsz) {
+ iov[0].iov_base = pullwr->buffer + pullwr->pos;
+ iov[0].iov_len = pullwr->valid;
+ return 1;
+ }
+
+ len1 = pullwr->bufsz - pullwr->pos;
+
+ iov[0].iov_base = pullwr->buffer + pullwr->pos;
+ iov[0].iov_len = len1;
+ iov[1].iov_base = pullwr->buffer;
+ iov[1].iov_len = pullwr->valid - len1;
+ return 2;
+}
+
+static void pullwr_resize(struct pullwr *pullwr, size_t need)
+{
+ struct iovec iov[2];
+ size_t niov, newsize;
+ char *newbuf;
+
+ /* the buffer is maintained at pullwr->thresh * 2 since we'll be
+ * trying to fill it as long as it's anywhere below pullwr->thresh.
+ * That means we frequently end up a little short of it and then write
+ * something that goes over the threshold. So, just use double.
+ */
+ if (need) {
+ /* resize up */
+ if (pullwr->bufsz - pullwr->valid >= need)
+ return;
+
+ newsize = MAX((pullwr->valid + need) * 2, pullwr->thresh * 2);
+ newbuf = XMALLOC(MTYPE_PULLWR_BUF, newsize);
+ } else if (!pullwr->valid) {
+ /* resize down, buffer empty */
+ newsize = 0;
+ newbuf = NULL;
+ } else {
+ /* resize down */
+ if (pullwr->bufsz - pullwr->valid < pullwr->thresh)
+ return;
+ newsize = MAX(pullwr->valid, pullwr->thresh * 2);
+ newbuf = XMALLOC(MTYPE_PULLWR_BUF, newsize);
+ }
+
+ niov = pullwr_iov(pullwr, iov);
+ if (niov >= 1) {
+ memcpy(newbuf, iov[0].iov_base, iov[0].iov_len);
+ if (niov >= 2)
+ memcpy(newbuf + iov[0].iov_len,
+ iov[1].iov_base, iov[1].iov_len);
+ }
+
+ XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
+ pullwr->buffer = newbuf;
+ pullwr->bufsz = newsize;
+ pullwr->pos = 0;
+}
+
+void pullwr_write(struct pullwr *pullwr, const void *data, size_t len)
+{
+ pullwr_resize(pullwr, len);
+
+ if (pullwr->pos + pullwr->valid > pullwr->bufsz) {
+ size_t pos;
+
+ pos = (pullwr->pos + pullwr->valid) % pullwr->bufsz;
+ memcpy(pullwr->buffer + pos, data, len);
+ } else {
+ size_t max1, len1;
+ max1 = pullwr->bufsz - (pullwr->pos + pullwr->valid);
+ max1 = MIN(max1, len);
+
+ memcpy(pullwr->buffer + pullwr->pos + pullwr->valid,
+ data, max1);
+ len1 = len - max1;
+
+ if (len1)
+ memcpy(pullwr->buffer, (char *)data + max1, len1);
+
+ }
+ pullwr->valid += len;
+
+ pullwr_bump(pullwr);
+}
+
+static int pullwr_run(struct thread *t)
+{
+ struct pullwr *pullwr = THREAD_ARG(t);
+ struct iovec iov[2];
+ size_t niov, lastvalid;
+ ssize_t nwr;
+ struct timeval t0;
+ bool maxspun = false;
+
+ monotime(&t0);
+
+ do {
+ lastvalid = pullwr->valid - 1;
+ while (pullwr->valid < pullwr->thresh
+ && pullwr->valid != lastvalid
+ && !maxspun) {
+ lastvalid = pullwr->valid;
+ pullwr->fill(pullwr->arg, pullwr);
+
+ /* check after doing at least one fill() call so we
+ * don't spin without making progress on slow boxes
+ */
+ if (!maxspun && monotime_since(&t0, NULL)
+ >= pullwr->maxspin)
+ maxspun = true;
+ }
+
+ if (pullwr->valid == 0) {
+ /* we made a fill() call above that didn't feed any
+ * data in, and we have nothing more queued, so we go
+ * into idle, i.e. without calling thread_add_write()
+ */
+ pullwr_resize(pullwr, 0);
+ return 0;
+ }
+
+ niov = pullwr_iov(pullwr, iov);
+ assert(niov);
+
+ nwr = writev(pullwr->fd, iov, niov);
+ if (nwr < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+ pullwr->err(pullwr->arg, pullwr, false);
+ return 0;
+ }
+
+ if (nwr == 0) {
+ pullwr->err(pullwr->arg, pullwr, true);
+ return 0;
+ }
+
+ pullwr->total_written += nwr;
+ pullwr->valid -= nwr;
+ pullwr->pos += nwr;
+ pullwr->pos %= pullwr->bufsz;
+ } while (pullwr->valid == 0 && !maxspun);
+ /* pullwr->valid != 0 implies we did an incomplete write, i.e. socket
+ * is full and we go wait until it's available for writing again.
+ */
+
+ thread_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
+ &pullwr->writer);
+
+ /* if we hit the time limit, just keep the buffer, we'll probably need
+ * it anyway & another run is already coming up.
+ */
+ if (!maxspun)
+ pullwr_resize(pullwr, 0);
+ return 0;
+}
+
+void pullwr_stats(struct pullwr *pullwr, uint64_t *total_written,
+ size_t *pending, size_t *kernel_pending)
+{
+ int tmp;
+
+ *total_written = pullwr->total_written;
+ *pending = pullwr->valid;
+
+ if (ioctl(pullwr->fd, TIOCOUTQ, &tmp) != 0)
+ tmp = 0;
+ *kernel_pending = tmp;
+}
diff --git a/lib/pullwr.h b/lib/pullwr.h
new file mode 100644
index 0000000000..601eac1b79
--- /dev/null
+++ b/lib/pullwr.h
@@ -0,0 +1,110 @@
+/*
+ * Pull-driven write event handler
+ * Copyright (C) 2019 David Lamparter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _WRITEPOLL_H
+#define _WRITEPOLL_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "thread.h"
+#include "stream.h"
+
+struct pullwr;
+
+/* This is a "pull-driven" write event handler. Instead of having some buffer
+ * or being driven by the availability of data, it triggers on the space being
+ * available on the socket for data to be written on and then calls fill() to
+ * get data to be sent.
+ *
+ * pullwr_* maintains an "idle" vs. "active" state, going into idle when a
+ * fill() call completes without feeding more data into it. The overall
+ * semantics are:
+ * - to put data out, call pullwr_write(). This is possible from both inside
+ * fill() callbacks or anywhere else. Doing so puts the pullwr into
+ * active state.
+ * - in active state, the fill() callback will be called and should feed more
+ * data in. It should NOT loop to push out more than one "unit" of data;
+ * the pullwr code handles this by calling fill() until it has enough data.
+ * - if there's nothing more to be sent, fill() returns without doing anything
+ * and pullwr goes into idle state after flushing all buffered data out.
+ * - when new data becomes available, pullwr_bump() should be called to put
+ * the pullwr back into active mode so it will collect data from fill(),
+ * or you can directly call pullwr_write().
+ * - only calling pullwr_write() from within fill() is the cleanest way of
+ * doing things.
+ *
+ * When the err() callback is called, the pullwr should be considered unusable
+ * and released with pullwr_del(). This can be done from inside the callback;
+ * the pullwr code holds no more references on it when calling err().
+ */
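A minimal consumer sketch (struct mysess, mysess_fill() and mysess_err() are hypothetical names; a stream_fifo queue is just one possible data source): one unit of data is handed over per fill() call, and pullwr_bump() re-arms the handler when data is queued from outside fill().

	struct mysess {
		int fd;
		struct pullwr *pw;
		struct stream_fifo *outq;
	};

	static void mysess_fill(struct mysess *sess, struct pullwr *pw)
	{
		struct stream *s = stream_fifo_pop(sess->outq);

		if (!s)
			return;	/* nothing queued: pullwr goes idle after flushing */

		pullwr_write_stream(pw, s);
		stream_free(s);
	}

	static void mysess_err(struct mysess *sess, struct pullwr *pw, bool eof)
	{
		/* peer closed (eof == true) or write error: release everything */
		pullwr_del(pw);
		sess->pw = NULL;
	}

	/* setup, tm being the owning thread_master: */
	sess->pw = pullwr_new(tm, sess->fd, sess, mysess_fill, mysess_err);

	/* when new data lands on sess->outq outside of fill(): */
	pullwr_bump(sess->pw);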
+extern struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
+ void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool eof));
+extern void pullwr_del(struct pullwr *pullwr);
+
+/* type-checking wrapper. makes sure fill() and err() take a first argument
+ * whose type is identical to the type of arg.
+ * => use "void fill(struct mystruct *arg, ...)" - no "void *arg"
+ */
+#define pullwr_new(tm, fd, arg, fill, err) ({ \
+ void (*fill_typechk)(typeof(arg), struct pullwr *) = fill; \
+ void (*err_typechk)(typeof(arg), struct pullwr *, bool) = err; \
+ _pullwr_new(tm, fd, arg, (void *)fill_typechk, (void *)err_typechk); \
+})
+
+/* max_spin_usec is the time after which the pullwr event handler will stop
+ * trying to get more data from fill() and yield control back to the
+ * thread_master. It does reschedule itself to continue later; this is
+ * only to make sure we don't freeze the entire process if we're piping a
+ * lot of data to a local endpoint that reads quickly (i.e. no backpressure)
+ *
+ * default: 2500 (2.5 ms)
+ *
+ * write_threshold is the amount of data buffered from fill() calls at which
+ * the pullwr code starts calling write(). But this is not a "limit".
+ * pullwr will keep poking fill() for more data until
+ * (a) max_spin_usec is reached; fill() will be called again later after
+ * returning to the thread_master to give other events a chance to run
+ * (b) fill() returns without pushing any data onto the pullwr with
+ * pullwr_write(), so fill() will NOT be called again until a call to
+ * pullwr_bump() or pullwr_write() comes in.
+ *
+ * default: 16384 (16 kB)
+ *
+ * passing 0 for either value (or not calling it at all) uses the default.
+ */
+extern void pullwr_cfg(struct pullwr *pullwr, int64_t max_spin_usec,
+ size_t write_threshold);
+
+extern void pullwr_bump(struct pullwr *pullwr);
+extern void pullwr_write(struct pullwr *pullwr,
+ const void *data, size_t len);
+
+static inline void pullwr_write_stream(struct pullwr *pullwr,
+ struct stream *s)
+{
+ pullwr_write(pullwr, s->data, stream_get_endp(s));
+}
+
+extern void pullwr_stats(struct pullwr *pullwr, uint64_t *total_written,
+ size_t *pending, size_t *kernel_pending);
+
+#endif /* _WRITEPOLL_H */
diff --git a/lib/routemap.c b/lib/routemap.c
index eca02e8366..fc15183bf9 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -474,7 +474,7 @@ int generic_match_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg,
route_map_event_t type)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
@@ -493,6 +493,11 @@ int generic_match_add(struct vty *vty, struct route_map_index *index,
frr_protonameinst);
return CMD_WARNING_CONFIG_FAILED;
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here move along
+ */
+ break;
}
return CMD_SUCCESS;
@@ -502,7 +507,7 @@ int generic_match_delete(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg,
route_map_event_t type)
{
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
@@ -537,6 +542,11 @@ int generic_match_delete(struct vty *vty, struct route_map_index *index,
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
@@ -548,7 +558,7 @@ int generic_match_delete(struct vty *vty, struct route_map_index *index,
int generic_set_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_set(index, command, arg);
switch (ret) {
@@ -563,6 +573,7 @@ int generic_set_add(struct vty *vty, struct route_map_index *index,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
@@ -572,7 +583,7 @@ int generic_set_add(struct vty *vty, struct route_map_index *index,
int generic_set_delete(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_set(index, command, arg);
switch (ret) {
@@ -587,6 +598,7 @@ int generic_set_delete(struct vty *vty, struct route_map_index *index,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
@@ -1388,8 +1400,10 @@ static route_map_event_t get_route_map_delete_event(route_map_event_t type)
}
/* Add match statement to route map. */
-int route_map_add_match(struct route_map_index *index, const char *match_name,
- const char *match_arg, route_map_event_t type)
+enum rmap_compile_rets route_map_add_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg,
+ route_map_event_t type)
{
struct route_map_rule *rule;
struct route_map_rule *next;
@@ -1464,15 +1478,16 @@ int route_map_add_match(struct route_map_index *index, const char *match_name,
}
/* Delete specified route match rule. */
-int route_map_delete_match(struct route_map_index *index,
- const char *match_name, const char *match_arg)
+enum rmap_compile_rets route_map_delete_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg)
{
struct route_map_rule *rule;
struct route_map_rule_cmd *cmd;
cmd = route_map_lookup_match(match_name);
if (cmd == NULL)
- return 1;
+ return RMAP_RULE_MISSING;
for (rule = index->match_list.head; rule; rule = rule->next)
if (rule->cmd == cmd && (rulecmp(rule->rule_str, match_arg) == 0
@@ -1485,15 +1500,16 @@ int route_map_delete_match(struct route_map_index *index,
index->map->name,
RMAP_EVENT_CALL_ADDED);
}
- return 0;
+ return RMAP_COMPILE_SUCCESS;
}
/* Can't find matched rule. */
- return 1;
+ return RMAP_RULE_MISSING;
}
/* Add route-map set statement to the route map. */
-int route_map_add_set(struct route_map_index *index, const char *set_name,
- const char *set_arg)
+enum rmap_compile_rets route_map_add_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg)
{
struct route_map_rule *rule;
struct route_map_rule *next;
@@ -1543,15 +1559,16 @@ int route_map_add_set(struct route_map_index *index, const char *set_name,
}
/* Delete route map set rule. */
-int route_map_delete_set(struct route_map_index *index, const char *set_name,
- const char *set_arg)
+enum rmap_compile_rets route_map_delete_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg)
{
struct route_map_rule *rule;
struct route_map_rule_cmd *cmd;
cmd = route_map_lookup_set(set_name);
if (cmd == NULL)
- return 1;
+ return RMAP_RULE_MISSING;
for (rule = index->set_list.head; rule; rule = rule->next)
if ((rule->cmd == cmd) && (rulecmp(rule->rule_str, set_arg) == 0
@@ -1564,10 +1581,10 @@ int route_map_delete_set(struct route_map_index *index, const char *set_name,
index->map->name,
RMAP_EVENT_CALL_ADDED);
}
- return 0;
+ return RMAP_COMPILE_SUCCESS;
}
/* Can't find matched rule. */
- return 1;
+ return RMAP_RULE_MISSING;
}
static enum route_map_cmd_result_t
diff --git a/lib/routemap.h b/lib/routemap.h
index f9ad0f64a9..40525987e9 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -126,7 +126,7 @@ struct route_map_rule_cmd {
};
/* Route map apply error. */
-enum {
+enum rmap_compile_rets {
RMAP_COMPILE_SUCCESS,
/* Route map rule is missing. */
@@ -220,25 +220,28 @@ extern void route_map_init(void);
extern void route_map_finish(void);
/* Add match statement to route map. */
-extern int route_map_add_match(struct route_map_index *index,
- const char *match_name, const char *match_arg,
- route_map_event_t type);
+extern enum rmap_compile_rets route_map_add_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg,
+ route_map_event_t type);
/* Delete specified route match rule. */
-extern int route_map_delete_match(struct route_map_index *index,
- const char *match_name,
- const char *match_arg);
+extern enum rmap_compile_rets
+route_map_delete_match(struct route_map_index *index,
+ const char *match_name, const char *match_arg);
extern const char *route_map_get_match_arg(struct route_map_index *index,
const char *match_name);
/* Add route-map set statement to the route map. */
-extern int route_map_add_set(struct route_map_index *index,
- const char *set_name, const char *set_arg);
+extern enum rmap_compile_rets route_map_add_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg);
/* Delete route map set rule. */
-extern int route_map_delete_set(struct route_map_index *index,
- const char *set_name, const char *set_arg);
+extern enum rmap_compile_rets
+route_map_delete_set(struct route_map_index *index,
+ const char *set_name, const char *set_arg);
/* Install rule command to the match list. */
extern void route_map_install_match(struct route_map_rule_cmd *cmd);
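Typing these returns as enum rmap_compile_rets lets the compiler's switch checking flag callers that ignore an enumerator, which is why the ospf6d handler further down gains a RMAP_DUPLICATE_RULE case. A minimal caller sketch, assuming lib/routemap.h, lib/command.h and lib/vty.h (the helper name and messages are illustrative, not FRR API):

static int rmap_ret_to_cmd(struct vty *vty, enum rmap_compile_rets ret)
{
	switch (ret) {
	case RMAP_RULE_MISSING:
		vty_out(vty, "%% Can't find rule.\n");
		return CMD_WARNING_CONFIG_FAILED;
	case RMAP_COMPILE_ERROR:
		vty_out(vty, "%% Argument is malformed.\n");
		return CMD_WARNING_CONFIG_FAILED;
	case RMAP_COMPILE_SUCCESS:
	case RMAP_DUPLICATE_RULE:
		/* nothing to report; fall out to CMD_SUCCESS */
		break;
	}

	return CMD_SUCCESS;
}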
diff --git a/lib/stream.c b/lib/stream.c
index dfd13ca186..2e1a0193a2 100644
--- a/lib/stream.c
+++ b/lib/stream.c
@@ -28,6 +28,7 @@
#include "network.h"
#include "prefix.h"
#include "log.h"
+#include "frr_pthread.h"
#include "lib_errors.h"
DEFINE_MTYPE_STATIC(LIB, STREAM, "Stream")
@@ -1136,11 +1137,9 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
{
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
stream_fifo_push(fifo, s);
}
- pthread_mutex_unlock(&fifo->mtx);
}
/* Delete first stream from fifo. */
@@ -1170,11 +1169,9 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
{
struct stream *ret;
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
ret = stream_fifo_pop(fifo);
}
- pthread_mutex_unlock(&fifo->mtx);
return ret;
}
@@ -1188,11 +1185,9 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
{
struct stream *ret;
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
ret = stream_fifo_head(fifo);
}
- pthread_mutex_unlock(&fifo->mtx);
return ret;
}
@@ -1212,11 +1207,9 @@ void stream_fifo_clean(struct stream_fifo *fifo)
void stream_fifo_clean_safe(struct stream_fifo *fifo)
{
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
stream_fifo_clean(fifo);
}
- pthread_mutex_unlock(&fifo->mtx);
}
size_t stream_fifo_count_safe(struct stream_fifo *fifo)
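The repeated conversion here, and in lib/thread.c below, swaps hand-written pthread_mutex_lock()/pthread_mutex_unlock() pairs for the block-scoped frr_with_mutex() macro from the newly included frr_pthread.h; as the thread.c hunks rely on, leaving the block via break (or even return) still releases the lock. A minimal usage sketch, assuming frr_pthread.h (the counter is hypothetical):

#include <pthread.h>
#include <stdbool.h>

#include "frr_pthread.h"	/* frr_with_mutex() */

static pthread_mutex_t counter_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned long counter;

unsigned long counter_bump(bool enabled)
{
	unsigned long val = 0;

	frr_with_mutex(&counter_mtx) {
		if (!enabled)
			break;		/* mutex is still released */
		val = ++counter;	/* protected update */
	}

	return val;
}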
diff --git a/lib/subdir.am b/lib/subdir.am
index 2be7537bcc..e0f1352380 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -65,6 +65,7 @@ lib_libfrr_la_SOURCES = \
lib/prefix.c \
lib/privs.c \
lib/ptm_lib.c \
+ lib/pullwr.c \
lib/qobj.c \
lib/ringbuf.c \
lib/routemap.c \
@@ -203,6 +204,7 @@ pkginclude_HEADERS += \
lib/printfrr.h \
lib/privs.h \
lib/ptm_lib.h \
+ lib/pullwr.h \
lib/pw.h \
lib/qobj.h \
lib/queue.h \
diff --git a/lib/thread.c b/lib/thread.c
index 943b849ebf..90c794e88d 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -33,6 +33,7 @@
#include "network.h"
#include "jhash.h"
#include "frratomic.h"
+#include "frr_pthread.h"
#include "lib_errors.h"
DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
@@ -173,8 +174,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
tmp.funcname = "TOTAL";
tmp.types = filter;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
@@ -206,7 +206,6 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out(vty, "\n");
}
}
- pthread_mutex_unlock(&masters_mtx);
vty_out(vty, "\n");
vty_out(vty, "Total thread statistics\n");
@@ -240,11 +239,9 @@ static void cpu_record_clear(uint8_t filter)
struct thread_master *m;
struct listnode *ln;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- pthread_mutex_lock(&m->mtx);
- {
+ frr_with_mutex(&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
hash_iterate(
m->cpu_record,
@@ -252,10 +249,8 @@ static void cpu_record_clear(uint8_t filter)
void *))cpu_record_hash_clear,
args);
}
- pthread_mutex_unlock(&m->mtx);
}
}
- pthread_mutex_unlock(&masters_mtx);
}
static uint8_t parse_filter(const char *filterstr)
@@ -370,13 +365,11 @@ DEFUN (show_thread_poll,
struct listnode *node;
struct thread_master *m;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
show_thread_poll_helper(vty, m);
}
}
- pthread_mutex_unlock(&masters_mtx);
return CMD_SUCCESS;
}
@@ -487,26 +480,22 @@ struct thread_master *thread_master_create(const char *name)
sizeof(struct pollfd) * rv->handler.pfdsize);
/* add to list of threadmasters */
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
if (!masters)
masters = list_new();
listnode_add(masters, rv);
}
- pthread_mutex_unlock(&masters_mtx);
return rv;
}
void thread_master_set_name(struct thread_master *master, const char *name)
{
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
XFREE(MTYPE_THREAD_MASTER, master->name);
master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
}
- pthread_mutex_unlock(&master->mtx);
}
#define THREAD_UNUSED_DEPTH 10
@@ -569,13 +558,11 @@ static void thread_array_free(struct thread_master *m,
*/
void thread_master_free_unused(struct thread_master *m)
{
- pthread_mutex_lock(&m->mtx);
- {
+ frr_with_mutex(&m->mtx) {
struct thread *t;
while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
}
- pthread_mutex_unlock(&m->mtx);
}
/* Stop thread scheduler. */
@@ -583,14 +570,12 @@ void thread_master_free(struct thread_master *m)
{
struct thread *t;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
listnode_delete(masters, m);
if (masters->count == 0) {
list_delete(&masters);
}
}
- pthread_mutex_unlock(&masters_mtx);
thread_array_free(m, m->read);
thread_array_free(m, m->write);
@@ -621,11 +606,9 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
{
int64_t remain;
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
}
- pthread_mutex_unlock(&thread->mtx);
return remain < 0 ? 0 : remain;
}
@@ -642,11 +625,9 @@ unsigned long thread_timer_remain_second(struct thread *thread)
struct timeval thread_timer_remain(struct thread *thread)
{
struct timeval remain;
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
- pthread_mutex_unlock(&thread->mtx);
return remain;
}
@@ -770,14 +751,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
struct thread **thread_array;
assert(fd >= 0 && fd < m->fd_limit);
- pthread_mutex_lock(&m->mtx);
- {
- if (t_ptr
- && *t_ptr) // thread is already scheduled; don't reschedule
- {
- pthread_mutex_unlock(&m->mtx);
- return NULL;
- }
+ frr_with_mutex(&m->mtx) {
+ if (t_ptr && *t_ptr)
+ // thread is already scheduled; don't reschedule
+ break;
/* default to a new pollfd */
nfds_t queuepos = m->handler.pfdcount;
@@ -817,12 +794,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
m->handler.pfdcount++;
if (thread) {
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->u.fd = fd;
thread_array[thread->u.fd] = thread;
}
- pthread_mutex_unlock(&thread->mtx);
if (t_ptr) {
*t_ptr = thread;
@@ -832,7 +807,6 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
AWAKEN(m);
}
- pthread_mutex_unlock(&m->mtx);
return thread;
}
@@ -850,19 +824,14 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
assert(type == THREAD_TIMER);
assert(time_relative);
- pthread_mutex_lock(&m->mtx);
- {
- if (t_ptr
- && *t_ptr) // thread is already scheduled; don't reschedule
- {
- pthread_mutex_unlock(&m->mtx);
+ frr_with_mutex(&m->mtx) {
+ if (t_ptr && *t_ptr)
+ // thread is already scheduled; don't reschedule
return NULL;
- }
thread = thread_get(m, type, func, arg, debugargpass);
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
monotime(&thread->u.sands);
timeradd(&thread->u.sands, time_relative,
&thread->u.sands);
@@ -872,11 +841,9 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
thread->ref = t_ptr;
}
}
- pthread_mutex_unlock(&thread->mtx);
AWAKEN(m);
}
- pthread_mutex_unlock(&m->mtx);
return thread;
}
@@ -933,26 +900,20 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
void *arg, int val,
struct thread **t_ptr, debugargdef)
{
- struct thread *thread;
+ struct thread *thread = NULL;
assert(m != NULL);
- pthread_mutex_lock(&m->mtx);
- {
- if (t_ptr
- && *t_ptr) // thread is already scheduled; don't reschedule
- {
- pthread_mutex_unlock(&m->mtx);
- return NULL;
- }
+ frr_with_mutex(&m->mtx) {
+ if (t_ptr && *t_ptr)
+ // thread is already scheduled; don't reschedule
+ break;
thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->u.val = val;
thread_list_add_tail(&m->event, thread);
}
- pthread_mutex_unlock(&thread->mtx);
if (t_ptr) {
*t_ptr = thread;
@@ -961,7 +922,6 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
AWAKEN(m);
}
- pthread_mutex_unlock(&m->mtx);
return thread;
}
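The thread = NULL initialization above matters now: when *t_ptr is already set, the break path skips thread_get() and the function returns NULL instead of an uninitialized pointer. A sketch of the caller-side contract this preserves, assuming lib/thread.h (do_work/request_work are hypothetical names):

#include "thread.h"	/* struct thread, thread_add_event() */

static struct thread *t_work;

static int do_work(struct thread *t)
{
	/* the library clears *t->ref (t_work) when the event runs or is
	 * cancelled, so a later request_work() can schedule it again */
	return 0;
}

void request_work(struct thread_master *master)
{
	/* a no-op (returns NULL) while t_work is still non-NULL -- the
	 * "already scheduled; don't reschedule" break path above */
	thread_add_event(master, do_work, NULL, 0, &t_work);
}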
@@ -1143,15 +1103,13 @@ void thread_cancel_event(struct thread_master *master, void *arg)
{
assert(master->owner == pthread_self());
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->eventobj = arg;
listnode_add(master->cancel_req, cr);
do_thread_cancel(master);
}
- pthread_mutex_unlock(&master->mtx);
}
/**
@@ -1167,15 +1125,13 @@ void thread_cancel(struct thread *thread)
assert(master->owner == pthread_self());
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = thread;
listnode_add(master->cancel_req, cr);
do_thread_cancel(master);
}
- pthread_mutex_unlock(&master->mtx);
}
/**
@@ -1208,8 +1164,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
assert(!(thread && eventobj) && (thread || eventobj));
assert(master->owner != pthread_self());
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
master->canceled = false;
if (thread) {
@@ -1228,7 +1183,6 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
while (!master->canceled)
pthread_cond_wait(&master->cancel_cond, &master->mtx);
}
- pthread_mutex_unlock(&master->mtx);
}
/* ------------------------------------------------------------------------- */
@@ -1527,22 +1481,18 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
int thread_should_yield(struct thread *thread)
{
int result;
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
}
- pthread_mutex_unlock(&thread->mtx);
return result;
}
void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->yield = yield_time;
}
- pthread_mutex_unlock(&thread->mtx);
}
void thread_getrusage(RUSAGE_T *r)
@@ -1637,20 +1587,16 @@ void funcname_thread_execute(struct thread_master *m,
struct thread *thread;
/* Get or allocate new thread to execute. */
- pthread_mutex_lock(&m->mtx);
- {
+ frr_with_mutex(&m->mtx) {
thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
/* Set its event value. */
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->add_type = THREAD_EXECUTE;
thread->u.val = val;
thread->ref = &thread;
}
- pthread_mutex_unlock(&thread->mtx);
}
- pthread_mutex_unlock(&m->mtx);
/* Execute thread doing all accounting. */
thread_call(thread);
diff --git a/lib/vrf.c b/lib/vrf.c
index 229f19f29a..575e96bae4 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -755,7 +755,7 @@ DEFUN_NOSH (vrf_netns,
if (!pathname)
return CMD_WARNING_CONFIG_FAILED;
- frr_elevate_privs(vrf_daemon_privs) {
+ frr_with_privs(vrf_daemon_privs) {
ret = vrf_netns_handler_create(vty, vrf, pathname,
NS_UNKNOWN, NS_UNKNOWN);
}
diff --git a/lib/vty.c b/lib/vty.c
index c1535802cf..deb9391bd5 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -348,6 +348,15 @@ void vty_hello(struct vty *vty)
vty_out(vty, "MOTD file not found\n");
} else if (host.motd)
vty_out(vty, "%s", host.motd);
+
+#if CONFDATE > 20200901
+ CPP_NOTICE("Please remove solaris code from system as it is deprecated");
+#endif
+#ifdef SUNOS_5
+ zlog_warn("If you are using FRR on Solaris, the FRR developers would love to hear from you\n");
+ zlog_warn("Please send email to dev@lists.frrouting.org about this message\n");
+ zlog_warn("We are considering deprecating Solaris and want to find users of Solaris systems\n");
+#endif
}
/* Put out prompt and wait input from user. */
diff --git a/lib/zclient.c b/lib/zclient.c
index 2d79d9b3c5..f809704f86 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -766,6 +766,92 @@ int zclient_route_send(uint8_t cmd, struct zclient *zclient,
return zclient_send_message(zclient);
}
+static int zapi_nexthop_labels_cmp(const struct zapi_nexthop *next1,
+ const struct zapi_nexthop *next2)
+{
+ if (next1->label_num > next2->label_num)
+ return 1;
+
+ if (next1->label_num < next2->label_num)
+ return -1;
+
+ return memcmp(next1->labels, next2->labels, next1->label_num);
+}
+
+static int zapi_nexthop_cmp_no_labels(const struct zapi_nexthop *next1,
+ const struct zapi_nexthop *next2)
+{
+ int ret = 0;
+
+ if (next1->vrf_id < next2->vrf_id)
+ return -1;
+
+ if (next1->vrf_id > next2->vrf_id)
+ return 1;
+
+ if (next1->type < next2->type)
+ return -1;
+
+ if (next1->type > next2->type)
+ return 1;
+
+ switch (next1->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV6:
+ ret = nexthop_g_addr_cmp(next1->type, &next1->gate,
+ &next2->gate);
+ if (ret != 0)
+ return ret;
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ ret = nexthop_g_addr_cmp(next1->type, &next1->gate,
+ &next2->gate);
+ if (ret != 0)
+ return ret;
+ /* Intentional Fall-Through */
+ case NEXTHOP_TYPE_IFINDEX:
+ if (next1->ifindex < next2->ifindex)
+ return -1;
+
+ if (next1->ifindex > next2->ifindex)
+ return 1;
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ if (next1->bh_type < next2->bh_type)
+ return -1;
+
+ if (next1->bh_type > next2->bh_type)
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+static int zapi_nexthop_cmp(const void *item1, const void *item2)
+{
+ int ret = 0;
+
+ const struct zapi_nexthop *next1 = item1;
+ const struct zapi_nexthop *next2 = item2;
+
+ ret = zapi_nexthop_cmp_no_labels(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = zapi_nexthop_labels_cmp(next1, next2);
+
+ return ret;
+}
+
+static void zapi_nexthop_group_sort(struct zapi_nexthop *nh_grp,
+ uint16_t nexthop_num)
+{
+ qsort(nh_grp, nexthop_num, sizeof(struct zapi_nexthop),
+ &zapi_nexthop_cmp);
+}
+
int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
{
struct zapi_nexthop *api_nh;
@@ -821,6 +907,8 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
return -1;
}
+ zapi_nexthop_group_sort(api->nexthops, api->nexthop_num);
+
stream_putw(s, api->nexthop_num);
for (i = 0; i < api->nexthop_num; i++) {
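The comparator pair above imposes a total order on zapi nexthops (vrf, type, gate/ifindex/blackhole type, then label count and label bytes), and zapi_route_encode() now sorts the group before writing it, so the same set of nexthops is always encoded in the same order regardless of how the daemon assembled it. A standalone sketch of the comparator pattern (struct nh and its fields are hypothetical, not the zapi types):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct nh {
	uint32_t vrf_id;
	uint8_t label_num;
	uint32_t labels[16];
};

static int nh_cmp(const void *a, const void *b)
{
	const struct nh *n1 = a, *n2 = b;

	if (n1->vrf_id != n2->vrf_id)		/* primary key */
		return n1->vrf_id < n2->vrf_id ? -1 : 1;
	if (n1->label_num != n2->label_num)	/* secondary key */
		return n1->label_num < n2->label_num ? -1 : 1;
	/* final tiebreak over the used portion of the label array */
	return memcmp(n1->labels, n2->labels,
		      n1->label_num * sizeof(n1->labels[0]));
}

void nh_sort(struct nh *nhs, size_t num)
{
	qsort(nhs, num, sizeof(nhs[0]), nh_cmp);
}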
diff --git a/lib/zebra.h b/lib/zebra.h
index 789a93a3c4..b17ef700b4 100644
--- a/lib/zebra.h
+++ b/lib/zebra.h
@@ -136,6 +136,7 @@ typedef unsigned char uint8_t;
#ifdef CRYPTO_OPENSSL
#include <openssl/evp.h>
+#include <openssl/hmac.h>
#endif
#include "openbsd-tree.h"
diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c
index 33b9f71b5f..4d1c085081 100644
--- a/ospf6d/ospf6_asbr.c
+++ b/ospf6d/ospf6_asbr.c
@@ -1581,7 +1581,7 @@ static struct route_map_rule_cmd ospf6_routemap_rule_set_tag_cmd = {
route_map_rule_tag_free,
};
-static int route_map_command_status(struct vty *vty, int ret)
+static int route_map_command_status(struct vty *vty, enum rmap_compile_rets ret)
{
switch (ret) {
case RMAP_RULE_MISSING:
@@ -1593,6 +1593,7 @@ static int route_map_command_status(struct vty *vty, int ret)
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
@@ -1610,8 +1611,10 @@ DEFUN (ospf6_routemap_set_metric_type,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_external = 2;
- int ret = route_map_add_set(route_map_index, "metric-type",
- argv[idx_external]->arg);
+ enum rmap_compile_rets ret = route_map_add_set(route_map_index,
+ "metric-type",
+ argv[idx_external]->arg);
+
return route_map_command_status(vty, ret);
}
@@ -1627,7 +1630,9 @@ DEFUN (ospf6_routemap_no_set_metric_type,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
char *ext = (argc == 4) ? argv[3]->text : NULL;
- int ret = route_map_delete_set(route_map_index, "metric-type", ext);
+ enum rmap_compile_rets ret = route_map_delete_set(route_map_index,
+ "metric-type", ext);
+
return route_map_command_status(vty, ret);
}
@@ -1641,8 +1646,10 @@ DEFUN (ospf6_routemap_set_forwarding,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_ipv6 = 2;
- int ret = route_map_add_set(route_map_index, "forwarding-address",
- argv[idx_ipv6]->arg);
+ enum rmap_compile_rets ret = route_map_add_set(route_map_index,
+ "forwarding-address",
+ argv[idx_ipv6]->arg);
+
return route_map_command_status(vty, ret);
}
@@ -1657,8 +1664,10 @@ DEFUN (ospf6_routemap_no_set_forwarding,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_ipv6 = 3;
- int ret = route_map_delete_set(route_map_index, "forwarding-address",
- argv[idx_ipv6]->arg);
+ enum rmap_compile_rets ret = route_map_delete_set(route_map_index,
+ "forwarding-address",
+ argv[idx_ipv6]->arg);
+
return route_map_command_status(vty, ret);
}
diff --git a/ospf6d/ospf6_network.c b/ospf6d/ospf6_network.c
index 625ad884f2..9a18680b8b 100644
--- a/ospf6d/ospf6_network.c
+++ b/ospf6d/ospf6_network.c
@@ -85,7 +85,7 @@ void ospf6_serv_close(void)
/* Make ospf6d's server socket. */
int ospf6_serv_sock(void)
{
- frr_elevate_privs(&ospf6d_privs) {
+ frr_with_privs(&ospf6d_privs) {
ospf6_sock = socket(AF_INET6, SOCK_RAW, IPPROTO_OSPFIGP);
if (ospf6_sock < 0) {
diff --git a/ospfd/ospf_interface.c b/ospfd/ospf_interface.c
index ce1604a5b1..3877708708 100644
--- a/ospfd/ospf_interface.c
+++ b/ospfd/ospf_interface.c
@@ -225,12 +225,14 @@ struct ospf_interface *ospf_if_new(struct ospf *ospf, struct interface *ifp,
{
struct ospf_interface *oi;
- if ((oi = ospf_if_table_lookup(ifp, p)) == NULL) {
- oi = XCALLOC(MTYPE_OSPF_IF, sizeof(struct ospf_interface));
- memset(oi, 0, sizeof(struct ospf_interface));
- } else
+ oi = ospf_if_table_lookup(ifp, p);
+ if (oi)
return oi;
+ oi = XCALLOC(MTYPE_OSPF_IF, sizeof(struct ospf_interface));
+
+ oi->obuf = ospf_fifo_new();
+
/* Set zebra interface pointer. */
oi->ifp = ifp;
oi->address = p;
@@ -264,8 +266,6 @@ struct ospf_interface *ospf_if_new(struct ospf *ospf, struct interface *ifp,
oi->ospf = ospf;
- ospf_if_stream_set(oi);
-
QOBJ_REG(oi, ospf_interface);
if (IS_DEBUG_OSPF_EVENT)
@@ -325,8 +325,7 @@ void ospf_if_free(struct ospf_interface *oi)
{
ospf_if_down(oi);
- if (oi->obuf)
- ospf_fifo_free(oi->obuf);
+ ospf_fifo_free(oi->obuf);
assert(oi->state == ISM_Down);
@@ -490,29 +489,20 @@ static void ospf_if_reset_stats(struct ospf_interface *oi)
oi->ls_ack_in = oi->ls_ack_out = 0;
}
-void ospf_if_stream_set(struct ospf_interface *oi)
-{
- /* set output fifo queue. */
- if (oi->obuf == NULL)
- oi->obuf = ospf_fifo_new();
-}
-
void ospf_if_stream_unset(struct ospf_interface *oi)
{
struct ospf *ospf = oi->ospf;
- if (oi->obuf) {
- /* flush the interface packet queue */
- ospf_fifo_flush(oi->obuf);
- /*reset protocol stats */
- ospf_if_reset_stats(oi);
-
- if (oi->on_write_q) {
- listnode_delete(ospf->oi_write_q, oi);
- if (list_isempty(ospf->oi_write_q))
- OSPF_TIMER_OFF(ospf->t_write);
- oi->on_write_q = 0;
- }
+ /* flush the interface packet queue */
+ ospf_fifo_flush(oi->obuf);
+ /* reset protocol stats */
+ ospf_if_reset_stats(oi);
+
+ if (oi->on_write_q) {
+ listnode_delete(ospf->oi_write_q, oi);
+ if (list_isempty(ospf->oi_write_q))
+ OSPF_TIMER_OFF(ospf->t_write);
+ oi->on_write_q = 0;
}
}
@@ -903,8 +893,6 @@ struct ospf_interface *ospf_vl_new(struct ospf *ospf,
ospf_area_add_if(voi->area, voi);
- ospf_if_stream_set(voi);
-
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("ospf_vl_new(): Stop");
return voi;
diff --git a/ospfd/ospf_interface.h b/ospfd/ospf_interface.h
index b88d405875..0c903954d3 100644
--- a/ospfd/ospf_interface.h
+++ b/ospfd/ospf_interface.h
@@ -285,7 +285,6 @@ extern void ospf_if_update_params(struct interface *, struct in_addr);
extern int ospf_if_new_hook(struct interface *);
extern void ospf_if_init(void);
-extern void ospf_if_stream_set(struct ospf_interface *);
extern void ospf_if_stream_unset(struct ospf_interface *);
extern void ospf_if_reset_variables(struct ospf_interface *);
extern int ospf_if_is_enable(struct ospf_interface *);
diff --git a/ospfd/ospf_ism.h b/ospfd/ospf_ism.h
index 5ae99ab320..8d21403695 100644
--- a/ospfd/ospf_ism.h
+++ b/ospfd/ospf_ism.h
@@ -53,8 +53,9 @@
listnode_add((O)->oi_write_q, oi); \
oi->on_write_q = 1; \
} \
- thread_add_write(master, ospf_write, (O), (O)->fd, \
- &(O)->t_write); \
+ if (!list_isempty((O)->oi_write_q)) \
+ thread_add_write(master, ospf_write, (O), (O)->fd, \
+ &(O)->t_write); \
} while (0)
/* Macro for OSPF ISM timer turn on. */
diff --git a/ospfd/ospf_network.c b/ospfd/ospf_network.c
index 1415a6e8b7..b8e2dac70e 100644
--- a/ospfd/ospf_network.c
+++ b/ospfd/ospf_network.c
@@ -190,7 +190,7 @@ int ospf_sock_init(struct ospf *ospf)
/* silently return since VRF is not ready */
return -1;
}
- frr_elevate_privs(&ospfd_privs) {
+ frr_with_privs(&ospfd_privs) {
ospf_sock = vrf_socket(AF_INET, SOCK_RAW, IPPROTO_OSPFIGP,
ospf->vrf_id, ospf->name);
if (ospf_sock < 0) {
diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c
index 62b0444796..5a29c1fb07 100644
--- a/ospfd/ospf_packet.c
+++ b/ospfd/ospf_packet.c
@@ -132,7 +132,7 @@ static int ospf_auth_type(struct ospf_interface *oi)
return auth_type;
}
-struct ospf_packet *ospf_packet_new(size_t size)
+static struct ospf_packet *ospf_packet_new(size_t size)
{
struct ospf_packet *new;
@@ -231,22 +231,8 @@ void ospf_fifo_free(struct ospf_fifo *fifo)
XFREE(MTYPE_OSPF_FIFO, fifo);
}
-void ospf_packet_add(struct ospf_interface *oi, struct ospf_packet *op)
+static void ospf_packet_add(struct ospf_interface *oi, struct ospf_packet *op)
{
- if (!oi->obuf) {
- flog_err(
- EC_OSPF_PKT_PROCESS,
- "ospf_packet_add(interface %s in state %d [%s], packet type %s, "
- "destination %s) called with NULL obuf, ignoring "
- "(please report this bug)!\n",
- IF_NAME(oi), oi->state,
- lookup_msg(ospf_ism_state_msg, oi->state, NULL),
- lookup_msg(ospf_packet_type_str,
- stream_getc_from(op->s, 1), NULL),
- inet_ntoa(op->dst));
- return;
- }
-
/* Add packet to end of queue. */
ospf_fifo_push(oi->obuf, op);
@@ -257,20 +243,6 @@ void ospf_packet_add(struct ospf_interface *oi, struct ospf_packet *op)
static void ospf_packet_add_top(struct ospf_interface *oi,
struct ospf_packet *op)
{
- if (!oi->obuf) {
- flog_err(
- EC_OSPF_PKT_PROCESS,
- "ospf_packet_add(interface %s in state %d [%s], packet type %s, "
- "destination %s) called with NULL obuf, ignoring "
- "(please report this bug)!\n",
- IF_NAME(oi), oi->state,
- lookup_msg(ospf_ism_state_msg, oi->state, NULL),
- lookup_msg(ospf_packet_type_str,
- stream_getc_from(op->s, 1), NULL),
- inet_ntoa(op->dst));
- return;
- }
-
/* Add packet to head of queue. */
ospf_fifo_push_head(oi->obuf, op);
@@ -278,7 +250,7 @@ static void ospf_packet_add_top(struct ospf_interface *oi,
/* ospf_fifo_debug (oi->obuf); */
}
-void ospf_packet_delete(struct ospf_interface *oi)
+static void ospf_packet_delete(struct ospf_interface *oi)
{
struct ospf_packet *op;
@@ -288,7 +260,7 @@ void ospf_packet_delete(struct ospf_interface *oi)
ospf_packet_free(op);
}
-struct ospf_packet *ospf_packet_dup(struct ospf_packet *op)
+static struct ospf_packet *ospf_packet_dup(struct ospf_packet *op)
{
struct ospf_packet *new;
@@ -698,12 +670,9 @@ static int ospf_write(struct thread *thread)
return -1;
}
- ospf->t_write = NULL;
-
node = listhead(ospf->oi_write_q);
assert(node);
oi = listgetdata(node);
- assert(oi);
#ifdef WANT_OSPF_WRITE_FRAGMENT
/* seed ipid static with low order bits of time */
@@ -906,9 +875,7 @@ static int ospf_write(struct thread *thread)
/* Setup to service from the head of the queue again */
if (!list_isempty(ospf->oi_write_q)) {
node = listhead(ospf->oi_write_q);
- assert(node);
oi = listgetdata(node);
- assert(oi);
}
}
@@ -4062,6 +4029,23 @@ static void ospf_ls_upd_queue_send(struct ospf_interface *oi,
oi->on_write_q = 1;
}
ospf_write(&os_packet_thd);
+ /*
+ * We call ospf_write() directly here with a fake thread
+ * argument. Imagine oi_a is already enqueued and the write
+ * thread (t_write) has been turned on. Now this function
+ * runs for oi_b, so oi_write_q holds both oi_a and oi_b;
+ * the direct ospf_write() call drains the packets for both
+ * and removes them from oi_write_q. When the scheduled
+ * t_write thread later executes, it would find oi_write_q
+ * empty and hit an assert. So make sure t_write is actually
+ * turned off whenever the queue ends up empty.
+ */
+ if (list_isempty(oi->ospf->oi_write_q))
+ OSPF_TIMER_OFF(oi->ospf->t_write);
} else {
/* Hook thread to write packet. */
OSPF_ISM_WRITE_ON(oi->ospf);
diff --git a/ospfd/ospf_packet.h b/ospfd/ospf_packet.h
index a508727968..5a3e029f2e 100644
--- a/ospfd/ospf_packet.h
+++ b/ospfd/ospf_packet.h
@@ -131,8 +131,6 @@ struct ospf_ls_update {
#define IS_SET_DD_ALL(X) ((X) & OSPF_DD_FLAG_ALL)
/* Prototypes. */
-extern void ospf_output_forward(struct stream *, int);
-extern struct ospf_packet *ospf_packet_new(size_t);
extern void ospf_packet_free(struct ospf_packet *);
extern struct ospf_fifo *ospf_fifo_new(void);
extern void ospf_fifo_push(struct ospf_fifo *, struct ospf_packet *);
@@ -140,10 +138,6 @@ extern struct ospf_packet *ospf_fifo_pop(struct ospf_fifo *);
extern struct ospf_packet *ospf_fifo_head(struct ospf_fifo *);
extern void ospf_fifo_flush(struct ospf_fifo *);
extern void ospf_fifo_free(struct ospf_fifo *);
-extern void ospf_packet_add(struct ospf_interface *, struct ospf_packet *);
-extern void ospf_packet_delete(struct ospf_interface *);
-extern struct stream *ospf_stream_dup(struct stream *);
-extern struct ospf_packet *ospf_packet_dup(struct ospf_packet *);
extern int ospf_read(struct thread *);
extern void ospf_hello_send(struct ospf_interface *);
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index b91a55f635..e48a5b4d36 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -2097,7 +2097,7 @@ static int ospf_vrf_enable(struct vrf *vrf)
old_vrf_id);
if (old_vrf_id != ospf->vrf_id) {
- frr_elevate_privs(&ospfd_privs) {
+ frr_with_privs(&ospfd_privs) {
/* stop zebra redist to us for old vrf */
zclient_send_dereg_requests(zclient,
old_vrf_id);
diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c
index 5e67990d5e..1a8461c6c1 100644
--- a/pbrd/pbr_map.c
+++ b/pbrd/pbr_map.c
@@ -316,7 +316,7 @@ struct pbr_map_sequence *pbrms_get(const char *name, uint32_t seqno)
pbrms->ruleno = pbr_nht_get_next_rule(seqno);
pbrms->parent = pbrm;
pbrms->reason =
- PBR_MAP_INVALID_SRCDST |
+ PBR_MAP_INVALID_EMPTY |
PBR_MAP_INVALID_NO_NEXTHOPS;
QOBJ_REG(pbrms, pbr_map_sequence);
@@ -350,10 +350,10 @@ pbr_map_sequence_check_nexthops_valid(struct pbr_map_sequence *pbrms)
}
}
-static void pbr_map_sequence_check_src_dst_valid(struct pbr_map_sequence *pbrms)
+static void pbr_map_sequence_check_not_empty(struct pbr_map_sequence *pbrms)
{
- if (!pbrms->src && !pbrms->dst)
- pbrms->reason |= PBR_MAP_INVALID_SRCDST;
+ if (!pbrms->src && !pbrms->dst && !pbrms->mark)
+ pbrms->reason |= PBR_MAP_INVALID_EMPTY;
}
/*
@@ -364,7 +364,7 @@ static void pbr_map_sequence_check_valid(struct pbr_map_sequence *pbrms)
{
pbr_map_sequence_check_nexthops_valid(pbrms);
- pbr_map_sequence_check_src_dst_valid(pbrms);
+ pbr_map_sequence_check_not_empty(pbrms);
}
static bool pbr_map_check_valid_internal(struct pbr_map *pbrm)
diff --git a/pbrd/pbr_map.h b/pbrd/pbr_map.h
index 945f76bb2b..112acfe44e 100644
--- a/pbrd/pbr_map.h
+++ b/pbrd/pbr_map.h
@@ -87,6 +87,7 @@ struct pbr_map_sequence {
*/
struct prefix *src;
struct prefix *dst;
+ uint32_t mark;
/*
* Family of the src/dst. Needed when deleting since we clear them
@@ -126,7 +127,7 @@ struct pbr_map_sequence {
#define PBR_MAP_INVALID_NEXTHOP (1 << 1)
#define PBR_MAP_INVALID_NO_NEXTHOPS (1 << 2)
#define PBR_MAP_INVALID_BOTH_NHANDGRP (1 << 3)
-#define PBR_MAP_INVALID_SRCDST (1 << 4)
+#define PBR_MAP_INVALID_EMPTY (1 << 4)
uint64_t reason;
QOBJ_FIELDS
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 95f38563b1..5e7addc9d2 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -172,6 +172,33 @@ DEFPY(pbr_map_match_dst, pbr_map_match_dst_cmd,
return CMD_SUCCESS;
}
+DEFPY(pbr_map_match_mark, pbr_map_match_mark_cmd,
+ "[no] match mark (1-4294967295)$mark",
+ NO_STR
+ "Match the rest of the command\n"
+ "Choose the mark value to use\n"
+ "mark\n")
+{
+ struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
+
+#ifndef GNU_LINUX
+ vty_out(vty, "pbr marks are not supported on this platform");
+ return CMD_WARNING_CONFIG_FAILED;
+#endif
+
+ if (!no) {
+ if (pbrms->mark == (uint32_t) mark)
+ return CMD_SUCCESS;
+ pbrms->mark = (uint32_t) mark;
+ } else {
+ pbrms->mark = 0;
+ }
+
+ pbr_map_check(pbrms);
+
+ return CMD_SUCCESS;
+ }
+
DEFPY(pbr_map_nexthop_group, pbr_map_nexthop_group_cmd,
"[no] set nexthop-group NHGNAME$name",
NO_STR
@@ -453,6 +480,8 @@ DEFPY (show_pbr_map,
vty_out(vty, "\tDST Match: %s\n",
prefix2str(pbrms->dst, buf,
sizeof(buf)));
+ if (pbrms->mark)
+ vty_out(vty, "\tMARK Match: %u\n", pbrms->mark);
if (pbrms->nhgrp_name) {
vty_out(vty,
@@ -632,6 +661,9 @@ static int pbr_vty_map_config_write_sequence(struct vty *vty,
vty_out(vty, " match dst-ip %s\n",
prefix2str(pbrms->dst, buff, sizeof(buff)));
+ if (pbrms->mark)
+ vty_out(vty, " match mark %u\n", pbrms->mark);
+
if (pbrms->nhgrp_name)
vty_out(vty, " set nexthop-group %s\n", pbrms->nhgrp_name);
@@ -704,6 +736,7 @@ void pbr_vty_init(void)
install_element(INTERFACE_NODE, &pbr_policy_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_src_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_dst_cmd);
+ install_element(PBRMAP_NODE, &pbr_map_match_mark_cmd);
install_element(PBRMAP_NODE, &pbr_map_nexthop_group_cmd);
install_element(PBRMAP_NODE, &pbr_map_nexthop_cmd);
install_element(VIEW_NODE, &show_pbr_cmd);
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index 466a9a13ae..d74d0fcd23 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -526,7 +526,7 @@ static void pbr_encode_pbr_map_sequence(struct stream *s,
stream_putw(s, 0); /* src port */
pbr_encode_pbr_map_sequence_prefix(s, pbrms->dst, family);
stream_putw(s, 0); /* dst port */
- stream_putl(s, 0); /* fwmark */
+ stream_putl(s, pbrms->mark);
if (pbrms->nhgrp_name)
stream_putl(s, pbr_nht_get_table(pbrms->nhgrp_name));
else if (pbrms->nhg)
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index 1c66007fbb..f7f4b54aea 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -57,7 +57,7 @@ static int pim_mroute_set(struct pim_instance *pim, int enable)
* We need to create the VRF table for the pim mroute_socket
*/
if (pim->vrf_id != VRF_DEFAULT) {
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
data = pim->vrf->data.l.table_id;
err = setsockopt(pim->mroute_socket, IPPROTO_IP,
@@ -75,7 +75,7 @@ static int pim_mroute_set(struct pim_instance *pim, int enable)
}
}
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
opt = enable ? MRT_INIT : MRT_DONE;
/*
* *BSD *cares* about what value we pass down
@@ -735,7 +735,7 @@ int pim_mroute_socket_enable(struct pim_instance *pim)
{
int fd;
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c
index b1f7cfd2c6..22eb8bc7b4 100644
--- a/pimd/pim_msdp_socket.c
+++ b/pimd/pim_msdp_socket.c
@@ -175,7 +175,7 @@ int pim_msdp_sock_listen(struct pim_instance *pim)
}
}
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
/* bind to well known TCP port */
rc = bind(sock, (struct sockaddr *)&sin, socklen);
}
diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c
index c4538a4ac5..82255cd3b0 100644
--- a/pimd/pim_sock.c
+++ b/pimd/pim_sock.c
@@ -46,7 +46,7 @@ int pim_socket_raw(int protocol)
{
int fd;
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
fd = socket(AF_INET, SOCK_RAW, protocol);
@@ -65,7 +65,7 @@ void pim_socket_ip_hdr(int fd)
{
const int on = 1;
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)))
zlog_err("%s: Could not turn on IP_HDRINCL option: %s",
@@ -83,7 +83,7 @@ int pim_socket_bind(int fd, struct interface *ifp)
int ret = 0;
#ifdef SO_BINDTODEVICE
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifp->name,
strlen(ifp->name));
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index d1935195df..1c4ecf299f 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -81,7 +81,7 @@ int pim_debug_config_write(struct vty *vty)
++writes;
}
- if (PIM_DEBUG_MROUTE_DETAIL) {
+ if (PIM_DEBUG_MROUTE_DETAIL_ONLY) {
vty_out(vty, "debug mroute detail\n");
++writes;
}
@@ -107,7 +107,7 @@ int pim_debug_config_write(struct vty *vty)
vty_out(vty, "debug pim trace\n");
++writes;
}
- if (PIM_DEBUG_PIM_TRACE_DETAIL) {
+ if (PIM_DEBUG_PIM_TRACE_DETAIL_ONLY) {
vty_out(vty, "debug pim trace detail\n");
++writes;
}
diff --git a/pimd/pimd.h b/pimd/pimd.h
index 175936e0a7..3b83d3b6c7 100644
--- a/pimd/pimd.h
+++ b/pimd/pimd.h
@@ -163,6 +163,8 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DEBUG_PIM_TRACE (router->debugs & PIM_MASK_PIM_TRACE)
#define PIM_DEBUG_PIM_TRACE_DETAIL \
(router->debugs & (PIM_MASK_PIM_TRACE_DETAIL | PIM_MASK_PIM_TRACE))
+#define PIM_DEBUG_PIM_TRACE_DETAIL_ONLY \
+ (router->debugs & PIM_MASK_PIM_TRACE_DETAIL)
#define PIM_DEBUG_IGMP_EVENTS (router->debugs & PIM_MASK_IGMP_EVENTS)
#define PIM_DEBUG_IGMP_PACKETS (router->debugs & PIM_MASK_IGMP_PACKETS)
#define PIM_DEBUG_IGMP_TRACE (router->debugs & PIM_MASK_IGMP_TRACE)
@@ -173,6 +175,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DEBUG_MROUTE (router->debugs & PIM_MASK_MROUTE)
#define PIM_DEBUG_MROUTE_DETAIL \
(router->debugs & (PIM_MASK_MROUTE_DETAIL | PIM_MASK_MROUTE))
+#define PIM_DEBUG_MROUTE_DETAIL_ONLY (router->debugs & PIM_MASK_MROUTE_DETAIL)
#define PIM_DEBUG_PIM_HELLO (router->debugs & PIM_MASK_PIM_HELLO)
#define PIM_DEBUG_PIM_J_P (router->debugs & PIM_MASK_PIM_J_P)
#define PIM_DEBUG_PIM_REG (router->debugs & PIM_MASK_PIM_REG)
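The new *_DETAIL_ONLY macros test just the detail bit, while the existing *_DETAIL macros are also true when only the base flag is set; pim_vty.c above switches the config writer to the _ONLY variants so that "debug mroute detail" / "debug pim trace detail" are written back only when the detail flag itself was configured. A standalone sketch of the two tests (mask values are illustrative, not pimd's):

#include <stdint.h>
#include <stdio.h>

#define MASK_MROUTE		0x1
#define MASK_MROUTE_DETAIL	0x2

/* true when either flag is set -- used for runtime logging decisions */
#define DEBUG_MROUTE_DETAIL(d)	((d) & (MASK_MROUTE_DETAIL | MASK_MROUTE))
/* true only when the detail flag itself is set -- used for config write */
#define DEBUG_MROUTE_DETAIL_ONLY(d)	((d) & MASK_MROUTE_DETAIL)

int main(void)
{
	uint32_t debugs = MASK_MROUTE;	/* user ran only "debug mroute" */

	printf("log detail output:     %s\n",
	       DEBUG_MROUTE_DETAIL(debugs) ? "yes" : "no");	/* yes */
	printf("write 'mroute detail': %s\n",
	       DEBUG_MROUTE_DETAIL_ONLY(debugs) ? "yes" : "no");	/* no */
	return 0;
}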
diff --git a/python/clidef.py b/python/clidef.py
index bc2f5caebf..baa6ed52b2 100644
--- a/python/clidef.py
+++ b/python/clidef.py
@@ -351,6 +351,7 @@ if __name__ == '__main__':
macros = Macros()
macros.load('lib/route_types.h')
macros.load(os.path.join(basepath, 'lib/command.h'))
+ macros.load(os.path.join(basepath, 'bgpd/bgp_vty.h'))
# sigh :(
macros['PROTO_REDIST_STR'] = 'FRR_REDIST_STR_ISISD'
diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in
index 014cae02ee..fa0a6d8a0a 100644
--- a/redhat/frr.spec.in
+++ b/redhat/frr.spec.in
@@ -634,6 +634,7 @@ fi
%{_libdir}/frr/modules/bgpd_rpki.so
%endif
%{_libdir}/frr/modules/zebra_irdp.so
+%{_libdir}/frr/modules/bgpd_bmp.so
%{_bindir}/*
%config(noreplace) %{configdir}/[!v]*.conf*
%config(noreplace) %attr(750,%{frr_user},%{frr_user}) %{configdir}/daemons
diff --git a/ripd/ripd.c b/ripd/ripd.c
index 561fbcb52d..ad373aebdf 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -1395,7 +1395,7 @@ int rip_create_socket(struct vrf *vrf)
/* Make datagram socket. */
if (vrf->vrf_id != VRF_DEFAULT)
vrf_dev = vrf->name;
- frr_elevate_privs(&ripd_privs) {
+ frr_with_privs(&ripd_privs) {
sock = vrf_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP, vrf->vrf_id,
vrf_dev);
if (sock < 0) {
@@ -1415,7 +1415,7 @@ int rip_create_socket(struct vrf *vrf)
#endif
setsockopt_so_recvbuf(sock, RIP_UDP_RCV_BUF);
- frr_elevate_privs(&ripd_privs) {
+ frr_with_privs(&ripd_privs) {
if ((ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr)))
< 0) {
zlog_err("%s: Can't bind socket %d to %s port %d: %s",
diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c
index 49ed13a2c2..9ed9dc28fe 100644
--- a/ripngd/ripng_interface.c
+++ b/ripngd/ripng_interface.c
@@ -75,7 +75,7 @@ static int ripng_multicast_join(struct interface *ifp, int sock)
* While this is bogus, privs are available and easy to use
* for this call as a workaround.
*/
- frr_elevate_privs(&ripngd_privs) {
+ frr_with_privs(&ripngd_privs) {
ret = setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
(char *)&mreq, sizeof(mreq));
diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c
index 3314892e74..49f7dda646 100644
--- a/ripngd/ripngd.c
+++ b/ripngd/ripngd.c
@@ -120,8 +120,7 @@ int ripng_make_socket(struct vrf *vrf)
/* Make datagram socket. */
if (vrf->vrf_id != VRF_DEFAULT)
vrf_dev = vrf->name;
- frr_elevate_privs(&ripngd_privs)
- {
+ frr_with_privs(&ripngd_privs) {
sock = vrf_socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP,
vrf->vrf_id, vrf_dev);
if (sock < 0) {
@@ -160,7 +159,7 @@ int ripng_make_socket(struct vrf *vrf)
#endif /* SIN6_LEN */
ripaddr.sin6_port = htons(RIPNG_PORT_DEFAULT);
- frr_elevate_privs(&ripngd_privs) {
+ frr_with_privs(&ripngd_privs) {
ret = bind(sock, (struct sockaddr *)&ripaddr, sizeof(ripaddr));
if (ret < 0) {
zlog_err("Can't bind ripng socket: %s.",
diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c
index 13c04259d7..27605da63f 100644
--- a/staticd/static_zebra.c
+++ b/staticd/static_zebra.c
@@ -130,6 +130,7 @@ static int interface_state_up(ZAPI_CALLBACK_ARGS)
/* Install any static reliant on this interface coming up */
static_install_intf_nh(ifp);
+ static_ifindex_update(ifp, true);
}
return 0;
@@ -137,7 +138,12 @@ static int interface_state_up(ZAPI_CALLBACK_ARGS)
static int interface_state_down(ZAPI_CALLBACK_ARGS)
{
- zebra_interface_state_read(zclient->ibuf, vrf_id);
+ struct interface *ifp;
+
+ ifp = zebra_interface_state_read(zclient->ibuf, vrf_id);
+
+ if (ifp)
+ static_ifindex_update(ifp, false);
return 0;
}
diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c
index 78016dc9ce..8e1b62ac15 100644
--- a/tests/bgpd/test_peer_attr.c
+++ b/tests/bgpd/test_peer_attr.c
@@ -1170,7 +1170,7 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
@@ -1237,7 +1237,7 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
@@ -1285,7 +1285,7 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
diff --git a/tests/lib/test_privs.c b/tests/lib/test_privs.c
index fc3d908661..de638bc67a 100644
--- a/tests/lib/test_privs.c
+++ b/tests/lib/test_privs.c
@@ -113,7 +113,7 @@ int main(int argc, char **argv)
((test_privs.current_state() == ZPRIVS_RAISED) ? "Raised" : "Lowered")
printf("%s\n", PRIV_STATE());
- frr_elevate_privs(&test_privs) {
+ frr_with_privs(&test_privs) {
printf("%s\n", PRIV_STATE());
}
@@ -125,7 +125,7 @@ int main(int argc, char **argv)
/* but these should continue to work... */
printf("%s\n", PRIV_STATE());
- frr_elevate_privs(&test_privs) {
+ frr_with_privs(&test_privs) {
printf("%s\n", PRIV_STATE());
}
diff --git a/tests/topotests/Dockerfile b/tests/topotests/Dockerfile
index ea6fa4b9e0..4602688782 100644
--- a/tests/topotests/Dockerfile
+++ b/tests/topotests/Dockerfile
@@ -40,12 +40,12 @@ RUN export DEBIAN_FRONTEND=noninteractive \
pytest
RUN cd /tmp \
- && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-1/Ubuntu-18.04-x86_64-Packages/libyang-dev_0.16.46_amd64.deb \
+ && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/Debian-AMD64-Packages/libyang-dev_0.16.105-1_amd64.deb \
-O libyang-dev.deb \
- && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-1/Ubuntu-18.04-x86_64-Packages/libyang_0.16.46_amd64.deb \
+ && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/Debian-AMD64-Packages/libyang0.16_0.16.105-1_amd64.deb \
-O libyang.deb \
- && echo "039252cc66eb254a97e160b1c325af669470cde8a02d73ec9f7b920ed3c7997c libyang.deb" | sha256sum -c - \
- && echo "e7e2d5bfc7b33b3218df8bef404432970f9b4ad10d6dbbdcb0e0be2babbb68e9 libyang-dev.deb" | sha256sum -c - \
+ && echo "34bef017e527a590020185f05dc39203bdf1c86223e0d990839623ec629d8598 libyang.deb" | sha256sum -c - \
+ && echo "fe9cc6e3b173ca56ef49428c281e96bf76c0f910aa75cf85098076411484e8f4 libyang-dev.deb" | sha256sum -c - \
&& dpkg -i libyang*.deb \
&& rm libyang*.deb
diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
index 095ebe3344..e99111d90b 100755
--- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
+++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
@@ -375,7 +375,7 @@ def test_static_routes(request):
# Verifying RIB routes
dut = 'r3'
protocol = 'bgp'
- next_hop = '10.0.0.2'
+ next_hop = ['10.0.0.2', '10.0.0.5']
result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop,
protocol=protocol)
assert result is True, "Testcase {} :Failed \n Error: {}". \
diff --git a/tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json b/tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json
new file mode 100755
index 0000000000..50797f130a
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json
@@ -0,0 +1,664 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json b/tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json
new file mode 100755
index 0000000000..9010b8c273
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json
@@ -0,0 +1,674 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
new file mode 100755
index 0000000000..4b9f419bf2
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test ecmp functionality on EBGP.
+1. Verify routes installed as per maximum-paths configuration (8/16/32)
+2. Disable/shut selected paths' nexthops and verify the remaining nexthops
+   are installed in the RIB of DUT. Enable interfaces and verify RIB count.
+3. Verify BGP table and RIB in DUT after clear BGP routes and neighbors.
+4. Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed.
+5. Shut BGP neighbors one by one and verify BGP and routing tables are updated
+ accordingly in DUT
+6. Delete static routes and verify routes are cleared from BGP table and RIB
+ of DUT.
+7. Verify routes are cleared from BGP and RIB table of DUT when advertise
+ network configuration is removed.
+"""
+import os
+import sys
+import time
+import json
+import pytest
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer,
+ verify_rib, create_static_routes, check_address_types,
+ interface_status, reset_config_on_routers
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ebgp_ecmp_topo2.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+INTF_LIST_R3 = []
+INTF_LIST_R2 = []
+NETWORK = {"ipv4": "11.0.20.1/32", "ipv6": "1::/64"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global NEXT_HOPS, INTF_LIST_R3, INTF_LIST_R2, TEST_STATIC
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ link_data = [val for links, val in
+ topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links]
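+    # Collect r2's addresses on the links towards r3 (prefix length stripped)
+    # and sort them numerically -- by the third octet for IPv4 and by the
+    # third-from-last group (hex) for IPv6 -- so the nexthop list has a
+    # deterministic per-link order.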
+ for adt in ADDR_TYPES:
+ NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
+ if adt == "ipv4":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ elif adt == "ipv6":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+
+ INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
+
+ link_data = [val for links, val in
+ topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links]
+ INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """
+    Teardown the pytest environment.
+    """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"],
+ "next_hop": NEXT_HOP_IP["ipv4"]
+ },
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": NEXT_HOP_IP["ipv6"]
+ }
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"]}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Advertising networks %s %s from router %s",
+ NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
+def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
+ """
+ Verify routes installed as per maximum-paths
+ configuration (8/16/32).
+ """
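+    # Sketch (not emitted verbatim by this test): via create_router_bgp() the
+    # "maximum_paths" knob in input_dict below is expected to translate to FRR
+    # CLI roughly along these lines on r3, with <ecmp_num> being 8, 16 or 32
+    # and the AS number taken from the topology JSON:
+    #
+    #   router bgp <local-as>
+    #    address-family ipv4 unicast
+    #     maximum-paths <ecmp_num>
+    #    address-family ipv6 unicast
+    #     maximum-paths <ecmp_num>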
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": ecmp_num,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": ecmp_num,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_after_clear_bgp(request):
+ """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Clear bgp
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_redistribute_static(request):
+ """ Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Remove redistribute static")
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3 are deleted", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ logger.info("Enable redistribute static")
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_shut_bgp_neighbor(request):
+ """
+    Disable/shut selected paths' nexthops and verify the remaining nexthops
+    are installed in the RIB of DUT. Enable interfaces and verify RIB count.
+
+    Shut BGP neighbors one by one and verify BGP and routing tables are updated
+ accordingly in DUT
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ logger.info(INTF_LIST_R2)
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for intf_num in range(len(INTF_LIST_R2)+1, 16):
+ intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": [intf_val],
+ "status": "down"
+ }
+ }
+ logger.info("Shutting down neighbor interface {} on r2".
+ format(intf_val))
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ if intf_num + 16 < 32:
+ check_hops = NEXT_HOPS[addr_type]
+ else:
+ check_hops = []
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=check_hops,
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": INTF_LIST_R2,
+ "status": "up"
+ }
+ }
+
+    logger.info("Enabling all neighbor interfaces on r2")
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_static_route(request):
+ """
+    Delete static routes and verify routes are cleared from BGP table,
+ and RIB of DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ logger.info("Remove static routes")
+ result = create_static_routes(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3 are removed", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_2,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ for addr_type in ADDR_TYPES:
+ # Enable static routes
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Enable static route")
+ result = create_static_routes(tgen, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_ecmp_remove_nw_advertise(request):
+ """
+ Verify routes are cleared from BGP and RIB table of DUT,
+ when advertise network configuration is removed
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv4"],
+ "delete": True
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv6"],
+ "delete": True
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Withdraw advertised networks")
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
new file mode 100755
index 0000000000..a9f18ed1fa
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
@@ -0,0 +1,813 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test ecmp functionality on IBGP.
+1. Verify routes installed as per maximum-paths configuration (8/16/32)
+2. Disable/shut selected paths' nexthops and verify the remaining nexthops
+   are installed in the RIB of DUT. Enable interfaces and verify RIB count.
+3. Verify BGP table and RIB in DUT after clear BGP routes and neighbors.
+4. Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed.
+5. Shut BGP neighbors one by one and verify BGP and routing tables are updated
+ accordingly in DUT
+6. Delete static routes and verify routes are cleared from BGP table and RIB
+ of DUT.
+7. Verify routes are cleared from BGP and RIB table of DUT when advertise
+ network configuration is removed.
+"""
+import os
+import sys
+import time
+import json
+import pytest
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer,
+ verify_rib, create_static_routes, check_address_types,
+ interface_status, reset_config_on_routers
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ibgp_ecmp_topo2.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+INTF_LIST_R3 = []
+INTF_LIST_R2 = []
+NETWORK = {"ipv4": "11.0.20.1/32", "ipv6": "1::/64"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global NEXT_HOPS, INTF_LIST_R3, INTF_LIST_R2, TEST_STATIC
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ for addr_type in ADDR_TYPES:
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ link_data = [val for links, val in
+ topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links]
+ for adt in ADDR_TYPES:
+ NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
+ if adt == "ipv4":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ elif adt == "ipv6":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+
+ INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
+
+ link_data = [val for links, val in
+ topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links]
+ INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+    """
+    Teardown the pytest environment.
+    """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"],
+ "next_hop": NEXT_HOP_IP["ipv4"]
+ },
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": NEXT_HOP_IP["ipv6"]
+ }
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"]}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Advertising networks %s %s from router %s",
+ NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
+def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
+ """
+ Verify routes installed as per maximum-paths
+ configuration (8/16/32).
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": ecmp_num,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": ecmp_num,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_after_clear_bgp(request):
+ """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Clear bgp
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_redistribute_static(request):
+ """ Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Remove redistribute static")
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3 are deleted", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ logger.info("Enable redistribute static")
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_shut_bgp_neighbor(request):
+    """ Shut BGP neighbors one by one and verify BGP and routing tables are
+    updated accordingly in DUT """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ logger.info(INTF_LIST_R2)
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for intf_num in range(len(INTF_LIST_R2)+1, 16):
+ intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": [intf_val],
+ "status": "down"
+ }
+ }
+ logger.info("Shutting down neighbor interface {} on r2".
+ format(intf_val))
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ if intf_num + 16 < 32:
+ check_hops = NEXT_HOPS[addr_type]
+ else:
+ check_hops = []
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=check_hops,
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": INTF_LIST_R2,
+ "status": "up"
+ }
+ }
+
+    logger.info("Enabling all neighbor interfaces on r2")
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_static_route(request):
+ """
+    Delete static routes and verify routes are cleared from BGP table,
+ and RIB of DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ logger.info("Remove static routes")
+ result = create_static_routes(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3 are removed", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_2,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ for addr_type in ADDR_TYPES:
+ # Enable static routes
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Enable static route")
+ result = create_static_routes(tgen, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_nw_advertise(request):
+ """
+ Verify routes are cleared from BGP and RIB table of DUT,
+ when advertise network configuration is removed
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv4"],
+ "delete": True
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv6"],
+ "delete": True
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Withdraw advertised networks")
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+        assert result is True, "Testcase {} : Failed \n Error: {}".format(
+            tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
index 2b9c411ff2..9f92b4b290 100755
--- a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
+++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
@@ -219,7 +219,8 @@ def test_next_hop_attribute(request):
dut = "r1"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Configure next-hop-self to bgp neighbor
input_dict_1 = {
@@ -484,7 +485,7 @@ def test_localpref_attribute(request):
"neighbor": {
"r1": {
"dest_link": {
- "r3": {
+ "r2": {
"route_maps": [
{"name": "RMAP_LOCAL_PREF",
"direction": "in"}
@@ -499,6 +500,7 @@ def test_localpref_attribute(request):
}
}
}
+
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result)
diff --git a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
index d3892e9d07..b8975997ea 100755
--- a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
+++ b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
@@ -386,8 +386,8 @@ def test_ip_prefix_lists_out_permit(request):
tc_name, result)
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -497,8 +497,8 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request):
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -542,7 +542,6 @@ def test_delete_prefix_lists(request):
result = verify_prefix_lists(tgen, input_dict_2)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result)
- logger.info(result)
# Delete prefix list
input_dict_2 = {
@@ -714,9 +713,8 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -859,8 +857,8 @@ def test_modify_prefix_lists_in_permit_to_deny(request):
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -972,8 +970,8 @@ def test_modify_prefix_lists_in_deny_to_permit(request):
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
@@ -1152,8 +1150,8 @@ def test_modify_prefix_lists_out_permit_to_deny(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -1265,8 +1263,8 @@ def test_modify_prefix_lists_out_deny_to_permit(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
@@ -1439,8 +1437,8 @@ def test_ip_prefix_lists_implicit_deny(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
diff --git a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
index de6c35ba8f..ed350ebfeb 100644
--- a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
+++ b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
@@ -88,7 +88,7 @@ def test_bgp_maximum_prefix_invalid():
while True:
output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
if output['192.168.255.1']['bgpState'] == 'Established':
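+            # FRR's JSON output names address families in camelCase
+            # ("ipv4Unicast"), not with the pretty-printed "IPv4 Unicast"
+            # label, hence the key used below.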
- if output['192.168.255.1']['addressFamilyInfo']['IPv4 Unicast']['acceptedPrefixCounter'] == 2:
+ if output['192.168.255.1']['addressFamilyInfo']['ipv4Unicast']['acceptedPrefixCounter'] == 2:
return True
def _bgp_comm_list_delete(router):
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py
index 7b3a883afa..1317a510d1 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py
@@ -2,16 +2,16 @@ from lutil import luCommand
luCommand('ce1','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
luCommand('ce2','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
luCommand('ce3','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
-luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',90)
-luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
-luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
+luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up')
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
+luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
+luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py
index 5674120b9c..c2b0cf9e7a 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py
@@ -6,10 +6,10 @@ luCommand('ce4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','Adjacencie
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',300)
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
+luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
+luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0.* 00:0','pass','All adjacencies up')
diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py
index 1f53791f6a..6fbe4ff1c0 100644
--- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py
+++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py
@@ -1,10 +1,10 @@
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',30)
-luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
-luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
-luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
+luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
+luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping')
#luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
diff --git a/tests/topotests/docker/inner/compile_frr.sh b/tests/topotests/docker/inner/compile_frr.sh
index 2d72082c1e..dee0ec8118 100755
--- a/tests/topotests/docker/inner/compile_frr.sh
+++ b/tests/topotests/docker/inner/compile_frr.sh
@@ -84,6 +84,7 @@ if [ ! -e Makefile ]; then
--enable-static-bin \
--enable-static \
--enable-shared \
+ --enable-dev-build \
--with-moduledir=/usr/lib/frr/modules \
--prefix=/usr \
--localstatedir=/var/run/frr \
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
index b794b96a63..cd069aaec5 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -178,7 +178,7 @@ def test_static_routes(request):
# Static routes are created as part of initial configuration,
# verifying RIB
dut = 'r3'
- next_hop = '10.0.0.1'
+ next_hop = ['10.0.0.1', '10.0.0.5']
input_dict = {
"r1": {
"static_routes": [
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 2613f45f1c..c47dddb8d4 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -32,7 +32,8 @@ from lib.common_config import (create_common_configuration,
load_config_to_router,
check_address_types,
generate_ips,
- find_interface_with_greater_ip)
+ find_interface_with_greater_ip,
+ run_frr_cmd, retry)
BGP_CONVERGENCE_TIMEOUT = 10
@@ -116,8 +117,8 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
logger.debug("Router %s: 'bgp' not present in input_dict", router)
continue
- result = __create_bgp_global(tgen, input_dict, router, build)
- if result is True:
+ data_all_bgp = __create_bgp_global(tgen, input_dict, router, build)
+ if data_all_bgp:
bgp_data = input_dict[router]["bgp"]
bgp_addr_data = bgp_data.setdefault("address_family", {})
@@ -134,8 +135,18 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
or ipv6_data.setdefault("unicast", {}) else False
if neigh_unicast:
- result = __create_bgp_unicast_neighbor(
- tgen, topo, input_dict, router, build)
+ data_all_bgp = __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router,
+ config_data=data_all_bgp)
+
+ try:
+ result = create_common_configuration(tgen, router, data_all_bgp,
+ "bgp", build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
logger.debug("Exiting lib API: create_router_bgp()")
return result
@@ -157,77 +168,66 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
True or False
"""
- result = False
logger.debug("Entering lib API: __create_bgp_global()")
- try:
-
- bgp_data = input_dict[router]["bgp"]
- del_bgp_action = bgp_data.setdefault("delete", False)
- if del_bgp_action:
- config_data = ["no router bgp"]
- result = create_common_configuration(tgen, router, config_data,
- "bgp", build=build)
- return result
- config_data = []
+ bgp_data = input_dict[router]["bgp"]
+ del_bgp_action = bgp_data.setdefault("delete", False)
+ if del_bgp_action:
+ config_data = ["no router bgp"]
- if "local_as" not in bgp_data and build:
- logger.error("Router %s: 'local_as' not present in input_dict"
- "for BGP", router)
- return False
+ return config_data
- local_as = bgp_data.setdefault("local_as", "")
- cmd = "router bgp {}".format(local_as)
- vrf_id = bgp_data.setdefault("vrf", None)
- if vrf_id:
- cmd = "{} vrf {}".format(cmd, vrf_id)
-
- config_data.append(cmd)
+ config_data = []
- router_id = bgp_data.setdefault("router_id", None)
- del_router_id = bgp_data.setdefault("del_router_id", False)
- if del_router_id:
- config_data.append("no bgp router-id")
- if router_id:
- config_data.append("bgp router-id {}".format(
- router_id))
+ if "local_as" not in bgp_data and build:
+ logger.error("Router %s: 'local_as' not present in input_dict"
+ "for BGP", router)
+ return False
- aggregate_address = bgp_data.setdefault("aggregate_address",
- {})
- if aggregate_address:
- network = aggregate_address.setdefault("network", None)
- if not network:
- logger.error("Router %s: 'network' not present in "
- "input_dict for BGP", router)
- else:
- cmd = "aggregate-address {}".format(network)
+ local_as = bgp_data.setdefault("local_as", "")
+ cmd = "router bgp {}".format(local_as)
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {}".format(cmd, vrf_id)
+
+ config_data.append(cmd)
+
+ router_id = bgp_data.setdefault("router_id", None)
+ del_router_id = bgp_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no bgp router-id")
+ if router_id:
+ config_data.append("bgp router-id {}".format(
+ router_id))
+
+ aggregate_address = bgp_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ network = aggregate_address.setdefault("network", None)
+ if not network:
+ logger.error("Router %s: 'network' not present in "
+ "input_dict for BGP", router)
+ else:
+ cmd = "aggregate-address {}".format(network)
- as_set = aggregate_address.setdefault("as_set", False)
- summary = aggregate_address.setdefault("summary", False)
- del_action = aggregate_address.setdefault("delete", False)
- if as_set:
- cmd = "{} {}".format(cmd, "as-set")
- if summary:
- cmd = "{} {}".format(cmd, "summary")
+ as_set = aggregate_address.setdefault("as_set", False)
+ summary = aggregate_address.setdefault("summary", False)
+ del_action = aggregate_address.setdefault("delete", False)
+ if as_set:
+ cmd = "{} {}".format(cmd, "as-set")
+ if summary:
+ cmd = "{} {}".format(cmd, "summary")
- if del_action:
- cmd = "no {}".format(cmd)
+ if del_action:
+ cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ config_data.append(cmd)
- result = create_common_configuration(tgen, router, config_data,
- "bgp", build=build)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
-
- logger.debug("Exiting lib API: create_bgp_global()")
- return result
+ return config_data
-def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
+def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router,
+ config_data=None):
"""
Helper API to create configuration for address-family unicast
@@ -240,124 +240,118 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
* `build` : Only for initial setup phase this is set as True.
"""
- result = False
logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
- try:
- config_data = ["router bgp"]
- bgp_data = input_dict[router]["bgp"]["address_family"]
- for addr_type, addr_dict in bgp_data.iteritems():
- if not addr_dict:
- continue
+ add_neigh = True
+ if "router bgp "in config_data:
+ add_neigh = False
+ bgp_data = input_dict[router]["bgp"]["address_family"]
- if not check_address_types(addr_type):
- continue
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict:
+ continue
+ if not check_address_types(addr_type):
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if addr_data:
config_data.append("address-family {} unicast".format(
addr_type
))
- addr_data = addr_dict["unicast"]
- advertise_network = addr_data.setdefault("advertise_networks",
- [])
- for advertise_network_dict in advertise_network:
- network = advertise_network_dict["network"]
- if type(network) is not list:
- network = [network]
-
- if "no_of_network" in advertise_network_dict:
- no_of_network = advertise_network_dict["no_of_network"]
- else:
- no_of_network = 1
-
- del_action = advertise_network_dict.setdefault("delete",
- False)
+ advertise_network = addr_data.setdefault("advertise_networks",
+ [])
+ for advertise_network_dict in advertise_network:
+ network = advertise_network_dict["network"]
+ if type(network) is not list:
+ network = [network]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
- # Generating IPs for verification
- prefix = str(
- ipaddr.IPNetwork(unicode(network[0])).prefixlen)
- network_list = generate_ips(network, no_of_network)
- for ip in network_list:
- ip = str(ipaddr.IPNetwork(unicode(ip)).network)
+ del_action = advertise_network_dict.setdefault("delete",
+ False)
- cmd = "network {}/{}\n".format(ip, prefix)
- if del_action:
- cmd = "no {}".format(cmd)
+ # Generating IPs for verification
+ prefix = str(
+ ipaddr.IPNetwork(unicode(network[0])).prefixlen)
+ network_list = generate_ips(network, no_of_network)
+ for ip in network_list:
+ ip = str(ipaddr.IPNetwork(unicode(ip)).network)
- config_data.append(cmd)
+ cmd = "network {}/{}".format(ip, prefix)
+ if del_action:
+ cmd = "no {}".format(cmd)
- max_paths = addr_data.setdefault("maximum_paths", {})
- if max_paths:
- ibgp = max_paths.setdefault("ibgp", None)
- ebgp = max_paths.setdefault("ebgp", None)
- if ibgp:
- config_data.append("maximum-paths ibgp {}".format(
- ibgp
- ))
- if ebgp:
- config_data.append("maximum-paths {}".format(
- ebgp
- ))
-
- aggregate_address = addr_data.setdefault("aggregate_address",
- {})
- if aggregate_address:
- ip = aggregate_address("network", None)
- attribute = aggregate_address("attribute", None)
- if ip:
- cmd = "aggregate-address {}".format(ip)
- if attribute:
- cmd = "{} {}".format(cmd, attribute)
+ config_data.append(cmd)
- config_data.append(cmd)
+ max_paths = addr_data.setdefault("maximum_paths", {})
+ if max_paths:
+ ibgp = max_paths.setdefault("ibgp", None)
+ ebgp = max_paths.setdefault("ebgp", None)
+ if ibgp:
+ config_data.append("maximum-paths ibgp {}".format(
+ ibgp
+ ))
+ if ebgp:
+ config_data.append("maximum-paths {}".format(
+ ebgp
+ ))
+
+ aggregate_address = addr_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ ip = aggregate_address("network", None)
+ attribute = aggregate_address("attribute", None)
+ if ip:
+ cmd = "aggregate-address {}".format(ip)
+ if attribute:
+ cmd = "{} {}".format(cmd, attribute)
- redistribute_data = addr_data.setdefault("redistribute", {})
- if redistribute_data:
- for redistribute in redistribute_data:
- if "redist_type" not in redistribute:
- logger.error("Router %s: 'redist_type' not present in "
- "input_dict", router)
- else:
- cmd = "redistribute {}".format(
- redistribute["redist_type"])
- redist_attr = redistribute.setdefault("attribute",
- None)
- if redist_attr:
- cmd = "{} {}".format(cmd, redist_attr)
- del_action = redistribute.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ config_data.append(cmd)
- if "neighbor" in addr_data:
- neigh_data = __create_bgp_neighbor(topo, input_dict,
- router, addr_type)
- config_data.extend(neigh_data)
+ redistribute_data = addr_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.error("Router %s: 'redist_type' not present in "
+ "input_dict", router)
+ else:
+ cmd = "redistribute {}".format(
+ redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute",
+ None)
+ if redist_attr:
+ cmd = "{} {}".format(cmd, redist_attr)
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- for addr_type, addr_dict in bgp_data.iteritems():
- if not addr_dict or not check_address_types(addr_type):
- continue
+ if "neighbor" in addr_data:
+ neigh_data = __create_bgp_neighbor(topo, input_dict,
+ router, addr_type, add_neigh)
+ config_data.extend(neigh_data)
- addr_data = addr_dict["unicast"]
- if "neighbor" in addr_data:
- neigh_addr_data = __create_bgp_unicast_address_family(
- topo, input_dict, router, addr_type)
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict or not check_address_types(addr_type):
+ continue
- config_data.extend(neigh_addr_data)
+ addr_data = addr_dict["unicast"]
+ if "neighbor" in addr_data:
+ neigh_addr_data = __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type, add_neigh)
- result = create_common_configuration(tgen, router, config_data,
- None, build=build)
+ config_data.extend(neigh_addr_data)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
- return result
+ return config_data
-def __create_bgp_neighbor(topo, input_dict, router, addr_type):
+def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
"""
Helper API to create neighbor specific configuration
@@ -391,7 +385,8 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type):
neigh_cxt = "neighbor {}".format(ip_addr)
- config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+ if add_neigh:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
if addr_type == "ipv6":
config_data.append("address-family ipv6 unicast")
config_data.append("{} activate".format(neigh_cxt))
@@ -429,7 +424,8 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type):
return config_data
-def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
+def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type,
+ add_neigh=True):
"""
API prints bgp global config to bgp_json file.
@@ -531,6 +527,7 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
#############################################
# Verification APIs
#############################################
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_router_id(tgen, topo, input_dict):
"""
Running command "show ip bgp json" for DUT and reading router-id
@@ -565,7 +562,7 @@ def verify_router_id(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_router_id()")
+ logger.debug("Entering lib API: verify_router_id()")
for router in input_dict.keys():
if router not in tgen.routers():
continue
@@ -576,9 +573,9 @@ def verify_router_id(tgen, topo, input_dict):
"del_router_id", False)
logger.info("Checking router %s router-id", router)
- show_bgp_json = rnode.vtysh_cmd("show ip bgp json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
- router_id_out = show_bgp_json["routerId"]
+ router_id_out = show_bgp_json["ipv4Unicast"]["routerId"]
router_id_out = ipaddr.IPv4Address(unicode(router_id_out))
# Once router-id is deleted, highest interface ip should become
@@ -598,100 +595,84 @@ def verify_router_id(tgen, topo, input_dict):
router_id_out)
return errormsg
- logger.info("Exiting lib API: verify_router_id()")
+ logger.debug("Exiting lib API: verify_router_id()")
return True
+@retry(attempts=20, wait=2, return_is_str=True)
def verify_bgp_convergence(tgen, topo):
"""
API will verify if BGP is converged with in the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
state is established,
-
Parameters
----------
* `tgen`: topogen object
* `topo`: input json file data
* `addr_type`: ip_type, ipv4/ipv6
-
Usage
-----
# To veriry is BGP is converged for all the routers used in
topology
results = verify_bgp_convergence(tgen, topo, "ipv4")
-
Returns
-------
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_confergence()")
+ logger.debug("Entering lib API: verify_bgp_convergence()")
for router, rnode in tgen.routers().iteritems():
- logger.info("Verifying BGP Convergence on router %s:", router)
-
- for retry in range(1, 11):
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
- isjson=True)
- # Verifying output dictionary show_bgp_json is empty or not
- if not bool(show_bgp_json):
- errormsg = "BGP is not running"
- return errormsg
+ logger.info("Verifying BGP Convergence on router %s", router)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
- # To find neighbor ip type
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
total_peer = 0
- bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
-
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- for bgp_neighbor in bgp_neighbors:
- total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
-
- for addr_type in bgp_addr_type.keys():
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- no_of_peer = 0
- for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
- for dest_link in peer_data["dest_link"].keys():
- data = topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- neighbor_ip = \
- data[dest_link][addr_type].split("/")[0]
- if addr_type == "ipv4":
- ipv4_data = show_bgp_json["ipv4Unicast"][
- "peers"]
- nh_state = ipv4_data[neighbor_ip]["state"]
- else:
- ipv6_data = show_bgp_json["ipv6Unicast"][
- "peers"]
- nh_state = ipv6_data[neighbor_ip]["state"]
-
- if nh_state == "Established":
- no_of_peer += 1
- if no_of_peer == total_peer:
- logger.info("BGP is Converged for router %s", router)
- break
- else:
- logger.warning("BGP is not yet Converged for router %s",
- router)
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on"
- " router %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- show_bgp_summary = rnode.vtysh_cmd("show bgp summary")
- errormsg = "TIMEOUT!! BGP is not converged in {} " \
- "seconds for router {} \n {}".format(
- BGP_CONVERGENCE_TIMEOUT, router,
- show_bgp_summary)
- return errormsg
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ no_of_peer = 0
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ neighbor_ip = \
+ data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s", router)
+ else:
+ errormsg = "BGP is not converged for router {}".format(
+ router)
+ return errormsg
- logger.info("Exiting API: verify_bgp_confergence()")
+ logger.debug("Exiting API: verify_bgp_convergence()")
return True
@@ -723,7 +704,7 @@ def modify_as_number(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: modify_as_number()")
+ logger.debug("Entering lib API: modify_as_number()")
try:
new_topo = deepcopy(topo["routers"])
@@ -757,11 +738,12 @@ def modify_as_number(tgen, topo, input_dict):
logger.error(errormsg)
return errormsg
- logger.info("Exiting lib API: modify_as_number()")
+ logger.debug("Exiting lib API: modify_as_number()")
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_as_numbers(tgen, topo, input_dict):
"""
This API is to verify AS numbers for given DUT by running
@@ -791,7 +773,7 @@ def verify_as_numbers(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_as_numbers()")
+ logger.debug("Entering lib API: verify_as_numbers()")
for router in input_dict.keys():
if router not in tgen.routers():
continue
@@ -800,7 +782,7 @@ def verify_as_numbers(tgen, topo, input_dict):
logger.info("Verifying AS numbers for dut %s:", router)
- show_ip_bgp_neighbor_json = rnode.vtysh_cmd(
+ show_ip_bgp_neighbor_json = run_frr_cmd(rnode,
"show ip bgp neighbor json", isjson=True)
local_as = input_dict[router]["bgp"]["local_as"]
bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
@@ -846,7 +828,7 @@ def verify_as_numbers(tgen, topo, input_dict):
"neighbor %s, found expected: %s",
router, bgp_neighbor, remote_as)
- logger.info("Exiting lib API: verify_AS_numbers()")
+ logger.debug("Exiting lib API: verify_AS_numbers()")
return True
@@ -873,7 +855,7 @@ def clear_bgp_and_verify(tgen, topo, router):
errormsg(str) or True
"""
- logger.info("Entering lib API: clear_bgp_and_verify()")
+ logger.debug("Entering lib API: clear_bgp_and_verify()")
if router not in tgen.routers():
return False
@@ -883,20 +865,14 @@ def clear_bgp_and_verify(tgen, topo, router):
peer_uptime_before_clear_bgp = {}
# Verifying BGP convergence before bgp clear command
for retry in range(1, 11):
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
- " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
- return errormsg
+ sleeptime = 3
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
- logger.info(show_bgp_json)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
@@ -950,33 +926,33 @@ def clear_bgp_and_verify(tgen, topo, router):
" clear", router)
break
else:
- logger.warning("BGP is not yet Converged for router %s "
- "before bgp clear", router)
+ logger.info("BGP is not yet Converged for router %s "
+ "before bgp clear", router)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
+ " router {}".format(router)
+ return errormsg
logger.info(peer_uptime_before_clear_bgp)
# Clearing BGP
logger.info("Clearing BGP neighborship for router %s..", router)
for addr_type in bgp_addr_type.keys():
if addr_type == "ipv4":
- rnode.vtysh_cmd("clear ip bgp *")
+ run_frr_cmd(rnode, "clear ip bgp *")
elif addr_type == "ipv6":
- rnode.vtysh_cmd("clear bgp ipv6 *")
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
peer_uptime_after_clear_bgp = {}
# Verifying BGP convergence after bgp clear command
- for retry in range(1, 11):
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
- " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
- return errormsg
+ for retry in range(11):
+ sleeptime = 3
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
@@ -1028,9 +1004,12 @@ def clear_bgp_and_verify(tgen, topo, router):
router)
break
else:
- logger.warning("BGP is not yet Converged for router %s after"
- " bgp clear", router)
-
+ logger.info("BGP is not yet Converged for router %s after"
+ " bgp clear", router)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
+ " router {}".format(router)
+ return errormsg
logger.info(peer_uptime_after_clear_bgp)
# Comparing peerUptimeEstablishedEpoch dictionaries
if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
@@ -1041,7 +1020,7 @@ def clear_bgp_and_verify(tgen, topo, router):
" {}".format(router)
return errormsg
- logger.info("Exiting lib API: clear_bgp_and_verify()")
+ logger.debug("Exiting lib API: clear_bgp_and_verify()")
return True
@@ -1077,7 +1056,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Entering lib API: verify_bgp_timers_and_functionality()")
sleep(5)
router_list = tgen.routers()
for router in input_dict.keys():
@@ -1090,7 +1069,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
router)
show_ip_bgp_neighbor_json = \
- rnode.vtysh_cmd("show ip bgp neighbor json", isjson=True)
+ run_frr_cmd(rnode, "show ip bgp neighbor json", isjson=True)
bgp_addr_type = input_dict[router]["bgp"]["address_family"]
@@ -1178,7 +1157,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
sleep(keepalivetimer)
sleep(2)
show_bgp_json = \
- rnode.vtysh_cmd("show bgp summary json",
+ run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
if addr_type == "ipv4":
@@ -1192,17 +1171,13 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
(holddowntimer - keepalivetimer):
if nh_state != "Established":
errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}\n" \
- "show_bgp_json: \n {} ".format(
- timer, bgp_neighbor,
- show_bgp_json)
+ "down in {} sec for neighbor {}" \
+ .format(timer, bgp_neighbor)
return errormsg
else:
logger.info("BGP neighborship is intact in %s"
- " sec for neighbor %s \n "
- "show_bgp_json : \n %s",
- timer, bgp_neighbor,
- show_bgp_json)
+ " sec for neighbor %s",
+ timer, bgp_neighbor)
####################
# Shutting down peer interface and verifying that BGP
@@ -1229,7 +1204,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
sleep(keepalivetimer)
sleep(2)
show_bgp_json = \
- rnode.vtysh_cmd("show bgp summary json",
+ run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
if addr_type == "ipv4":
@@ -1242,22 +1217,19 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
if timer == holddowntimer:
if nh_state == "Established":
errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}\n" \
- "show_bgp_json: \n {} ".format(
- timer, bgp_neighbor,
- show_bgp_json)
+ "down in {} sec for neighbor {}" \
+ .format(timer, bgp_neighbor)
return errormsg
else:
logger.info("BGP neighborship has gone down in"
- " %s sec for neighbor %s \n"
- "show_bgp_json : \n %s",
- timer, bgp_neighbor,
- show_bgp_json)
+ " %s sec for neighbor %s",
+ timer, bgp_neighbor)
- logger.info("Exiting lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()")
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
attribute):
"""
@@ -1319,7 +1291,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
sleep(2)
logger.info("Verifying router %s RIB for best path:", router)
- sh_ip_bgp_json = rnode.vtysh_cmd(command, isjson=True)
+ sh_ip_bgp_json = run_frr_cmd(rnode, command, isjson=True)
for route_val in input_dict.values():
net_data = route_val["bgp"]["address_family"]["ipv4"]["unicast"]
@@ -1380,7 +1352,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
else:
command = "show ipv6 route json"
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
@@ -1417,6 +1389,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
attribute):
"""
@@ -1451,7 +1424,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_best_path_as_per_admin_distance()")
+ logger.debug("Entering lib API: verify_best_path_as_per_admin_distance()")
router_list = tgen.routers()
if router not in router_list:
return False
@@ -1490,7 +1463,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
compare = "LOWEST"
# Show ip route
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 75880cfd28..f2d33f94ae 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -23,24 +23,32 @@ from datetime import datetime
from time import sleep
from subprocess import call
from subprocess import STDOUT as SUB_STDOUT
+from subprocess import PIPE as SUB_PIPE
+from subprocess import Popen
+from functools import wraps
+from re import search as re_search
+
import StringIO
import os
import ConfigParser
import traceback
import socket
import ipaddr
+import re
from lib import topotest
from functools import partial
from lib.topolog import logger, logger_config
from lib.topogen import TopoRouter
+from lib.topotest import interface_set_status
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
ERROR_LIST = ["Malformed", "Failure", "Unknown"]
+ROUTER_LIST = []
####
CD = os.path.dirname(os.path.realpath(__file__))
@@ -142,6 +150,35 @@ class InvalidCLIError(Exception):
pass
+def run_frr_cmd(rnode, cmd, isjson=False):
+ """
+    Execute frr show commands in privileged mode
+
+    * `rnode`: router node on which the command needs to be executed
+ * `cmd`: Command to be executed on frr
+    * `isjson`: whether the command output should be parsed as JSON
+
+ :return str:
+ """
+
+ if cmd:
+ ret_data = rnode.vtysh_cmd(cmd, isjson=isjson)
+
+ if True:
+ if isjson:
+ logger.debug(ret_data)
+ print_data = rnode.vtysh_cmd(cmd.rstrip("json"), isjson=False)
+ else:
+ print_data = ret_data
+
+ logger.info('Output for command [ %s] on router %s:\n%s',
+ cmd.rstrip("json"), rnode.name, print_data)
+ return ret_data
+
+ else:
+ raise InvalidCLIError('No actual cmd passed')
+
+
def create_common_configuration(tgen, router, data, config_type=None,
build=False):
"""
@@ -186,6 +223,7 @@ def create_common_configuration(tgen, router, data, config_type=None,
frr_cfg_fd.write(config_map[config_type])
for line in data:
frr_cfg_fd.write("{} \n".format(str(line)))
+ frr_cfg_fd.write("\n")
except IOError as err:
logger.error("Unable to open FRR Config File. error(%s): %s" %
@@ -215,10 +253,13 @@ def reset_config_on_routers(tgen, routerName=None):
logger.debug("Entering API: reset_config_on_routers")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
+ for rname in ROUTER_LIST:
if routerName and routerName != rname:
continue
+ router = router_list[rname]
+ logger.info("Configuring router %s to initial test configuration",
+ rname)
cfg = router.run("vtysh -c 'show running'")
fname = "{}/{}/frr.sav".format(TMPDIR, rname)
dname = "{}/{}/delta.conf".format(TMPDIR, rname)
@@ -235,16 +276,35 @@ def reset_config_on_routers(tgen, routerName=None):
f.close()
- command = "/usr/lib/frr/frr-reload.py --input {}/{}/frr.sav" \
- " --test {}/{}/frr_json_initial.conf > {}". \
- format(TMPDIR, rname, TMPDIR, rname, dname)
- result = call(command, shell=True, stderr=SUB_STDOUT)
+ run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname)
+ init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
+ command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}". \
+ format(run_cfg_file, init_cfg_file, dname)
+ result = call(command, shell=True, stderr=SUB_STDOUT,
+ stdout=SUB_PIPE)
# Assert if command fail
if result > 0:
- errormsg = ("Command:{} is failed due to non-zero exit"
- " code".format(command))
- return errormsg
+ logger.error("Delta file creation failed. Command executed %s",
+ command)
+ with open(run_cfg_file, 'r') as fd:
+ logger.info('Running configuration saved in %s is:\n%s',
+ run_cfg_file, fd.read())
+ with open(init_cfg_file, 'r') as fd:
+ logger.info('Test configuration saved in %s is:\n%s',
+ init_cfg_file, fd.read())
+
+ err_cmd = ['/usr/bin/vtysh', '-m', '-f', run_cfg_file]
+ result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE)
+ output = result.communicate()
+ for out_data in output:
+ temp_data = out_data.decode('utf-8').lower()
+ for out_err in ERROR_LIST:
+ if out_err.lower() in temp_data:
+ logger.error("Found errors while validating data in"
+ " %s", run_cfg_file)
+ raise InvalidCLIError(out_data)
+ raise InvalidCLIError("Unknown error in %s", output)
f = open(dname, "r")
delta = StringIO.StringIO()
@@ -264,7 +324,7 @@ def reset_config_on_routers(tgen, routerName=None):
delta.write("end\n")
output = router.vtysh_multicmd(delta.getvalue(),
pretty_output=False)
- logger.info("New configuration for router {}:".format(rname))
+
delta.close()
delta = StringIO.StringIO()
cfg = router.run("vtysh -c 'show running'")
@@ -276,6 +336,8 @@ def reset_config_on_routers(tgen, routerName=None):
# Router current configuration to log file or console if
# "show_router_config" is defined in "pytest.ini"
if show_router_config:
+ logger.info("Configuration on router {} after config reset:".
+ format(rname))
logger.info(delta.getvalue())
delta.close()
@@ -297,34 +359,39 @@ def load_config_to_router(tgen, routerName, save_bkup=False):
logger.debug("Entering API: load_config_to_router")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
- if rname == routerName:
- try:
- frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
- frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
- FRRCFG_BKUP_FILE)
- with open(frr_cfg_file, "r") as cfg:
- data = cfg.read()
- if save_bkup:
- with open(frr_cfg_bkup, "w") as bkup:
- bkup.write(data)
-
- output = router.vtysh_multicmd(data, pretty_output=False)
- for out_err in ERROR_LIST:
- if out_err.lower() in output.lower():
- raise InvalidCLIError("%s" % output)
- except IOError as err:
- errormsg = ("Unable to open config File. error(%s):"
- " %s", (err.errno, err.strerror))
- return errormsg
+ for rname in ROUTER_LIST:
+ if routerName and routerName != rname:
+ continue
- logger.info("New configuration for router {}:".format(rname))
- new_config = router.run("vtysh -c 'show running'")
+ router = router_list[rname]
+ try:
+ frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
+ frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
+ FRRCFG_BKUP_FILE)
+ with open(frr_cfg_file, "r+") as cfg:
+ data = cfg.read()
+ logger.info("Applying following configuration on router"
+ " {}:\n{}".format(rname, data))
+ if save_bkup:
+ with open(frr_cfg_bkup, "w") as bkup:
+ bkup.write(data)
+
+ output = router.vtysh_multicmd(data, pretty_output=False)
+ for out_err in ERROR_LIST:
+ if out_err.lower() in output.lower():
+ raise InvalidCLIError("%s" % output)
+
+ cfg.truncate(0)
+ except IOError as err:
+ errormsg = ("Unable to open config File. error(%s):"
+ " %s", (err.errno, err.strerror))
+ return errormsg
- # Router current configuration to log file or console if
- # "show_router_config" is defined in "pytest.ini"
- if show_router_config:
- logger.info(new_config)
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ new_config = router.run("vtysh -c 'show running'")
+ logger.info(new_config)
logger.debug("Exting API: load_config_to_router")
return True
@@ -337,21 +404,25 @@ def start_topology(tgen):
* `tgen` : topogen object
"""
- global TMPDIR
+ global TMPDIR, ROUTER_LIST
# Starting topology
tgen.start_topology()
# Starting deamons
+
router_list = tgen.routers()
+ ROUTER_LIST = sorted(router_list.keys(),
+ key=lambda x: int(re_search('\d+', x).group(0)))
TMPDIR = os.path.join(LOGDIR, tgen.modname)
- for rname, router in router_list.iteritems():
+ router_list = tgen.routers()
+ for rname in ROUTER_LIST:
+ router = router_list[rname]
try:
os.chdir(TMPDIR)
- # Creating rouer named dir and empty zebra.conf bgpd.conf files
+ # Creating router named dir and empty zebra.conf bgpd.conf files
# inside the current directory
-
if os.path.isdir('{}'.format(rname)):
os.system("rm -rf {}".format(rname))
os.mkdir('{}'.format(rname))
@@ -371,13 +442,11 @@ def start_topology(tgen):
router.load_config(
TopoRouter.RD_ZEBRA,
'{}/{}/zebra.conf'.format(TMPDIR, rname)
- # os.path.join(tmpdir, '{}/zebra.conf'.format(rname))
)
# Loading empty bgpd.conf file to router, to start the bgp deamon
router.load_config(
TopoRouter.RD_BGP,
'{}/{}/bgpd.conf'.format(TMPDIR, rname)
- # os.path.join(tmpdir, '{}/bgpd.conf'.format(rname))
)
# Starting routers
@@ -446,27 +515,31 @@ def validate_ip_address(ip_address):
" address" % ip_address)
-def check_address_types(addr_type):
+def check_address_types(addr_type=None):
"""
Checks environment variable set and compares with the current address type
"""
- global ADDRESS_TYPES
- if ADDRESS_TYPES is None:
- ADDRESS_TYPES = "dual"
-
- if ADDRESS_TYPES == "dual":
- ADDRESS_TYPES = ["ipv4", "ipv6"]
- elif ADDRESS_TYPES == "ipv4":
- ADDRESS_TYPES = ["ipv4"]
- elif ADDRESS_TYPES == "ipv6":
- ADDRESS_TYPES = ["ipv6"]
-
- if addr_type not in ADDRESS_TYPES:
+
+ addr_types_env = os.environ.get("ADDRESS_TYPES")
+ if not addr_types_env:
+ addr_types_env = "dual"
+
+ if addr_types_env == "dual":
+ addr_types = ["ipv4", "ipv6"]
+ elif addr_types_env == "ipv4":
+ addr_types = ["ipv4"]
+ elif addr_types_env == "ipv6":
+ addr_types = ["ipv6"]
+
+ if addr_type is None:
+ return addr_types
+
+ if addr_type not in addr_types:
logger.error("{} not in supported/configured address types {}".
- format(addr_type, ADDRESS_TYPES))
+ format(addr_type, addr_types))
return False
- return ADDRESS_TYPES
+ return True
def generate_ips(network, no_of_ips):
@@ -548,7 +621,7 @@ def write_test_header(tc_name):
""" Display message at beginning of test case"""
count = 20
logger.info("*"*(len(tc_name)+count))
- logger.info("START -> Testcase : %s", tc_name)
+ logger.info("START -> Testcase : %s" % tc_name)
logger.info("*"*(len(tc_name)+count))
@@ -556,10 +629,169 @@ def write_test_footer(tc_name):
""" Display message at end of test case"""
count = 21
logger.info("="*(len(tc_name)+count))
- logger.info("PASSED -> Testcase : %s", tc_name)
+ logger.info("Testcase : %s -> PASSED", tc_name)
logger.info("="*(len(tc_name)+count))
+def interface_status(tgen, topo, input_dict):
+ """
+    Bring a router's interfaces up or down and reload its configuration
+
+    * `tgen` : Topogen object
+    * `topo` : json file data
+    * `input_dict` : routers with the interface list and desired status ("up"/"down")
+
+ Usage
+ -----
+ input_dict = {
+ "r3": {
+ "interface_list": ['eth1-r1-r2', 'eth2-r1-r3'],
+ "status": "down"
+ }
+ }
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: interface_status()")
+
+ try:
+ global frr_cfg
+ for router in input_dict.keys():
+
+ interface_list = input_dict[router]['interface_list']
+ status = input_dict[router].setdefault('status', 'up')
+ for intf in interface_list:
+ rnode = tgen.routers()[router]
+ interface_set_status(rnode, intf, status)
+
+ # Load config to router
+ load_config_to_router(tgen, router)
+
+ except Exception as e:
+ # handle any exception
+ logger.error("Error %s occured. Arguments %s.", e.message, e.args)
+
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: interface_status()")
+ return True
+
+
+def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0):
+ """
+ Retries function execution, if return is an errormsg or exception
+
+ * `attempts`: Number of attempts to make
+ * `wait`: Number of seconds to wait between each attempt
+ * `return_is_str`: Return val is an errormsg in case of failure
+ * `initial_wait`: Sleeps for this much seconds before executing function
+
+ """
+
+ def _retry(func):
+
+ @wraps(func)
+ def func_retry(*args, **kwargs):
+ _wait = kwargs.pop('wait', wait)
+ _attempts = kwargs.pop('attempts', attempts)
+ _attempts = int(_attempts)
+ if _attempts < 0:
+ raise ValueError("attempts must be 0 or greater")
+
+ if initial_wait > 0:
+ logger.info("Waiting for [%s]s as initial delay", initial_wait)
+ sleep(initial_wait)
+
+ _return_is_str = kwargs.pop('return_is_str', return_is_str)
+ for i in range(1, _attempts + 1):
+ try:
+ _expected = kwargs.setdefault('expected', True)
+ kwargs.pop('expected')
+ ret = func(*args, **kwargs)
+ logger.debug("Function returned %s" % ret)
+ if return_is_str and isinstance(ret, bool):
+ return ret
+ elif return_is_str and _expected is False:
+ return ret
+
+ if _attempts == i:
+ return ret
+ except Exception as err:
+ if _attempts == i:
+ logger.info("Max number of attempts (%r) reached",
+ _attempts)
+ raise
+ else:
+ logger.info("Function returned %s", err)
+ if i < _attempts:
+ logger.info("Retry [#%r] after sleeping for %ss"
+ % (i, _wait))
+ sleep(_wait)
+ func_retry._original = func
+ return func_retry
+ return _retry
+
+
+def disable_v6_link_local(tgen, router, intf_name=None):
+ """
+ Disables ipv6 link local addresses for a particular interface or
+ all interfaces
+
+    * `tgen`: tgen object
+    * `router` : router on which ipv6 link local addresses should be
+      disabled
+ * `intf_name` : Interface name for which v6 link local needs to
+ be disabled
+ """
+
+ router_list = tgen.routers()
+ for rname, rnode in router_list.iteritems():
+ if rname != router:
+ continue
+
+ linklocal = []
+
+ ifaces = router_list[router].run('ip -6 address')
+
+ # Fix newlines (make them all the same)
+ ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re.search('[0-9]+: ([^:]+)[@if0-9:]+ <', line)
+ if m:
+ interface = m.group(1).split("@")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+'
+ ':[0-9a-f]+[/0-9]*) scope link', line)
+ if m:
+ local = m.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ if len(linklocal[0]) > 1:
+ link_local_dict = {item[0]: item[1] for item in linklocal}
+
+ for lname, laddr in link_local_dict.items():
+
+ if intf_name is not None and lname != intf_name:
+ continue
+
+ cmd = "ip addr del {} dev {}".format(laddr, lname)
+ router_list[router].run(cmd)
+
+
#############################################
# These APIs, will used by testcase
#############################################
@@ -589,19 +821,22 @@ def create_interfaces_cfg(tgen, topo, build=False):
interface_name = destRouterLink
else:
interface_name = data["interface"]
- interface_data.append("interface {}\n".format(
+ if "ipv6" in data:
+ disable_v6_link_local(tgen, c_router, interface_name)
+ interface_data.append("interface {}".format(
str(interface_name)
))
if "ipv4" in data:
intf_addr = c_data["links"][destRouterLink]["ipv4"]
- interface_data.append("ip address {}\n".format(
+ interface_data.append("ip address {}".format(
intf_addr
))
if "ipv6" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6"]
- interface_data.append("ipv6 address {}\n".format(
+ interface_data.append("ipv6 address {}".format(
intf_addr
))
+
result = create_common_configuration(tgen, c_router,
interface_data,
"interface_config",
@@ -662,7 +897,7 @@ def create_static_routes(tgen, input_dict, build=False):
for router in input_dict.keys():
if "static_routes" not in input_dict[router]:
errormsg = "static_routes not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
static_routes_list = []
@@ -768,7 +1003,7 @@ def create_prefix_lists(tgen, input_dict, build=False):
for router in input_dict.keys():
if "prefix_lists" not in input_dict[router]:
errormsg = "prefix_lists not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
config_data = []
@@ -922,7 +1157,7 @@ def create_route_maps(tgen, input_dict, build=False):
for router in input_dict.keys():
if "route_maps" not in input_dict[router]:
errormsg = "route_maps not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
rmap_data = []
for rmap_name, rmap_value in \
@@ -1014,7 +1249,7 @@ def create_route_maps(tgen, input_dict, build=False):
# Weight
if weight:
- rmap_data.append("set weight {} \n".format(
+ rmap_data.append("set weight {}".format(
weight))
# Adding MATCH and SET sequence to RMAP if defined
@@ -1092,7 +1327,8 @@ def create_route_maps(tgen, input_dict, build=False):
#############################################
# Verification APIs
#############################################
-def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
+@retry(attempts=10, return_is_str=True, initial_wait=2)
+def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
"""
Data will be read from input_dict or input JSON file, API will generate
same prefixes, which were redistributed by either create_static_routes() or
@@ -1140,7 +1376,7 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_rib()")
+ logger.debug("Entering lib API: verify_rib()")
router_list = tgen.routers()
for routerInput in input_dict.keys():
@@ -1160,9 +1396,8 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
else:
command = "show ipv6 route json"
- sleep(10)
logger.info("Checking router %s RIB:", router)
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if bool(rib_routes_json) is False:
@@ -1181,7 +1416,7 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
if "no_of_ip" in static_route:
no_of_ip = static_route["no_of_ip"]
else:
- no_of_ip = 0
+ no_of_ip = 1
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
@@ -1199,9 +1434,9 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
found_hops = [rib_r["ip"] for rib_r in
rib_routes_json[st_rt][0][
"nexthops"]]
- for nh in next_hop:
+ for nh in found_hops:
nh_found = False
- if nh and nh in found_hops:
+ if nh and nh in next_hop:
nh_found = True
else:
errormsg = ("Nexthop {} is Missing for {}"
@@ -1257,30 +1492,10 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
logger.info("Verified routes in router %s RIB, found routes"
" are: %s", dut, found_routes)
- logger.info("Exiting lib API: verify_rib()")
+ logger.debug("Exiting lib API: verify_rib()")
return True
-def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None, expected=True):
- """
- Wrapper function for `_verify_rib` that tries multiple time to get results.
-
- When the expected result is `False` we actually should expect for an string instead.
- """
-
- # Use currying to hide the parameters and create a test function.
- test_func = partial(_verify_rib, tgen, addr_type, dut, input_dict, next_hop, protocol)
-
- # Call the test function and expect it to return True, otherwise try it again.
- if expected is True:
- _, result = topotest.run_and_expect(test_func, True, count=20, wait=6)
- else:
- _, result = topotest.run_and_expect_type(test_func, str, count=20, wait=6)
-
- # Return as normal.
- return result
-
-
def verify_admin_distance_for_static_routes(tgen, input_dict):
"""
API to verify admin distance for static routes as defined in input_dict/
@@ -1311,7 +1526,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Entering lib API: verify_admin_distance_for_static_routes()")
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1326,7 +1541,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
command = "show ip route json"
else:
command = "show ipv6 route json"
- show_ip_route_json = rnode.vtysh_cmd(command, isjson=True)
+ show_ip_route_json = run_frr_cmd(rnode, command, isjson=True)
logger.info("Verifying admin distance for static route %s"
" under dut %s:", static_route, router)
@@ -1356,7 +1571,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
format(network, router))
return errormsg
- logger.info("Exiting lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()")
return True
@@ -1384,7 +1599,7 @@ def verify_prefix_lists(tgen, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_prefix_lists()")
+ logger.debug("Entering lib API: verify_prefix_lists()")
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1393,7 +1608,7 @@ def verify_prefix_lists(tgen, input_dict):
rnode = tgen.routers()[router]
# Show ip prefix list
- show_prefix_list = rnode.vtysh_cmd("show ip prefix-list")
+ show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
# Verify Prefix list is deleted
prefix_lists_addr = input_dict[router]["prefix_lists"]
@@ -1403,12 +1618,12 @@ def verify_prefix_lists(tgen, input_dict):
for prefix_list in prefix_lists_addr[addr_type].keys():
if prefix_list in show_prefix_list:
- errormsg = ("Prefix list {} is not deleted from router"
+ errormsg = ("Prefix list {} is/are present in the router"
" {}".format(prefix_list, router))
return errormsg
- logger.info("Prefix list %s is/are deleted successfully"
+ logger.info("Prefix list %s is/are not present in the router"
" from router %s", prefix_list, router)
- logger.info("Exiting lib API: verify_prefix_lissts()")
+ logger.debug("Exiting lib API: verify_prefix_lissts()")
return True
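
check_address_types() now reads the ADDRESS_TYPES environment variable on every call: with no argument it returns the list of enabled address families ("dual", the default, yields both), and with a family name it returns True or False. A short sketch of the two call styles (the surrounding function is illustrative only):

    from lib.topolog import logger
    from lib.common_config import check_address_types

    def per_afi_steps(tgen, topo):
        # No argument: list of enabled address families, e.g. ['ipv4', 'ipv6'].
        for addr_type in check_address_types():
            logger.info("running %s specific steps", addr_type)

        # With an argument: a plain capability check for that family.
        if check_address_types("ipv6"):
            logger.info("ipv6 is enabled for this run")
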
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
index 4130451d2e..7a00fe4c50 100644
--- a/tests/topotests/lib/topojson.py
+++ b/tests/topotests/lib/topojson.py
@@ -20,6 +20,7 @@
from collections import OrderedDict
from json import dumps as json_dumps
+from re import search as re_search
import ipaddr
import pytest
@@ -38,6 +39,9 @@ from lib.common_config import (
from lib.bgp import create_router_bgp
+ROUTER_LIST = []
+
+
def build_topo_from_json(tgen, topo):
"""
Reads configuration from JSON file. Adds routers, creates interface
@@ -48,13 +52,15 @@ def build_topo_from_json(tgen, topo):
* `topo`: json file data
"""
- listRouters = []
- for routerN in sorted(topo['routers'].iteritems()):
- logger.info('Topo: Add router {}'.format(routerN[0]))
- tgen.add_router(routerN[0])
- listRouters.append(routerN[0])
+ ROUTER_LIST = sorted(topo['routers'].keys(),
+ key=lambda x: int(re_search('\d+', x).group(0)))
+
+    listRouters = []
+ for routerN in ROUTER_LIST:
+ logger.info('Topo: Add router {}'.format(routerN))
+ tgen.add_router(routerN)
+ listRouters.append(routerN)
- listRouters.sort()
if 'ipv4base' in topo:
ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4'])
ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask'])
@@ -78,7 +84,7 @@ def build_topo_from_json(tgen, topo):
elif 'link' in x:
return int(x.split('-link')[1])
else:
- return int(x.split('r')[1])
+ return int(re_search('\d+', x).group(0))
for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \
iteritems(),
key=lambda x: link_sort(x[0])):
@@ -179,12 +185,13 @@ def build_config_from_json(tgen, topo, save_bkup=True):
data = topo["routers"]
for func_type in func_dict.keys():
- logger.info('Building configuration for {}'.format(func_type))
+ logger.info('Checking for {} configuration in input data'.format(
+ func_type))
func_dict.get(func_type)(tgen, data, build=True)
for router in sorted(topo['routers'].keys()):
- logger.info('Configuring router {}...'.format(router))
+ logger.debug('Configuring router {}...'.format(router))
result = load_config_to_router(tgen, router, save_bkup)
if not result:
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index 7ea38491d8..ade5bfd501 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -1,6 +1,6 @@
# Skip pytests example directory
[pytest]
-norecursedirs = .git example-test lib docker
+norecursedirs = .git example-test example-topojson-test lib docker
[topogen]
# Default configuration values
@@ -15,7 +15,7 @@ norecursedirs = .git example-test lib docker
# Display router current configuration during test execution,
# by default configuration will not be shown
-show_router_config = True
+# show_router_config = True
# Default daemons binaries path.
#frrdir = /usr/lib/frr
diff --git a/tools/checkpatch.pl b/tools/checkpatch.pl
index 1b48d10a09..c0624d933e 100755
--- a/tools/checkpatch.pl
+++ b/tools/checkpatch.pl
@@ -3351,7 +3351,7 @@ sub process {
# if/while/etc brace do not go on next line, unless defining a do while loop,
# or if that brace on the next line is for something else
- if ($line =~ /(.*)\b((?:if|while|for|switch|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
+ if ($line =~ /(.*)\b((?:if|while|for|switch|(?:[a-z_]+|)frr_(each|with)[a-z_]+)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
my $pre_ctx = "$1$2";
my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
@@ -3397,7 +3397,7 @@ sub process {
}
# Check relative indent for conditionals and blocks.
- if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+ if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)frr_(each|with)[a-z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0)
if (!defined $stat);
@@ -5177,6 +5177,31 @@ sub process {
}
}
+ if (!defined $suppress_ifbraces{$linenr - 1} &&
+ $line =~ /\b(frr_with_)/) {
+ my ($level, $endln, @chunks) =
+ ctx_statement_full($linenr, $realcnt, $-[0]);
+
+ # Check the condition.
+ my ($cond, $block) = @{$chunks[0]};
+ #print "CHECKING<$linenr> cond<$cond> block<$block>\n";
+ if (defined $cond) {
+ substr($block, 0, length($cond), '');
+ }
+
+ if ($level == 0 && $block !~ /^\s*\{/) {
+ my $herectx = $here . "\n";
+ my $cnt = statement_rawlines($block);
+
+ for (my $n = 0; $n < $cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
+ }
+
+ WARN("BRACES",
+ "braces {} are mandatory for frr_with_* blocks\n" . $herectx);
+ }
+ }
+
# check for single line unbalanced braces
if ($sline =~ /^.\s*\}\s*else\s*$/ ||
$sline =~ /^.\s*else\s*\{\s*$/) {
diff --git a/tools/coccinelle/frr_with_mutex.cocci b/tools/coccinelle/frr_with_mutex.cocci
new file mode 100644
index 0000000000..ec8b73917c
--- /dev/null
+++ b/tools/coccinelle/frr_with_mutex.cocci
@@ -0,0 +1,23 @@
+@@
+expression E;
+iterator name frr_with_mutex;
+@@
+
+- pthread_mutex_lock(E);
++ frr_with_mutex(E) {
+- {
+ ...
+- }
+- pthread_mutex_unlock(E);
++ }
+
+
+@@
+expression E;
+@@
+
+- pthread_mutex_lock(E);
++ frr_with_mutex(E) {
+ ...
+- pthread_mutex_unlock(E);
++ }
diff --git a/tools/coccinelle/zprivs.cocci b/tools/coccinelle/zprivs.cocci
index 76d13c3f0d..11628a7eae 100644
--- a/tools/coccinelle/zprivs.cocci
+++ b/tools/coccinelle/zprivs.cocci
@@ -2,12 +2,12 @@
identifier change;
identifier end;
expression E, f, g;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
- if (E.change(ZPRIVS_RAISE))
- f;
-+ frr_elevate_privs(&E) {
++ frr_with_privs(&E) {
<+...
- goto end;
+ break;
@@ -20,7 +20,7 @@ iterator name frr_elevate_privs;
@@
identifier change, errno, safe_strerror, exit;
expression E, f1, f2, f3, ret, fn;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
if (E.change(ZPRIVS_RAISE))
@@ -44,7 +44,7 @@ iterator name frr_elevate_privs;
@@
identifier change;
expression E, f1, f2, f3, ret;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
if (E.change(ZPRIVS_RAISE))
@@ -64,12 +64,12 @@ iterator name frr_elevate_privs;
@@
identifier change;
expression E, f, g;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
- if (E.change(ZPRIVS_RAISE))
- f;
-+ frr_elevate_privs(&E) {
++ frr_with_privs(&E) {
...
- if (E.change(ZPRIVS_LOWER))
- g;
diff --git a/vrrpd/vrrp.c b/vrrpd/vrrp.c
index 951ad3f58f..b4049b55eb 100644
--- a/vrrpd/vrrp.c
+++ b/vrrpd/vrrp.c
@@ -1065,8 +1065,7 @@ static int vrrp_socket(struct vrrp_router *r)
int ret;
bool failed = false;
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
r->sock_rx = socket(r->family, SOCK_RAW, IPPROTO_VRRP);
r->sock_tx = socket(r->family, SOCK_RAW, IPPROTO_VRRP);
}
@@ -1102,8 +1101,7 @@ static int vrrp_socket(struct vrrp_router *r)
setsockopt_ipv4_multicast_loop(r->sock_tx, 0);
/* Bind Rx socket to exact interface */
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
ret = setsockopt(r->sock_rx, SOL_SOCKET,
SO_BINDTODEVICE, r->vr->ifp->name,
strlen(r->vr->ifp->name));
@@ -1213,8 +1211,7 @@ static int vrrp_socket(struct vrrp_router *r)
setsockopt_ipv6_multicast_loop(r->sock_tx, 0);
/* Bind Rx socket to exact interface */
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
ret = setsockopt(r->sock_rx, SOL_SOCKET,
SO_BINDTODEVICE, r->vr->ifp->name,
strlen(r->vr->ifp->name));
diff --git a/vrrpd/vrrp_arp.c b/vrrpd/vrrp_arp.c
index 78e153a082..750050e8c3 100644
--- a/vrrpd/vrrp_arp.c
+++ b/vrrpd/vrrp_arp.c
@@ -188,7 +188,7 @@ void vrrp_garp_init(void)
/* Create the socket descriptor */
/* FIXME: why ETH_P_RARP? */
errno = 0;
- frr_elevate_privs(&vrrp_privs) {
+ frr_with_privs(&vrrp_privs) {
garp_fd = socket(PF_PACKET, SOCK_RAW | SOCK_CLOEXEC,
htons(ETH_P_RARP));
}
diff --git a/vrrpd/vrrp_ndisc.c b/vrrpd/vrrp_ndisc.c
index 348958509a..dc546b09a2 100644
--- a/vrrpd/vrrp_ndisc.c
+++ b/vrrpd/vrrp_ndisc.c
@@ -214,8 +214,7 @@ int vrrp_ndisc_una_send_all(struct vrrp_router *r)
void vrrp_ndisc_init(void)
{
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
ndisc_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_IPV6));
}
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index b053392bff..a762e9555c 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -1260,6 +1260,8 @@ static struct cmd_node bgp_vrf_policy_node = {BGP_VRF_POLICY_NODE,
static struct cmd_node bgp_vnc_l2_group_node = {
BGP_VNC_L2_GROUP_NODE, "%s(config-router-vnc-l2-group)# "};
+static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "};
+
static struct cmd_node ospf_node = {OSPF_NODE, "%s(config-router)# "};
static struct cmd_node eigrp_node = {EIGRP_NODE, "%s(config-router)# "};
@@ -1335,7 +1337,7 @@ DEFUNSH(VTYSH_REALLYALL, vtysh_end_all, vtysh_end_all_cmd, "end",
}
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
- "router bgp [(1-4294967295) [<view|vrf> WORD]]",
+ "router bgp [(1-4294967295)$instasn [<view|vrf> WORD]]",
ROUTER_STR BGP_STR AS_STR
"BGP view\nBGP VRF\n"
"View/VRF name\n")
@@ -1478,6 +1480,18 @@ DEFUNSH(VTYSH_BGPD,
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_BGPD,
+ bmp_targets,
+ bmp_targets_cmd,
+ "bmp targets BMPTARGETS",
+ "BGP Monitoring Protocol\n"
+ "Create BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ vty->node = BMP_NODE;
+ return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
@@ -1585,10 +1599,11 @@ DEFUNSH(VTYSH_OSPFD, router_ospf, router_ospf_cmd,
return CMD_SUCCESS;
}
-DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535)",
+DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535) [vrf NAME]",
"Enable a routing process\n"
"Start EIGRP configuration\n"
- "AS number to use\n")
+ "AS number to use\n"
+ VRF_CMD_HELP_STR)
{
vty->node = EIGRP_NODE;
return CMD_SUCCESS;
@@ -1842,6 +1857,7 @@ static int vtysh_exit(struct vty *vty)
case BGP_VNC_DEFAULTS_NODE:
case BGP_VNC_NVE_GROUP_NODE:
case BGP_VNC_L2_GROUP_NODE:
+ case BMP_NODE:
vty->node = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
@@ -1932,6 +1948,19 @@ DEFUNSH(VTYSH_BGPD, rpki_quit, rpki_quit_cmd, "quit",
return rpki_exit(self, vty, argc, argv);
}
+DEFUNSH(VTYSH_BGPD, bmp_exit, bmp_exit_cmd, "exit",
+ "Exit current mode and down to previous mode\n")
+{
+ vtysh_exit(vty);
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BGPD, bmp_quit, bmp_quit_cmd, "quit",
+ "Exit current mode and down to previous mode\n")
+{
+ return bmp_exit(self, vty, argc, argv);
+}
+
DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf",
"Exit from VRF configuration mode\n")
{
@@ -3259,15 +3288,66 @@ DEFUN (no_vtysh_output_file,
DEFUN(find,
find_cmd,
- "find COMMAND...",
- "Find CLI command containing text\n"
- "Text to search for\n")
+ "find REGEX",
+ "Find CLI command matching a regular expression\n"
+ "Search pattern (POSIX regex)\n")
{
- char *text = argv_concat(argv, argc, 1);
+ char *pattern = argv[1]->arg;
const struct cmd_node *node;
const struct cmd_element *cli;
vector clis;
+ regex_t exp = {};
+
+ int cr = regcomp(&exp, pattern, REG_NOSUB | REG_EXTENDED);
+
+ if (cr != 0) {
+ switch (cr) {
+ case REG_BADBR:
+ vty_out(vty, "%% Invalid \\{...\\} expression\n");
+ break;
+ case REG_BADRPT:
+ vty_out(vty, "%% Bad repetition operator\n");
+ break;
+ case REG_BADPAT:
+ vty_out(vty, "%% Regex syntax error\n");
+ break;
+ case REG_ECOLLATE:
+ vty_out(vty, "%% Invalid collating element\n");
+ break;
+ case REG_ECTYPE:
+ vty_out(vty, "%% Invalid character class name\n");
+ break;
+ case REG_EESCAPE:
+ vty_out(vty,
+ "%% Regex ended with escape character (\\)\n");
+ break;
+ case REG_ESUBREG:
+ vty_out(vty,
+ "%% Invalid number in \\digit construction\n");
+ break;
+ case REG_EBRACK:
+ vty_out(vty, "%% Unbalanced square brackets\n");
+ break;
+ case REG_EPAREN:
+ vty_out(vty, "%% Unbalanced parentheses\n");
+ break;
+ case REG_EBRACE:
+ vty_out(vty, "%% Unbalanced braces\n");
+ break;
+ case REG_ERANGE:
+ vty_out(vty,
+ "%% Invalid endpoint in range expression\n");
+ break;
+ case REG_ESPACE:
+ vty_out(vty, "%% Failed to compile (out of memory)\n");
+ break;
+ }
+
+ goto done;
+ }
+
+
for (unsigned int i = 0; i < vector_active(cmdvec); i++) {
node = vector_slot(cmdvec, i);
if (!node)
@@ -3275,14 +3355,15 @@ DEFUN(find,
clis = node->cmd_vector;
for (unsigned int j = 0; j < vector_active(clis); j++) {
cli = vector_slot(clis, j);
- if (strcasestr(cli->string, text))
+
+ if (regexec(&exp, cli->string, 0, NULL, 0) == 0)
vty_out(vty, " (%s) %s\n",
node_names[node->node], cli->string);
}
}
- XFREE(MTYPE_TMP, text);
-
+done:
+ regfree(&exp);
return CMD_SUCCESS;
}
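With this change `find` treats its argument as a POSIX extended regex rather than a literal substring: a query such as `find ^show bgp` (illustrative) lists only commands whose definition string matches the pattern, and a malformed pattern produces one of the diagnostics above instead of being searched for verbatim. Note that the old strcasestr() search was case-insensitive, while the pattern here is compiled without REG_ICASE, so matching becomes case-sensitive.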
@@ -3620,6 +3701,7 @@ void vtysh_init_vty(void)
install_node(&openfabric_node, NULL);
install_node(&vty_node, NULL);
install_node(&rpki_node, NULL);
+ install_node(&bmp_node, NULL);
#if HAVE_BFDD > 0
install_node(&bfd_node, NULL);
install_node(&bfd_peer_node, NULL);
@@ -3853,6 +3935,11 @@ void vtysh_init_vty(void)
install_element(BGP_FLOWSPECV4_NODE, &exit_address_family_cmd);
install_element(BGP_FLOWSPECV6_NODE, &exit_address_family_cmd);
+ install_element(BGP_NODE, &bmp_targets_cmd);
+ install_element(BMP_NODE, &bmp_exit_cmd);
+ install_element(BMP_NODE, &bmp_quit_cmd);
+ install_element(BMP_NODE, &vtysh_end_all_cmd);
+
install_element(CONFIG_NODE, &rpki_cmd);
install_element(RPKI_NODE, &rpki_exit_cmd);
install_element(RPKI_NODE, &rpki_quit_cmd);
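The BMP additions above follow the usual vtysh pattern for daemon-owned configuration nodes: the DEFUNSH copies of `bmp targets`, `exit` and `quit` only move vty->node between BGP_NODE and BMP_NODE so that vtysh can track the context and relay the nested lines to bgpd; the BMP commands themselves are implemented in bgpd (bgp_bmp.c).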
diff --git a/zebra/connected.c b/zebra/connected.c
index ffc991861c..6b92945c63 100644
--- a/zebra/connected.c
+++ b/zebra/connected.c
@@ -235,7 +235,7 @@ void connected_up(struct interface *ifp, struct connected *ifc)
return;
break;
case AFI_IP6:
-#ifndef LINUX
+#ifndef GNU_LINUX
/* XXX: It is already done by rib_bogus_ipv6 within rib_add */
if (IN6_IS_ADDR_UNSPECIFIED(&p.u.prefix6))
return;
diff --git a/zebra/if_ioctl_solaris.c b/zebra/if_ioctl_solaris.c
index 8b539a9049..2a2504ebf8 100644
--- a/zebra/if_ioctl_solaris.c
+++ b/zebra/if_ioctl_solaris.c
@@ -60,7 +60,7 @@ static int interface_list_ioctl(int af)
size_t needed, lastneeded = 0;
char *buf = NULL;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(af, SOCK_DGRAM, 0);
}
@@ -72,7 +72,7 @@ static int interface_list_ioctl(int af)
}
calculate_lifc_len:
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
lifn.lifn_family = af;
lifn.lifn_flags = LIFC_NOXMIT;
/* we want NOXMIT interfaces too */
@@ -107,7 +107,7 @@ calculate_lifc_len:
lifconf.lifc_len = needed;
lifconf.lifc_buf = buf;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = ioctl(sock, SIOCGLIFCONF, &lifconf);
}
diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c
index e157c2d70a..c71b95f753 100644
--- a/zebra/if_netlink.c
+++ b/zebra/if_netlink.c
@@ -385,7 +385,7 @@ static int get_iflink_speed(struct interface *interface)
ifdata.ifr_data = (caddr_t)&ecmd;
/* use ioctl to get IP address of an interface */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sd = vrf_socket(PF_INET, SOCK_DGRAM, IPPROTO_IP,
interface->vrf_id,
NULL);
diff --git a/zebra/ioctl.c b/zebra/ioctl.c
index 8202e076af..b461a08881 100644
--- a/zebra/ioctl.c
+++ b/zebra/ioctl.c
@@ -57,7 +57,7 @@ int if_ioctl(unsigned long request, caddr_t buffer)
int ret;
int err = 0;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
zlog_err("Cannot create UDP socket: %s",
@@ -83,7 +83,7 @@ int vrf_if_ioctl(unsigned long request, caddr_t buffer, vrf_id_t vrf_id)
int ret;
int err = 0;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = vrf_socket(AF_INET, SOCK_DGRAM, 0, vrf_id, NULL);
if (sock < 0) {
zlog_err("Cannot create UDP socket: %s",
@@ -110,7 +110,7 @@ static int if_ioctl_ipv6(unsigned long request, caddr_t buffer)
int ret;
int err = 0;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock < 0) {
zlog_err("Cannot create IPv6 datagram socket: %s",
diff --git a/zebra/ioctl_solaris.c b/zebra/ioctl_solaris.c
index 1f96fa23ea..2c71d949f7 100644
--- a/zebra/ioctl_solaris.c
+++ b/zebra/ioctl_solaris.c
@@ -66,7 +66,7 @@ int if_ioctl(unsigned long request, caddr_t buffer)
int ret;
int err;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
@@ -96,7 +96,7 @@ int if_ioctl_ipv6(unsigned long request, caddr_t buffer)
int ret;
int err;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock < 0) {
diff --git a/zebra/ipforward_proc.c b/zebra/ipforward_proc.c
index 8f44c377b3..709d2176aa 100644
--- a/zebra/ipforward_proc.c
+++ b/zebra/ipforward_proc.c
@@ -76,7 +76,7 @@ int ipforward_on(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv4_forwarding, "w");
@@ -97,7 +97,7 @@ int ipforward_off(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv4_forwarding, "w");
@@ -143,7 +143,7 @@ int ipforward_ipv6_on(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv6_forwarding, "w");
@@ -165,7 +165,7 @@ int ipforward_ipv6_off(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv6_forwarding, "w");
diff --git a/zebra/ipforward_solaris.c b/zebra/ipforward_solaris.c
index 1bb743059c..1a45328248 100644
--- a/zebra/ipforward_solaris.c
+++ b/zebra/ipforward_solaris.c
@@ -83,7 +83,7 @@ static int solaris_nd(const int cmd, const char *parameter, const int value)
strioctl.ic_len = ND_BUFFER_SIZE;
strioctl.ic_dp = nd_buf;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if ((fd = open(device, O_RDWR)) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"failed to open device %s - %s", device,
diff --git a/zebra/ipforward_sysctl.c b/zebra/ipforward_sysctl.c
index cc9421c275..ac8f537075 100644
--- a/zebra/ipforward_sysctl.c
+++ b/zebra/ipforward_sysctl.c
@@ -56,7 +56,7 @@ int ipforward_on(void)
int ipforwarding = 1;
len = sizeof ipforwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib, MIB_SIZ, NULL, NULL, &ipforwarding, len) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"Can't set ipforwarding on");
@@ -72,7 +72,7 @@ int ipforward_off(void)
int ipforwarding = 0;
len = sizeof ipforwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib, MIB_SIZ, NULL, NULL, &ipforwarding, len) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"Can't set ipforwarding on");
@@ -97,7 +97,7 @@ int ipforward_ipv6(void)
int ip6forwarding = 0;
len = sizeof ip6forwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib_ipv6, MIB_SIZ, &ip6forwarding, &len, 0, 0) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"can't get ip6forwarding value");
@@ -113,7 +113,7 @@ int ipforward_ipv6_on(void)
int ip6forwarding = 1;
len = sizeof ip6forwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib_ipv6, MIB_SIZ, NULL, NULL, &ip6forwarding, len)
< 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
@@ -130,7 +130,7 @@ int ipforward_ipv6_off(void)
int ip6forwarding = 0;
len = sizeof ip6forwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib_ipv6, MIB_SIZ, NULL, NULL, &ip6forwarding, len)
< 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
diff --git a/zebra/irdp_main.c b/zebra/irdp_main.c
index 38d241eaa5..0de618625d 100644
--- a/zebra/irdp_main.c
+++ b/zebra/irdp_main.c
@@ -82,7 +82,7 @@ int irdp_sock_init(void)
int save_errno;
int sock;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
save_errno = errno;
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index 2c306434a3..f52b4746ae 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -183,7 +183,7 @@ static int netlink_recvbuf(struct nlsock *nl, uint32_t newsize)
}
/* Try force option (linux >= 2.6.14) and fall back to normal set */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = setsockopt(nl->sock, SOL_SOCKET, SO_RCVBUFFORCE,
&nl_rcvbufsize,
sizeof(nl_rcvbufsize));
@@ -220,7 +220,7 @@ static int netlink_socket(struct nlsock *nl, unsigned long groups,
int sock;
int namelen;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = ns_socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE, ns_id);
if (sock < 0) {
zlog_err("Can't open %s socket: %s", nl->name,
@@ -352,7 +352,7 @@ static void netlink_write_incoming(const char *buf, const unsigned int size,
FILE *f;
snprintf(fname, MAXPATHLEN, "%s/%s_%u", frr_vtydir, "netlink", counter);
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
f = fopen(fname, "w");
}
if (f) {
@@ -373,7 +373,7 @@ static long netlink_read_file(char *buf, const char *fname)
FILE *f;
long file_bytes = -1;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
f = fopen(fname, "r");
}
if (f) {
@@ -989,7 +989,7 @@ int netlink_talk_info(int (*filter)(struct nlmsghdr *, ns_id_t, int startup),
n->nlmsg_flags);
/* Send message to netlink interface. */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
status = sendmsg(nl->sock, &msg, 0);
save_errno = errno;
}
@@ -1056,7 +1056,7 @@ int netlink_request(struct nlsock *nl, struct nlmsghdr *n)
snl.nl_family = AF_NETLINK;
/* Raise capabilities and send message, then lower capabilities. */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = sendto(nl->sock, (void *)n, n->nlmsg_len, 0,
(struct sockaddr *)&snl, sizeof snl);
}
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index 156ce50725..60fbbcc059 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -1426,7 +1426,7 @@ static int kernel_read(struct thread *thread)
/* Make routing socket. */
static void routing_socket(struct zebra_ns *zns)
{
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
routing_sock = ns_socket(AF_ROUTE, SOCK_RAW, 0, zns->ns_id);
dplane_routing_sock =
diff --git a/zebra/rt.h b/zebra/rt.h
index 727d2d0c55..59b42fed18 100644
--- a/zebra/rt.h
+++ b/zebra/rt.h
@@ -57,6 +57,8 @@ enum zebra_dplane_result kernel_address_update_ctx(
enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx);
+enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx);
+
extern int kernel_neigh_update(int cmd, int ifindex, uint32_t addr, char *lla,
int llalen, ns_id_t ns_id);
extern int kernel_interface_set_master(struct interface *master,
@@ -66,15 +68,6 @@ extern int mpls_kernel_init(void);
extern uint32_t kernel_get_speed(struct interface *ifp);
extern int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *mroute);
-extern int kernel_add_vtep(vni_t vni, struct interface *ifp,
- struct in_addr *vtep_ip);
-extern int kernel_del_vtep(vni_t vni, struct interface *ifp,
- struct in_addr *vtep_ip);
-extern int kernel_add_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags);
-extern int kernel_del_neigh(struct interface *ifp, struct ipaddr *ip);
-extern int kernel_upd_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags, uint16_t state);
/*
* Southbound Initialization routines to get initial starting
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 3655763164..5edcf9bb8a 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -92,6 +92,41 @@ void rt_netlink_init(void)
inet_pton(AF_INET, ipv4_ll_buf, &ipv4_ll);
}
+/*
+ * Mapping from dataplane neighbor flags to netlink flags
+ */
+static uint8_t neigh_flags_to_netlink(uint8_t dplane_flags)
+{
+ uint8_t flags = 0;
+
+ if (dplane_flags & DPLANE_NTF_EXT_LEARNED)
+ flags |= NTF_EXT_LEARNED;
+ if (dplane_flags & DPLANE_NTF_ROUTER)
+ flags |= NTF_ROUTER;
+
+ return flags;
+}
+
+/*
+ * Mapping from dataplane neighbor state to netlink state
+ */
+static uint16_t neigh_state_to_netlink(uint16_t dplane_state)
+{
+ uint16_t state = 0;
+
+ if (dplane_state & DPLANE_NUD_REACHABLE)
+ state |= NUD_REACHABLE;
+ if (dplane_state & DPLANE_NUD_STALE)
+ state |= NUD_STALE;
+ if (dplane_state & DPLANE_NUD_NOARP)
+ state |= NUD_NOARP;
+ if (dplane_state & DPLANE_NUD_PROBE)
+ state |= NUD_PROBE;
+
+ return state;
+}
+
+
static inline int is_selfroute(int proto)
{
if ((proto == RTPROT_BGP) || (proto == RTPROT_OSPF)
@@ -1019,33 +1054,28 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen,
label_buf[0] = '\0';
assert(nexthop);
- for (const struct nexthop *nh = nexthop; nh; nh = nh->rparent) {
- char label_buf1[20];
+ char label_buf1[20];
- nh_label = nh->nh_label;
- if (!nh_label || !nh_label->num_labels)
- continue;
+ nh_label = nexthop->nh_label;
- for (int i = 0; i < nh_label->num_labels; i++) {
- if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
- continue;
+ for (int i = 0; nh_label && i < nh_label->num_labels; i++) {
+ if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
+ continue;
- if (IS_ZEBRA_DEBUG_KERNEL) {
- if (!num_labels)
- sprintf(label_buf, "label %u",
- nh_label->label[i]);
- else {
- sprintf(label_buf1, "/%u",
- nh_label->label[i]);
- strlcat(label_buf, label_buf1,
- sizeof(label_buf));
- }
+ if (IS_ZEBRA_DEBUG_KERNEL) {
+ if (!num_labels)
+ sprintf(label_buf, "label %u",
+ nh_label->label[i]);
+ else {
+ sprintf(label_buf1, "/%u", nh_label->label[i]);
+ strlcat(label_buf, label_buf1,
+ sizeof(label_buf));
}
-
- out_lse[num_labels] =
- mpls_lse_encode(nh_label->label[i], 0, 0, 0);
- num_labels++;
}
+
+ out_lse[num_labels] =
+ mpls_lse_encode(nh_label->label[i], 0, 0, 0);
+ num_labels++;
}
if (num_labels) {
@@ -1210,33 +1240,28 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen,
label_buf[0] = '\0';
assert(nexthop);
- for (const struct nexthop *nh = nexthop; nh; nh = nh->rparent) {
- char label_buf1[20];
+ char label_buf1[20];
- nh_label = nh->nh_label;
- if (!nh_label || !nh_label->num_labels)
- continue;
+ nh_label = nexthop->nh_label;
- for (int i = 0; i < nh_label->num_labels; i++) {
- if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
- continue;
+ for (int i = 0; nh_label && i < nh_label->num_labels; i++) {
+ if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
+ continue;
- if (IS_ZEBRA_DEBUG_KERNEL) {
- if (!num_labels)
- sprintf(label_buf, "label %u",
- nh_label->label[i]);
- else {
- sprintf(label_buf1, "/%u",
- nh_label->label[i]);
- strlcat(label_buf, label_buf1,
- sizeof(label_buf));
- }
+ if (IS_ZEBRA_DEBUG_KERNEL) {
+ if (!num_labels)
+ sprintf(label_buf, "label %u",
+ nh_label->label[i]);
+ else {
+ sprintf(label_buf1, "/%u", nh_label->label[i]);
+ strlcat(label_buf, label_buf1,
+ sizeof(label_buf));
}
-
- out_lse[num_labels] =
- mpls_lse_encode(nh_label->label[i], 0, 0, 0);
- num_labels++;
}
+
+ out_lse[num_labels] =
+ mpls_lse_encode(nh_label->label[i], 0, 0, 0);
+ num_labels++;
}
if (num_labels) {
@@ -1902,19 +1927,17 @@ int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
* Add remote VTEP to the flood list for this VxLAN interface (VNI). This
* is done by adding an FDB entry with a MAC of 00:00:00:00:00:00.
*/
-static int netlink_vxlan_flood_list_update(struct interface *ifp,
- struct in_addr *vtep_ip, int cmd)
+static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx,
+ int cmd)
{
- struct zebra_ns *zns;
struct {
struct nlmsghdr n;
struct ndmsg ndm;
char buf[256];
} req;
uint8_t dst_mac[6] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
- struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(ifp->vrf_id);
+ const struct ipaddr *addr;
- zns = zvrf->zns;
memset(&req, 0, sizeof(req));
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
@@ -1928,39 +1951,14 @@ static int netlink_vxlan_flood_list_update(struct interface *ifp,
addattr_l(&req.n, sizeof(req), NDA_LLADDR, &dst_mac, 6);
- req.ndm.ndm_ifindex = ifp->ifindex;
- addattr_l(&req.n, sizeof(req), NDA_DST, &vtep_ip->s_addr, 4);
+ req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx);
- return netlink_talk(netlink_talk_filter, &req.n, &zns->netlink_cmd, zns,
- 0);
-}
+ addr = dplane_ctx_neigh_get_ipaddr(ctx);
-/*
- * Add remote VTEP for this VxLAN interface (VNI). In Linux, this involves
- * adding
- * a "flood" MAC FDB entry.
- */
-int kernel_add_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
-{
- if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
- inet_ntoa(*vtep_ip), vni, ifp->name, ifp->ifindex);
+ addattr_l(&req.n, sizeof(req), NDA_DST, &(addr->ipaddr_v4), 4);
- return netlink_vxlan_flood_list_update(ifp, vtep_ip, RTM_NEWNEIGH);
-}
-
-/*
- * Remove remote VTEP for this VxLAN interface (VNI). In Linux, this involves
- * deleting the "flood" MAC FDB entry.
- */
-int kernel_del_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
-{
- if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug(
- "Uninstall %s from flood list for VNI %u intf %s(%u)",
- inet_ntoa(*vtep_ip), vni, ifp->name, ifp->ifindex);
-
- return netlink_vxlan_flood_list_update(ifp, vtep_ip, RTM_DELNEIGH);
+ return netlink_talk_info(netlink_talk_filter, &req.n,
+ dplane_ctx_get_ns(ctx), 0);
}
#ifndef NDA_RTA
@@ -2778,9 +2776,11 @@ int netlink_neigh_change(struct nlmsghdr *h, ns_id_t ns_id)
return 0;
}
-static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags,
- uint16_t state, int cmd)
+/*
+ * Utility neighbor-update function, using info from dplane context.
+ */
+static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx,
+ int cmd)
{
struct {
struct nlmsghdr n;
@@ -2788,15 +2788,23 @@ static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
char buf[256];
} req;
int ipa_len;
-
- struct zebra_ns *zns;
char buf[INET6_ADDRSTRLEN];
char buf2[ETHER_ADDR_STRLEN];
- struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(ifp->vrf_id);
+ const struct ipaddr *ip;
+ const struct ethaddr *mac;
+ uint8_t flags;
+ uint16_t state;
- zns = zvrf->zns;
memset(&req, 0, sizeof(req));
+ ip = dplane_ctx_neigh_get_ipaddr(ctx);
+ mac = dplane_ctx_neigh_get_mac(ctx);
+ if (is_zero_mac(mac))
+ mac = NULL;
+
+ flags = neigh_flags_to_netlink(dplane_ctx_neigh_get_flags(ctx));
+ state = neigh_state_to_netlink(dplane_ctx_neigh_get_state(ctx));
+
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
req.n.nlmsg_flags = NLM_F_REQUEST;
if (cmd == RTM_NEWNEIGH)
@@ -2804,7 +2812,7 @@ static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
req.n.nlmsg_type = cmd; // RTM_NEWNEIGH or RTM_DELNEIGH
req.ndm.ndm_family = IS_IPADDR_V4(ip) ? AF_INET : AF_INET6;
req.ndm.ndm_state = state;
- req.ndm.ndm_ifindex = ifp->ifindex;
+ req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx);
req.ndm.ndm_type = RTN_UNICAST;
req.ndm.ndm_flags = flags;
@@ -2816,13 +2824,16 @@ static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x",
nl_msg_type_to_str(cmd),
- nl_family_to_str(req.ndm.ndm_family), ifp->name,
- ifp->ifindex, ipaddr2str(ip, buf, sizeof(buf)),
+ nl_family_to_str(req.ndm.ndm_family),
+ dplane_ctx_get_ifname(ctx),
+ dplane_ctx_get_ifindex(ctx),
+ ipaddr2str(ip, buf, sizeof(buf)),
mac ? prefix_mac2str(mac, buf2, sizeof(buf2))
- : "null", flags, state);
+ : "null",
+ flags, state);
- return netlink_talk(netlink_talk_filter, &req.n, &zns->netlink_cmd, zns,
- 0);
+ return netlink_talk_info(netlink_talk_filter, &req.n,
+ dplane_ctx_get_ns(ctx), 0);
}
/*
@@ -2833,23 +2844,30 @@ enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx)
return netlink_macfdb_update_ctx(ctx);
}
-int kernel_add_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags)
+enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx)
{
- return netlink_neigh_update2(ifp, ip, mac, flags,
- NUD_NOARP, RTM_NEWNEIGH);
-}
+ int ret = -1;
-int kernel_del_neigh(struct interface *ifp, struct ipaddr *ip)
-{
- return netlink_neigh_update2(ifp, ip, NULL, 0, 0, RTM_DELNEIGH);
-}
+ switch (dplane_ctx_get_op(ctx)) {
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ ret = netlink_neigh_update_ctx(ctx, RTM_NEWNEIGH);
+ break;
+ case DPLANE_OP_NEIGH_DELETE:
+ ret = netlink_neigh_update_ctx(ctx, RTM_DELNEIGH);
+ break;
+ case DPLANE_OP_VTEP_ADD:
+ ret = netlink_vxlan_flood_update_ctx(ctx, RTM_NEWNEIGH);
+ break;
+ case DPLANE_OP_VTEP_DELETE:
+ ret = netlink_vxlan_flood_update_ctx(ctx, RTM_DELNEIGH);
+ break;
+ default:
+ break;
+ }
-int kernel_upd_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags, uint16_t state)
-{
- return netlink_neigh_update2(ifp, ip, mac, flags,
- state, RTM_NEWNEIGH);
+ return (ret == 0 ?
+ ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE);
}
/*
diff --git a/zebra/rt_socket.c b/zebra/rt_socket.c
index 7e9a42a617..dc0f29bdbc 100644
--- a/zebra/rt_socket.c
+++ b/zebra/rt_socket.c
@@ -314,7 +314,7 @@ enum zebra_dplane_result kernel_route_update(struct zebra_dplane_ctx *ctx)
type = dplane_ctx_get_type(ctx);
old_type = dplane_ctx_get_old_type(ctx);
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (dplane_ctx_get_op(ctx) == DPLANE_OP_ROUTE_DELETE) {
if (!RSYSTEM_ROUTE(type))
@@ -371,17 +371,13 @@ int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
return 0;
}
-extern int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *mroute)
-{
- return 0;
-}
-
-int kernel_add_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
+/* NYI on routing-socket platforms, but we've always returned 'success'... */
+enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx)
{
- return 0;
+ return ZEBRA_DPLANE_REQUEST_SUCCESS;
}
-int kernel_del_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
+extern int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *mroute)
{
return 0;
}
@@ -394,17 +390,6 @@ enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx)
return ZEBRA_DPLANE_REQUEST_SUCCESS;
}
-int kernel_add_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags)
-{
- return 0;
-}
-
-int kernel_del_neigh(struct interface *ifp, struct ipaddr *ip)
-{
- return 0;
-}
-
extern int kernel_interface_set_master(struct interface *master,
struct interface *slave)
{
diff --git a/zebra/rtadv.c b/zebra/rtadv.c
index 5841c44b03..b084fb99ca 100644
--- a/zebra/rtadv.c
+++ b/zebra/rtadv.c
@@ -760,7 +760,7 @@ static int rtadv_make_socket(ns_id_t ns_id)
int ret = 0;
struct icmp6_filter filter;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = ns_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, ns_id);
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index b6a8ee950c..fa6a2f62ec 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -2507,7 +2507,7 @@ static void zserv_write_incoming(struct stream *orig, uint16_t command)
snprintf(fname, MAXPATHLEN, "%s/%u", frr_vtydir, command);
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fd = open(fname, O_CREAT | O_WRONLY | O_EXCL, 0644);
}
stream_flush(copy, fd);
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 512a9b4021..2bf541617c 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -148,7 +148,7 @@ struct dplane_intf_info {
};
/*
- * MAC address info for the dataplane.
+ * EVPN MAC address info for the dataplane.
*/
struct dplane_mac_info {
vlanid_t vid;
@@ -159,6 +159,16 @@ struct dplane_mac_info {
};
/*
+ * EVPN neighbor info for the dataplane
+ */
+struct dplane_neigh_info {
+ struct ipaddr ip_addr;
+ struct ethaddr mac;
+ uint32_t flags;
+ uint16_t state;
+};
+
+/*
* The context block used to exchange info about route updates across
* the boundary between the zebra main context (and pthread) and the
* dataplane layer (and pthread).
@@ -204,6 +214,7 @@ struct zebra_dplane_ctx {
struct dplane_pw_info pw;
struct dplane_intf_info intf;
struct dplane_mac_info macinfo;
+ struct dplane_neigh_info neigh;
} u;
/* Namespace info, used especially for netlink kernel communication */
@@ -321,6 +332,9 @@ static struct zebra_dplane_globals {
_Atomic uint32_t dg_macs_in;
_Atomic uint32_t dg_mac_errors;
+ _Atomic uint32_t dg_neighs_in;
+ _Atomic uint32_t dg_neigh_errors;
+
_Atomic uint32_t dg_update_yields;
/* Dataplane pthread */
@@ -365,6 +379,12 @@ static enum zebra_dplane_result mac_update_internal(
enum dplane_op_e op, const struct interface *ifp,
vlanid_t vid, const struct ethaddr *mac,
struct in_addr vtep_ip, bool sticky);
+static enum zebra_dplane_result neigh_update_internal(
+ enum dplane_op_e op,
+ const struct interface *ifp,
+ const struct ethaddr *mac,
+ const struct ipaddr *ip,
+ uint32_t flags, uint16_t state);
/*
* Public APIs
@@ -485,6 +505,11 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
case DPLANE_OP_MAC_INSTALL:
case DPLANE_OP_MAC_DELETE:
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
case DPLANE_OP_NONE:
break;
}
@@ -651,6 +676,22 @@ const char *dplane_op2str(enum dplane_op_e op)
case DPLANE_OP_MAC_DELETE:
ret = "MAC_DELETE";
break;
+
+ case DPLANE_OP_NEIGH_INSTALL:
+ ret = "NEIGH_INSTALL";
+ break;
+ case DPLANE_OP_NEIGH_UPDATE:
+ ret = "NEIGH_UPDATE";
+ break;
+ case DPLANE_OP_NEIGH_DELETE:
+ ret = "NEIGH_DELETE";
+ break;
+ case DPLANE_OP_VTEP_ADD:
+ ret = "VTEP_ADD";
+ break;
+ case DPLANE_OP_VTEP_DELETE:
+ ret = "VTEP_DELETE";
+ break;
}
return ret;
@@ -1231,6 +1272,33 @@ const struct in_addr *dplane_ctx_mac_get_vtep_ip(
return &(ctx->u.macinfo.vtep_ip);
}
+/* Accessors for neighbor information */
+const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return &(ctx->u.neigh.ip_addr);
+}
+
+const struct ethaddr *dplane_ctx_neigh_get_mac(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return &(ctx->u.neigh.mac);
+}
+
+uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->u.neigh.flags;
+}
+
+uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->u.neigh.state;
+}
+
/*
* End of dplane context accessors
*/
@@ -2161,6 +2229,165 @@ mac_update_internal(enum dplane_op_e op,
}
/*
+ * Enqueue evpn neighbor add for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac,
+ uint32_t flags)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+
+ result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL,
+ ifp, mac, ip, flags, 0);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn neighbor update for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac)
+{
+ enum zebra_dplane_result result;
+
+ result = neigh_update_internal(DPLANE_OP_NEIGH_UPDATE,
+ ifp, mac, ip, 0, DPLANE_NUD_PROBE);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn neighbor delete for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
+ const struct ipaddr *ip)
+{
+ enum zebra_dplane_result result;
+
+ result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE,
+ ifp, NULL, ip, 0, 0);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn VTEP add for the dataplane.
+ */
+enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni)
+{
+ enum zebra_dplane_result result;
+ struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
+ struct ipaddr addr;
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
+ inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
+
+ SET_IPADDR_V4(&addr);
+ addr.ipaddr_v4 = *ip;
+
+ result = neigh_update_internal(DPLANE_OP_VTEP_ADD,
+ ifp, &mac, &addr, 0, 0);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn VTEP delete for the dataplane.
+ */
+enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni)
+{
+ enum zebra_dplane_result result;
+ struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
+ struct ipaddr addr;
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "Uninstall %s from flood list for VNI %u intf %s(%u)",
+ inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
+
+ SET_IPADDR_V4(&addr);
+ addr.ipaddr_v4 = *ip;
+
+ result = neigh_update_internal(DPLANE_OP_VTEP_DELETE,
+ ifp, &mac, &addr, 0, 0);
+
+ return result;
+}
+
+/*
+ * Common helper api for evpn neighbor updates
+ */
+static enum zebra_dplane_result
+neigh_update_internal(enum dplane_op_e op,
+ const struct interface *ifp,
+ const struct ethaddr *mac,
+ const struct ipaddr *ip,
+ uint32_t flags, uint16_t state)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+ int ret;
+ struct zebra_dplane_ctx *ctx = NULL;
+ struct zebra_ns *zns;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+ char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];
+
+ zlog_debug("init neigh ctx %s: ifp %s, mac %s, ip %s",
+ dplane_op2str(op),
+ ifp->name,
+ prefix_mac2str(mac, buf1, sizeof(buf1)),
+ ipaddr2str(ip, buf2, sizeof(buf2)));
+ }
+
+ ctx = dplane_ctx_alloc();
+
+ ctx->zd_op = op;
+ ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+ ctx->zd_vrf_id = ifp->vrf_id;
+
+ zns = zebra_ns_lookup(ifp->vrf_id);
+ dplane_ctx_ns_init(ctx, zns, false);
+
+ strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
+ ctx->zd_ifindex = ifp->ifindex;
+
+ /* Init the neighbor-specific data area */
+ memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
+
+ ctx->u.neigh.ip_addr = *ip;
+ if (mac)
+ ctx->u.neigh.mac = *mac;
+ ctx->u.neigh.flags = flags;
+ ctx->u.neigh.state = state;
+
+ /* Enqueue for processing on the dplane pthread */
+ ret = dplane_update_enqueue(ctx);
+
+ /* Increment counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
+ memory_order_relaxed);
+
+ if (ret == AOK)
+ result = ZEBRA_DPLANE_REQUEST_QUEUED;
+ else {
+ /* Error counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
+ memory_order_relaxed);
+ dplane_ctx_free(&ctx);
+ }
+
+ return result;
+}
+
+/*
* Handler for 'show dplane'
*/
int dplane_show_helper(struct vty *vty, bool detailed)
@@ -2223,6 +2450,13 @@ int dplane_show_helper(struct vty *vty, bool detailed)
vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
+ incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
+ memory_order_relaxed);
+ errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
+ memory_order_relaxed);
+ vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
+ vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
+
return CMD_SUCCESS;
}
@@ -2616,7 +2850,7 @@ kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
}
/*
- * Handler for kernel-facing MAC address updates
+ * Handler for kernel-facing EVPN MAC address updates
*/
static enum zebra_dplane_result
kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
@@ -2644,6 +2878,34 @@ kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
}
/*
+ * Handler for kernel-facing EVPN neighbor updates
+ */
+static enum zebra_dplane_result
+kernel_dplane_neigh_update(struct zebra_dplane_ctx *ctx)
+{
+ enum zebra_dplane_result res;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+ char buf[PREFIX_STRLEN];
+
+ ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
+ sizeof(buf));
+
+ zlog_debug("Dplane %s, ip %s, ifindex %u",
+ dplane_op2str(dplane_ctx_get_op(ctx)),
+ buf, dplane_ctx_get_ifindex(ctx));
+ }
+
+ res = kernel_neigh_update_ctx(ctx);
+
+ if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+ atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
+ 1, memory_order_relaxed);
+
+ return res;
+}
+
+/*
* Kernel provider callback
*/
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
@@ -2702,6 +2964,14 @@ static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
res = kernel_dplane_mac_update(ctx);
break;
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
+ res = kernel_dplane_neigh_update(ctx);
+ break;
+
/* Ignore 'notifications' - no-op */
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
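Taken together, the neighbor path now mirrors the existing MAC handling. zebra_vxlan.c enqueues a context through dplane_neigh_add()/dplane_neigh_update()/dplane_neigh_delete() or dplane_vtep_add()/dplane_vtep_delete(); the dplane pthread hands it to kernel_dplane_neigh_update(), which calls the per-platform kernel_neigh_update_ctx() (netlink on Linux, a success stub on routing-socket platforms); the completed context then returns to the zebra main pthread, where rib_process_dplane_results() currently just frees it, since no per-operation result handling exists yet for neighbor or VTEP updates.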
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index 912fda45d3..31f0fc98b3 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -129,8 +129,33 @@ enum dplane_op_e {
/* MAC address update */
DPLANE_OP_MAC_INSTALL,
DPLANE_OP_MAC_DELETE,
+
+ /* EVPN neighbor updates */
+ DPLANE_OP_NEIGH_INSTALL,
+ DPLANE_OP_NEIGH_UPDATE,
+ DPLANE_OP_NEIGH_DELETE,
+
+ /* EVPN VTEP updates */
+ DPLANE_OP_VTEP_ADD,
+ DPLANE_OP_VTEP_DELETE,
};
+/*
+ * The vxlan/evpn neighbor management code needs some values to use
+ * when programming neighbor changes. Offer some platform-neutral values
+ * here for use within the dplane apis and plugins.
+ */
+
+/* Neighbor cache flags */
+#define DPLANE_NTF_EXT_LEARNED 0x01
+#define DPLANE_NTF_ROUTER 0x02
+
+/* Neighbor cache states */
+#define DPLANE_NUD_REACHABLE 0x01
+#define DPLANE_NUD_STALE 0x02
+#define DPLANE_NUD_NOARP 0x04
+#define DPLANE_NUD_PROBE 0x08
+
/* Enable system route notifications */
void dplane_enable_sys_route_notifs(void);
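These DPLANE_NTF_*/DPLANE_NUD_* values let zebra_vxlan.c describe neighbor flags and states without pulling in Linux-specific NTF_*/NUD_* constants (previously guarded by #ifdef GNU_LINUX). The netlink provider translates them back in neigh_flags_to_netlink() and neigh_state_to_netlink(); other providers are free to ignore or remap them.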
@@ -304,6 +329,14 @@ const struct ethaddr *dplane_ctx_mac_get_addr(
const struct in_addr *dplane_ctx_mac_get_vtep_ip(
const struct zebra_dplane_ctx *ctx);
+/* Accessors for neighbor information */
+const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
+ const struct zebra_dplane_ctx *ctx);
+const struct ethaddr *dplane_ctx_neigh_get_mac(
+ const struct zebra_dplane_ctx *ctx);
+uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx);
+uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx);
+
/* Namespace info - esp. for netlink communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
const struct zebra_dplane_ctx *ctx);
@@ -379,6 +412,30 @@ enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
const struct ethaddr *mac,
struct in_addr vtep_ip);
+/*
+ * Enqueue evpn neighbor updates for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac,
+ uint32_t flags);
+enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac);
+enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
+ const struct ipaddr *ip);
+
+/*
+ * Enqueue evpn VTEP updates for the dataplane.
+ */
+enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni);
+enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni);
+
+
/* Retrieve the limit on the number of pending, unprocessed updates. */
uint32_t dplane_get_in_queue_limit(void);
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index eaf43095bc..4144c0afe0 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -1927,6 +1927,9 @@ static inline void zfpm_init_message_format(const char *format)
"FPM protobuf message format is not available");
return;
}
+ flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
+ "FPM protobuf message format is deprecated and scheduled to be removed. "
+ "Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
return;
}
diff --git a/zebra/zebra_mpls_openbsd.c b/zebra/zebra_mpls_openbsd.c
index 9f3ea70c77..fcd476dc2c 100644
--- a/zebra/zebra_mpls_openbsd.c
+++ b/zebra/zebra_mpls_openbsd.c
@@ -119,7 +119,7 @@ static int kernel_send_rtmsg_v4(int action, mpls_label_t in_label,
hdr.rtm_mpls = MPLS_OP_SWAP;
}
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = writev(kr_state.fd, iov, iovcnt);
}
@@ -226,7 +226,7 @@ static int kernel_send_rtmsg_v6(int action, mpls_label_t in_label,
hdr.rtm_mpls = MPLS_OP_SWAP;
}
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = writev(kr_state.fd, iov, iovcnt);
}
diff --git a/zebra/zebra_netns_notify.c b/zebra/zebra_netns_notify.c
index 476638591b..d42cf3d60a 100644
--- a/zebra/zebra_netns_notify.c
+++ b/zebra/zebra_netns_notify.c
@@ -77,7 +77,7 @@ static void zebra_ns_notify_create_context_from_entry_name(const char *name)
if (netnspath == NULL)
return;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ns_id = zebra_ns_id_get(netnspath);
}
if (ns_id == NS_UNKNOWN)
@@ -97,7 +97,7 @@ static void zebra_ns_notify_create_context_from_entry_name(const char *name)
ns_map_nsid_with_external(ns_id, false);
return;
}
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = vrf_netns_handler_create(NULL, vrf, netnspath,
ns_id_external, ns_id);
}
@@ -202,14 +202,14 @@ static int zebra_ns_ready_read(struct thread *t)
netnspath = zns_info->netnspath;
if (--zns_info->retries == 0)
stop_retry = 1;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
err = ns_switch_to_netns(netnspath);
}
if (err < 0)
return zebra_ns_continue_read(zns_info, stop_retry);
/* go back to default ns */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
err = ns_switchback_to_initial();
}
if (err < 0)
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index f4b86f3cfe..ee2956d3ea 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -25,6 +25,7 @@
#include "lib/nexthop.h"
#include "lib/nexthop_group_private.h"
#include "lib/routemap.h"
+#include "lib/mpls.h"
#include "zebra/connected.h"
#include "zebra/debug.h"
@@ -38,6 +39,10 @@ static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
struct nexthop *nexthop)
{
struct nexthop *resolved_hop;
+ uint8_t num_labels = 0;
+ mpls_label_t labels[MPLS_MAX_LABELS];
+ enum lsp_types_t label_type = ZEBRA_LSP_NONE;
+ int i = 0;
resolved_hop = nexthop_new();
SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
@@ -94,11 +99,24 @@ static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
if (newhop->flags & NEXTHOP_FLAG_ONLINK)
resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
- /* Copy labels of the resolved route */
- if (newhop->nh_label)
- nexthop_add_labels(resolved_hop, newhop->nh_label_type,
- newhop->nh_label->num_labels,
- &newhop->nh_label->label[0]);
+ /* Copy labels of the resolved route and the parent resolving to it */
+ if (newhop->nh_label) {
+ for (i = 0; i < newhop->nh_label->num_labels; i++)
+ labels[num_labels++] = newhop->nh_label->label[i];
+ label_type = newhop->nh_label_type;
+ }
+
+ if (nexthop->nh_label) {
+ for (i = 0; i < nexthop->nh_label->num_labels; i++)
+ labels[num_labels++] = nexthop->nh_label->label[i];
+
+ /* If the parent has labels, use its type */
+ label_type = nexthop->nh_label_type;
+ }
+
+ if (num_labels)
+ nexthop_add_labels(resolved_hop, label_type, num_labels,
+ labels);
resolved_hop->rparent = nexthop;
_nexthop_add(&nexthop->resolved, resolved_hop);
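As an illustration (label values invented): if the covering route's nexthop carries label 100 and the recursive nexthop being resolved itself carries label 200, the resolved nexthop now ends up with the stack {100, 200} and, since the parent has labels, the parent's label type; previously only the covering route's {100} was copied.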
@@ -120,6 +138,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
struct nexthop *newhop;
struct interface *ifp;
rib_dest_t *dest;
+ struct zebra_vrf *zvrf;
if ((nexthop->type == NEXTHOP_TYPE_IPV4)
|| nexthop->type == NEXTHOP_TYPE_IPV6)
@@ -194,7 +213,9 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
}
/* Lookup table. */
table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
- if (!table) {
+ /* get zvrf */
+ zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
+ if (!table || !zvrf) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("\t%s: Table not found",
__PRETTY_FUNCTION__);
@@ -224,7 +245,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
/* However, do not resolve over default route unless explicitly
* allowed. */
if (is_default_prefix(&rn->p)
- && !rnh_resolve_via_default(p.family)) {
+ && !rnh_resolve_via_default(zvrf, p.family)) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
"\t:%s: Resolved against default route",
diff --git a/zebra/zebra_ns.c b/zebra/zebra_ns.c
index 94918365a3..37f53bf911 100644
--- a/zebra/zebra_ns.c
+++ b/zebra/zebra_ns.c
@@ -180,7 +180,7 @@ int zebra_ns_init(const char *optional_default_name)
dzns = zebra_ns_alloc();
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ns_id = zebra_ns_id_get_default();
}
ns_id_external = ns_map_nsid_with_external(ns_id, true);
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 335cc8294c..157c67fa62 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -37,6 +37,7 @@
#include "vrf.h"
#include "workqueue.h"
#include "nexthop_group_private.h"
+#include "frr_pthread.h"
#include "zebra/zebra_router.h"
#include "zebra/connected.h"
@@ -3204,12 +3205,10 @@ static int rib_process_dplane_results(struct thread *thread)
TAILQ_INIT(&ctxlist);
/* Take lock controlling queue of results */
- pthread_mutex_lock(&dplane_mutex);
- {
+ frr_with_mutex(&dplane_mutex) {
/* Dequeue list of context structs */
dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
}
- pthread_mutex_unlock(&dplane_mutex);
/* Dequeue context block */
ctx = dplane_ctx_dequeue(&ctxlist);
@@ -3275,10 +3274,19 @@ static int rib_process_dplane_results(struct thread *thread)
zebra_vxlan_handle_result(ctx);
break;
- default:
+ /* Some op codes not handled here */
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
+ case DPLANE_OP_NONE:
/* Don't expect this: just return the struct? */
dplane_ctx_fini(&ctx);
break;
+
} /* Dispatch by op code */
ctx = dplane_ctx_dequeue(&ctxlist);
@@ -3300,12 +3308,10 @@ static int rib_process_dplane_results(struct thread *thread)
static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
{
/* Take lock controlling queue of results */
- pthread_mutex_lock(&dplane_mutex);
- {
+ frr_with_mutex(&dplane_mutex) {
/* Enqueue context blocks */
dplane_ctx_list_append(&rib_dplane_q, ctxlist);
}
- pthread_mutex_unlock(&dplane_mutex);
/* Ensure event is signalled to zebra main pthread */
thread_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index da2fe4a30c..666ebb70e8 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -62,9 +62,6 @@ static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type,
static void print_rnh(struct route_node *rn, struct vty *vty);
static int zebra_client_cleanup_rnh(struct zserv *client);
-int zebra_rnh_ip_default_route = 0;
-int zebra_rnh_ipv6_default_route = 0;
-
void zebra_rnh_init(void)
{
hook_register(zserv_client_close, zebra_client_cleanup_rnh);
@@ -656,7 +653,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi,
* match route to be exact if so specified
*/
if (is_default_prefix(&rn->p)
- && !rnh_resolve_via_default(rn->p.family)) {
+ && !rnh_resolve_via_default(zvrf, rn->p.family)) {
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
zlog_debug(
"\tNot allowed to resolve through default prefix");
@@ -1213,3 +1210,12 @@ static int zebra_client_cleanup_rnh(struct zserv *client)
return 0;
}
+
+int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family)
+{
+ if (((family == AF_INET) && zvrf->zebra_rnh_ip_default_route)
+ || ((family == AF_INET6) && zvrf->zebra_rnh_ipv6_default_route))
+ return 1;
+ else
+ return 0;
+}
diff --git a/zebra/zebra_rnh.h b/zebra/zebra_rnh.h
index c7d2c0d298..6e2dab8d9f 100644
--- a/zebra/zebra_rnh.h
+++ b/zebra/zebra_rnh.h
@@ -29,20 +29,8 @@
extern "C" {
#endif
-extern int zebra_rnh_ip_default_route;
-extern int zebra_rnh_ipv6_default_route;
-
extern void zebra_rnh_init(void);
-static inline int rnh_resolve_via_default(int family)
-{
- if (((family == AF_INET) && zebra_rnh_ip_default_route)
- || ((family == AF_INET6) && zebra_rnh_ipv6_default_route))
- return 1;
- else
- return 0;
-}
-
static inline const char *rnh_type2str(rnh_type_t type)
{
switch (type) {
@@ -72,6 +60,8 @@ extern void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
rnh_type_t type, struct prefix *p);
extern char *rnh_str(struct rnh *rnh, char *buf, int size);
+extern int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family);
+
#ifdef __cplusplus
}
#endif
diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c
index cee2c8980f..88d2091394 100644
--- a/zebra/zebra_routemap.c
+++ b/zebra/zebra_routemap.c
@@ -64,7 +64,7 @@ static int zebra_route_match_add(struct vty *vty, const char *command,
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
ret = route_map_add_match(index, command, arg, type);
@@ -82,6 +82,11 @@ static int zebra_route_match_add(struct vty *vty, const char *command,
route_map_upd8_dependency(type, arg, index->map->name);
}
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
return retval;
@@ -92,7 +97,7 @@ static int zebra_route_match_delete(struct vty *vty, const char *command,
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
@@ -125,6 +130,11 @@ static int zebra_route_match_delete(struct vty *vty, const char *command,
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c
index fcc94a7be9..72d0b6866d 100644
--- a/zebra/zebra_vrf.c
+++ b/zebra/zebra_vrf.c
@@ -488,6 +488,11 @@ static int vrf_config_write(struct vty *vty)
if (zvrf_id(zvrf) == VRF_DEFAULT) {
if (zvrf->l3vni)
vty_out(vty, "vni %u\n", zvrf->l3vni);
+ if (zvrf->zebra_rnh_ip_default_route)
+ vty_out(vty, "ip nht resolve-via-default\n");
+
+ if (zvrf->zebra_rnh_ipv6_default_route)
+ vty_out(vty, "ipv6 nht resolve-via-default\n");
} else {
vty_frame(vty, "vrf %s\n", zvrf_name(zvrf));
if (zvrf->l3vni)
@@ -497,8 +502,14 @@ static int vrf_config_write(struct vty *vty)
? " prefix-routes-only"
: "");
zebra_ns_config_write(vty, (struct ns *)vrf->ns_ctxt);
+ if (zvrf->zebra_rnh_ip_default_route)
+ vty_out(vty, " ip nht resolve-via-default\n");
+
+ if (zvrf->zebra_rnh_ipv6_default_route)
+ vty_out(vty, " ipv6 nht resolve-via-default\n");
}
+
zebra_routemap_config_write_protocol(vty, zvrf);
if (zvrf_id(zvrf) != VRF_DEFAULT)
diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h
index f92e1a010b..6c80f9bcb4 100644
--- a/zebra/zebra_vrf.h
+++ b/zebra/zebra_vrf.h
@@ -174,6 +174,9 @@ struct zebra_vrf {
#if defined(HAVE_RTADV)
struct rtadv rtadv;
#endif /* HAVE_RTADV */
+
+ int zebra_rnh_ip_default_route;
+ int zebra_rnh_ipv6_default_route;
};
#define PROTO_RM_NAME(zvrf, afi, rtype) zvrf->proto_rm[afi][rtype].name
#define NHT_RM_NAME(zvrf, afi, rtype) zvrf->nht_rm[afi][rtype].name
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 5c0dc27380..38de01e228 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -455,6 +455,10 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
re->status);
json_object_int_add(json_route, "internalFlags",
re->flags);
+ json_object_int_add(json_route, "internalNextHopNum",
+ re->nexthop_num);
+ json_object_int_add(json_route, "internalNextHopActiveNum",
+ re->nexthop_active_num);
if (uptime < ONE_DAY_SECOND)
sprintf(buf, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
tm->tm_sec);
@@ -1086,10 +1090,10 @@ DEFUN (ip_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (zebra_rnh_ip_default_route)
+ if (zvrf->zebra_rnh_ip_default_route)
return CMD_SUCCESS;
- zebra_rnh_ip_default_route = 1;
+ zvrf->zebra_rnh_ip_default_route = 1;
zebra_evaluate_rnh(zvrf, AFI_IP, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
@@ -1108,10 +1112,10 @@ DEFUN (no_ip_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (!zebra_rnh_ip_default_route)
+ if (!zvrf->zebra_rnh_ip_default_route)
return CMD_SUCCESS;
- zebra_rnh_ip_default_route = 0;
+ zvrf->zebra_rnh_ip_default_route = 0;
zebra_evaluate_rnh(zvrf, AFI_IP, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
}
@@ -1128,10 +1132,10 @@ DEFUN (ipv6_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (zebra_rnh_ipv6_default_route)
+ if (zvrf->zebra_rnh_ipv6_default_route)
return CMD_SUCCESS;
- zebra_rnh_ipv6_default_route = 1;
+ zvrf->zebra_rnh_ipv6_default_route = 1;
zebra_evaluate_rnh(zvrf, AFI_IP6, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
}
@@ -1150,10 +1154,10 @@ DEFUN (no_ipv6_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (!zebra_rnh_ipv6_default_route)
+ if (!zvrf->zebra_rnh_ipv6_default_route)
return CMD_SUCCESS;
- zebra_rnh_ipv6_default_route = 0;
+ zvrf->zebra_rnh_ipv6_default_route = 0;
zebra_evaluate_rnh(zvrf, AFI_IP6, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
}
@@ -2625,12 +2629,6 @@ static int config_write_protocol(struct vty *vty)
if (allow_delete)
vty_out(vty, "allow-external-route-update\n");
- if (zebra_rnh_ip_default_route)
- vty_out(vty, "ip nht resolve-via-default\n");
-
- if (zebra_rnh_ipv6_default_route)
- vty_out(vty, "ipv6 nht resolve-via-default\n");
-
if (zrouter.ribq->spec.hold != ZEBRA_RIB_PROCESS_HOLD_TIME)
vty_out(vty, "zebra work-queue %u\n", zrouter.ribq->spec.hold);
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index bb31247b6a..bb8b61e7e3 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -2515,9 +2515,7 @@ static int zvni_neigh_install(zebra_vni_t *zvni, zebra_neigh_t *n)
struct zebra_if *zif;
struct zebra_l2info_vxlan *vxl;
struct interface *vlan_if;
-#ifdef GNU_LINUX
- uint8_t flags;
-#endif
+ int flags;
int ret = 0;
if (!(n->flags & ZEBRA_NEIGH_REMOTE))
@@ -2531,13 +2529,14 @@ static int zvni_neigh_install(zebra_vni_t *zvni, zebra_neigh_t *n)
vlan_if = zvni_map_to_svi(vxl->access_vlan, zif->brslave_info.br_if);
if (!vlan_if)
return -1;
-#ifdef GNU_LINUX
- flags = NTF_EXT_LEARNED;
+
+ flags = DPLANE_NTF_EXT_LEARNED;
if (n->flags & ZEBRA_NEIGH_ROUTER_FLAG)
- flags |= NTF_ROUTER;
+ flags |= DPLANE_NTF_ROUTER;
ZEBRA_NEIGH_SET_ACTIVE(n);
- ret = kernel_add_neigh(vlan_if, &n->ip, &n->emac, flags);
-#endif
+
+ dplane_neigh_add(vlan_if, &n->ip, &n->emac, flags);
+
return ret;
}
@@ -2569,7 +2568,10 @@ static int zvni_neigh_uninstall(zebra_vni_t *zvni, zebra_neigh_t *n)
ZEBRA_NEIGH_SET_INACTIVE(n);
n->loc_seq = 0;
- return kernel_del_neigh(vlan_if, &n->ip);
+
+ dplane_neigh_delete(vlan_if, &n->ip);
+
+ return 0;
}
/*
@@ -2590,12 +2592,9 @@ static int zvni_neigh_probe(zebra_vni_t *zvni, zebra_neigh_t *n)
if (!vlan_if)
return -1;
-#ifdef GNU_LINUX
- return kernel_upd_neigh(vlan_if, &n->ip, &n->emac,
- 0, NUD_PROBE);
-#else
+ dplane_neigh_update(vlan_if, &n->ip, &n->emac);
+
return 0;
-#endif
}
/*
@@ -4291,9 +4290,13 @@ static int zvni_vtep_del_all(zebra_vni_t *zvni, int uninstall)
static int zvni_vtep_install(zebra_vni_t *zvni, zebra_vtep_t *zvtep)
{
if (is_vxlan_flooding_head_end() &&
- (zvtep->flood_control == VXLAN_FLOOD_HEAD_END_REPL))
- return kernel_add_vtep(zvni->vni, zvni->vxlan_if,
- &zvtep->vtep_ip);
+ (zvtep->flood_control == VXLAN_FLOOD_HEAD_END_REPL)) {
+ if (ZEBRA_DPLANE_REQUEST_FAILURE ==
+ dplane_vtep_add(zvni->vxlan_if,
+ &zvtep->vtep_ip, zvni->vni))
+ return -1;
+ }
+
return 0;
}
@@ -4308,7 +4311,11 @@ static int zvni_vtep_uninstall(zebra_vni_t *zvni, struct in_addr *vtep_ip)
return -1;
}
- return kernel_del_vtep(zvni->vni, zvni->vxlan_if, vtep_ip);
+ if (ZEBRA_DPLANE_REQUEST_FAILURE ==
+ dplane_vtep_delete(zvni->vxlan_if, vtep_ip, zvni->vni))
+ return -1;
+
+ return 0;
}
/*
@@ -4678,9 +4685,7 @@ static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
*/
static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
{
-#ifdef GNU_LINUX
uint8_t flags;
-#endif
int ret = 0;
if (!is_l3vni_oper_up(zl3vni))
@@ -4689,12 +4694,13 @@ static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
if (!(n->flags & ZEBRA_NEIGH_REMOTE)
|| !(n->flags & ZEBRA_NEIGH_REMOTE_NH))
return 0;
-#ifdef GNU_LINUX
- flags = NTF_EXT_LEARNED;
+
+ flags = DPLANE_NTF_EXT_LEARNED;
if (n->flags & ZEBRA_NEIGH_ROUTER_FLAG)
- flags |= NTF_ROUTER;
- ret = kernel_add_neigh(zl3vni->svi_if, &n->ip, &n->emac, flags);
-#endif
+ flags |= DPLANE_NTF_ROUTER;
+
+ dplane_neigh_add(zl3vni->svi_if, &n->ip, &n->emac, flags);
+
return ret;
}
@@ -4710,7 +4716,9 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
if (!zl3vni->svi_if || !if_is_operative(zl3vni->svi_if))
return 0;
- return kernel_del_neigh(zl3vni->svi_if, &n->ip);
+ dplane_neigh_delete(zl3vni->svi_if, &n->ip);
+
+ return 0;
}
/* add remote vtep as a neigh entry */
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 70b4594813..c008441d6a 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -231,13 +231,11 @@ static int zserv_write(struct thread *thread)
cache = stream_fifo_new();
- pthread_mutex_lock(&client->obuf_mtx);
- {
+ frr_with_mutex(&client->obuf_mtx) {
while (stream_fifo_head(client->obuf_fifo))
stream_fifo_push(cache,
stream_fifo_pop(client->obuf_fifo));
}
- pthread_mutex_unlock(&client->obuf_mtx);
if (cache->tail) {
msg = cache->tail;
@@ -427,13 +425,11 @@ static int zserv_read(struct thread *thread)
memory_order_relaxed);
/* publish read packets on client's input queue */
- pthread_mutex_lock(&client->ibuf_mtx);
- {
+ frr_with_mutex(&client->ibuf_mtx) {
while (cache->head)
stream_fifo_push(client->ibuf_fifo,
stream_fifo_pop(cache));
}
- pthread_mutex_unlock(&client->ibuf_mtx);
/* Schedule job to process those packets */
zserv_event(client, ZSERV_PROCESS_MESSAGES);
@@ -499,8 +495,7 @@ static int zserv_process_messages(struct thread *thread)
uint32_t p2p = zrouter.packets_to_process;
bool need_resched = false;
- pthread_mutex_lock(&client->ibuf_mtx);
- {
+ frr_with_mutex(&client->ibuf_mtx) {
uint32_t i;
for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
++i) {
@@ -516,7 +511,6 @@ static int zserv_process_messages(struct thread *thread)
if (stream_fifo_head(client->ibuf_fifo))
need_resched = true;
}
- pthread_mutex_unlock(&client->ibuf_mtx);
while (stream_fifo_head(cache)) {
msg = stream_fifo_pop(cache);
@@ -535,11 +529,9 @@ static int zserv_process_messages(struct thread *thread)
int zserv_send_message(struct zserv *client, struct stream *msg)
{
- pthread_mutex_lock(&client->obuf_mtx);
- {
+ frr_with_mutex(&client->obuf_mtx) {
stream_fifo_push(client->obuf_fifo, msg);
}
- pthread_mutex_unlock(&client->obuf_mtx);
zserv_client_event(client, ZSERV_CLIENT_WRITE);
@@ -790,7 +782,7 @@ void zserv_start(char *path)
setsockopt_so_recvbuf(zsock, 1048576);
setsockopt_so_sendbuf(zsock, 1048576);
- frr_elevate_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) {
+ frr_with_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) {
ret = bind(zsock, (struct sockaddr *)&sa, sa_len);
}
if (ret < 0) {