-rw-r--r--  .clang-format | 2
-rw-r--r--  README.md | 9
-rw-r--r--  bfdd/bfd.c | 2
-rw-r--r--  bfdd/bfd_packet.c | 16
-rw-r--r--  bgpd/bgp_advertise.c | 1
-rw-r--r--  bgpd/bgp_advertise.h | 3
-rw-r--r--  bgpd/bgp_attr.c | 44
-rw-r--r--  bgpd/bgp_attr.h | 5
-rw-r--r--  bgpd/bgp_bmp.c | 2245
-rw-r--r--  bgpd/bgp_bmp.h | 303
-rw-r--r--  bgpd/bgp_dump.c | 11
-rw-r--r--  bgpd/bgp_dump.h | 2
-rw-r--r--  bgpd/bgp_evpn.c | 42
-rw-r--r--  bgpd/bgp_evpn_private.h | 5
-rw-r--r--  bgpd/bgp_evpn_vty.c | 9
-rw-r--r--  bgpd/bgp_fsm.c | 30
-rw-r--r--  bgpd/bgp_fsm.h | 1
-rw-r--r--  bgpd/bgp_io.c | 12
-rw-r--r--  bgpd/bgp_keepalives.c | 21
-rw-r--r--  bgpd/bgp_mplsvpn.c | 10
-rw-r--r--  bgpd/bgp_network.c | 18
-rw-r--r--  bgpd/bgp_open.c | 2
-rw-r--r--  bgpd/bgp_packet.c | 24
-rw-r--r--  bgpd/bgp_route.c | 106
-rw-r--r--  bgpd/bgp_route.h | 9
-rw-r--r--  bgpd/bgp_routemap.c | 60
-rw-r--r--  bgpd/bgp_rpki.c | 102
-rw-r--r--  bgpd/bgp_snmp.c | 6
-rw-r--r--  bgpd/bgp_vty.c | 783
-rw-r--r--  bgpd/bgp_vty.h | 5
-rw-r--r--  bgpd/bgpd.c | 8
-rw-r--r--  bgpd/bgpd.h | 8
-rw-r--r--  bgpd/rfapi/rfapi.c | 8
-rw-r--r--  bgpd/rfapi/vnc_export_bgp.c | 8
-rw-r--r--  bgpd/rfapi/vnc_zebra.c | 4
-rw-r--r--  bgpd/subdir.am | 11
-rwxr-xr-x  configure.ac | 77
-rw-r--r--  debian/frr-pythontools.install | 1
-rw-r--r--  debian/frr.install | 1
-rwxr-xr-x  debian/rules | 1
-rw-r--r--  doc/developer/library.rst | 1
-rw-r--r--  doc/developer/locking.rst | 73
-rw-r--r--  doc/developer/subdir.am | 1
-rw-r--r--  doc/developer/workflow.rst | 22
-rw-r--r--  doc/user/bgp.rst | 50
-rw-r--r--  doc/user/bmp.rst | 170
-rw-r--r--  doc/user/eigrpd.rst | 26
-rw-r--r--  doc/user/index.rst | 1
-rw-r--r--  doc/user/ospf_fundamentals.rst | 2
-rw-r--r--  doc/user/pbr.rst | 6
-rw-r--r--  doc/user/rpki.rst | 25
-rw-r--r--  doc/user/subdir.am | 1
-rw-r--r--  doc/user/zebra.rst | 64
-rw-r--r--  eigrpd/eigrp_cli.c | 26
-rw-r--r--  eigrpd/eigrp_filter.c | 16
-rw-r--r--  eigrpd/eigrp_filter.h | 10
-rw-r--r--  eigrpd/eigrp_fsm.c | 28
-rw-r--r--  eigrpd/eigrp_hello.c | 32
-rw-r--r--  eigrpd/eigrp_interface.c | 11
-rw-r--r--  eigrpd/eigrp_main.c | 2
-rw-r--r--  eigrpd/eigrp_neighbor.c | 25
-rw-r--r--  eigrpd/eigrp_neighbor.h | 33
-rw-r--r--  eigrpd/eigrp_network.c | 13
-rw-r--r--  eigrpd/eigrp_network.h | 2
-rw-r--r--  eigrpd/eigrp_northbound.c | 20
-rw-r--r--  eigrpd/eigrp_packet.c | 21
-rw-r--r--  eigrpd/eigrp_routemap.c | 26
-rw-r--r--  eigrpd/eigrp_structs.h | 4
-rw-r--r--  eigrpd/eigrp_topology.c | 53
-rw-r--r--  eigrpd/eigrp_topology.h | 43
-rw-r--r--  eigrpd/eigrp_update.c | 21
-rw-r--r--  eigrpd/eigrp_vrf.c | 50
-rw-r--r--  eigrpd/eigrp_vrf.h | 23
-rw-r--r--  eigrpd/eigrp_vty.c | 130
-rw-r--r--  eigrpd/eigrp_zebra.c | 40
-rw-r--r--  eigrpd/eigrp_zebra.h | 7
-rw-r--r--  eigrpd/eigrpd.c | 42
-rw-r--r--  eigrpd/eigrpd.h | 4
-rw-r--r--  eigrpd/subdir.am | 2
-rw-r--r--  isisd/isis_bpf.c | 2
-rw-r--r--  isisd/isis_dlpi.c | 2
-rw-r--r--  isisd/isis_memory.c | 1
-rw-r--r--  isisd/isis_memory.h | 1
-rw-r--r--  isisd/isis_northbound.c | 88
-rw-r--r--  isisd/isis_pfpacket.c | 2
-rw-r--r--  isisd/isis_route.c | 249
-rw-r--r--  isisd/isis_route.h | 12
-rw-r--r--  isisd/isis_tlvs.c | 18
-rw-r--r--  isisd/isis_zebra.c | 60
-rw-r--r--  isisd/isisd.c | 1
-rw-r--r--  isisd/isisd.h | 3
-rw-r--r--  ldpd/l2vpn.c | 8
-rw-r--r--  ldpd/lde.c | 15
-rw-r--r--  ldpd/lde.h | 9
-rw-r--r--  ldpd/lde_lib.c | 24
-rw-r--r--  ldpd/ldp_zebra.c | 71
-rw-r--r--  ldpd/ldpd.h | 3
-rw-r--r--  ldpd/socket.c | 8
-rw-r--r--  lib/command.c | 75
-rw-r--r--  lib/command.h | 1
-rw-r--r--  lib/compiler.h | 43
-rw-r--r--  lib/ferr.c | 21
-rw-r--r--  lib/frr_pthread.c | 24
-rw-r--r--  lib/frr_pthread.h | 48
-rw-r--r--  lib/frrcu.c | 5
-rw-r--r--  lib/hash.c | 9
-rw-r--r--  lib/log.c | 156
-rw-r--r--  lib/monotime.h | 14
-rw-r--r--  lib/mpls.h | 2
-rw-r--r--  lib/nexthop.c | 9
-rw-r--r--  lib/nexthop.h | 3
-rw-r--r--  lib/nexthop_group.c | 25
-rw-r--r--  lib/northbound.c | 13
-rw-r--r--  lib/northbound_sysrepo.c | 2
-rw-r--r--  lib/prefix.c | 2
-rw-r--r--  lib/prefix.h | 2
-rw-r--r--  lib/privs.c | 9
-rw-r--r--  lib/privs.h | 8
-rw-r--r--  lib/pullwr.c | 275
-rw-r--r--  lib/pullwr.h | 110
-rw-r--r--  lib/routemap.c | 53
-rw-r--r--  lib/routemap.h | 25
-rw-r--r--  lib/stream.c | 17
-rw-r--r--  lib/subdir.am | 2
-rw-r--r--  lib/thread.c | 120
-rw-r--r--  lib/vrf.c | 2
-rw-r--r--  lib/vty.c | 9
-rw-r--r--  lib/yang_wrappers.c | 53
-rw-r--r--  lib/yang_wrappers.h | 8
-rw-r--r--  lib/zclient.c | 225
-rw-r--r--  lib/zclient.h | 29
-rw-r--r--  lib/zebra.h | 1
-rw-r--r--  ospf6d/ospf6_asbr.c | 25
-rw-r--r--  ospf6d/ospf6_network.c | 2
-rw-r--r--  ospfd/ospf_interface.c | 46
-rw-r--r--  ospfd/ospf_interface.h | 1
-rw-r--r--  ospfd/ospf_ism.h | 5
-rw-r--r--  ospfd/ospf_network.c | 2
-rw-r--r--  ospfd/ospf_packet.c | 69
-rw-r--r--  ospfd/ospf_packet.h | 6
-rw-r--r--  ospfd/ospf_sr.c | 95
-rw-r--r--  ospfd/ospf_sr.h | 3
-rw-r--r--  ospfd/ospfd.c | 2
-rw-r--r--  pbrd/pbr_map.c | 10
-rw-r--r--  pbrd/pbr_map.h | 3
-rw-r--r--  pbrd/pbr_vty.c | 33
-rw-r--r--  pbrd/pbr_zebra.c | 2
-rw-r--r--  pimd/pim_assert.c | 4
-rw-r--r--  pimd/pim_mroute.c | 6
-rw-r--r--  pimd/pim_msdp_socket.c | 2
-rw-r--r--  pimd/pim_sock.c | 6
-rw-r--r--  pimd/pim_vty.c | 4
-rw-r--r--  pimd/pimd.h | 3
-rw-r--r--  python/clidef.py | 1
-rw-r--r--  redhat/frr.spec.in | 4
-rw-r--r--  ripd/ripd.c | 4
-rw-r--r--  ripngd/ripng_interface.c | 2
-rw-r--r--  ripngd/ripngd.c | 5
-rw-r--r--  staticd/static_routes.c | 3
-rw-r--r--  staticd/static_vty.c | 18
-rw-r--r--  staticd/static_zebra.c | 33
-rw-r--r--  tests/bgpd/test_peer_attr.c | 6
-rw-r--r--  tests/lib/test_privs.c | 4
-rw-r--r--  tests/topotests/Dockerfile | 9
-rw-r--r--  tests/topotests/all-protocol-startup/r1/ipv4_routes.ref | 1
-rw-r--r--  tests/topotests/all-protocol-startup/r1/ipv6_routes.ref | 3
-rw-r--r--  tests/topotests/all-protocol-startup/r1/zebra.conf | 7
-rwxr-xr-x  tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py | 2
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json | 664
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json | 674
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py | 817
-rwxr-xr-x  tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py | 813
-rwxr-xr-x  tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py | 6
-rwxr-xr-x  tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py | 34
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/__init__.py | 0
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf | 10
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/r1/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf | 4
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/r2/zebra.conf | 6
-rw-r--r--  tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py | 131
-rw-r--r--  tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py | 2
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py | 14
-rw-r--r--  tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py | 8
-rw-r--r--  tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py | 8
-rwxr-xr-x  tests/topotests/docker/inner/compile_frr.sh | 1
-rwxr-xr-x  tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py | 2
-rw-r--r--  tests/topotests/ldp-topo1/r1/ip_mpls_route.ref-1 | 5
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_ipv4_route.ref-1 | 7
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_ldp_binding.ref-1 | 42
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_ldp_discovery.ref-1 | 7
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_ldp_interface.ref-1 | 2
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_ldp_neighbor.ref-1 | 8
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_table.ref | 16
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_table.ref-1 | 8
-rw-r--r--  tests/topotests/ldp-topo1/r1/show_mpls_table.ref-no-impl-null | 8
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_ipv4_route.ref-1 | 7
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_ldp_binding.ref-1 | 56
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_ldp_discovery.ref-1 | 12
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_ldp_interface.ref-1 | 3
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_ldp_neighbor.ref-1 | 26
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_table.ref | 14
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_table.ref-1 | 7
-rw-r--r--  tests/topotests/ldp-topo1/r2/show_mpls_table.ref-no-impl-null | 7
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_ipv4_route.ref-1 | 7
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_ldp_binding.ref-1 | 49
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_ldp_discovery.ref-1 | 9
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_ldp_interface.ref-1 | 2
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_ldp_neighbor.ref-1 | 17
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_table.ref | 20
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_table.ref-1 | 10
-rw-r--r--  tests/topotests/ldp-topo1/r3/show_mpls_table.ref-no-impl-null | 10
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_ipv4_route.ref-1 | 7
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_ldp_binding.ref-1 | 49
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_ldp_discovery.ref-1 | 9
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_ldp_interface.ref-1 | 2
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_ldp_neighbor.ref-1 | 17
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_table.ref | 18
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_table.ref-1 | 9
-rw-r--r--  tests/topotests/ldp-topo1/r4/show_mpls_table.ref-no-impl-null | 9
-rwxr-xr-x  tests/topotests/ldp-topo1/test_ldp_topo1.py | 119
-rw-r--r--  tests/topotests/lib/bgp.py | 563
-rw-r--r--  tests/topotests/lib/common_config.py | 417
-rw-r--r--  tests/topotests/lib/topojson.py | 25
-rw-r--r--  tests/topotests/ospf-sr-topo1/r1/zebra_mpls.json | 12
-rw-r--r--  tests/topotests/ospf-sr-topo1/r2/zebra_mpls.json | 18
-rw-r--r--  tests/topotests/ospf-sr-topo1/r3/zebra_mpls.json | 10
-rw-r--r--  tests/topotests/ospf-sr-topo1/r4/zebra_mpls.json | 12
-rw-r--r--  tests/topotests/pytest.ini | 4
-rwxr-xr-x  tools/checkpatch.pl | 29
-rw-r--r--  tools/coccinelle/frr_with_mutex.cocci | 23
-rw-r--r--  tools/coccinelle/zprivs.cocci | 12
-rw-r--r--  tools/etc/frr/support_bundle_commands.conf | 10
-rwxr-xr-x [-rw-r--r--]  tools/generate_support_bundle.py | 2
-rw-r--r--  tools/subdir.am | 2
-rw-r--r--  vrrpd/vrrp.c | 9
-rw-r--r--  vrrpd/vrrp_arp.c | 2
-rw-r--r--  vrrpd/vrrp_ndisc.c | 3
-rw-r--r--  vtysh/vtysh.c | 107
-rw-r--r--  yang/frr-eigrpd.yang | 9
-rw-r--r--  yang/frr-interface.yang | 13
-rw-r--r--  yang/frr-isisd.yang | 7
-rw-r--r--  yang/frr-ripd.yang | 27
-rw-r--r--  yang/frr-ripngd.yang | 23
-rw-r--r--  yang/libyang_plugins/frr_user_types.c | 18
-rw-r--r--  zebra/connected.c | 2
-rw-r--r--  zebra/if_ioctl_solaris.c | 6
-rw-r--r--  zebra/if_netlink.c | 2
-rw-r--r--  zebra/ioctl.c | 6
-rw-r--r--  zebra/ioctl_solaris.c | 4
-rw-r--r--  zebra/ipforward_proc.c | 8
-rw-r--r--  zebra/ipforward_solaris.c | 2
-rw-r--r--  zebra/ipforward_sysctl.c | 10
-rw-r--r--  zebra/irdp_main.c | 2
-rw-r--r--  zebra/kernel_netlink.c | 12
-rw-r--r--  zebra/kernel_socket.c | 2
-rw-r--r--  zebra/rt.h | 11
-rw-r--r--  zebra/rt_netlink.c | 294
-rw-r--r--  zebra/rt_socket.c | 25
-rw-r--r--  zebra/rtadv.c | 2
-rw-r--r--  zebra/zapi_msg.c | 197
-rw-r--r--  zebra/zebra_dplane.c | 294
-rw-r--r--  zebra/zebra_dplane.h | 60
-rw-r--r--  zebra/zebra_fpm.c | 3
-rw-r--r--  zebra/zebra_mpls.c | 185
-rw-r--r--  zebra/zebra_mpls.h | 34
-rw-r--r--  zebra/zebra_mpls_openbsd.c | 4
-rw-r--r--  zebra/zebra_netns_notify.c | 8
-rw-r--r--  zebra/zebra_nhg.c | 68
-rw-r--r--  zebra/zebra_ns.c | 2
-rw-r--r--  zebra/zebra_rib.c | 31
-rw-r--r--  zebra/zebra_rnh.c | 21
-rw-r--r--  zebra/zebra_rnh.h | 14
-rw-r--r--  zebra/zebra_routemap.c | 14
-rw-r--r--  zebra/zebra_vrf.c | 11
-rw-r--r--  zebra/zebra_vrf.h | 3
-rw-r--r--  zebra/zebra_vty.c | 58
-rw-r--r--  zebra/zebra_vxlan.c | 169
-rw-r--r--  zebra/zserv.c | 18
278 files changed, 11092 insertions(+), 3197 deletions(-)
diff --git a/.clang-format b/.clang-format
index 4bd962747f..654577d936 100644
--- a/.clang-format
+++ b/.clang-format
@@ -28,6 +28,8 @@ ForEachMacros:
- frr_each
- frr_each_safe
- frr_each_from
+ - frr_with_mutex
+ - frr_with_privs
- LIST_FOREACH
- LIST_FOREACH_SAFE
- SLIST_FOREACH
diff --git a/README.md b/README.md
index e8c775684a..bb13481cf2 100644
--- a/README.md
+++ b/README.md
@@ -27,14 +27,15 @@ FRR currently supports the following protocols:
Installation & Use
------------------
-Packages are available for various distributions on our
+For source tarballs, see the
[releases page](https://github.com/FRRouting/frr/releases).
-Snaps are also available [here](https://snapcraft.io/frr).
+For Debian and its derivatives, use the APT repository at
+[https://deb.frrouting.org/](https://deb.frrouting.org/).
Instructions on building and installing from source for supported platforms may
-be found
-[here](http://docs.frrouting.org/projects/dev-guide/en/latest/building.html).
+be found in the
+[developer docs](http://docs.frrouting.org/projects/dev-guide/en/latest/building.html).
Once installed, please refer to the [user guide](http://docs.frrouting.org/)
for instructions on use.
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index 44589566ba..feb9827ce1 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -1714,6 +1714,8 @@ static int bfd_vrf_disable(struct vrf *vrf)
socket_close(&bvrf->bg_mhop);
socket_close(&bvrf->bg_shop6);
socket_close(&bvrf->bg_mhop6);
+ socket_close(&bvrf->bg_echo);
+ socket_close(&bvrf->bg_echov6);
/* free context */
XFREE(MTYPE_BFDD_VRF, bvrf);
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index d68a1ad5fd..7fbe6db163 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -894,7 +894,7 @@ int bp_udp_shop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -909,7 +909,7 @@ int bp_udp_mhop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -934,7 +934,7 @@ int bp_peer_socket(const struct bfd_session *bs)
&& bs->key.vrfname[0])
device_to_bind = (const char *)bs->key.vrfname;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET, SOCK_DGRAM, PF_UNSPEC,
bs->vrf->vrf_id, device_to_bind);
}
@@ -1001,7 +1001,7 @@ int bp_peer_socketv6(const struct bfd_session *bs)
&& bs->key.vrfname[0])
device_to_bind = (const char *)bs->key.vrfname;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET6, SOCK_DGRAM, PF_UNSPEC,
bs->vrf->vrf_id, device_to_bind);
}
@@ -1121,7 +1121,7 @@ int bp_udp6_shop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET6, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -1137,7 +1137,7 @@ int bp_udp6_mhop(vrf_id_t vrf_id)
{
int sd;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
sd = vrf_socket(AF_INET6, SOCK_DGRAM, PF_UNSPEC, vrf_id, NULL);
}
if (sd == -1)
@@ -1153,7 +1153,7 @@ int bp_echo_socket(vrf_id_t vrf_id)
{
int s;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
s = vrf_socket(AF_INET, SOCK_DGRAM, 0, vrf_id, NULL);
}
if (s == -1)
@@ -1169,7 +1169,7 @@ int bp_echov6_socket(vrf_id_t vrf_id)
{
int s;
- frr_elevate_privs(&bglobal.bfdd_privs) {
+ frr_with_privs(&bglobal.bfdd_privs) {
s = vrf_socket(AF_INET6, SOCK_DGRAM, 0, vrf_id, NULL);
}
if (s == -1)
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index 497fb0749e..76a65f7f04 100644
--- a/bgpd/bgp_advertise.c
+++ b/bgpd/bgp_advertise.c
@@ -194,6 +194,7 @@ void bgp_adj_in_set(struct bgp_node *rn, struct peer *peer, struct attr *attr,
adj = XCALLOC(MTYPE_BGP_ADJ_IN, sizeof(struct bgp_adj_in));
adj->peer = peer_lock(peer); /* adj_in peer reference */
adj->attr = bgp_attr_intern(attr);
+ adj->uptime = bgp_clock();
adj->addpath_rx_id = addpath_id;
BGP_ADJ_IN_ADD(rn, adj);
bgp_lock_node(rn);
diff --git a/bgpd/bgp_advertise.h b/bgpd/bgp_advertise.h
index 1b55b6e64b..c983598756 100644
--- a/bgpd/bgp_advertise.h
+++ b/bgpd/bgp_advertise.h
@@ -101,6 +101,9 @@ struct bgp_adj_in {
/* Received attribute. */
struct attr *attr;
+ /* timestamp (monotime) */
+ time_t uptime;
+
/* Addpath identifier */
uint32_t addpath_rx_id;
};
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index e21c84355e..ab9dc3092a 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -724,10 +724,13 @@ struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin,
struct community *community,
struct ecommunity *ecommunity,
struct lcommunity *lcommunity,
- int as_set, uint8_t atomic_aggregate)
+ struct bgp_aggregate *aggregate,
+ uint8_t atomic_aggregate,
+ struct prefix *p)
{
struct attr attr;
struct attr *new;
+ int ret;
memset(&attr, 0, sizeof(struct attr));
@@ -778,7 +781,7 @@ struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin,
attr.label = MPLS_INVALID_LABEL;
attr.weight = BGP_ATTR_DEFAULT_WEIGHT;
attr.mp_nexthop_len = IPV6_MAX_BYTELEN;
- if (!as_set || atomic_aggregate)
+ if (!aggregate->as_set || atomic_aggregate)
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE);
attr.flag |= ATTR_FLAG_BIT(BGP_ATTR_AGGREGATOR);
if (CHECK_FLAG(bgp->config, BGP_CONFIG_CONFEDERATION))
@@ -789,7 +792,42 @@ struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin,
attr.label_index = BGP_INVALID_LABEL_INDEX;
attr.label = MPLS_INVALID_LABEL;
- new = bgp_attr_intern(&attr);
+ /* Apply route-map */
+ if (aggregate->rmap.name) {
+ struct attr attr_tmp = attr;
+ struct bgp_path_info rmap_path;
+
+ memset(&rmap_path, 0, sizeof(struct bgp_path_info));
+ rmap_path.peer = bgp->peer_self;
+ rmap_path.attr = &attr_tmp;
+
+ SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_AGGREGATE);
+
+ ret = route_map_apply(aggregate->rmap.map, p, RMAP_BGP,
+ &rmap_path);
+
+ bgp->peer_self->rmap_type = 0;
+
+ if (ret == RMAP_DENYMATCH) {
+ /* Free uninterned attribute. */
+ bgp_attr_flush(&attr_tmp);
+
+ /* Unintern original. */
+ aspath_unintern(&attr.aspath);
+ return NULL;
+ }
+
+ if (bgp_flag_check(bgp, BGP_FLAG_GRACEFUL_SHUTDOWN))
+ bgp_attr_add_gshut_community(&attr_tmp);
+
+ new = bgp_attr_intern(&attr_tmp);
+ } else {
+
+ if (bgp_flag_check(bgp, BGP_FLAG_GRACEFUL_SHUTDOWN))
+ bgp_attr_add_gshut_community(&attr);
+
+ new = bgp_attr_intern(&attr);
+ }
aspath_unintern(&new->aspath);
return new;
diff --git a/bgpd/bgp_attr.h b/bgpd/bgp_attr.h
index 1592a8df4e..f1a871fe43 100644
--- a/bgpd/bgp_attr.h
+++ b/bgpd/bgp_attr.h
@@ -272,8 +272,9 @@ extern struct attr *bgp_attr_aggregate_intern(struct bgp *bgp, uint8_t origin,
struct community *community,
struct ecommunity *ecommunity,
struct lcommunity *lcommunity,
- int as_set,
- uint8_t atomic_aggregate);
+ struct bgp_aggregate *aggregate,
+ uint8_t atomic_aggregate,
+ struct prefix *p);
extern bgp_size_t bgp_packet_attribute(struct bgp *bgp, struct peer *,
struct stream *, struct attr *,
struct bpacket_attr_vec_arr *vecarr,
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
new file mode 100644
index 0000000000..3af373b562
--- /dev/null
+++ b/bgpd/bgp_bmp.c
@@ -0,0 +1,2245 @@
+/* BMP support.
+ * Copyright (C) 2018 Yasuhiro Ohara
+ * Copyright (C) 2019 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "log.h"
+#include "stream.h"
+#include "sockunion.h"
+#include "command.h"
+#include "prefix.h"
+#include "thread.h"
+#include "linklist.h"
+#include "queue.h"
+#include "pullwr.h"
+#include "memory.h"
+#include "network.h"
+#include "filter.h"
+#include "lib_errors.h"
+#include "stream.h"
+#include "libfrr.h"
+#include "version.h"
+#include "jhash.h"
+#include "termtable.h"
+
+#include "bgpd/bgp_table.h"
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_errors.h"
+#include "bgpd/bgp_packet.h"
+#include "bgpd/bgp_bmp.h"
+#include "bgpd/bgp_fsm.h"
+#include "bgpd/bgp_updgrp.h"
+#include "bgpd/bgp_vty.h"
+
+static void bmp_close(struct bmp *bmp);
+static struct bmp_bgp *bmp_bgp_find(struct bgp *bgp);
+static void bmp_targets_put(struct bmp_targets *bt);
+static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid);
+static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer);
+static void bmp_active_disconnected(struct bmp_active *ba);
+static void bmp_active_put(struct bmp_active *ba);
+
+DEFINE_MGROUP(BMP, "BMP (BGP Monitoring Protocol)")
+
+DEFINE_MTYPE_STATIC(BMP, BMP_CONN, "BMP connection state")
+DEFINE_MTYPE_STATIC(BMP, BMP_TARGETS, "BMP targets")
+DEFINE_MTYPE_STATIC(BMP, BMP_TARGETSNAME, "BMP targets name")
+DEFINE_MTYPE_STATIC(BMP, BMP_LISTENER, "BMP listener")
+DEFINE_MTYPE_STATIC(BMP, BMP_ACTIVE, "BMP active connection config")
+DEFINE_MTYPE_STATIC(BMP, BMP_ACLNAME, "BMP access-list name")
+DEFINE_MTYPE_STATIC(BMP, BMP_QUEUE, "BMP update queue item")
+DEFINE_MTYPE_STATIC(BMP, BMP, "BMP instance state")
+DEFINE_MTYPE_STATIC(BMP, BMP_MIRRORQ, "BMP route mirroring buffer")
+DEFINE_MTYPE_STATIC(BMP, BMP_PEER, "BMP per BGP peer data")
+DEFINE_MTYPE_STATIC(BMP, BMP_OPEN, "BMP stored BGP OPEN message")
+
+DEFINE_QOBJ_TYPE(bmp_targets)
+
+static int bmp_bgp_cmp(const struct bmp_bgp *a, const struct bmp_bgp *b)
+{
+ if (a->bgp < b->bgp)
+ return -1;
+ if (a->bgp > b->bgp)
+ return 1;
+ return 0;
+}
+
+static uint32_t bmp_bgp_hash(const struct bmp_bgp *e)
+{
+ return jhash(&e->bgp, sizeof(e->bgp), 0x55aa5a5a);
+}
+
+DECLARE_HASH(bmp_bgph, struct bmp_bgp, bbi, bmp_bgp_cmp, bmp_bgp_hash)
+
+struct bmp_bgph_head bmp_bgph;
+
+static int bmp_bgp_peer_cmp(const struct bmp_bgp_peer *a,
+ const struct bmp_bgp_peer *b)
+{
+ if (a->peerid < b->peerid)
+ return -1;
+ if (a->peerid > b->peerid)
+ return 1;
+ return 0;
+}
+
+static uint32_t bmp_bgp_peer_hash(const struct bmp_bgp_peer *e)
+{
+ return e->peerid;
+}
+
+DECLARE_HASH(bmp_peerh, struct bmp_bgp_peer, bpi,
+ bmp_bgp_peer_cmp, bmp_bgp_peer_hash)
+
+struct bmp_peerh_head bmp_peerh;
+
+DECLARE_LIST(bmp_mirrorq, struct bmp_mirrorq, bmi)
+
+/* listener management */
+
+static int bmp_listener_cmp(const struct bmp_listener *a,
+ const struct bmp_listener *b)
+{
+ int c;
+
+ c = sockunion_cmp(&a->addr, &b->addr);
+ if (c)
+ return c;
+ if (a->port < b->port)
+ return -1;
+ if (a->port > b->port)
+ return 1;
+ return 0;
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_listeners, struct bmp_listener, bli, bmp_listener_cmp)
+
+static int bmp_targets_cmp(const struct bmp_targets *a,
+ const struct bmp_targets *b)
+{
+ return strcmp(a->name, b->name);
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_targets, struct bmp_targets, bti, bmp_targets_cmp)
+
+DECLARE_LIST(bmp_session, struct bmp, bsi)
+
+DECLARE_DLIST(bmp_qlist, struct bmp_queue_entry, bli)
+
+static int bmp_qhash_cmp(const struct bmp_queue_entry *a,
+ const struct bmp_queue_entry *b)
+{
+ int ret;
+ ret = prefix_cmp(&a->p, &b->p);
+ if (ret)
+ return ret;
+ ret = memcmp(&a->peerid, &b->peerid,
+ offsetof(struct bmp_queue_entry, refcount) -
+ offsetof(struct bmp_queue_entry, peerid));
+ return ret;
+}
+
+static uint32_t bmp_qhash_hkey(const struct bmp_queue_entry *e)
+{
+ uint32_t key;
+
+ key = prefix_hash_key((void *)&e->p);
+ key = jhash(&e->peerid,
+ offsetof(struct bmp_queue_entry, refcount) -
+ offsetof(struct bmp_queue_entry, peerid),
+ key);
+ return key;
+}
+
+DECLARE_HASH(bmp_qhash, struct bmp_queue_entry, bhi,
+ bmp_qhash_cmp, bmp_qhash_hkey)
+
+static int bmp_active_cmp(const struct bmp_active *a,
+ const struct bmp_active *b)
+{
+ int c;
+
+ c = strcmp(a->hostname, b->hostname);
+ if (c)
+ return c;
+ if (a->port < b->port)
+ return -1;
+ if (a->port > b->port)
+ return 1;
+ return 0;
+}
+
+DECLARE_SORTLIST_UNIQ(bmp_actives, struct bmp_active, bai, bmp_active_cmp)
+
+static struct bmp *bmp_new(struct bmp_targets *bt, int bmp_sock)
+{
+ struct bmp *new = XCALLOC(MTYPE_BMP_CONN, sizeof(struct bmp));
+ afi_t afi;
+ safi_t safi;
+
+ monotime(&new->t_up);
+ new->targets = bt;
+ new->socket = bmp_sock;
+ new->syncafi = AFI_MAX;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ new->afistate[afi][safi] = bt->afimon[afi][safi]
+ ? BMP_AFI_NEEDSYNC : BMP_AFI_INACTIVE;
+ }
+
+ bmp_session_add_tail(&bt->sessions, new);
+ return new;
+}
+
+static void bmp_free(struct bmp *bmp)
+{
+ bmp_session_del(&bmp->targets->sessions, bmp);
+ XFREE(MTYPE_BMP_CONN, bmp);
+}
+
+static void bmp_common_hdr(struct stream *s, uint8_t ver, uint8_t type)
+{
+ stream_putc(s, ver);
+ stream_putl(s, 0); //dummy message length. will be set later.
+ stream_putc(s, type);
+}
+
+static void bmp_per_peer_hdr(struct stream *s, struct peer *peer,
+ uint8_t flags, const struct timeval *tv)
+{
+ char peer_distinguisher[8];
+
+#define BMP_PEER_TYPE_GLOBAL_INSTANCE 0
+#define BMP_PEER_TYPE_RD_INSTANCE 1
+#define BMP_PEER_TYPE_LOCAL_INSTANCE 2
+
+#define BMP_PEER_FLAG_V (1 << 7)
+#define BMP_PEER_FLAG_L (1 << 6)
+#define BMP_PEER_FLAG_A (1 << 5)
+
+ /* Peer Type */
+ stream_putc(s, BMP_PEER_TYPE_GLOBAL_INSTANCE);
+
+ /* Peer Flags */
+ if (peer->su.sa.sa_family == AF_INET6)
+ SET_FLAG(flags, BMP_PEER_FLAG_V);
+ else
+ UNSET_FLAG(flags, BMP_PEER_FLAG_V);
+ stream_putc(s, flags);
+
+ /* Peer Distinguisher */
+ memset (&peer_distinguisher[0], 0, 8);
+ stream_put(s, &peer_distinguisher[0], 8);
+
+ /* Peer Address */
+ if (peer->su.sa.sa_family == AF_INET6)
+ stream_put(s, &peer->su.sin6.sin6_addr, 16);
+ else if (peer->su.sa.sa_family == AF_INET) {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_put_in_addr(s, &peer->su.sin.sin_addr);
+ } else {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ }
+
+ /* Peer AS */
+ stream_putl(s, peer->as);
+
+ /* Peer BGP ID */
+ stream_put_in_addr(s, &peer->remote_id);
+
+ /* Timestamp */
+ if (tv) {
+ stream_putl(s, tv->tv_sec);
+ stream_putl(s, tv->tv_usec);
+ } else {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ }
+}
+
+static void bmp_put_info_tlv(struct stream *s, uint16_t type,
+ const char *string)
+{
+ int len = strlen (string);
+ stream_putw(s, type);
+ stream_putw(s, len);
+ stream_put(s, string, len);
+}
+
+static int bmp_send_initiation(struct bmp *bmp)
+{
+ int len;
+ struct stream *s;
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_INITIATION);
+
+#define BMP_INFO_TYPE_SYSDESCR 1
+#define BMP_INFO_TYPE_SYSNAME 2
+ bmp_put_info_tlv(s, BMP_INFO_TYPE_SYSDESCR,
+ FRR_FULL_NAME " " FRR_VER_SHORT);
+ bmp_put_info_tlv(s, BMP_INFO_TYPE_SYSNAME, cmd_hostname_get());
+
+ len = stream_get_endp(s);
+ stream_putl_at(s, BMP_LENGTH_POS, len); //message length is set.
+
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ return 0;
+}
+
+static void bmp_notify_put(struct stream *s, struct bgp_notify *nfy)
+{
+ size_t len_pos;
+ uint8_t marker[16] = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ };
+
+ stream_put(s, marker, sizeof(marker));
+ len_pos = stream_get_endp(s);
+ stream_putw(s, 0);
+ stream_putc(s, BGP_MSG_NOTIFY);
+ stream_putc(s, nfy->code);
+ stream_putc(s, nfy->subcode);
+ stream_put(s, nfy->data, nfy->length);
+
+ stream_putw_at(s, len_pos, stream_get_endp(s) - len_pos
+ + sizeof(marker));
+}
+
+static struct stream *bmp_peerstate(struct peer *peer, bool down)
+{
+ struct stream *s;
+ size_t len;
+ struct timeval uptime, uptime_real;
+
+ uptime.tv_sec = peer->uptime;
+ uptime.tv_usec = 0;
+ monotime_to_realtime(&uptime, &uptime_real);
+
+#define BGP_BMP_MAX_PACKET_SIZE 1024
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ if (peer->status == Established && !down) {
+ struct bmp_bgp_peer *bbpeer;
+
+ bmp_common_hdr(s, BMP_VERSION_3,
+ BMP_TYPE_PEER_UP_NOTIFICATION);
+ bmp_per_peer_hdr(s, peer, 0, &uptime_real);
+
+ /* Local Address (16 bytes) */
+ if (peer->su_local->sa.sa_family == AF_INET6)
+ stream_put(s, &peer->su_local->sin6.sin6_addr, 16);
+ else if (peer->su_local->sa.sa_family == AF_INET) {
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_putl(s, 0);
+ stream_put_in_addr(s, &peer->su_local->sin.sin_addr);
+ }
+
+ /* Local Port, Remote Port */
+ if (peer->su_local->sa.sa_family == AF_INET6)
+ stream_putw(s, peer->su_local->sin6.sin6_port);
+ else if (peer->su_local->sa.sa_family == AF_INET)
+ stream_putw(s, peer->su_local->sin.sin_port);
+ if (peer->su_remote->sa.sa_family == AF_INET6)
+ stream_putw(s, peer->su_remote->sin6.sin6_port);
+ else if (peer->su_remote->sa.sa_family == AF_INET)
+ stream_putw(s, peer->su_remote->sin.sin_port);
+
+ static const uint8_t dummy_open[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x13, 0x01,
+ };
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+
+ if (bbpeer && bbpeer->open_tx)
+ stream_put(s, bbpeer->open_tx, bbpeer->open_tx_len);
+ else {
+ stream_put(s, dummy_open, sizeof(dummy_open));
+ zlog_warn("bmp: missing TX OPEN message for peer %s\n",
+ peer->host);
+ }
+ if (bbpeer && bbpeer->open_rx)
+ stream_put(s, bbpeer->open_rx, bbpeer->open_rx_len);
+ else {
+ stream_put(s, dummy_open, sizeof(dummy_open));
+ zlog_warn("bmp: missing RX OPEN message for peer %s\n",
+ peer->host);
+ }
+
+ if (peer->desc)
+ bmp_put_info_tlv(s, 0, peer->desc);
+ } else {
+ uint8_t type;
+ size_t type_pos;
+
+ bmp_common_hdr(s, BMP_VERSION_3,
+ BMP_TYPE_PEER_DOWN_NOTIFICATION);
+ bmp_per_peer_hdr(s, peer, 0, &uptime_real);
+
+ type_pos = stream_get_endp(s);
+ stream_putc(s, 0); /* placeholder for down reason */
+
+ switch (peer->last_reset) {
+ case PEER_DOWN_NOTIFY_RECEIVED:
+ type = BMP_PEERDOWN_REMOTE_NOTIFY;
+ bmp_notify_put(s, &peer->notify);
+ break;
+ case PEER_DOWN_CLOSE_SESSION:
+ type = BMP_PEERDOWN_REMOTE_CLOSE;
+ break;
+ default:
+ type = BMP_PEERDOWN_LOCAL_NOTIFY;
+ stream_put(s, peer->last_reset_cause,
+ peer->last_reset_cause_size);
+ break;
+ }
+ stream_putc_at(s, type_pos, type);
+ }
+
+ len = stream_get_endp(s);
+ stream_putl_at(s, BMP_LENGTH_POS, len); //message length is set.
+ return s;
+}
+
+
+static int bmp_send_peerup(struct bmp *bmp)
+{
+ struct peer *peer;
+ struct listnode *node;
+ struct stream *s;
+
+ /* Walk down all peers */
+ for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
+ s = bmp_peerstate(peer, false);
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+ }
+
+ return 0;
+}
+
+/* XXX: kludge - filling the pullwr's buffer */
+static void bmp_send_all(struct bmp_bgp *bmpbgp, struct stream *s)
+{
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt)
+ frr_each(bmp_session, &bt->sessions, bmp)
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+}
+
+/*
+ * Route Mirroring
+ */
+
+#define BMP_MIRROR_TLV_TYPE_BGP_MESSAGE 0
+#define BMP_MIRROR_TLV_TYPE_INFO 1
+
+#define BMP_MIRROR_INFO_CODE_ERRORPDU 0
+#define BMP_MIRROR_INFO_CODE_LOSTMSGS 1
+
+static struct bmp_mirrorq *bmp_pull_mirror(struct bmp *bmp)
+{
+ struct bmp_mirrorq *bmq;
+
+ bmq = bmp->mirrorpos;
+ if (!bmq)
+ return NULL;
+
+ bmp->mirrorpos = bmp_mirrorq_next(&bmp->targets->bmpbgp->mirrorq, bmq);
+
+ bmq->refcount--;
+ if (!bmq->refcount) {
+ bmp->targets->bmpbgp->mirror_qsize -= sizeof(*bmq) + bmq->len;
+ bmp_mirrorq_del(&bmp->targets->bmpbgp->mirrorq, bmq);
+ }
+ return bmq;
+}
+
+static void bmp_mirror_cull(struct bmp_bgp *bmpbgp)
+{
+ while (bmpbgp->mirror_qsize > bmpbgp->mirror_qsizelimit) {
+ struct bmp_mirrorq *bmq, *inner;
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ bmq = bmp_mirrorq_first(&bmpbgp->mirrorq);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->mirror)
+ continue;
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ if (bmp->mirrorpos != bmq)
+ continue;
+
+ while ((inner = bmp_pull_mirror(bmp))) {
+ if (!inner->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ,
+ inner);
+ }
+
+ zlog_warn("bmp[%s] lost mirror messages due to buffer size limit",
+ bmp->remote);
+ bmp->mirror_lost = true;
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ }
+}
+
+static int bmp_mirror_packet(struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *packet)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct timeval tv;
+ struct bmp_mirrorq *qitem;
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ gettimeofday(&tv, NULL);
+
+ if (type == BGP_MSG_OPEN) {
+ struct bmp_bgp_peer *bbpeer = bmp_bgp_peer_get(peer);
+
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
+
+ bbpeer->open_rx_len = size;
+ bbpeer->open_rx = XMALLOC(MTYPE_BMP_OPEN, size);
+ memcpy(bbpeer->open_rx, packet->data, size);
+ }
+
+ if (!bmpbgp)
+ return 0;
+
+ qitem = XCALLOC(MTYPE_BMP_MIRRORQ, sizeof(*qitem) + size);
+ qitem->peerid = peer->qobj_node.nid;
+ qitem->tv = tv;
+ qitem->len = size;
+ memcpy(qitem->data, packet->data, size);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->mirror)
+ continue;
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ qitem->refcount++;
+ if (!bmp->mirrorpos)
+ bmp->mirrorpos = qitem;
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ if (qitem->refcount == 0)
+ XFREE(MTYPE_BMP_MIRRORQ, qitem);
+ else {
+ bmpbgp->mirror_qsize += sizeof(*qitem) + size;
+ bmp_mirrorq_add_tail(&bmpbgp->mirrorq, qitem);
+
+ bmp_mirror_cull(bmpbgp);
+
+ bmpbgp->mirror_qsizemax = MAX(bmpbgp->mirror_qsizemax,
+ bmpbgp->mirror_qsize);
+ }
+ return 0;
+}
+
+static void bmp_wrmirror_lost(struct bmp *bmp, struct pullwr *pullwr)
+{
+ struct stream *s;
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
+ bmp_per_peer_hdr(s, bmp->targets->bgp->peer_self, 0, &tv);
+
+ stream_putw(s, BMP_MIRROR_TLV_TYPE_INFO);
+ stream_putw(s, 2);
+ stream_putw(s, BMP_MIRROR_INFO_CODE_LOSTMSGS);
+ stream_putl_at(s, BMP_LENGTH_POS, stream_get_endp(s));
+
+ bmp->cnt_mirror_overruns++;
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s);
+}
+
+static bool bmp_wrmirror(struct bmp *bmp, struct pullwr *pullwr)
+{
+ struct bmp_mirrorq *bmq;
+ struct peer *peer;
+ bool written = false;
+
+ if (bmp->mirror_lost) {
+ bmp_wrmirror_lost(bmp, pullwr);
+ bmp->mirror_lost = false;
+ return true;
+ }
+
+ bmq = bmp_pull_mirror(bmp);
+ if (!bmq)
+ return false;
+
+ peer = QOBJ_GET_TYPESAFE(bmq->peerid, peer);
+ if (!peer) {
+ zlog_info("bmp: skipping mirror message for deleted peer");
+ goto out;
+ }
+
+ struct stream *s;
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_ROUTE_MIRRORING);
+ bmp_per_peer_hdr(s, peer, 0, &bmq->tv);
+
+ /* BMP Mirror TLV. */
+ stream_putw(s, BMP_MIRROR_TLV_TYPE_BGP_MESSAGE);
+ stream_putw(s, bmq->len);
+ stream_putl_at(s, BMP_LENGTH_POS, stream_get_endp(s) + bmq->len);
+
+ bmp->cnt_mirror++;
+ pullwr_write_stream(bmp->pullwr, s);
+ pullwr_write(bmp->pullwr, bmq->data, bmq->len);
+
+ stream_free(s);
+ written = true;
+
+out:
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ return written;
+}
+
+static int bmp_outgoing_packet(struct peer *peer, uint8_t type, bgp_size_t size,
+ struct stream *packet)
+{
+ if (type == BGP_MSG_OPEN) {
+ struct bmp_bgp_peer *bbpeer = bmp_bgp_peer_get(peer);
+
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+
+ bbpeer->open_tx_len = size;
+ bbpeer->open_tx = XMALLOC(MTYPE_BMP_OPEN, size);
+ memcpy(bbpeer->open_tx, packet->data, size);
+ }
+ return 0;
+}
+
+static int bmp_peer_established(struct peer *peer)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+
+ if (!bmpbgp)
+ return 0;
+
+ /* Check if this peer just went to Established */
+ if ((peer->last_major_event != OpenConfirm) ||
+ !(peer_established(peer)))
+ return 0;
+
+ if (peer->doppelganger && (peer->doppelganger->status != Deleted)) {
+ struct bmp_bgp_peer *bbpeer, *bbdopp;
+
+ bbpeer = bmp_bgp_peer_get(peer);
+ bbdopp = bmp_bgp_peer_find(peer->doppelganger->qobj_node.nid);
+ if (bbdopp) {
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
+
+ bbpeer->open_tx = bbdopp->open_tx;
+ bbpeer->open_tx_len = bbdopp->open_tx_len;
+ bbpeer->open_rx = bbdopp->open_rx;
+ bbpeer->open_rx_len = bbdopp->open_rx_len;
+
+ bmp_peerh_del(&bmp_peerh, bbdopp);
+ XFREE(MTYPE_BMP_PEER, bbdopp);
+ }
+ }
+
+ bmp_send_all(bmpbgp, bmp_peerstate(peer, false));
+ return 0;
+}
+
+static int bmp_peer_backward(struct peer *peer)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct bmp_bgp_peer *bbpeer;
+
+ if (!bmpbgp)
+ return 0;
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+ if (bbpeer) {
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_tx);
+ bbpeer->open_tx_len = 0;
+ XFREE(MTYPE_BMP_OPEN, bbpeer->open_rx);
+ bbpeer->open_rx_len = 0;
+ }
+
+ bmp_send_all(bmpbgp, bmp_peerstate(peer, true));
+ return 0;
+}
+
+static void bmp_eor(struct bmp *bmp, afi_t afi, safi_t safi, uint8_t flags)
+{
+ struct peer *peer;
+ struct listnode *node;
+ struct stream *s, *s2;
+ iana_afi_t pkt_afi;
+ iana_safi_t pkt_safi;
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ /* Make BGP update packet. */
+ bgp_packet_set_marker(s, BGP_MSG_UPDATE);
+
+ /* Unfeasible Routes Length */
+ stream_putw(s, 0);
+
+ if (afi == AFI_IP && safi == SAFI_UNICAST) {
+ /* Total Path Attribute Length */
+ stream_putw(s, 0);
+ } else {
+ /* Convert AFI, SAFI to values for packet. */
+ bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi, &pkt_safi);
+
+ /* Total Path Attribute Length */
+ stream_putw(s, 6);
+ stream_putc(s, BGP_ATTR_FLAG_OPTIONAL);
+ stream_putc(s, BGP_ATTR_MP_UNREACH_NLRI);
+ stream_putc(s, 3);
+ stream_putw(s, pkt_afi);
+ stream_putc(s, pkt_safi);
+ }
+
+ bgp_packet_set_size(s);
+
+ for (ALL_LIST_ELEMENTS_RO(bmp->targets->bgp->peer, node, peer)) {
+ if (!peer->afc_nego[afi][safi])
+ continue;
+
+ s2 = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bmp_common_hdr(s2, BMP_VERSION_3,
+ BMP_TYPE_ROUTE_MONITORING);
+ bmp_per_peer_hdr(s2, peer, flags, NULL);
+
+ stream_putl_at(s2, BMP_LENGTH_POS,
+ stream_get_endp(s) + stream_get_endp(s2));
+
+ bmp->cnt_update++;
+ pullwr_write_stream(bmp->pullwr, s2);
+ pullwr_write_stream(bmp->pullwr, s);
+ stream_free(s2);
+ }
+ stream_free(s);
+}
+
+static struct stream *bmp_update(struct prefix *p, struct peer *peer,
+ struct attr *attr, afi_t afi, safi_t safi)
+{
+ struct bpacket_attr_vec_arr vecarr;
+ struct stream *s;
+ size_t attrlen_pos = 0, mpattrlen_pos = 0;
+ bgp_size_t total_attr_len = 0;
+
+ bpacket_attr_vec_arr_reset(&vecarr);
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+ bgp_packet_set_marker(s, BGP_MSG_UPDATE);
+
+ /* 2: withdrawn routes length */
+ stream_putw(s, 0);
+
+ /* 3: total attributes length - attrlen_pos stores the position */
+ attrlen_pos = stream_get_endp(s);
+ stream_putw(s, 0);
+
+ /* 5: Encode all the attributes, except MP_REACH_NLRI attr. */
+ total_attr_len = bgp_packet_attribute(NULL, peer, s, attr,
+ &vecarr, NULL, afi, safi, peer, NULL, NULL, 0, 0, 0);
+
+ /* space check? */
+
+ /* peer_cap_enhe & add-path removed */
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ stream_put_prefix(s, p);
+ else {
+ size_t p1 = stream_get_endp(s);
+
+ /* MPLS removed for now */
+
+ mpattrlen_pos = bgp_packet_mpattr_start(s, peer, afi, safi,
+ &vecarr, attr);
+ bgp_packet_mpattr_prefix(s, afi, safi, p, NULL, NULL, 0,
+ 0, 0, attr);
+ bgp_packet_mpattr_end(s, mpattrlen_pos);
+ total_attr_len += stream_get_endp(s) - p1;
+ }
+
+ /* set the total attribute length correctly */
+ stream_putw_at(s, attrlen_pos, total_attr_len);
+ bgp_packet_set_size(s);
+ return s;
+}
+
+static struct stream *bmp_withdraw(struct prefix *p, afi_t afi, safi_t safi)
+{
+ struct stream *s;
+ size_t attrlen_pos = 0, mp_start, mplen_pos;
+ bgp_size_t total_attr_len = 0;
+ bgp_size_t unfeasible_len;
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+
+ bgp_packet_set_marker(s, BGP_MSG_UPDATE);
+ stream_putw(s, 0);
+
+ if (afi == AFI_IP && safi == SAFI_UNICAST) {
+ stream_put_prefix(s, p);
+ unfeasible_len = stream_get_endp(s) - BGP_HEADER_SIZE
+ - BGP_UNFEASIBLE_LEN;
+ stream_putw_at(s, BGP_HEADER_SIZE, unfeasible_len);
+ stream_putw(s, 0);
+ } else {
+ attrlen_pos = stream_get_endp(s);
+ /* total attr length = 0 for now. reevaluate later */
+ stream_putw(s, 0);
+ mp_start = stream_get_endp(s);
+ mplen_pos = bgp_packet_mpunreach_start(s, afi, safi);
+
+ bgp_packet_mpunreach_prefix(s, p, afi, safi, NULL, NULL, 0,
+ 0, 0, NULL);
+ /* Set the mp_unreach attr's length */
+ bgp_packet_mpunreach_end(s, mplen_pos);
+
+ /* Set total path attribute length. */
+ total_attr_len = stream_get_endp(s) - mp_start;
+ stream_putw_at(s, attrlen_pos, total_attr_len);
+ }
+
+ bgp_packet_set_size(s);
+ return s;
+}
+
+static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags,
+ struct prefix *p, struct attr *attr, afi_t afi,
+ safi_t safi, time_t uptime)
+{
+ struct stream *hdr, *msg;
+ struct timeval tv = { .tv_sec = uptime, .tv_usec = 0 };
+
+ if (attr)
+ msg = bmp_update(p, peer, attr, afi, safi);
+ else
+ msg = bmp_withdraw(p, afi, safi);
+
+ hdr = stream_new(BGP_MAX_PACKET_SIZE);
+ bmp_common_hdr(hdr, BMP_VERSION_3, BMP_TYPE_ROUTE_MONITORING);
+ bmp_per_peer_hdr(hdr, peer, flags, &tv);
+
+ stream_putl_at(hdr, BMP_LENGTH_POS,
+ stream_get_endp(hdr) + stream_get_endp(msg));
+
+ bmp->cnt_update++;
+ pullwr_write_stream(bmp->pullwr, hdr);
+ pullwr_write_stream(bmp->pullwr, msg);
+ stream_free(hdr);
+ stream_free(msg);
+}
+
+static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr)
+{
+ afi_t afi;
+ safi_t safi;
+
+ if (bmp->syncafi == AFI_MAX) {
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (bmp->afistate[afi][safi] != BMP_AFI_NEEDSYNC)
+ continue;
+
+ bmp->afistate[afi][safi] = BMP_AFI_SYNC;
+
+ bmp->syncafi = afi;
+ bmp->syncsafi = safi;
+ bmp->syncpeerid = 0;
+ memset(&bmp->syncpos, 0, sizeof(bmp->syncpos));
+ bmp->syncpos.family = afi2family(afi);
+ zlog_info("bmp[%s] %s %s sending table",
+ bmp->remote,
+ afi2str(bmp->syncafi),
+ safi2str(bmp->syncsafi));
+ /* break does not work here, 2 loops... */
+ goto afibreak;
+ }
+ if (bmp->syncafi == AFI_MAX)
+ return false;
+ }
+
+afibreak:
+ afi = bmp->syncafi;
+ safi = bmp->syncsafi;
+
+ if (!bmp->targets->afimon[afi][safi]) {
+ /* shouldn't happen */
+ bmp->afistate[afi][safi] = BMP_AFI_INACTIVE;
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ return true;
+ }
+
+ struct bgp_table *table = bmp->targets->bgp->rib[afi][safi];
+ struct bgp_node *bn;
+ struct bgp_path_info *bpi = NULL, *bpiter;
+ struct bgp_adj_in *adjin = NULL, *adjiter;
+
+ bn = bgp_node_lookup(table, &bmp->syncpos);
+ do {
+ if (!bn) {
+ bn = bgp_table_get_next(table, &bmp->syncpos);
+ if (!bn) {
+ zlog_info("bmp[%s] %s %s table completed (EoR)",
+ bmp->remote, afi2str(afi),
+ safi2str(safi));
+ bmp_eor(bmp, afi, safi, BMP_PEER_FLAG_L);
+ bmp_eor(bmp, afi, safi, 0);
+
+ bmp->afistate[afi][safi] = BMP_AFI_LIVE;
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ return true;
+ }
+ bmp->syncpeerid = 0;
+ prefix_copy(&bmp->syncpos, &bn->p);
+ }
+
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
+ for (bpiter = bn->info; bpiter; bpiter = bpiter->next) {
+ if (!CHECK_FLAG(bpiter->flags, BGP_PATH_VALID))
+ continue;
+ if (bpiter->peer->qobj_node.nid
+ <= bmp->syncpeerid)
+ continue;
+ if (bpi && bpiter->peer->qobj_node.nid
+ > bpi->peer->qobj_node.nid)
+ continue;
+ bpi = bpiter;
+ }
+ }
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_PREPOLICY) {
+ for (adjiter = bn->adj_in; adjiter;
+ adjiter = adjiter->next) {
+ if (adjiter->peer->qobj_node.nid
+ <= bmp->syncpeerid)
+ continue;
+ if (adjin && adjiter->peer->qobj_node.nid
+ > adjin->peer->qobj_node.nid)
+ continue;
+ adjin = adjiter;
+ }
+ }
+ if (bpi || adjin)
+ break;
+
+ bn = NULL;
+ } while (1);
+
+ if (adjin && bpi
+ && adjin->peer->qobj_node.nid < bpi->peer->qobj_node.nid) {
+ bpi = NULL;
+ bmp->syncpeerid = adjin->peer->qobj_node.nid;
+ } else if (adjin && bpi
+ && adjin->peer->qobj_node.nid > bpi->peer->qobj_node.nid) {
+ adjin = NULL;
+ bmp->syncpeerid = bpi->peer->qobj_node.nid;
+ } else if (bpi) {
+ bmp->syncpeerid = bpi->peer->qobj_node.nid;
+ } else if (adjin) {
+ bmp->syncpeerid = adjin->peer->qobj_node.nid;
+ }
+
+ if (bpi)
+ bmp_monitor(bmp, bpi->peer, BMP_PEER_FLAG_L, &bn->p, bpi->attr,
+ afi, safi, bpi->uptime);
+ if (adjin)
+ bmp_monitor(bmp, adjin->peer, 0, &bn->p, adjin->attr,
+ afi, safi, adjin->uptime);
+
+ return true;
+}
+
+static struct bmp_queue_entry *bmp_pull(struct bmp *bmp)
+{
+ struct bmp_queue_entry *bqe;
+
+ bqe = bmp->queuepos;
+ if (!bqe)
+ return NULL;
+
+ bmp->queuepos = bmp_qlist_next(&bmp->targets->updlist, bqe);
+
+ bqe->refcount--;
+ if (!bqe->refcount) {
+ bmp_qhash_del(&bmp->targets->updhash, bqe);
+ bmp_qlist_del(&bmp->targets->updlist, bqe);
+ }
+ return bqe;
+}
+
+static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
+{
+ struct bmp_queue_entry *bqe;
+ struct peer *peer;
+ struct bgp_node *bn;
+ bool written = false;
+
+ bqe = bmp_pull(bmp);
+ if (!bqe)
+ return false;
+
+ afi_t afi = bqe->afi;
+ safi_t safi = bqe->safi;
+
+ switch (bmp->afistate[afi][safi]) {
+ case BMP_AFI_INACTIVE:
+ case BMP_AFI_NEEDSYNC:
+ goto out;
+ case BMP_AFI_SYNC:
+ if (prefix_cmp(&bqe->p, &bmp->syncpos) <= 0)
+ /* currently syncing but have already passed this
+ * prefix => send it. */
+ break;
+
+ /* currently syncing & haven't reached this prefix yet
+ * => it'll be sent as part of the table sync, no need here */
+ goto out;
+ case BMP_AFI_LIVE:
+ break;
+ }
+
+ peer = QOBJ_GET_TYPESAFE(bqe->peerid, peer);
+ if (!peer) {
+ zlog_info("bmp: skipping queued item for deleted peer");
+ goto out;
+ }
+ if (peer->status != Established)
+ goto out;
+
+ bn = bgp_node_lookup(bmp->targets->bgp->rib[afi][safi], &bqe->p);
+
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_POSTPOLICY) {
+ struct bgp_path_info *bpi;
+
+ for (bpi = bn ? bn->info : NULL; bpi; bpi = bpi->next) {
+ if (!CHECK_FLAG(bpi->flags, BGP_PATH_VALID))
+ continue;
+ if (bpi->peer == peer)
+ break;
+ }
+
+ bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p,
+ bpi ? bpi->attr : NULL, afi, safi,
+ bpi ? bpi->uptime : monotime(NULL));
+ written = true;
+ }
+
+ if (bmp->targets->afimon[afi][safi] & BMP_MON_PREPOLICY) {
+ struct bgp_adj_in *adjin;
+
+ for (adjin = bn ? bn->adj_in : NULL; adjin;
+ adjin = adjin->next) {
+ if (adjin->peer == peer)
+ break;
+ }
+ bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p,
+ adjin ? adjin->attr : NULL, afi, safi,
+ adjin ? adjin->uptime : monotime(NULL));
+ written = true;
+ }
+
+out:
+ if (!bqe->refcount)
+ XFREE(MTYPE_BMP_QUEUE, bqe);
+ return written;
+}
+
+static void bmp_wrfill(struct bmp *bmp, struct pullwr *pullwr)
+{
+ switch(bmp->state) {
+ case BMP_PeerUp:
+ bmp_send_peerup(bmp);
+ bmp->state = BMP_Run;
+ break;
+
+ case BMP_Run:
+ if (bmp_wrmirror(bmp, pullwr))
+ break;
+ if (bmp_wrqueue(bmp, pullwr))
+ break;
+ if (bmp_wrsync(bmp, pullwr))
+ break;
+ break;
+ }
+}
+
+static void bmp_wrerr(struct bmp *bmp, struct pullwr *pullwr, bool eof)
+{
+ if (eof)
+ zlog_info("bmp[%s] disconnected", bmp->remote);
+ else
+ flog_warn(EC_LIB_SYSTEM_CALL, "bmp[%s] connection error: %s",
+ bmp->remote, strerror(errno));
+
+ bmp_close(bmp);
+ bmp_free(bmp);
+}
+
+static void bmp_process_one(struct bmp_targets *bt, struct bgp *bgp,
+ afi_t afi, safi_t safi, struct bgp_node *bn, struct peer *peer)
+{
+ struct bmp *bmp;
+ struct bmp_queue_entry *bqe, bqeref;
+ size_t refcount;
+ char buf[256];
+
+ prefix2str(&bn->p, buf, sizeof(buf));
+
+ refcount = bmp_session_count(&bt->sessions);
+ if (refcount == 0)
+ return;
+
+ memset(&bqeref, 0, sizeof(bqeref));
+ prefix_copy(&bqeref.p, &bn->p);
+ bqeref.peerid = peer->qobj_node.nid;
+ bqeref.afi = afi;
+ bqeref.safi = safi;
+
+ bqe = bmp_qhash_find(&bt->updhash, &bqeref);
+ if (bqe) {
+ if (bqe->refcount >= refcount)
+ /* nothing to do here */
+ return;
+
+ bmp_qlist_del(&bt->updlist, bqe);
+ } else {
+ bqe = XMALLOC(MTYPE_BMP_QUEUE, sizeof(*bqe));
+ memcpy(bqe, &bqeref, sizeof(*bqe));
+
+ bmp_qhash_add(&bt->updhash, bqe);
+ }
+
+ bqe->refcount = refcount;
+ bmp_qlist_add_tail(&bt->updlist, bqe);
+
+ frr_each (bmp_session, &bt->sessions, bmp)
+ if (!bmp->queuepos)
+ bmp->queuepos = bqe;
+}
+
+static int bmp_process(struct bgp *bgp, afi_t afi, safi_t safi,
+ struct bgp_node *bn, struct peer *peer, bool withdraw)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(peer->bgp);
+ struct bmp_targets *bt;
+ struct bmp *bmp;
+
+ if (!bmpbgp)
+ return 0;
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ if (!bt->afimon[afi][safi])
+ continue;
+
+ bmp_process_one(bt, bgp, afi, safi, bn, peer);
+
+ frr_each(bmp_session, &bt->sessions, bmp) {
+ pullwr_bump(bmp->pullwr);
+ }
+ }
+ return 0;
+}
+
+static void bmp_stat_put_u32(struct stream *s, size_t *cnt, uint16_t type,
+ uint32_t value)
+{
+ stream_putw(s, type);
+ stream_putw(s, 4);
+ stream_putl(s, value);
+ (*cnt)++;
+}
+
+static int bmp_stats(struct thread *thread)
+{
+ struct bmp_targets *bt = THREAD_ARG(thread);
+ struct stream *s;
+ struct peer *peer;
+ struct listnode *node;
+ struct timeval tv;
+
+ if (bt->stat_msec)
+ thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
+
+ gettimeofday(&tv, NULL);
+
+ /* Walk down all peers */
+ for (ALL_LIST_ELEMENTS_RO(bt->bgp->peer, node, peer)) {
+ size_t count = 0, count_pos, len;
+
+ if (peer->status != Established)
+ continue;
+
+ s = stream_new(BGP_MAX_PACKET_SIZE);
+ bmp_common_hdr(s, BMP_VERSION_3, BMP_TYPE_STATISTICS_REPORT);
+ bmp_per_peer_hdr(s, peer, 0, &tv);
+
+ count_pos = stream_get_endp(s);
+ stream_putl(s, 0);
+
+ bmp_stat_put_u32(s, &count, BMP_STATS_PFX_REJECTED,
+ peer->stat_pfx_filter);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_ASPATH,
+ peer->stat_pfx_aspath_loop);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_ORIGINATOR,
+ peer->stat_pfx_originator_loop);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_LOOP_CLUSTER,
+ peer->stat_pfx_cluster_loop);
+ bmp_stat_put_u32(s, &count, BMP_STATS_PFX_DUP_WITHDRAW,
+ peer->stat_pfx_dup_withdraw);
+ bmp_stat_put_u32(s, &count, BMP_STATS_UPD_7606_WITHDRAW,
+ peer->stat_upd_7606);
+ bmp_stat_put_u32(s, &count, BMP_STATS_FRR_NH_INVALID,
+ peer->stat_pfx_nh_invalid);
+
+ stream_putl_at(s, count_pos, count);
+
+ len = stream_get_endp(s);
+ stream_putl_at(s, BMP_LENGTH_POS, len);
+
+ bmp_send_all(bt->bmpbgp, s);
+ }
+ return 0;
+}
+
+static struct bmp *bmp_open(struct bmp_targets *bt, int bmp_sock)
+{
+ union sockunion su, *sumem;
+ struct prefix p;
+ int on = 1;
+ struct access_list *acl = NULL;
+ enum filter_type ret;
+ char buf[SU_ADDRSTRLEN];
+ struct bmp *bmp;
+
+ sumem = sockunion_getpeername(bmp_sock);
+ if (!sumem) {
+ close(bmp_sock);
+ return NULL;
+ }
+ memcpy(&su, sumem, sizeof(su));
+ sockunion_free(sumem);
+
+ set_nonblocking(bmp_sock);
+ set_cloexec(bmp_sock);
+ shutdown(bmp_sock, SHUT_RD);
+
+ sockunion2hostprefix(&su, &p);
+
+ acl = NULL;
+ switch (p.family) {
+ case AF_INET:
+ acl = access_list_lookup(AFI_IP, bt->acl_name);
+ break;
+ case AF_INET6:
+ acl = access_list_lookup(AFI_IP6, bt->acl6_name);
+ break;
+ default:
+ break;
+ }
+
+ ret = FILTER_PERMIT;
+ if (acl) {
+ ret = access_list_apply(acl, &p);
+ }
+
+ sockunion2str(&su, buf, SU_ADDRSTRLEN);
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ":%u",
+ su.sa.sa_family == AF_INET
+ ? ntohs(su.sin.sin_port)
+ : ntohs(su.sin6.sin6_port));
+
+ if (ret == FILTER_DENY) {
+ bt->cnt_aclrefused++;
+ zlog_info("bmp[%s] connection refused by access-list", buf);
+ close(bmp_sock);
+ return NULL;
+ }
+ bt->cnt_accept++;
+
+ setsockopt(bmp_sock, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
+ setsockopt(bmp_sock, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
+
+ zlog_info("bmp[%s] connection established", buf);
+
+ /* Allocate new BMP structure and set up default values. */
+ bmp = bmp_new(bt, bmp_sock);
+ strlcpy(bmp->remote, buf, sizeof(bmp->remote));
+
+ bmp->state = BMP_PeerUp;
+ bmp->pullwr = pullwr_new(bm->master, bmp_sock, bmp, bmp_wrfill,
+ bmp_wrerr);
+ bmp_send_initiation(bmp);
+
+ return bmp;
+}
+
+/* Accept BMP connection. */
+static int bmp_accept(struct thread *thread)
+{
+ union sockunion su;
+ struct bmp_listener *bl = THREAD_ARG(thread);
+ int bmp_sock;
+
+ /* We continue hearing BMP socket. */
+ thread_add_read(bm->master, bmp_accept, bl, bl->sock, &bl->t_accept);
+
+ memset(&su, 0, sizeof(union sockunion));
+
+ /* We can handle IPv4 or IPv6 socket. */
+ bmp_sock = sockunion_accept(bl->sock, &su);
+ if (bmp_sock < 0) {
+ zlog_info("bmp: accept_sock failed: %s\n",
+ safe_strerror (errno));
+ return -1;
+ }
+ bmp_open(bl->targets, bmp_sock);
+ return 0;
+}
+
+static void bmp_close(struct bmp *bmp)
+{
+ struct bmp_queue_entry *bqe;
+ struct bmp_mirrorq *bmq;
+
+ if (bmp->active)
+ bmp_active_disconnected(bmp->active);
+
+ while ((bmq = bmp_pull_mirror(bmp)))
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ while ((bqe = bmp_pull(bmp)))
+ if (!bqe->refcount)
+ XFREE(MTYPE_BMP_QUEUE, bqe);
+
+ THREAD_OFF(bmp->t_read);
+ pullwr_del(bmp->pullwr);
+ close(bmp->socket);
+}
+
+static struct bmp_bgp *bmp_bgp_find(struct bgp *bgp)
+{
+ struct bmp_bgp dummy = { .bgp = bgp };
+ return bmp_bgph_find(&bmp_bgph, &dummy);
+}
+
+static struct bmp_bgp *bmp_bgp_get(struct bgp *bgp)
+{
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_find(bgp);
+ if (bmpbgp)
+ return bmpbgp;
+
+ bmpbgp = XCALLOC(MTYPE_BMP, sizeof(*bmpbgp));
+ bmpbgp->bgp = bgp;
+ bmpbgp->mirror_qsizelimit = ~0UL;
+ bmp_mirrorq_init(&bmpbgp->mirrorq);
+ bmp_bgph_add(&bmp_bgph, bmpbgp);
+
+ return bmpbgp;
+}
+
+static void bmp_bgp_put(struct bmp_bgp *bmpbgp)
+{
+ struct bmp_targets *bt;
+
+ bmp_bgph_del(&bmp_bgph, bmpbgp);
+
+ frr_each_safe(bmp_targets, &bmpbgp->targets, bt)
+ bmp_targets_put(bt);
+
+ bmp_mirrorq_fini(&bmpbgp->mirrorq);
+ XFREE(MTYPE_BMP, bmpbgp);
+}
+
+static int bmp_bgp_del(struct bgp *bgp)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+
+ if (bmpbgp)
+ bmp_bgp_put(bmpbgp);
+ return 0;
+}
+
+static struct bmp_bgp_peer *bmp_bgp_peer_find(uint64_t peerid)
+{
+ struct bmp_bgp_peer dummy = { .peerid = peerid };
+ return bmp_peerh_find(&bmp_peerh, &dummy);
+}
+
+static struct bmp_bgp_peer *bmp_bgp_peer_get(struct peer *peer)
+{
+ struct bmp_bgp_peer *bbpeer;
+
+ bbpeer = bmp_bgp_peer_find(peer->qobj_node.nid);
+ if (bbpeer)
+ return bbpeer;
+
+ bbpeer = XCALLOC(MTYPE_BMP_PEER, sizeof(*bbpeer));
+ bbpeer->peerid = peer->qobj_node.nid;
+ bmp_peerh_add(&bmp_peerh, bbpeer);
+
+ return bbpeer;
+}
+
+static struct bmp_targets *bmp_targets_find1(struct bgp *bgp, const char *name)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+ struct bmp_targets dummy;
+
+ if (!bmpbgp)
+ return NULL;
+ dummy.name = (char *)name;
+ return bmp_targets_find(&bmpbgp->targets, &dummy);
+}
+
+static struct bmp_targets *bmp_targets_get(struct bgp *bgp, const char *name)
+{
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_find1(bgp, name);
+ if (bt)
+ return bt;
+
+ bt = XCALLOC(MTYPE_BMP_TARGETS, sizeof(*bt));
+ bt->name = XSTRDUP(MTYPE_BMP_TARGETSNAME, name);
+ bt->bgp = bgp;
+ bt->bmpbgp = bmp_bgp_get(bgp);
+ bmp_session_init(&bt->sessions);
+ bmp_qhash_init(&bt->updhash);
+ bmp_qlist_init(&bt->updlist);
+ bmp_actives_init(&bt->actives);
+ bmp_listeners_init(&bt->listeners);
+
+ QOBJ_REG(bt, bmp_targets);
+ bmp_targets_add(&bt->bmpbgp->targets, bt);
+ return bt;
+}
+
+static void bmp_targets_put(struct bmp_targets *bt)
+{
+ struct bmp *bmp;
+ struct bmp_active *ba;
+
+ frr_each_safe (bmp_actives, &bt->actives, ba)
+ bmp_active_put(ba);
+
+ frr_each_safe(bmp_session, &bt->sessions, bmp) {
+ bmp_close(bmp);
+ bmp_free(bmp);
+ }
+
+ bmp_targets_del(&bt->bmpbgp->targets, bt);
+ QOBJ_UNREG(bt);
+
+ bmp_listeners_fini(&bt->listeners);
+ bmp_actives_fini(&bt->actives);
+ bmp_qhash_fini(&bt->updhash);
+ bmp_qlist_fini(&bt->updlist);
+
+ XFREE(MTYPE_BMP_ACLNAME, bt->acl_name);
+ XFREE(MTYPE_BMP_ACLNAME, bt->acl6_name);
+ bmp_session_fini(&bt->sessions);
+
+ XFREE(MTYPE_BMP_TARGETSNAME, bt->name);
+ XFREE(MTYPE_BMP_TARGETS, bt);
+}
+
+static struct bmp_listener *bmp_listener_find(struct bmp_targets *bt,
+ const union sockunion *su,
+ int port)
+{
+ struct bmp_listener dummy;
+ dummy.addr = *su;
+ dummy.port = port;
+ return bmp_listeners_find(&bt->listeners, &dummy);
+}
+
+static struct bmp_listener *bmp_listener_get(struct bmp_targets *bt,
+ const union sockunion *su,
+ int port)
+{
+ struct bmp_listener *bl = bmp_listener_find(bt, su, port);
+
+ if (bl)
+ return bl;
+
+ bl = XCALLOC(MTYPE_BMP_LISTENER, sizeof(*bl));
+ bl->targets = bt;
+ bl->addr = *su;
+ bl->port = port;
+ bl->sock = -1;
+
+ bmp_listeners_add(&bt->listeners, bl);
+ return bl;
+}
+
+static void bmp_listener_put(struct bmp_listener *bl)
+{
+ bmp_listeners_del(&bl->targets->listeners, bl);
+ XFREE(MTYPE_BMP_LISTENER, bl);
+}
+
+static void bmp_listener_start(struct bmp_listener *bl)
+{
+ int sock, ret;
+
+ sock = socket(bl->addr.sa.sa_family, SOCK_STREAM, 0);
+ if (sock < 0)
+ return;
+
+ sockopt_reuseaddr(sock);
+ sockopt_reuseport(sock);
+ sockopt_v6only(bl->addr.sa.sa_family, sock);
+ set_cloexec(sock);
+
+ ret = sockunion_bind(sock, &bl->addr, bl->port, &bl->addr);
+ if (ret < 0)
+ goto out_sock;
+
+ ret = listen(sock, 3);
+ if (ret < 0)
+ goto out_sock;
+
+ bl->sock = sock;
+ thread_add_read(bm->master, bmp_accept, bl, sock, &bl->t_accept);
+ return;
+out_sock:
+ close(sock);
+}
+
+static void bmp_listener_stop(struct bmp_listener *bl)
+{
+ THREAD_OFF(bl->t_accept);
+
+ if (bl->sock != -1)
+ close(bl->sock);
+ bl->sock = -1;
+}
+
+static struct bmp_active *bmp_active_find(struct bmp_targets *bt,
+ const char *hostname, int port)
+{
+ struct bmp_active dummy;
+ dummy.hostname = (char *)hostname;
+ dummy.port = port;
+ return bmp_actives_find(&bt->actives, &dummy);
+}
+
+static struct bmp_active *bmp_active_get(struct bmp_targets *bt,
+ const char *hostname, int port)
+{
+ struct bmp_active *ba;
+
+ ba = bmp_active_find(bt, hostname, port);
+ if (ba)
+ return ba;
+
+ ba = XCALLOC(MTYPE_BMP_ACTIVE, sizeof(*ba));
+ ba->targets = bt;
+ ba->hostname = XSTRDUP(MTYPE_TMP, hostname);
+ ba->port = port;
+ ba->minretry = BMP_DFLT_MINRETRY;
+ ba->maxretry = BMP_DFLT_MAXRETRY;
+ ba->socket = -1;
+
+ bmp_actives_add(&bt->actives, ba);
+ return ba;
+}
+
+static void bmp_active_put(struct bmp_active *ba)
+{
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ bmp_actives_del(&ba->targets->actives, ba);
+
+ if (ba->bmp) {
+ ba->bmp->active = NULL;
+ bmp_close(ba->bmp);
+ bmp_free(ba->bmp);
+ }
+ if (ba->socket != -1)
+ close(ba->socket);
+
+ XFREE(MTYPE_TMP, ba->hostname);
+ XFREE(MTYPE_BMP_ACTIVE, ba);
+}
+
+static void bmp_active_setup(struct bmp_active *ba);
+
+static void bmp_active_connect(struct bmp_active *ba)
+{
+ enum connect_result res;
+ char buf[SU_ADDRSTRLEN];
+
+ for (; ba->addrpos < ba->addrtotal; ba->addrpos++) {
+ ba->socket = sockunion_socket(&ba->addrs[ba->addrpos]);
+ if (ba->socket < 0) {
+ zlog_warn("bmp[%s]: failed to create socket",
+ ba->hostname);
+ continue;
+ }
+
+ set_nonblocking(ba->socket);
+ res = sockunion_connect(ba->socket, &ba->addrs[ba->addrpos],
+ htons(ba->port), 0);
+ switch (res) {
+ case connect_error:
+ sockunion2str(&ba->addrs[ba->addrpos], buf,
+ sizeof(buf));
+ zlog_warn("bmp[%s]: failed to connect to %s:%d",
+ ba->hostname, buf, ba->port);
+ close(ba->socket);
+ ba->socket = -1;
+ continue;
+ case connect_success:
+ break;
+ case connect_in_progress:
+ bmp_active_setup(ba);
+ return;
+ }
+ }
+
+ /* exhausted all addresses */
+ ba->curretry += ba->curretry / 2;
+ bmp_active_setup(ba);
+}
+
+static void bmp_active_resolved(struct resolver_query *resq, int numaddrs,
+ union sockunion *addr)
+{
+ struct bmp_active *ba = container_of(resq, struct bmp_active, resq);
+ unsigned i;
+
+ if (numaddrs <= 0) {
+ zlog_warn("bmp[%s]: hostname resolution failed", ba->hostname);
+ ba->curretry += ba->curretry / 2;
+ bmp_active_setup(ba);
+ return;
+ }
+ if (numaddrs > (int)array_size(ba->addrs))
+ numaddrs = array_size(ba->addrs);
+
+ ba->addrpos = 0;
+ ba->addrtotal = numaddrs;
+ for (i = 0; i < ba->addrtotal; i++)
+ memcpy(&ba->addrs[i], &addr[i], sizeof(ba->addrs[0]));
+
+ bmp_active_connect(ba);
+}
+
+static int bmp_active_thread(struct thread *t)
+{
+ struct bmp_active *ba = THREAD_ARG(t);
+ socklen_t slen;
+ int status, ret;
+ char buf[SU_ADDRSTRLEN];
+
+ /* all three event callbacks end up here, though only the timer or
+ * read+write handlers are active at a time */
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ if (ba->socket == -1) {
+ resolver_resolve(&ba->resq, AF_UNSPEC, ba->hostname,
+ bmp_active_resolved);
+ return 0;
+ }
+
+ slen = sizeof(status);
+ ret = getsockopt(ba->socket, SOL_SOCKET, SO_ERROR, (void *)&status,
+ &slen);
+
+ sockunion2str(&ba->addrs[ba->addrpos], buf, sizeof(buf));
+ if (ret < 0 || status != 0) {
+ zlog_warn("bmp[%s]: failed to connect to %s:%d",
+ ba->hostname, buf, ba->port);
+ goto out_next;
+ }
+
+ zlog_warn("bmp[%s]: outbound connection to %s:%d",
+ ba->hostname, buf, ba->port);
+
+ ba->bmp = bmp_open(ba->targets, ba->socket);
+ if (!ba->bmp)
+ goto out_next;
+
+ ba->bmp->active = ba;
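+ /* the fd now belongs to the bmp session; forget it here so
+ * bmp_active_put() does not close it */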
+ ba->socket = -1;
+ ba->curretry = ba->minretry;
+ return 0;
+
+out_next:
+ close(ba->socket);
+ ba->socket = -1;
+ ba->addrpos++;
+ bmp_active_connect(ba);
+ return 0;
+}
+
+static void bmp_active_disconnected(struct bmp_active *ba)
+{
+ ba->bmp = NULL;
+ bmp_active_setup(ba);
+}
+
+static void bmp_active_setup(struct bmp_active *ba)
+{
+ THREAD_OFF(ba->t_timer);
+ THREAD_OFF(ba->t_read);
+ THREAD_OFF(ba->t_write);
+
+ if (ba->bmp)
+ return;
+ if (ba->resq.callback)
+ return;
+
+ if (ba->curretry > ba->maxretry)
+ ba->curretry = ba->maxretry;
+
+ if (ba->socket == -1)
+ thread_add_timer_msec(bm->master, bmp_active_thread, ba,
+ ba->curretry, &ba->t_timer);
+ else {
+ thread_add_read(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_read);
+ thread_add_write(bm->master, bmp_active_thread, ba, ba->socket,
+ &ba->t_write);
+ }
+}
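
The retry handling above grows curretry by half of itself each time the address list is exhausted (or resolution fails) and clamps it to maxretry before rescheduling, i.e. a 1.5x exponential backoff between BMP_DFLT_MINRETRY (30000 ms) and BMP_DFLT_MAXRETRY (720000 ms) by default. A minimal standalone sketch of just that arithmetic, not of the module's actual scheduling code:

    #include <stdio.h>

    /* Illustrative only: mirrors the backoff arithmetic used above
     * (curretry += curretry / 2, clamped to maxretry). */
    int main(void)
    {
            unsigned curretry = 30000;      /* BMP_DFLT_MINRETRY */
            unsigned maxretry = 720000;     /* BMP_DFLT_MAXRETRY */

            for (int attempt = 1; attempt <= 10; attempt++) {
                    printf("attempt %2d: wait %u ms\n", attempt, curretry);
                    curretry += curretry / 2;
                    if (curretry > maxretry)
                            curretry = maxretry;
            }
            return 0;
    }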
+
+static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "};
+
+#define BMP_STR "BGP Monitoring Protocol\n"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "bgp_bmp_clippy.c"
+#endif
+
+DEFPY_NOSH(bmp_targets_main,
+ bmp_targets_cmd,
+ "bmp targets BMPTARGETS",
+ BMP_STR
+ "Create BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_get(bgp, bmptargets);
+
+ VTY_PUSH_CONTEXT_SUB(BMP_NODE, bt);
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_bmp_targets_main,
+ no_bmp_targets_cmd,
+ "no bmp targets BMPTARGETS",
+ NO_STR
+ BMP_STR
+ "Delete BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_targets *bt;
+
+ bt = bmp_targets_find1(bgp, bmptargets);
+ if (!bt) {
+ vty_out(vty, "%% BMP target group not found\n");
+ return CMD_WARNING;
+ }
+ bmp_targets_put(bt);
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_listener_main,
+ bmp_listener_cmd,
+ "bmp listener <X:X::X:X|A.B.C.D> port (1-65535)",
+ BMP_STR
+ "Listen for inbound BMP connections\n"
+ "IPv6 address to listen on\n"
+ "IPv4 address to listen on\n"
+ "TCP Port number\n"
+ "TCP Port number\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_listener *bl;
+
+ bl = bmp_listener_get(bt, listener, port);
+ if (bl->sock == -1)
+ bmp_listener_start(bl);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_bmp_listener_main,
+ no_bmp_listener_cmd,
+ "no bmp listener <X:X::X:X|A.B.C.D> port (1-65535)",
+ NO_STR
+ BMP_STR
+ "Create BMP listener\n"
+ "IPv6 address to listen on\n"
+ "IPv4 address to listen on\n"
+ "TCP Port number\n"
+ "TCP Port number\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_listener *bl;
+
+ bl = bmp_listener_find(bt, listener, port);
+ if (!bl) {
+ vty_out(vty, "%% BMP listener not found\n");
+ return CMD_WARNING;
+ }
+ bmp_listener_stop(bl);
+ bmp_listener_put(bl);
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_connect,
+ bmp_connect_cmd,
+ "[no] bmp connect HOSTNAME port (1-65535) "
+ "{min-retry (100-86400000)"
+ "|max-retry (100-86400000)}",
+ NO_STR
+ BMP_STR
+ "Actively establish connection to monitoring station\n"
+ "Monitoring station hostname or address\n"
+ "TCP port\n"
+ "TCP port\n"
+ "Minimum connection retry interval\n"
+ "Minimum connection retry interval (milliseconds)\n"
+ "Maximum connection retry interval\n"
+ "Maximum connection retry interval (milliseconds)\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp_active *ba;
+
+ if (no) {
+ ba = bmp_active_find(bt, hostname, port);
+ if (!ba) {
+ vty_out(vty, "%% No such active connection found\n");
+ return CMD_WARNING;
+ }
+ bmp_active_put(ba);
+ return CMD_SUCCESS;
+ }
+
+ ba = bmp_active_get(bt, hostname, port);
+ if (min_retry_str)
+ ba->minretry = min_retry;
+ if (max_retry_str)
+ ba->maxretry = max_retry;
+ ba->curretry = ba->minretry;
+ bmp_active_setup(ba);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_acl,
+ bmp_acl_cmd,
+ "[no] <ip|ipv6>$af access-list WORD",
+ NO_STR
+ IP_STR
+ IPV6_STR
+ "Access list to restrict BMP sessions\n"
+ "Access list name\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ char **what;
+
+ if (no)
+ access_list = NULL;
+ if (!strcmp(af, "ipv6"))
+ what = &bt->acl6_name;
+ else
+ what = &bt->acl_name;
+
+ XFREE(MTYPE_BMP_ACLNAME, *what);
+ if (access_list)
+ *what = XSTRDUP(MTYPE_BMP_ACLNAME, access_list);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_stats_cfg,
+ bmp_stats_cmd,
+ "[no] bmp stats [interval (100-86400000)]",
+ NO_STR
+ BMP_STR
+ "Send BMP statistics messages\n"
+ "Specify BMP stats interval\n"
+ "Interval (milliseconds) to send BMP Stats in\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+
+ THREAD_OFF(bt->t_stats);
+ if (no)
+ bt->stat_msec = 0;
+ else if (interval_str)
+ bt->stat_msec = interval;
+ else
+ bt->stat_msec = BMP_STAT_DEFAULT_TIMER;
+
+ if (bt->stat_msec)
+ thread_add_timer_msec(bm->master, bmp_stats, bt, bt->stat_msec,
+ &bt->t_stats);
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_monitor_cfg,
+ bmp_monitor_cmd,
+ "[no] bmp monitor "BGP_AFI_CMD_STR" <unicast|multicast> <pre-policy|post-policy>$policy",
+ NO_STR
+ BMP_STR
+ "Send BMP route monitoring messages\n"
+ BGP_AFI_HELP_STR
+ "Address family modifier\n"
+ "Address family modifier\n"
+ "Send state before policy and filter processing\n"
+ "Send state with policy and filters applied\n")
+{
+ int index = 0;
+ uint8_t flag, prev;
+ afi_t afi;
+ safi_t safi;
+
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp *bmp;
+
+ argv_find_and_parse_afi(argv, argc, &index, &afi);
+ argv_find_and_parse_safi(argv, argc, &index, &safi);
+
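+ /* "pre-policy" vs "post-policy": distinguish by the second character */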
+ if (policy[1] == 'r')
+ flag = BMP_MON_PREPOLICY;
+ else
+ flag = BMP_MON_POSTPOLICY;
+
+ prev = bt->afimon[afi][safi];
+ if (no)
+ bt->afimon[afi][safi] &= ~flag;
+ else
+ bt->afimon[afi][safi] |= flag;
+
+ if (prev == bt->afimon[afi][safi])
+ return CMD_SUCCESS;
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ if (bmp->syncafi == afi && bmp->syncsafi == safi) {
+ bmp->syncafi = AFI_MAX;
+ bmp->syncsafi = SAFI_MAX;
+ }
+
+ if (!bt->afimon[afi][safi]) {
+ bmp->afistate[afi][safi] = BMP_AFI_INACTIVE;
+ continue;
+ }
+
+ bmp->afistate[afi][safi] = BMP_AFI_NEEDSYNC;
+ }
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_mirror_cfg,
+ bmp_mirror_cmd,
+ "[no] bmp mirror",
+ NO_STR
+ BMP_STR
+ "Send BMP route mirroring messages\n")
+{
+ VTY_DECLVAR_CONTEXT_SUB(bmp_targets, bt);
+ struct bmp *bmp;
+
+ if (bt->mirror == !no)
+ return CMD_SUCCESS;
+
+ bt->mirror = !no;
+ if (bt->mirror)
+ return CMD_SUCCESS;
+
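+ /* mirroring was just disabled: flush each session's pending mirror
+ * queue, freeing entries nobody references anymore */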
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ struct bmp_mirrorq *bmq;
+
+ while ((bmq = bmp_pull_mirror(bmp)))
+ if (!bmq->refcount)
+ XFREE(MTYPE_BMP_MIRRORQ, bmq);
+ }
+ return CMD_SUCCESS;
+}
+
+DEFPY(bmp_mirror_limit_cfg,
+ bmp_mirror_limit_cmd,
+ "bmp mirror buffer-limit (0-4294967294)",
+ BMP_STR
+ "Route Mirroring settings\n"
+ "Configure maximum memory used for buffered mirroring messages\n"
+ "Limit in bytes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_get(bgp);
+ bmpbgp->mirror_qsizelimit = buffer_limit;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(no_bmp_mirror_limit_cfg,
+ no_bmp_mirror_limit_cmd,
+ "no bmp mirror buffer-limit [(0-4294967294)]",
+ NO_STR
+ BMP_STR
+ "Route Mirroring settings\n"
+ "Configure maximum memory used for buffered mirroring messages\n"
+ "Limit in bytes\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ struct bmp_bgp *bmpbgp;
+
+ bmpbgp = bmp_bgp_get(bgp);
+ bmpbgp->mirror_qsizelimit = ~0UL;
+
+ return CMD_SUCCESS;
+}
+
+
+DEFPY(show_bmp,
+ show_bmp_cmd,
+ "show bmp",
+ SHOW_STR
+ BMP_STR)
+{
+ struct bmp_bgp *bmpbgp;
+ struct bmp_targets *bt;
+ struct bmp_listener *bl;
+ struct bmp *bmp;
+ struct ttable *tt;
+ char buf[SU_ADDRSTRLEN];
+
+ frr_each(bmp_bgph, &bmp_bgph, bmpbgp) {
+ vty_out(vty, "BMP state for BGP %s:\n\n",
+ bmpbgp->bgp->name_pretty);
+ vty_out(vty, " Route Mirroring %9zu bytes (%zu messages) pending\n",
+ bmpbgp->mirror_qsize,
+ bmp_mirrorq_count(&bmpbgp->mirrorq));
+ vty_out(vty, " %9zu bytes maximum buffer used\n",
+ bmpbgp->mirror_qsizemax);
+ if (bmpbgp->mirror_qsizelimit != ~0UL)
+ vty_out(vty, " %9zu bytes buffer size limit\n",
+ bmpbgp->mirror_qsizelimit);
+ vty_out(vty, "\n");
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ vty_out(vty, " Targets \"%s\":\n", bt->name);
+ vty_out(vty, " Route Mirroring %sabled\n",
+ bt->mirror ? "en" : "dis");
+
+ afi_t afi;
+ safi_t safi;
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ const char *str = NULL;
+
+ switch (bt->afimon[afi][safi]) {
+ case BMP_MON_PREPOLICY:
+ str = "pre-policy";
+ break;
+ case BMP_MON_POSTPOLICY:
+ str = "post-policy";
+ break;
+ case BMP_MON_PREPOLICY | BMP_MON_POSTPOLICY:
+ str = "pre-policy and post-policy";
+ break;
+ }
+ if (!str)
+ continue;
+ vty_out(vty, " Route Monitoring %s %s %s\n",
+ afi2str(afi), safi2str(safi), str);
+ }
+
+ vty_out(vty, " Listeners:\n");
+ frr_each (bmp_listeners, &bt->listeners, bl)
+ vty_out(vty, " %s:%d\n",
+ sockunion2str(&bl->addr, buf,
+ SU_ADDRSTRLEN), bl->port);
+
+ vty_out(vty, "\n %zu connected clients:\n",
+ bmp_session_count(&bt->sessions));
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "remote|uptime|MonSent|MirrSent|MirrLost|ByteSent|ByteQ|ByteQKernel");
+ ttable_rowseps(tt, 0, BOTTOM, true, '-');
+
+ frr_each (bmp_session, &bt->sessions, bmp) {
+ uint64_t total;
+ size_t q, kq;
+
+ pullwr_stats(bmp->pullwr, &total, &q, &kq);
+
+ ttable_add_row(tt, "%s|-|%Lu|%Lu|%Lu|%Lu|%zu|%zu",
+ bmp->remote,
+ bmp->cnt_update,
+ bmp->cnt_mirror,
+ bmp->cnt_mirror_overruns,
+ total, q, kq);
+ }
+ char *out = ttable_dump(tt, "\n");
+ vty_out(vty, "%s", out);
+ XFREE(MTYPE_TMP, out);
+ ttable_del(tt);
+ vty_out(vty, "\n");
+ }
+ }
+
+ return CMD_SUCCESS;
+}
+
+static int bmp_config_write(struct bgp *bgp, struct vty *vty)
+{
+ struct bmp_bgp *bmpbgp = bmp_bgp_find(bgp);
+ struct bmp_targets *bt;
+ struct bmp_listener *bl;
+ struct bmp_active *ba;
+ char buf[SU_ADDRSTRLEN];
+ afi_t afi;
+ safi_t safi;
+
+ if (!bmpbgp)
+ return 0;
+
+ if (bmpbgp->mirror_qsizelimit != ~0UL)
+ vty_out(vty, " !\n bmp mirror buffer-limit %zu\n",
+ bmpbgp->mirror_qsizelimit);
+
+ frr_each(bmp_targets, &bmpbgp->targets, bt) {
+ vty_out(vty, " !\n bmp targets %s\n", bt->name);
+
+ if (bt->acl6_name)
+ vty_out(vty, " ipv6 access-list %s\n", bt->acl6_name);
+ if (bt->acl_name)
+ vty_out(vty, " ip access-list %s\n", bt->acl_name);
+
+ if (bt->stat_msec)
+ vty_out(vty, " bmp stats interval %d\n",
+ bt->stat_msec);
+
+ if (bt->mirror)
+ vty_out(vty, " bmp mirror\n");
+
+ FOREACH_AFI_SAFI (afi, safi) {
+ const char *afi_str = (afi == AFI_IP) ? "ipv4" : "ipv6";
+
+ if (bt->afimon[afi][safi] & BMP_MON_PREPOLICY)
+ vty_out(vty, " bmp monitor %s %s pre-policy\n",
+ afi_str, safi2str(safi));
+ if (bt->afimon[afi][safi] & BMP_MON_POSTPOLICY)
+ vty_out(vty, " bmp monitor %s %s post-policy\n",
+ afi_str, safi2str(safi));
+ }
+ frr_each (bmp_listeners, &bt->listeners, bl)
+ vty_out(vty, " \n bmp listener %s port %d\n",
+ sockunion2str(&bl->addr, buf, SU_ADDRSTRLEN),
+ bl->port);
+
+ frr_each (bmp_actives, &bt->actives, ba)
+ vty_out(vty, " bmp connect %s port %u min-retry %u max-retry %u\n",
+ ba->hostname, ba->port, ba->minretry, ba->maxretry);
+ }
+
+ return 0;
+}
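
Taken together, bmp_config_write() above reproduces the BMP configuration under the enclosing router bgp block. A hypothetical example of what it emits for one targets group; all names, addresses, ports and the buffer limit below are made up for illustration, and indentation is approximated:

    !
    bmp mirror buffer-limit 4194304
    !
    bmp targets collector1
     ip access-list bmp-clients
     bmp stats interval 60000
     bmp mirror
     bmp monitor ipv4 unicast pre-policy
     bmp listener 192.0.2.10 port 14790
     bmp connect bmp.example.net port 14790 min-retry 30000 max-retry 720000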
+
+static int bgp_bmp_init(struct thread_master *tm)
+{
+ install_node(&bmp_node, NULL);
+ install_default(BMP_NODE);
+ install_element(BGP_NODE, &bmp_targets_cmd);
+ install_element(BGP_NODE, &no_bmp_targets_cmd);
+
+ install_element(BMP_NODE, &bmp_listener_cmd);
+ install_element(BMP_NODE, &no_bmp_listener_cmd);
+ install_element(BMP_NODE, &bmp_connect_cmd);
+ install_element(BMP_NODE, &bmp_acl_cmd);
+ install_element(BMP_NODE, &bmp_stats_cmd);
+ install_element(BMP_NODE, &bmp_monitor_cmd);
+ install_element(BMP_NODE, &bmp_mirror_cmd);
+
+ install_element(BGP_NODE, &bmp_mirror_limit_cmd);
+ install_element(BGP_NODE, &no_bmp_mirror_limit_cmd);
+
+ install_element(VIEW_NODE, &show_bmp_cmd);
+
+ resolver_init(tm);
+ return 0;
+}
+
+static int bgp_bmp_module_init(void)
+{
+ hook_register(bgp_packet_dump, bmp_mirror_packet);
+ hook_register(bgp_packet_send, bmp_outgoing_packet);
+ hook_register(peer_status_changed, bmp_peer_established);
+ hook_register(peer_backward_transition, bmp_peer_backward);
+ hook_register(bgp_process, bmp_process);
+ hook_register(bgp_inst_config_write, bmp_config_write);
+ hook_register(bgp_inst_delete, bmp_bgp_del);
+ hook_register(frr_late_init, bgp_bmp_init);
+ return 0;
+}
+
+FRR_MODULE_SETUP(.name = "bgpd_bmp", .version = FRR_VERSION,
+ .description = "bgpd BMP module",
+ .init = bgp_bmp_module_init)
diff --git a/bgpd/bgp_bmp.h b/bgpd/bgp_bmp.h
new file mode 100644
index 0000000000..9d270e808c
--- /dev/null
+++ b/bgpd/bgp_bmp.h
@@ -0,0 +1,303 @@
+/* BMP support.
+ * Copyright (C) 2018 Yasuhiro Ohara
+ * Copyright (C) 2019 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _BGP_BMP_H_
+#define _BGP_BMP_H_
+
+#include "zebra.h"
+#include "typesafe.h"
+#include "pullwr.h"
+#include "qobj.h"
+#include "resolver.h"
+
+#define BMP_VERSION_3 3
+
+#define BMP_LENGTH_POS 1
+
+/* BMP message types */
+#define BMP_TYPE_ROUTE_MONITORING 0
+#define BMP_TYPE_STATISTICS_REPORT 1
+#define BMP_TYPE_PEER_DOWN_NOTIFICATION 2
+#define BMP_TYPE_PEER_UP_NOTIFICATION 3
+#define BMP_TYPE_INITIATION 4
+#define BMP_TYPE_TERMINATION 5
+#define BMP_TYPE_ROUTE_MIRRORING 6
+
+#define BMP_READ_BUFSIZ 1024
+
+/* bmp->state */
+#define BMP_None 0
+#define BMP_PeerUp 2
+#define BMP_Run 3
+
+/* This one is for BMP Route Monitoring messages, i.e. delivering updates
+ * in somewhat processed (as opposed to fully raw, see mirroring below) form.
+ * The RFC explicitly says that we can skip old updates if we haven't sent them out
+ * yet and another newer update for the same prefix arrives.
+ *
+ * So, at most one of these can exist for each (bgp, afi, safi, prefix, peerid)
+ * tuple; if some prefix is "re-added" to the queue, the existing entry is
+ * instead moved to the end of the queue. This ensures that the queue size is
+ * bounded by the BGP table size.
+ *
+ * bmp_qlist is the queue itself while bmp_qhash is used to efficiently check
+ * whether a tuple is already on the list. The queue is maintained per
+ * bmp_targets group.
+ *
+ * refcount = number of "struct bmp *" whose queue position is before this
+ * entry, i.e. number of BMP sessions where we still want to send this out.
+ * Decremented on send so we know when we're done with an entry (i.e. this
+ * always happens from the front of the queue.)
+ */
+
+PREDECL_DLIST(bmp_qlist)
+PREDECL_HASH(bmp_qhash)
+
+struct bmp_queue_entry {
+ struct bmp_qlist_item bli;
+ struct bmp_qhash_item bhi;
+
+ struct prefix p;
+ uint64_t peerid;
+ afi_t afi;
+ safi_t safi;
+
+ size_t refcount;
+};
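
As a minimal illustration of the refcount lifecycle described in the comment above: an entry starts with one reference per session that still has to send it, and the last session to consume it frees it. This is a toy model with hypothetical names, not the module's actual bmp_qlist/bmp_qhash handling (no dedup hash or per-session queue positions):

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_entry {
            size_t refcount;        /* sessions that still need to send this */
    };

    static struct toy_entry *toy_enqueue(size_t nsessions)
    {
            struct toy_entry *e = calloc(1, sizeof(*e));

            e->refcount = nsessions;        /* one reference per interested session */
            return e;
    }

    static void toy_sent(struct toy_entry *e)
    {
            /* a session has written the update out; drop its reference */
            if (--e->refcount == 0)
                    free(e);        /* last consumer: entry leaves the queue */
    }

    int main(void)
    {
            struct toy_entry *e = toy_enqueue(3);   /* three BMP sessions */

            toy_sent(e);
            toy_sent(e);
            toy_sent(e);    /* freed inside this call */
            printf("entry consumed by all three sessions\n");
            return 0;
    }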
+
+/* This is for BMP Route Mirroring, which feeds fully raw BGP PDUs out to BMP
+ * receivers. So, this goes directly off packet RX/TX handling instead of
+ * grabbing bits from tables.
+ *
+ * There is *one* queue for each "struct bgp *" where we throw everything on,
+ * with a size limit. Refcount works the same as for monitoring above.
+ */
+
+PREDECL_LIST(bmp_mirrorq)
+
+struct bmp_mirrorq {
+ struct bmp_mirrorq_item bmi;
+
+ size_t refcount;
+ uint64_t peerid;
+ struct timeval tv;
+
+ size_t len;
+ uint8_t data[0];
+};
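
The trailing data[0] member means each mirror queue entry is a single allocation sized for the raw PDU it carries. A sketch of that allocation pattern with hypothetical names; the real code allocates these entries with the MTYPE_BMP_MIRRORQ memory type rather than plain calloc():

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_mirrorq {
            size_t refcount;
            size_t len;
            uint8_t data[0];        /* raw BGP PDU bytes follow the header */
    };

    /* one allocation covers both the header and a copy of the PDU */
    static struct toy_mirrorq *toy_mirror_new(const uint8_t *pdu, size_t len)
    {
            struct toy_mirrorq *q = calloc(1, sizeof(*q) + len);

            q->len = len;
            memcpy(q->data, pdu, len);
            return q;
    }

    int main(void)
    {
            uint8_t pdu[19] = {0};  /* placeholder for a minimal BGP PDU */
            struct toy_mirrorq *q = toy_mirror_new(pdu, sizeof(pdu));

            free(q);
            return 0;
    }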
+
+enum {
+ BMP_AFI_INACTIVE = 0,
+ BMP_AFI_NEEDSYNC,
+ BMP_AFI_SYNC,
+ BMP_AFI_LIVE,
+};
+
+PREDECL_LIST(bmp_session)
+
+struct bmp_active;
+struct bmp_targets;
+
+/* an established BMP session to a peer */
+struct bmp {
+ struct bmp_session_item bsi;
+ struct bmp_targets *targets;
+ struct bmp_active *active;
+
+ int socket;
+ char remote[SU_ADDRSTRLEN + 6];
+ struct thread *t_read;
+
+ struct pullwr *pullwr;
+
+ int state;
+
+ /* queue positions must remain synced with refcounts in the items.
+ * Whenever appending a queue item, we need to know the correct number
+ * of "struct bmp *" that want it, and when moving these positions
+ * ahead we need to make sure that refcount is decremented. Also, on
+ * disconnects we need to walk the queue and drop our reference.
+ */
+ struct bmp_queue_entry *queuepos;
+ struct bmp_mirrorq *mirrorpos;
+ bool mirror_lost;
+
+ /* enum BMP_AFI_* */
+ uint8_t afistate[AFI_MAX][SAFI_MAX];
+
+ /* counters for the various BMP packet types */
+ uint64_t cnt_update, cnt_mirror;
+ /* number of times this peer wasn't fast enough in consuming the
+ * mirror queue
+ */
+ uint64_t cnt_mirror_overruns;
+ struct timeval t_up;
+
+ /* synchronization / startup works by repeatedly finding the next
+ * table entry, the sync* fields note down what we sent last
+ */
+ struct prefix syncpos;
+ uint64_t syncpeerid;
+ afi_t syncafi;
+ safi_t syncsafi;
+};
+
+/* config & state for an active outbound connection. When the connection
+ * succeeds, "bmp" is set up.
+ */
+
+PREDECL_SORTLIST_UNIQ(bmp_actives)
+
+#define BMP_DFLT_MINRETRY 30000
+#define BMP_DFLT_MAXRETRY 720000
+
+struct bmp_active {
+ struct bmp_actives_item bai;
+ struct bmp_targets *targets;
+ struct bmp *bmp;
+
+ char *hostname;
+ int port;
+ unsigned minretry, maxretry;
+
+ struct resolver_query resq;
+
+ unsigned curretry;
+ unsigned addrpos, addrtotal;
+ union sockunion addrs[8];
+ int socket;
+ struct thread *t_timer, *t_read, *t_write;
+};
+
+/* config & state for passive / listening sockets */
+PREDECL_SORTLIST_UNIQ(bmp_listeners)
+
+struct bmp_listener {
+ struct bmp_listeners_item bli;
+
+ struct bmp_targets *targets;
+
+ union sockunion addr;
+ int port;
+
+ struct thread *t_accept;
+ int sock;
+};
+
+/* bmp_targets - plural since it may contain multiple bmp_listener &
+ * bmp_active items. If they share the same config, BMP sessions should be
+ * put in the same targets group since that's a bit more efficient.
+ */
+PREDECL_SORTLIST_UNIQ(bmp_targets)
+
+struct bmp_targets {
+ struct bmp_targets_item bti;
+
+ struct bmp_bgp *bmpbgp;
+ struct bgp *bgp;
+ char *name;
+
+ struct bmp_listeners_head listeners;
+
+ char *acl_name;
+ char *acl6_name;
+#define BMP_STAT_DEFAULT_TIMER 60000
+ int stat_msec;
+
+ /* only IPv4 & IPv6 / unicast & multicast supported for now */
+#define BMP_MON_PREPOLICY (1 << 0)
+#define BMP_MON_POSTPOLICY (1 << 1)
+ uint8_t afimon[AFI_MAX][SAFI_MAX];
+ bool mirror;
+
+ struct bmp_actives_head actives;
+
+ struct thread *t_stats;
+ struct bmp_session_head sessions;
+
+ struct bmp_qhash_head updhash;
+ struct bmp_qlist_head updlist;
+
+ uint64_t cnt_accept, cnt_aclrefused;
+
+ QOBJ_FIELDS
+};
+DECLARE_QOBJ_TYPE(bmp_targets)
+
+/* per struct peer * data. Lookup by peer->qobj_node.nid, created on demand,
+ * deleted in peer_backward hook. */
+PREDECL_HASH(bmp_peerh)
+
+struct bmp_bgp_peer {
+ struct bmp_peerh_item bpi;
+
+ uint64_t peerid;
+ /* struct peer *peer; */
+
+ uint8_t *open_rx;
+ size_t open_rx_len;
+
+ uint8_t *open_tx;
+ size_t open_tx_len;
+};
+
+/* per struct bgp * data */
+PREDECL_HASH(bmp_bgph)
+
+struct bmp_bgp {
+ struct bmp_bgph_item bbi;
+
+ struct bgp *bgp;
+ struct bmp_targets_head targets;
+
+ struct bmp_mirrorq_head mirrorq;
+ size_t mirror_qsize, mirror_qsizemax;
+
+ size_t mirror_qsizelimit;
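+ /* ~0UL means no limit configured (see "bmp mirror buffer-limit") */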
+};
+
+enum {
+ BMP_PEERDOWN_LOCAL_NOTIFY = 1,
+ BMP_PEERDOWN_LOCAL_FSM = 2,
+ BMP_PEERDOWN_REMOTE_NOTIFY = 3,
+ BMP_PEERDOWN_REMOTE_CLOSE = 4,
+ BMP_PEERDOWN_ENDMONITOR = 5,
+};
+
+enum {
+ BMP_STATS_PFX_REJECTED = 0,
+ BMP_STATS_PFX_DUP_ADV = 1,
+ BMP_STATS_PFX_DUP_WITHDRAW = 2,
+ BMP_STATS_UPD_LOOP_CLUSTER = 3,
+ BMP_STATS_UPD_LOOP_ASPATH = 4,
+ BMP_STATS_UPD_LOOP_ORIGINATOR = 5,
+ BMP_STATS_UPD_LOOP_CONFED = 6,
+ BMP_STATS_SIZE_ADJ_RIB_IN = 7,
+ BMP_STATS_SIZE_LOC_RIB = 8,
+ BMP_STATS_SIZE_ADJ_RIB_IN_SAFI = 9,
+ BMP_STATS_SIZE_LOC_RIB_IN_SAFI = 10,
+ BMP_STATS_UPD_7606_WITHDRAW = 11,
+ BMP_STATS_PFX_7606_WITHDRAW = 12,
+ BMP_STATS_UPD_DUP = 13,
+ BMP_STATS_FRR_NH_INVALID = 65531,
+};
+
+DECLARE_MGROUP(BMP)
+
+#endif /*_BGP_BMP_H_*/
diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c
index 535f36ab5e..640224e759 100644
--- a/bgpd/bgp_dump.c
+++ b/bgpd/bgp_dump.c
@@ -494,13 +494,13 @@ static void bgp_dump_common(struct stream *obuf, struct peer *peer,
}
/* Dump BGP status change. */
-void bgp_dump_state(struct peer *peer, int status_old, int status_new)
+int bgp_dump_state(struct peer *peer)
{
struct stream *obuf;
/* If dump file pointer is disabled return immediately. */
if (bgp_dump_all.fp == NULL)
- return;
+ return 0;
/* Make dump stream. */
obuf = bgp_dump_obuf;
@@ -510,8 +510,8 @@ void bgp_dump_state(struct peer *peer, int status_old, int status_new)
bgp_dump_all.type);
bgp_dump_common(obuf, peer, 1); /* force this in as4speak*/
- stream_putw(obuf, status_old);
- stream_putw(obuf, status_new);
+ stream_putw(obuf, peer->ostatus);
+ stream_putw(obuf, peer->status);
/* Set length. */
bgp_dump_set_size(obuf, MSG_PROTOCOL_BGP4MP);
@@ -519,6 +519,7 @@ void bgp_dump_state(struct peer *peer, int status_old, int status_new)
/* Write to the stream. */
fwrite(STREAM_DATA(obuf), stream_get_endp(obuf), 1, bgp_dump_all.fp);
fflush(bgp_dump_all.fp);
+ return 0;
}
static void bgp_dump_packet_func(struct bgp_dump *bgp_dump, struct peer *peer,
@@ -867,6 +868,7 @@ void bgp_dump_init(void)
install_element(CONFIG_NODE, &no_dump_bgp_all_cmd);
hook_register(bgp_packet_dump, bgp_dump_packet);
+ hook_register(peer_status_changed, bgp_dump_state);
}
void bgp_dump_finish(void)
@@ -878,4 +880,5 @@ void bgp_dump_finish(void)
stream_free(bgp_dump_obuf);
bgp_dump_obuf = NULL;
hook_unregister(bgp_packet_dump, bgp_dump_packet);
+ hook_unregister(peer_status_changed, bgp_dump_state);
}
diff --git a/bgpd/bgp_dump.h b/bgpd/bgp_dump.h
index 5ec0561b05..86c80d481c 100644
--- a/bgpd/bgp_dump.h
+++ b/bgpd/bgp_dump.h
@@ -51,6 +51,6 @@
extern void bgp_dump_init(void);
extern void bgp_dump_finish(void);
-extern void bgp_dump_state(struct peer *, int, int);
+extern int bgp_dump_state(struct peer *);
#endif /* _QUAGGA_BGP_DUMP_H */
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index 3bc3d74de6..50fef00a96 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -505,7 +505,9 @@ static void unmap_vni_from_rt(struct bgp *bgp, struct bgpevpn *vpn,
static void form_auto_rt(struct bgp *bgp, vni_t vni, struct list *rtl)
{
struct ecommunity_val eval;
- struct ecommunity *ecomadd;
+ struct ecommunity *ecomadd, *ecom;
+ bool ecom_found = false;
+ struct listnode *node;
if (bgp->advertise_autort_rfc8365)
vni |= EVPN_AUTORT_VXLAN;
@@ -513,7 +515,12 @@ static void form_auto_rt(struct bgp *bgp, vni_t vni, struct list *rtl)
ecomadd = ecommunity_new();
ecommunity_add_val(ecomadd, &eval);
- listnode_add_sort(rtl, ecomadd);
+ for (ALL_LIST_ELEMENTS_RO(rtl, node, ecom))
+ if (ecommunity_cmp(ecomadd, ecom))
+ ecom_found = true;
+
+ if (!ecom_found)
+ listnode_add_sort(rtl, ecomadd);
}
/*
@@ -4459,7 +4466,8 @@ void bgp_evpn_configure_import_rt_for_vrf(struct bgp *bgp_vrf,
struct ecommunity *ecomadd)
{
/* uninstall routes from vrf */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (is_l3vni_live(bgp_vrf))
+ uninstall_routes_for_vrf(bgp_vrf);
/* Cleanup the RT to VRF mapping */
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
@@ -4471,11 +4479,11 @@ void bgp_evpn_configure_import_rt_for_vrf(struct bgp *bgp_vrf,
listnode_add_sort(bgp_vrf->vrf_import_rtl, ecomadd);
SET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_IMPORT_RT_CFGD);
- /* map VRF to its RTs */
- bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
-
- /* install routes matching the new VRF */
- install_routes_for_vrf(bgp_vrf);
+ /* map VRF to its RTs and install routes matching the new RTs */
+ if (is_l3vni_live(bgp_vrf)) {
+ bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
+ install_routes_for_vrf(bgp_vrf);
+ }
}
void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
@@ -4485,7 +4493,8 @@ void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
struct ecommunity *ecom = NULL;
/* uninstall routes from vrf */
- uninstall_routes_for_vrf(bgp_vrf);
+ if (is_l3vni_live(bgp_vrf))
+ uninstall_routes_for_vrf(bgp_vrf);
/* Cleanup the RT to VRF mapping */
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
@@ -4509,11 +4518,11 @@ void bgp_evpn_unconfigure_import_rt_for_vrf(struct bgp *bgp_vrf,
evpn_auto_rt_import_add_for_vrf(bgp_vrf);
}
- /* map VRFs to its RTs */
- bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
-
- /* install routes matching this new RT */
- install_routes_for_vrf(bgp_vrf);
+ /* map VRF to its RTs and install routes matching this new RT */
+ if (is_l3vni_live(bgp_vrf)) {
+ bgp_evpn_map_vrf_to_its_rts(bgp_vrf);
+ install_routes_for_vrf(bgp_vrf);
+ }
}
void bgp_evpn_configure_export_rt_for_vrf(struct bgp *bgp_vrf,
@@ -5659,6 +5668,8 @@ int bgp_evpn_local_l3vni_del(vni_t l3vni, vrf_id_t vrf_id)
for (ALL_LIST_ELEMENTS(bgp_vrf->l2vnis, node, next, vpn))
bgpevpn_unlink_from_l3vni(vpn);
+ UNSET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_L3VNI_PREFIX_ROUTES_ONLY);
+
/* Delete the instance if it was autocreated */
if (CHECK_FLAG(bgp_vrf->vrf_flags, BGP_VRF_AUTO))
bgp_delete(bgp_vrf);
@@ -5801,6 +5812,9 @@ int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
It needs to be conveyed again to zebra */
bgp_zebra_advertise_gw_macip(bgp, vpn->advertise_gw_macip, vpn->vni);
+ /* advertise svi mac-ip knob to zebra */
+ bgp_zebra_advertise_svi_macip(bgp, vpn->advertise_svi_macip, vpn->vni);
+
return 0;
}
diff --git a/bgpd/bgp_evpn_private.h b/bgpd/bgp_evpn_private.h
index a5a091242f..f6bde2e9fa 100644
--- a/bgpd/bgp_evpn_private.h
+++ b/bgpd/bgp_evpn_private.h
@@ -275,6 +275,11 @@ static inline int is_vni_live(struct bgpevpn *vpn)
return (CHECK_FLAG(vpn->flags, VNI_FLAG_LIVE));
}
+static inline int is_l3vni_live(struct bgp *bgp_vrf)
+{
+ return (bgp_vrf->l3vni && bgp_vrf->l3vni_svi_ifindex);
+}
+
static inline int is_rd_configured(struct bgpevpn *vpn)
{
return (CHECK_FLAG(vpn->flags, VNI_FLAG_RD_CFGD));
diff --git a/bgpd/bgp_evpn_vty.c b/bgpd/bgp_evpn_vty.c
index e6d81e54c4..30380f6add 100644
--- a/bgpd/bgp_evpn_vty.c
+++ b/bgpd/bgp_evpn_vty.c
@@ -3696,7 +3696,7 @@ DEFUN(show_bgp_l2vpn_evpn_es,
*/
DEFUN(show_bgp_l2vpn_evpn_summary,
show_bgp_l2vpn_evpn_summary_cmd,
- "show bgp [vrf VRFNAME] l2vpn evpn summary [json]",
+ "show bgp [vrf VRFNAME] l2vpn evpn summary [failed] [json]",
SHOW_STR
BGP_STR
"bgp vrf\n"
@@ -3704,15 +3704,20 @@ DEFUN(show_bgp_l2vpn_evpn_summary,
L2VPN_HELP_STR
EVPN_HELP_STR
"Summary of BGP neighbor status\n"
+ "Show only sessions not in Established state\n"
JSON_STR)
{
int idx_vrf = 0;
bool uj = use_json(argc, argv);
char *vrf = NULL;
+ bool show_failed = false;
if (argv_find(argv, argc, "vrf", &idx_vrf))
vrf = argv[++idx_vrf]->arg;
- return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN, uj);
+ if (argv_find(argv, argc, "failed", &idx_vrf))
+ show_failed = true;
+ return bgp_show_summary_vty(vty, vrf, AFI_L2VPN, SAFI_EVPN,
+ show_failed, uj);
}
/*
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index c9c6fc157e..79c873e601 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -57,7 +57,7 @@
#include "bgpd/bgp_zebra.h"
DEFINE_HOOK(peer_backward_transition, (struct peer * peer), (peer))
-DEFINE_HOOK(peer_established, (struct peer * peer), (peer))
+DEFINE_HOOK(peer_status_changed, (struct peer * peer), (peer))
/* Definition of display strings corresponding to FSM events. This should be
* kept consistent with the events defined in bgpd.h
@@ -179,9 +179,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
* on various buffers. Those need to be transferred or dropped,
* otherwise we'll get spurious failures during session establishment.
*/
- pthread_mutex_lock(&peer->io_mtx);
- pthread_mutex_lock(&from_peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
fd = peer->fd;
peer->fd = from_peer->fd;
from_peer->fd = fd;
@@ -222,8 +220,6 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
ringbuf_copy(peer->ibuf_work, from_peer->ibuf_work,
ringbuf_remain(from_peer->ibuf_work));
}
- pthread_mutex_unlock(&from_peer->io_mtx);
- pthread_mutex_unlock(&peer->io_mtx);
peer->as = from_peer->as;
peer->v_holdtime = from_peer->v_holdtime;
@@ -244,6 +240,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
from_peer->last_event = last_evt;
from_peer->last_major_event = last_maj_evt;
peer->remote_id = from_peer->remote_id;
+ peer->last_reset = from_peer->last_reset;
if (from_peer->hostname != NULL) {
if (peer->hostname) {
@@ -551,7 +548,11 @@ const char *peer_down_str[] = {"",
"Intf peering v6only config change",
"BFD down received",
"Interface down",
- "Neighbor address lost"};
+ "Neighbor address lost",
+ "Waiting for NHT",
+ "Waiting for Peer IPv6 LLA",
+ "Waiting for VRF to be initialized",
+ "No AFI/SAFI activated for peer"};
static int bgp_graceful_restart_timer_expire(struct thread *thread)
{
@@ -961,8 +962,6 @@ void bgp_fsm_change_status(struct peer *peer, int status)
struct bgp *bgp;
uint32_t peer_count;
- bgp_dump_state(peer, peer->status, status);
-
bgp = peer->bgp;
peer_count = bgp->established_peers;
@@ -1024,6 +1023,9 @@ void bgp_fsm_change_status(struct peer *peer, int status)
/* Save event that caused status change. */
peer->last_major_event = peer->cur_event;
+ /* Operations after status change */
+ hook_call(peer_status_changed, peer);
+
if (status == Established)
UNSET_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
@@ -1162,8 +1164,7 @@ int bgp_stop(struct peer *peer)
BGP_TIMER_OFF(peer->t_routeadv);
/* Clear input and output buffer. */
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
if (peer->ibuf)
stream_fifo_clean(peer->ibuf);
if (peer->obuf)
@@ -1179,7 +1180,6 @@ int bgp_stop(struct peer *peer)
peer->curr = NULL;
}
}
- pthread_mutex_unlock(&peer->io_mtx);
/* Close of file descriptor. */
if (peer->fd >= 0) {
@@ -1410,6 +1410,7 @@ int bgp_start(struct peer *peer)
zlog_debug(
"%s [FSM] Unable to get neighbor's IP address, waiting...",
peer->host);
+ peer->last_reset = PEER_DOWN_NBR_ADDR;
return -1;
}
@@ -1455,6 +1456,7 @@ int bgp_start(struct peer *peer)
EC_BGP_FSM,
"%s [FSM] In a VRF that is not initialised yet",
peer->host);
+ peer->last_reset = PEER_DOWN_VRF_UNINIT;
return -1;
}
@@ -1466,7 +1468,7 @@ int bgp_start(struct peer *peer)
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Waiting for NHT",
peer->host);
-
+ peer->last_reset = PEER_DOWN_WAITING_NHT;
BGP_EVENT_ADD(peer, TCP_connection_open_failed);
return 0;
}
@@ -1656,8 +1658,6 @@ static int bgp_establish(struct peer *peer)
peer->host);
}
- hook_call(peer_established, peer);
-
/* Reset uptime, turn on keepalives, send current table. */
if (!peer->v_holdtime)
bgp_keepalives_on(peer);
diff --git a/bgpd/bgp_fsm.h b/bgpd/bgp_fsm.h
index 3476a3c3a9..9d0500ae2c 100644
--- a/bgpd/bgp_fsm.h
+++ b/bgpd/bgp_fsm.h
@@ -88,6 +88,5 @@ extern void bgp_adjust_routeadv(struct peer *);
#include "hook.h"
DECLARE_HOOK(peer_backward_transition, (struct peer * peer), (peer))
-DECLARE_HOOK(peer_established, (struct peer * peer), (peer))
#endif /* _QUAGGA_BGP_FSM_H */
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index f111822e53..c2d06a4b5a 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -132,12 +132,10 @@ static int bgp_process_writes(struct thread *thread)
struct frr_pthread *fpt = bgp_pth_io;
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
status = bgp_write(peer);
reschedule = (stream_fifo_head(peer->obuf) != NULL);
}
- pthread_mutex_unlock(&peer->io_mtx);
/* no problem */
if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -184,11 +182,9 @@ static int bgp_process_reads(struct thread *thread)
struct frr_pthread *fpt = bgp_pth_io;
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
status = bgp_read(peer);
}
- pthread_mutex_unlock(&peer->io_mtx);
/* error checking phase */
if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -237,11 +233,9 @@ static int bgp_process_reads(struct thread *thread)
assert(ringbuf_get(ibw, pktbuf, pktsize) == pktsize);
stream_put(pkt, pktbuf, pktsize);
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
stream_fifo_push(peer->ibuf, pkt);
}
- pthread_mutex_unlock(&peer->io_mtx);
added_pkt = true;
} else
diff --git a/bgpd/bgp_keepalives.c b/bgpd/bgp_keepalives.c
index bec3bdcb8d..3a49e8bc00 100644
--- a/bgpd/bgp_keepalives.c
+++ b/bgpd/bgp_keepalives.c
@@ -97,11 +97,18 @@ static void peer_process(struct hash_bucket *hb, void *arg)
static struct timeval tolerance = {0, 100000};
+ uint32_t v_ka = atomic_load_explicit(&pkat->peer->v_keepalive,
+ memory_order_relaxed);
+
+ /* 0 keepalive timer means no keepalives */
+ if (v_ka == 0)
+ return;
+
/* calculate elapsed time since last keepalive */
monotime_since(&pkat->last, &elapsed);
/* calculate difference between elapsed time and configured time */
- ka.tv_sec = pkat->peer->v_keepalive;
+ ka.tv_sec = v_ka;
timersub(&ka, &elapsed, &diff);
int send_keepalive =
@@ -245,8 +252,7 @@ void bgp_keepalives_on(struct peer *peer)
*/
assert(peerhash_mtx);
- pthread_mutex_lock(peerhash_mtx);
- {
+ frr_with_mutex(peerhash_mtx) {
holder.peer = peer;
if (!hash_lookup(peerhash, &holder)) {
struct pkat *pkat = pkat_new(peer);
@@ -255,7 +261,6 @@ void bgp_keepalives_on(struct peer *peer)
}
SET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
}
- pthread_mutex_unlock(peerhash_mtx);
bgp_keepalives_wake();
}
@@ -275,8 +280,7 @@ void bgp_keepalives_off(struct peer *peer)
*/
assert(peerhash_mtx);
- pthread_mutex_lock(peerhash_mtx);
- {
+ frr_with_mutex(peerhash_mtx) {
holder.peer = peer;
struct pkat *res = hash_release(peerhash, &holder);
if (res) {
@@ -285,16 +289,13 @@ void bgp_keepalives_off(struct peer *peer)
}
UNSET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
}
- pthread_mutex_unlock(peerhash_mtx);
}
void bgp_keepalives_wake(void)
{
- pthread_mutex_lock(peerhash_mtx);
- {
+ frr_with_mutex(peerhash_mtx) {
pthread_cond_signal(peerhash_cond);
}
- pthread_mutex_unlock(peerhash_mtx);
}
int bgp_keepalives_stop(struct frr_pthread *fpt, void **result)
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 1156810510..c81abd643f 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -781,12 +781,12 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
static_attr.nexthop.s_addr = nexthop->u.prefix4.s_addr;
static_attr.mp_nexthop_global_in = nexthop->u.prefix4;
- static_attr.mp_nexthop_len = 4;
+ static_attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
break;
case AF_INET6:
static_attr.mp_nexthop_global = nexthop->u.prefix6;
- static_attr.mp_nexthop_len = 16;
+ static_attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;
break;
default:
@@ -802,7 +802,8 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
*/
static_attr.mp_nexthop_global_in =
static_attr.nexthop;
- static_attr.mp_nexthop_len = 4;
+ static_attr.mp_nexthop_len =
+ BGP_ATTR_NHLEN_IPV4;
/*
* XXX Leave static_attr.nexthop
* intact for NHT
@@ -821,7 +822,8 @@ void vpn_leak_from_vrf_update(struct bgp *bgp_vpn, /* to */
&& !BGP_ATTR_NEXTHOP_AFI_IP6(path_vrf->attr)) {
static_attr.mp_nexthop_global_in.s_addr =
static_attr.nexthop.s_addr;
- static_attr.mp_nexthop_len = 4;
+ static_attr.mp_nexthop_len =
+ BGP_ATTR_NHLEN_IPV4;
static_attr.flag |=
ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
}
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index 1dadf00e8f..887caee95e 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -122,7 +122,7 @@ static int bgp_md5_set_connect(int socket, union sockunion *su,
int ret = -1;
#if HAVE_DECL_TCP_MD5SIG
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
ret = bgp_md5_set_socket(socket, su, prefixlen, password);
}
#endif /* HAVE_TCP_MD5SIG */
@@ -140,8 +140,7 @@ static int bgp_md5_set_password(struct peer *peer, const char *password)
* Set or unset the password on the listen socket(s). Outbound
* connections are taken care of in bgp_connect() below.
*/
- frr_elevate_privs(&bgpd_privs)
- {
+ frr_with_privs(&bgpd_privs) {
for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener))
if (listener->su.sa.sa_family
== peer->su.sa.sa_family) {
@@ -167,8 +166,7 @@ int bgp_md5_set_prefix(struct prefix *p, const char *password)
struct bgp_listener *listener;
/* Set or unset the password on the listen socket(s). */
- frr_elevate_privs(&bgpd_privs)
- {
+ frr_with_privs(&bgpd_privs) {
for (ALL_LIST_ELEMENTS_RO(bm->listen_sockets, node, listener))
if (listener->su.sa.sa_family == p->family) {
prefix2sockunion(p, &su);
@@ -610,7 +608,7 @@ int bgp_connect(struct peer *peer)
zlog_debug("Peer address not learnt: Returning from connect");
return 0;
}
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
/* Make socket for the peer. */
peer->fd = vrf_sockunion_socket(&peer->su, peer->bgp->vrf_id,
bgp_get_bound_name(peer));
@@ -630,7 +628,7 @@ int bgp_connect(struct peer *peer)
sockopt_reuseport(peer->fd);
#ifdef IPTOS_PREC_INTERNETCONTROL
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
if (sockunion_family(&peer->su) == AF_INET)
setsockopt_ipv4_tos(peer->fd,
IPTOS_PREC_INTERNETCONTROL);
@@ -708,7 +706,7 @@ static int bgp_listener(int sock, struct sockaddr *sa, socklen_t salen,
sockopt_reuseaddr(sock);
sockopt_reuseport(sock);
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
#ifdef IPTOS_PREC_INTERNETCONTROL
if (sa->sa_family == AF_INET)
@@ -767,7 +765,7 @@ int bgp_socket(struct bgp *bgp, unsigned short port, const char *address)
snprintf(port_str, sizeof(port_str), "%d", port);
port_str[sizeof(port_str) - 1] = '\0';
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
ret = vrf_getaddrinfo(address, port_str, &req, &ainfo_save,
bgp->vrf_id);
}
@@ -788,7 +786,7 @@ int bgp_socket(struct bgp *bgp, unsigned short port, const char *address)
if (ainfo->ai_family != AF_INET && ainfo->ai_family != AF_INET6)
continue;
- frr_elevate_privs(&bgpd_privs) {
+ frr_with_privs(&bgpd_privs) {
sock = vrf_socket(ainfo->ai_family,
ainfo->ai_socktype,
ainfo->ai_protocol, bgp->vrf_id,
diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c
index 64529f6ef3..7e5e07099d 100644
--- a/bgpd/bgp_open.c
+++ b/bgpd/bgp_open.c
@@ -503,7 +503,7 @@ static int bgp_capability_restart(struct peer *peer,
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%s Address family %s is%spreserved",
- peer->host, afi_safi_print(afi, safi),
+ peer->host, get_afi_safi_str(afi, safi, false),
CHECK_FLAG(
peer->af_cap[afi][safi],
PEER_CAP_RESTART_AF_PRESERVE_RCV)
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 99522a6522..c7c1780c21 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -41,6 +41,7 @@
#include "bgpd/bgpd.h"
#include "bgpd/bgp_table.h"
#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_bmp.h"
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
@@ -123,9 +124,9 @@ int bgp_packet_set_size(struct stream *s)
*/
static void bgp_packet_add(struct peer *peer, struct stream *s)
{
- pthread_mutex_lock(&peer->io_mtx);
- stream_fifo_push(peer->obuf, s);
- pthread_mutex_unlock(&peer->io_mtx);
+ frr_with_mutex(&peer->io_mtx) {
+ stream_fifo_push(peer->obuf, s);
+ }
}
static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,
@@ -140,7 +141,7 @@ static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,
if (bgp_debug_neighbor_events(peer))
zlog_debug("send End-of-RIB for %s to %s",
- afi_safi_print(afi, safi), peer->host);
+ get_afi_safi_str(afi, safi, false), peer->host);
s = stream_new(BGP_MAX_PACKET_SIZE);
@@ -664,7 +665,7 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
struct stream *s;
/* Lock I/O mutex to prevent other threads from pushing packets */
- pthread_mutex_lock(&peer->io_mtx);
+ frr_mutex_lock_autounlock(&peer->io_mtx);
/* ============================================== */
/* Allocate new stream. */
@@ -755,9 +756,6 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
stream_fifo_push(peer->obuf, s);
bgp_write_notify(peer);
-
- /* ============================================== */
- pthread_mutex_unlock(&peer->io_mtx);
}
/*
@@ -1660,7 +1658,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
bgp_clear_stale_route(peer, afi, safi);
zlog_info("%%NOTIFICATION: rcvd End-of-RIB for %s from %s in vrf %s",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
vrf ? vrf->name : VRF_DEFAULT_NAME);
}
}
@@ -2236,11 +2234,9 @@ int bgp_process_packet(struct thread *thread)
bgp_size_t size;
char notify_data_length[2];
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
peer->curr = stream_fifo_pop(peer->ibuf);
}
- pthread_mutex_unlock(&peer->io_mtx);
if (peer->curr == NULL) // no packets to process, hmm...
return 0;
@@ -2359,15 +2355,13 @@ int bgp_process_packet(struct thread *thread)
if (fsm_update_result != FSM_PEER_TRANSFERRED
&& fsm_update_result != FSM_PEER_STOPPED) {
- pthread_mutex_lock(&peer->io_mtx);
- {
+ frr_with_mutex(&peer->io_mtx) {
// more work to do, come back later
if (peer->ibuf->count > 0)
thread_add_timer_msec(
bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
}
- pthread_mutex_unlock(&peer->io_mtx);
}
return 0;
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index 183debddba..962f0b3375 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2750,7 +2750,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,
zlog_info(
"%%MAXPFXEXCEED: No. of %s prefix received from %s %ld exceed, "
"limit %ld",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
peer->pcount[afi][safi], peer->pmax[afi][safi]);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_PREFIX_LIMIT);
@@ -2811,7 +2811,7 @@ int bgp_maximum_prefix_overflow(struct peer *peer, afi_t afi, safi_t safi,
zlog_info(
"%%MAXPFX: No. of %s prefix received from %s reaches %ld, max %ld",
- afi_safi_print(afi, safi), peer->host,
+ get_afi_safi_str(afi, safi, false), peer->host,
peer->pcount[afi][safi], peer->pmax[afi][safi]);
SET_FLAG(peer->af_sflags[afi][safi],
PEER_STATUS_PREFIX_THRESHOLD);
@@ -5704,6 +5704,8 @@ static struct bgp_aggregate *bgp_aggregate_new(void)
static void bgp_aggregate_free(struct bgp_aggregate *aggregate)
{
+ XFREE(MTYPE_ROUTE_MAP_NAME, aggregate->rmap.name);
+ route_map_counter_decrement(aggregate->rmap.map);
XFREE(MTYPE_BGP_AGGREGATE, aggregate);
}
@@ -5754,6 +5756,7 @@ static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi,
struct bgp_node *rn;
struct bgp_table *table;
struct bgp_path_info *pi, *orig, *new;
+ struct attr *attr;
table = bgp->rib[afi][safi];
@@ -5791,14 +5794,18 @@ static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi,
if (pi)
bgp_path_info_delete(rn, pi);
+ attr = bgp_attr_aggregate_intern(
+ bgp, origin, aspath, community, ecommunity, lcommunity,
+ aggregate, atomic_aggregate, p);
+
+ if (!attr) {
+ bgp_aggregate_delete(bgp, p, afi, safi, aggregate);
+ return;
+ }
+
new = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0,
- bgp->peer_self,
- bgp_attr_aggregate_intern(bgp, origin, aspath,
- community, ecommunity,
- lcommunity,
- aggregate->as_set,
- atomic_aggregate),
- rn);
+ bgp->peer_self, attr, rn);
+
SET_FLAG(new->flags, BGP_PATH_VALID);
bgp_path_info_add(rn, new);
@@ -5821,7 +5828,7 @@ static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi,
}
/* Update an aggregate as routes are added/removed from the BGP table */
-static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
+void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
afi_t afi, safi_t safi,
struct bgp_aggregate *aggregate)
{
@@ -5982,7 +5989,7 @@ static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
aggregate);
}
-static void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
+void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
safi_t safi, struct bgp_aggregate *aggregate)
{
struct bgp_table *table;
@@ -6426,7 +6433,8 @@ static int bgp_aggregate_unset(struct vty *vty, const char *prefix_str,
}
static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
- safi_t safi, uint8_t summary_only, uint8_t as_set)
+ safi_t safi, const char *rmap, uint8_t summary_only,
+ uint8_t as_set)
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
int ret;
@@ -6451,8 +6459,9 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
/* Old configuration check. */
rn = bgp_node_get(bgp->aggregate[afi][safi], &p);
+ aggregate = bgp_node_get_bgp_aggregate_info(rn);
- if (bgp_node_has_bgp_path_info_data(rn)) {
+ if (aggregate) {
vty_out(vty, "There is already same aggregate network.\n");
/* try to remove the old entry */
ret = bgp_aggregate_unset(vty, prefix_str, afi, safi);
@@ -6468,6 +6477,15 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
aggregate->summary_only = summary_only;
aggregate->as_set = as_set;
aggregate->safi = safi;
+
+ if (rmap) {
+ XFREE(MTYPE_ROUTE_MAP_NAME, aggregate->rmap.name);
+ route_map_counter_decrement(aggregate->rmap.map);
+ aggregate->rmap.name =
+ XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap);
+ aggregate->rmap.map = route_map_lookup_by_name(rmap);
+ route_map_counter_increment(aggregate->rmap.map);
+ }
bgp_node_set_bgp_aggregate_info(rn, aggregate);
/* Aggregate address insert into BGP routing table. */
@@ -6478,17 +6496,20 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
DEFUN (aggregate_address,
aggregate_address_cmd,
- "aggregate-address A.B.C.D/M [<as-set [summary-only]|summary-only [as-set]>]",
+ "aggregate-address A.B.C.D/M [<as-set [summary-only]|summary-only [as-set]>] [route-map WORD]",
"Configure BGP aggregate entries\n"
"Aggregate prefix\n"
"Generate AS set path information\n"
"Filter more specific routes from updates\n"
"Filter more specific routes from updates\n"
- "Generate AS set path information\n")
+ "Generate AS set path information\n"
+ "Apply route map to aggregate network\n"
+ "Name of route map\n")
{
int idx = 0;
argv_find(argv, argc, "A.B.C.D/M", &idx);
char *prefix = argv[idx]->arg;
+ char *rmap = NULL;
int as_set =
argv_find(argv, argc, "as-set", &idx) ? AGGREGATE_AS_SET : 0;
idx = 0;
@@ -6496,25 +6517,33 @@ DEFUN (aggregate_address,
? AGGREGATE_SUMMARY_ONLY
: 0;
+ idx = 0;
+ argv_find(argv, argc, "WORD", &idx);
+ if (idx)
+ rmap = argv[idx]->arg;
+
return bgp_aggregate_set(vty, prefix, AFI_IP, bgp_node_safi(vty),
- summary_only, as_set);
+ rmap, summary_only, as_set);
}
DEFUN (aggregate_address_mask,
aggregate_address_mask_cmd,
- "aggregate-address A.B.C.D A.B.C.D [<as-set [summary-only]|summary-only [as-set]>]",
+ "aggregate-address A.B.C.D A.B.C.D [<as-set [summary-only]|summary-only [as-set]>] [route-map WORD]",
"Configure BGP aggregate entries\n"
"Aggregate address\n"
"Aggregate mask\n"
"Generate AS set path information\n"
"Filter more specific routes from updates\n"
"Filter more specific routes from updates\n"
- "Generate AS set path information\n")
+ "Generate AS set path information\n"
+ "Apply route map to aggregate network\n"
+ "Name of route map\n")
{
int idx = 0;
argv_find(argv, argc, "A.B.C.D", &idx);
char *prefix = argv[idx]->arg;
char *mask = argv[idx + 1]->arg;
+ char *rmap = NULL;
int as_set =
argv_find(argv, argc, "as-set", &idx) ? AGGREGATE_AS_SET : 0;
idx = 0;
@@ -6522,6 +6551,10 @@ DEFUN (aggregate_address_mask,
? AGGREGATE_SUMMARY_ONLY
: 0;
+ argv_find(argv, argc, "WORD", &idx);
+ if (idx)
+ rmap = argv[idx]->arg;
+
char prefix_str[BUFSIZ];
int ret = netmask_str2prefix_str(prefix, mask, prefix_str);
@@ -6531,7 +6564,7 @@ DEFUN (aggregate_address_mask,
}
return bgp_aggregate_set(vty, prefix_str, AFI_IP, bgp_node_safi(vty),
- summary_only, as_set);
+ rmap, summary_only, as_set);
}
DEFUN (no_aggregate_address,
@@ -6581,17 +6614,20 @@ DEFUN (no_aggregate_address_mask,
DEFUN (ipv6_aggregate_address,
ipv6_aggregate_address_cmd,
- "aggregate-address X:X::X:X/M [<as-set [summary-only]|summary-only [as-set]>]",
+ "aggregate-address X:X::X:X/M [<as-set [summary-only]|summary-only [as-set]>] [route-map WORD]",
"Configure BGP aggregate entries\n"
"Aggregate prefix\n"
"Generate AS set path information\n"
"Filter more specific routes from updates\n"
"Filter more specific routes from updates\n"
- "Generate AS set path information\n")
+ "Generate AS set path information\n"
+ "Apply route map to aggregate network\n"
+ "Name of route map\n")
{
int idx = 0;
argv_find(argv, argc, "X:X::X:X/M", &idx);
char *prefix = argv[idx]->arg;
+ char *rmap = NULL;
int as_set =
argv_find(argv, argc, "as-set", &idx) ? AGGREGATE_AS_SET : 0;
@@ -6599,8 +6635,13 @@ DEFUN (ipv6_aggregate_address,
int sum_only = argv_find(argv, argc, "summary-only", &idx)
? AGGREGATE_SUMMARY_ONLY
: 0;
- return bgp_aggregate_set(vty, prefix, AFI_IP6, SAFI_UNICAST, sum_only,
- as_set);
+
+ argv_find(argv, argc, "WORD", &idx);
+ if (idx)
+ rmap = argv[idx]->arg;
+
+ return bgp_aggregate_set(vty, prefix, AFI_IP6, SAFI_UNICAST, rmap,
+ sum_only, as_set);
}
DEFUN (no_ipv6_aggregate_address,
@@ -7228,7 +7269,8 @@ void route_vty_out(struct vty *vty, struct prefix *p,
/* We display both LL & GL if both have been
* received */
- if ((attr->mp_nexthop_len == 32)
+ if ((attr->mp_nexthop_len
+ == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
|| (path->peer->conf_if)) {
json_nexthop_ll = json_object_new_object();
json_object_string_add(
@@ -7260,7 +7302,8 @@ void route_vty_out(struct vty *vty, struct prefix *p,
} else {
/* Display LL if LL/Global both in table unless
* prefer-global is set */
- if (((attr->mp_nexthop_len == 32)
+ if (((attr->mp_nexthop_len
+ == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
&& !attr->mp_nexthop_prefer_global)
|| (path->peer->conf_if)) {
if (path->peer->conf_if) {
@@ -10696,7 +10739,7 @@ static int bgp_table_stats(struct vty *vty, struct bgp *bgp, afi_t afi,
return CMD_WARNING;
}
- vty_out(vty, "BGP %s RIB statistics\n", afi_safi_print(afi, safi));
+ vty_out(vty, "BGP %s RIB statistics\n", get_afi_safi_str(afi, safi, false));
/* labeled-unicast routes live in the unicast table */
if (safi == SAFI_LABELED_UNICAST)
@@ -10895,7 +10938,7 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,
if (use_json) {
json_object_string_add(json, "prefixCountsFor", peer->host);
json_object_string_add(json, "multiProtocol",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, true));
json_object_int_add(json, "pfxCounter",
peer->pcount[afi][safi]);
@@ -10921,10 +10964,10 @@ static int bgp_peer_counts(struct vty *vty, struct peer *peer, afi_t afi,
&& bgp_flag_check(peer->bgp, BGP_FLAG_SHOW_HOSTNAME)) {
vty_out(vty, "Prefix counts for %s/%s, %s\n",
peer->hostname, peer->host,
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
} else {
vty_out(vty, "Prefix counts for %s, %s\n", peer->host,
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
}
vty_out(vty, "PfxCt: %ld\n", peer->pcount[afi][safi]);
@@ -11552,7 +11595,7 @@ DEFUN (show_ip_bgp_neighbor_received_prefix_filter,
if (count) {
if (!uj)
vty_out(vty, "Address Family: %s\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
prefix_bgp_show_prefix_list(vty, afi, name, uj);
} else {
if (uj)
@@ -12467,6 +12510,9 @@ void bgp_config_write_network(struct vty *vty, struct bgp *bgp, afi_t afi,
if (bgp_aggregate->summary_only)
vty_out(vty, " summary-only");
+ if (bgp_aggregate->rmap.name)
+ vty_out(vty, " route-map %s", bgp_aggregate->rmap.name);
+
vty_out(vty, "\n");
}
}
diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h
index 704cd39710..ba4e32c23d 100644
--- a/bgpd/bgp_route.h
+++ b/bgpd/bgp_route.h
@@ -313,7 +313,10 @@ struct bgp_aggregate {
uint8_t as_set;
/* Route-map for aggregated route. */
- struct route_map *map;
+ struct {
+ char *name;
+ struct route_map *map;
+ } rmap;
/* Suppress-count. */
unsigned long count;
@@ -545,6 +548,10 @@ extern void bgp_config_write_network(struct vty *, struct bgp *, afi_t, safi_t);
extern void bgp_config_write_distance(struct vty *, struct bgp *, afi_t,
safi_t);
+extern void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
+ safi_t safi, struct bgp_aggregate *aggregate);
+extern void bgp_aggregate_route(struct bgp *bgp, struct prefix *p, afi_t afi,
+ safi_t safi, struct bgp_aggregate *aggregate);
extern void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p,
struct bgp_path_info *path, afi_t afi,
safi_t safi);
diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c
index aaae9e380e..da702ec857 100644
--- a/bgpd/bgp_routemap.c
+++ b/bgpd/bgp_routemap.c
@@ -269,7 +269,8 @@ route_match_peer(void *rule, const struct prefix *prefix,
/* If su='0.0.0.0' (command 'match peer local'), and it's a
NETWORK,
- REDISTRIBUTE or DEFAULT_GENERATED route => return RMAP_MATCH
+ REDISTRIBUTE, AGGREGATE-ADDRESS or DEFAULT_GENERATED route
+ => return RMAP_MATCH
*/
if (sockunion_same(su, &su_def)) {
int ret;
@@ -277,6 +278,8 @@ route_match_peer(void *rule, const struct prefix *prefix,
|| CHECK_FLAG(peer->rmap_type,
PEER_RMAP_TYPE_REDISTRIBUTE)
|| CHECK_FLAG(peer->rmap_type,
+ PEER_RMAP_TYPE_AGGREGATE)
+ || CHECK_FLAG(peer->rmap_type,
PEER_RMAP_TYPE_DEFAULT))
ret = RMAP_MATCH;
else
@@ -2853,12 +2856,15 @@ route_set_ipv6_nexthop_peer(void *rule, const struct prefix *pfx,
/* Set next hop value and length in attribute. */
if (IN6_IS_ADDR_LINKLOCAL(&peer_address)) {
path->attr->mp_nexthop_local = peer_address;
- if (path->attr->mp_nexthop_len != 32)
- path->attr->mp_nexthop_len = 32;
+ if (path->attr->mp_nexthop_len
+ != BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL)
+ path->attr->mp_nexthop_len =
+ BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL;
} else {
path->attr->mp_nexthop_global = peer_address;
if (path->attr->mp_nexthop_len == 0)
- path->attr->mp_nexthop_len = 16;
+ path->attr->mp_nexthop_len =
+ BGP_ATTR_NHLEN_IPV6_GLOBAL;
}
} else if (CHECK_FLAG(peer->rmap_type, PEER_RMAP_TYPE_OUT)) {
@@ -2923,7 +2929,7 @@ route_set_vpnv4_nexthop(void *rule, const struct prefix *prefix,
/* Set next hop value. */
path->attr->mp_nexthop_global_in = *address;
- path->attr->mp_nexthop_len = 4;
+ path->attr->mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
}
return RMAP_OKAY;
@@ -3057,7 +3063,7 @@ static int bgp_route_match_add(struct vty *vty, const char *command,
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
int retval = CMD_SUCCESS;
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
@@ -3074,6 +3080,11 @@ static int bgp_route_match_add(struct vty *vty, const char *command,
route_map_upd8_dependency(type, arg, index->map->name);
}
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally doing nothing here.
+ */
+ break;
}
return retval;
@@ -3084,7 +3095,7 @@ static int bgp_route_match_delete(struct vty *vty, const char *command,
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
@@ -3117,6 +3128,11 @@ static int bgp_route_match_delete(struct vty *vty, const char *command,
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
@@ -3238,6 +3254,7 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
struct peer *peer;
struct bgp_node *bn;
struct bgp_static *bgp_static;
+ struct bgp_aggregate *aggregate;
struct listnode *node, *nnode;
struct route_map *map;
char buf[INET6_ADDRSTRLEN];
@@ -3321,6 +3338,35 @@ static void bgp_route_map_process_update(struct bgp *bgp, const char *rmap_name,
safi);
}
}
+
+ /* For aggregate-address route-map updates. */
+ for (bn = bgp_table_top(bgp->aggregate[afi][safi]); bn;
+ bn = bgp_route_next(bn)) {
+ aggregate = bgp_node_get_bgp_aggregate_info(bn);
+ if (!aggregate)
+ continue;
+
+ if (!aggregate->rmap.name
+ || (strcmp(rmap_name, aggregate->rmap.name) != 0))
+ continue;
+
+ if (!aggregate->rmap.map)
+ route_map_counter_increment(map);
+
+ aggregate->rmap.map = map;
+
+ if (route_update) {
+ if (bgp_debug_zebra(&bn->p))
+ zlog_debug(
+ "Processing route_map %s update on aggregate-address route %s",
+ rmap_name,
+ inet_ntop(bn->p.family,
+ &bn->p.u.prefix, buf,
+ INET6_ADDRSTRLEN));
+ bgp_aggregate_route(bgp, &bn->p, afi, safi,
+ aggregate);
+ }
+ }
}
/* For redistribute route-map updates. */
diff --git a/bgpd/bgp_rpki.c b/bgpd/bgp_rpki.c
index 22840d54c6..2cfd65896c 100644
--- a/bgpd/bgp_rpki.c
+++ b/bgpd/bgp_rpki.c
@@ -51,12 +51,6 @@
#include "lib/thread.h"
#ifndef VTYSH_EXTRACT_PL
#include "rtrlib/rtrlib.h"
-#include "rtrlib/rtr_mgr.h"
-#include "rtrlib/lib/ip.h"
-#include "rtrlib/transport/tcp/tcp_transport.h"
-#if defined(FOUND_SSH)
-#include "rtrlib/transport/ssh/ssh_transport.h"
-#endif
#endif
#include "hook.h"
#include "libfrr.h"
@@ -76,8 +70,6 @@ DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group")
#define POLLING_PERIOD_DEFAULT 3600
#define EXPIRE_INTERVAL_DEFAULT 7200
#define RETRY_INTERVAL_DEFAULT 600
-#define TIMEOUT_DEFAULT 600
-#define INITIAL_SYNCHRONISATION_TIMEOUT_DEFAULT 30
#define RPKI_DEBUG(...) \
if (rpki_debug) { \
@@ -147,8 +139,6 @@ static int rpki_debug;
static unsigned int polling_period;
static unsigned int expire_interval;
static unsigned int retry_interval;
-static unsigned int timeout;
-static unsigned int initial_synchronisation_timeout;
static int rpki_sync_socket_rtr;
static int rpki_sync_socket_bgpd;
@@ -538,9 +528,6 @@ static int bgp_rpki_init(struct thread_master *master)
polling_period = POLLING_PERIOD_DEFAULT;
expire_interval = EXPIRE_INTERVAL_DEFAULT;
retry_interval = RETRY_INTERVAL_DEFAULT;
- timeout = TIMEOUT_DEFAULT;
- initial_synchronisation_timeout =
- INITIAL_SYNCHRONISATION_TIMEOUT_DEFAULT;
install_cli_commands();
rpki_init_sync_socket();
return 0;
@@ -756,8 +743,6 @@ static int add_cache(struct cache *cache)
group.sockets_len = 1;
group.sockets = &cache->rtr_socket;
- listnode_add(cache_list, cache);
-
if (rtr_is_running) {
init_tr_socket(cache);
@@ -767,6 +752,8 @@ static int add_cache(struct cache *cache)
}
}
+ listnode_add(cache_list, cache);
+
return SUCCESS;
}
@@ -793,7 +780,12 @@ static int add_tcp_cache(const char *host, const char *port,
cache->rtr_socket = rtr_socket;
cache->preference = preference;
- return add_cache(cache);
+ int ret = add_cache(cache);
+ if (ret != SUCCESS) {
+ free_cache(cache);
+ }
+
+ return ret;
}
#if defined(FOUND_SSH)
@@ -829,7 +821,12 @@ static int add_ssh_cache(const char *host, const unsigned int port,
cache->rtr_socket = rtr_socket;
cache->preference = preference;
- return add_cache(cache);
+ int ret = add_cache(cache);
+ if (ret != SUCCESS) {
+ free_cache(cache);
+ }
+
+ return ret;
}
#endif
@@ -869,9 +866,6 @@ static int config_write(struct vty *vty)
vty_out(vty, "!\n");
vty_out(vty, "rpki\n");
vty_out(vty, " rpki polling_period %d\n", polling_period);
- vty_out(vty, " rpki timeout %d\n", timeout);
- vty_out(vty, " rpki initial-synchronisation-timeout %d\n",
- initial_synchronisation_timeout);
for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) {
switch (cache->type) {
struct tr_tcp_config *tcp_config;
@@ -1020,48 +1014,64 @@ DEFUN (no_rpki_retry_interval,
return CMD_SUCCESS;
}
-DEFPY (rpki_timeout,
+#if (CONFDATE > 20200901)
+CPP_NOTICE("bgpd: time to remove rpki timeout")
+CPP_NOTICE("bgpd: this includes rpki_timeout and rpki_synchronisation_timeout")
+#endif
+
+DEFPY_HIDDEN (rpki_timeout,
rpki_timeout_cmd,
"rpki timeout (1-4294967295)$to_arg",
RPKI_OUTPUT_STRING
"Set timeout\n"
"Timeout value\n")
{
- timeout = to_arg;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
-DEFUN (no_rpki_timeout,
+DEFUN_HIDDEN (no_rpki_timeout,
no_rpki_timeout_cmd,
"no rpki timeout",
NO_STR
RPKI_OUTPUT_STRING
"Set timeout back to default\n")
{
- timeout = TIMEOUT_DEFAULT;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
-DEFPY (rpki_synchronisation_timeout,
+DEFPY_HIDDEN (rpki_synchronisation_timeout,
rpki_synchronisation_timeout_cmd,
"rpki initial-synchronisation-timeout (1-4294967295)$ito_arg",
RPKI_OUTPUT_STRING
"Set a timeout for the initial synchronisation of prefix validation data\n"
"Timeout value\n")
{
- initial_synchronisation_timeout = ito_arg;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
-DEFUN (no_rpki_synchronisation_timeout,
+DEFUN_HIDDEN (no_rpki_synchronisation_timeout,
no_rpki_synchronisation_timeout_cmd,
"no rpki initial-synchronisation-timeout",
NO_STR
RPKI_OUTPUT_STRING
"Set the initial synchronisation timeout back to default (30 sec.)\n")
{
- initial_synchronisation_timeout =
- INITIAL_SYNCHRONISATION_TIMEOUT_DEFAULT;
+ vty_out(vty,
+ "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty,
+ "This functionality has also already been removed because it caused bugs and was pointless\n");
return CMD_SUCCESS;
}
@@ -1083,6 +1093,18 @@ DEFPY (rpki_cache,
"Preference value\n")
{
int return_value;
+ struct listnode *cache_node;
+ struct cache *current_cache;
+
+ for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, current_cache)) {
+ if (current_cache->preference == preference) {
+ vty_out(vty,
+ "Cache with preference %ld is already configured\n",
+ preference);
+ return CMD_WARNING;
+ }
+ }
+
// use ssh connection
if (ssh_uname) {
@@ -1128,11 +1150,11 @@ DEFPY (no_rpki_cache,
return CMD_WARNING;
}
- if (rtr_is_running) {
+ if (rtr_is_running && listcount(cache_list) == 1) {
+ stop();
+ } else if (rtr_is_running) {
if (rtr_mgr_remove_group(rtr_config, preference) == RTR_ERROR) {
vty_out(vty, "Could not remove cache %ld", preference);
- if (listcount(cache_list) == 1)
- vty_out(vty, " because it is the last cache");
vty_out(vty, "\n");
return CMD_WARNING;
@@ -1385,7 +1407,7 @@ DEFUN (match_rpki,
"Prefix not found\n")
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, "rpki", argv[2]->arg,
RMAP_EVENT_MATCH_ADDED);
@@ -1397,6 +1419,12 @@ DEFUN (match_rpki,
case RMAP_COMPILE_ERROR:
vty_out(vty, "%% BGP Argument is malformed.\n");
return CMD_WARNING_CONFIG_FAILED;
+ case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally doing nothing here
+ */
+ break;
}
}
return CMD_SUCCESS;
@@ -1413,7 +1441,7 @@ DEFUN (no_match_rpki,
"Prefix not found\n")
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_match(index, "rpki", argv[3]->arg);
if (ret) {
@@ -1424,6 +1452,12 @@ DEFUN (no_match_rpki,
case RMAP_COMPILE_ERROR:
vty_out(vty, "%% BGP Argument is malformed.\n");
break;
+ case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
return CMD_WARNING_CONFIG_FAILED;
}
diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c
index 44cbeabd69..c266440899 100644
--- a/bgpd/bgp_snmp.c
+++ b/bgpd/bgp_snmp.c
@@ -894,6 +894,10 @@ static int bgpTrapEstablished(struct peer *peer)
struct in_addr addr;
oid index[sizeof(oid) * IN_ADDR_SIZE];
+ /* Check if this peer just went to Established */
+ if ((peer->last_major_event != OpenConfirm) || !(peer_established(peer)))
+ return 0;
+
ret = inet_aton(peer->host, &addr);
if (ret == 0)
return 0;
@@ -935,7 +939,7 @@ static int bgp_snmp_init(struct thread_master *tm)
static int bgp_snmp_module_init(void)
{
- hook_register(peer_established, bgpTrapEstablished);
+ hook_register(peer_status_changed, bgpTrapEstablished);
hook_register(peer_backward_transition, bgpTrapBackwardTransition);
hook_register(frr_late_init, bgp_snmp_init);
return 0;
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 0d0e433980..17bc83ed2e 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -22,6 +22,7 @@
#include "command.h"
#include "lib/json.h"
+#include "lib_errors.h"
#include "lib/zclient.h"
#include "prefix.h"
#include "plist.h"
@@ -130,6 +131,80 @@ static enum node_type bgp_node_type(afi_t afi, safi_t safi)
return BGP_IPV4_NODE;
}
+static const char *get_afi_safi_vty_str(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "IPv4 Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "IPv4 Multicast";
+ else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
+ return "IPv4 Labeled Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
+ return "IPv4 VPN";
+ else if (afi == AFI_IP && safi == SAFI_ENCAP)
+ return "IPv4 Encap";
+ else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
+ return "IPv4 Flowspec";
+ else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "IPv6 Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "IPv6 Multicast";
+ else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
+ return "IPv6 Labeled Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
+ return "IPv6 VPN";
+ else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
+ return "IPv6 Encap";
+ else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
+ return "IPv6 Flowspec";
+ else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+ return "L2VPN EVPN";
+ else {
+ flog_err(EC_LIB_DEVELOPMENT, "New afi/safi that needs to be taken care of?");
+ return "Unknown";
+ }
+}
+
+/*
+ * Please note that we have intentionally camelCased
+ * the return strings here. So if you want
+ * to use this function, please ensure you
+ * are doing this within json output
+ */
+static const char *get_afi_safi_json_str(afi_t afi, safi_t safi)
+{
+ if (afi == AFI_IP && safi == SAFI_UNICAST)
+ return "ipv4Unicast";
+ else if (afi == AFI_IP && safi == SAFI_MULTICAST)
+ return "ipv4Multicast";
+ else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
+ return "ipv4LabeledUnicast";
+ else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
+ return "ipv4Vpn";
+ else if (afi == AFI_IP && safi == SAFI_ENCAP)
+ return "ipv4Encap";
+ else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
+ return "ipv4Flowspec";
+ else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
+ return "ipv6Unicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
+ return "ipv6Multicast";
+ else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
+ return "ipv6LabeledUnicast";
+ else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
+ return "ipv6Vpn";
+ else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
+ return "ipv6Encap";
+ else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
+ return "ipv6Flowspec";
+ else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
+ return "l2VpnEvpn";
+ else {
+ flog_err(EC_LIB_DEVELOPMENT, "New afi/safi that needs to be taken care of?");
+ return "Unknown";
+ }
+}
+
/* Utility function to get address family from current node. */
afi_t bgp_node_afi(struct vty *vty)
{
@@ -584,7 +659,7 @@ static void bgp_clear_vty_error(struct vty *vty, struct peer *peer, afi_t afi,
case BGP_ERR_AF_UNCONFIGURED:
vty_out(vty,
"%%BGP: Enable %s address family for the neighbor %s\n",
- afi_safi_print(afi, safi), peer->host);
+ get_afi_safi_str(afi, safi, false), peer->host);
break;
case BGP_ERR_SOFT_RECONFIG_UNCONFIGURED:
vty_out(vty,
@@ -738,7 +813,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (!found)
vty_out(vty,
"%%BGP: No %s peer belonging to peer-group %s is configured\n",
- afi_safi_print(afi, safi), arg);
+ get_afi_safi_str(afi, safi, false), arg);
return CMD_SUCCESS;
}
@@ -760,7 +835,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (!found)
vty_out(vty,
"%%BGP: No external %s peer is configured\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
return CMD_SUCCESS;
}
@@ -784,7 +859,7 @@ static int bgp_clear(struct vty *vty, struct bgp *bgp, afi_t afi, safi_t safi,
if (!found)
vty_out(vty,
"%%BGP: No %s peer is configured with AS %s\n",
- afi_safi_print(afi, safi), arg);
+ get_afi_safi_str(afi, safi, false), arg);
return CMD_SUCCESS;
}
@@ -7794,9 +7869,145 @@ static void bgp_show_bestpath_json(struct bgp *bgp, json_object *json)
json_object_object_add(json, "bestPath", bestpath);
}
+/* Print the error code/subcode for why the peer is down */
+static void bgp_show_peer_reset(struct vty * vty, struct peer *peer,
+ json_object *json_peer, bool use_json)
+{
+ const char *code_str;
+ const char *subcode_str;
+
+ if (use_json) {
+ if (peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ || peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
+ char errorcodesubcode_hexstr[5];
+ char errorcodesubcode_str[256];
+
+ code_str = bgp_notify_code_str(peer->notify.code);
+ subcode_str = bgp_notify_subcode_str(
+ peer->notify.code,
+ peer->notify.subcode);
+
+ sprintf(errorcodesubcode_hexstr, "%02X%02X",
+ peer->notify.code, peer->notify.subcode);
+ json_object_string_add(json_peer,
+ "lastErrorCodeSubcode",
+ errorcodesubcode_hexstr);
+ snprintf(errorcodesubcode_str, 255, "%s%s",
+ code_str, subcode_str);
+ json_object_string_add(json_peer,
+ "lastNotificationReason",
+ errorcodesubcode_str);
+ if (peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED
+ && peer->notify.code == BGP_NOTIFY_CEASE
+ && (peer->notify.subcode
+ == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
+ || peer->notify.subcode
+ == BGP_NOTIFY_CEASE_ADMIN_RESET)
+ && peer->notify.length) {
+ char msgbuf[1024];
+ const char *msg_str;
+
+ msg_str = bgp_notify_admin_message(
+ msgbuf, sizeof(msgbuf),
+ (uint8_t *)peer->notify.data,
+ peer->notify.length);
+ if (msg_str)
+ json_object_string_add(
+ json_peer,
+ "lastShutdownDescription",
+ msg_str);
+ }
+
+ }
+ json_object_string_add(json_peer, "lastResetDueTo",
+ peer_down_str[(int)peer->last_reset]);
+ json_object_int_add(json_peer, "lastResetCode",
+ peer->last_reset);
+ } else {
+ if (peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ || peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
+ code_str = bgp_notify_code_str(peer->notify.code);
+ subcode_str =
+ bgp_notify_subcode_str(peer->notify.code,
+ peer->notify.subcode);
+ vty_out(vty, " Notification %s (%s%s)\n",
+ peer->last_reset == PEER_DOWN_NOTIFY_SEND
+ ? "sent"
+ : "received",
+ code_str, subcode_str);
+ } else {
+ vty_out(vty, " %s\n",
+ peer_down_str[(int)peer->last_reset]);
+ }
+ }
+}
+
+static inline bool bgp_has_peer_failed(struct peer *peer, afi_t afi,
+ safi_t safi)
+{
+ return ((peer->status != Established) ||
+ !peer->afc_recv[afi][safi]);
+}
+
+static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp,
+ struct peer *peer, json_object *json_peer,
+ int max_neighbor_width, bool use_json)
+{
+ char timebuf[BGP_UPTIME_LEN], dn_flag[2];
+ int len;
+
+ if (use_json) {
+ if (peer_dynamic_neighbor(peer))
+ json_object_boolean_true_add(json_peer,
+ "dynamicPeer");
+ if (peer->hostname)
+ json_object_string_add(json_peer, "hostname",
+ peer->hostname);
+
+ if (peer->domainname)
+ json_object_string_add(json_peer, "domainname",
+ peer->domainname);
+ json_object_int_add(json_peer, "connectionsEstablished",
+ peer->established);
+ json_object_int_add(json_peer, "connectionsDropped",
+ peer->dropped);
+ peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
+ use_json, json_peer);
+ if (peer->status == Established)
+ json_object_string_add(json_peer, "lastResetDueTo",
+ "AFI/SAFI Not Negotiated");
+ else
+ bgp_show_peer_reset(NULL, peer, json_peer, true);
+ } else {
+ dn_flag[1] = '\0';
+ dn_flag[0] = peer_dynamic_neighbor(peer) ? '*' : '\0';
+ if (peer->hostname
+ && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
+ len = vty_out(vty, "%s%s(%s)", dn_flag,
+ peer->hostname, peer->host);
+ else
+ len = vty_out(vty, "%s%s", dn_flag, peer->host);
+
+ /* pad the neighbor column with spaces */
+ if (len < max_neighbor_width)
+ vty_out(vty, "%*s", max_neighbor_width - len,
+ " ");
+ vty_out(vty, "%7d %7d %8s", peer->established,
+ peer->dropped,
+ peer_uptime(peer->uptime, timebuf,
+ BGP_UPTIME_LEN, 0, NULL));
+ if (peer->status == Established)
+ vty_out(vty, " AFI/SAFI Not Negotiated\n");
+ else
+ bgp_show_peer_reset(vty, peer, NULL,
+ false);
+ }
+}
+
+
/* Show BGP peer's summary information. */
static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
- bool use_json)
+ bool show_failed, bool use_json)
{
struct peer *peer;
struct listnode *node, *nnode;
@@ -7804,7 +8015,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
char timebuf[BGP_UPTIME_LEN], dn_flag[2];
char neighbor_buf[VTY_BUFSIZ];
int neighbor_col_default_width = 16;
- int len;
+ int len, failed_count = 0;
int max_neighbor_width = 0;
int pfx_rcd_safi;
json_object *json = NULL;
@@ -7816,6 +8027,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
* to
* display the correct PfxRcd value we must look at SAFI_UNICAST
*/
+
if (safi == SAFI_LABELED_UNICAST)
pfx_rcd_safi = SAFI_UNICAST;
else
@@ -7824,6 +8036,20 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
if (use_json) {
json = json_object_new_object();
json_peers = json_object_new_object();
+ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
+ if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
+ continue;
+
+ if (peer->afc[afi][safi]) {
+ /* See if we have at least a single failed peer */
+ if (bgp_has_peer_failed(peer, afi, safi))
+ failed_count++;
+ count++;
+ }
+ if (peer_dynamic_neighbor(peer))
+ dn_count++;
+ }
+
} else {
/* Loop over all neighbors that will be displayed to determine
* how many
@@ -7852,6 +8078,11 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
if (len > max_neighbor_width)
max_neighbor_width = len;
+
+ /* See if we have at least a single failed peer */
+ if (bgp_has_peer_failed(peer, afi, safi))
+ failed_count++;
+ count++;
}
}
@@ -7862,6 +8093,23 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
max_neighbor_width = neighbor_col_default_width;
}
+ if (show_failed && !failed_count) {
+ if (use_json) {
+ json_object_int_add(json, "failedPeersCount", 0);
+ json_object_int_add(json, "dynamicPeers", dn_count);
+ json_object_int_add(json, "totalPeers", count);
+
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ } else {
+ vty_out(vty, "%% No failed BGP neighbors found\n");
+ vty_out(vty, "\nTotal number of neighbors %d\n", count);
+ }
+ return CMD_SUCCESS;
+ }
+
+ count = 0; /* Reset the value as its used again */
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!CHECK_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE))
continue;
@@ -8063,78 +8311,97 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
vty_out(vty, "Neighbor");
vty_out(vty, "%*s", max_neighbor_width - 8,
" ");
- vty_out(vty,
+ if (show_failed)
+ vty_out(vty, "EstdCnt DropCnt ResetTime Reason\n");
+ else
+ vty_out(vty,
"V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd\n");
}
}
count++;
+ /* Works for both failed & successful cases */
+ if (peer_dynamic_neighbor(peer))
+ dn_count++;
if (use_json) {
- json_peer = json_object_new_object();
+ json_peer = NULL;
+
+ if (show_failed &&
+ bgp_has_peer_failed(peer, afi, safi)) {
+ json_peer = json_object_new_object();
+ bgp_show_failed_summary(vty, bgp, peer,
+ json_peer, 0, use_json);
+ } else if (!show_failed) {
+ json_peer = json_object_new_object();
+ if (peer_dynamic_neighbor(peer)) {
+ json_object_boolean_true_add(json_peer,
+ "dynamicPeer");
+ }
- if (peer_dynamic_neighbor(peer)) {
- dn_count++;
- json_object_boolean_true_add(json_peer,
- "dynamicPeer");
+ if (peer->hostname)
+ json_object_string_add(json_peer, "hostname",
+ peer->hostname);
+
+ if (peer->domainname)
+ json_object_string_add(json_peer, "domainname",
+ peer->domainname);
+
+ json_object_int_add(json_peer, "remoteAs", peer->as);
+ json_object_int_add(json_peer, "version", 4);
+ json_object_int_add(json_peer, "msgRcvd",
+ PEER_TOTAL_RX(peer));
+ json_object_int_add(json_peer, "msgSent",
+ PEER_TOTAL_TX(peer));
+
+ json_object_int_add(json_peer, "tableVersion",
+ peer->version[afi][safi]);
+ json_object_int_add(json_peer, "outq",
+ peer->obuf->count);
+ json_object_int_add(json_peer, "inq", 0);
+ peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
+ use_json, json_peer);
+
+ /*
+ * Adding "pfxRcd" field to match with the corresponding
+ * CLI. "prefixReceivedCount" will be deprecated in
+ * future.
+ */
+ json_object_int_add(json_peer, "prefixReceivedCount",
+ peer->pcount[afi][pfx_rcd_safi]);
+ json_object_int_add(json_peer, "pfxRcd",
+ peer->pcount[afi][pfx_rcd_safi]);
+
+ paf = peer_af_find(peer, afi, pfx_rcd_safi);
+ if (paf && PAF_SUBGRP(paf))
+ json_object_int_add(json_peer,
+ "pfxSnt",
+ (PAF_SUBGRP(paf))->scount);
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
+ json_object_string_add(json_peer, "state",
+ "Idle (Admin)");
+ else if (peer->afc_recv[afi][safi])
+ json_object_string_add(
+ json_peer, "state",
+ lookup_msg(bgp_status_msg, peer->status,
+ NULL));
+ else if (CHECK_FLAG(peer->sflags,
+ PEER_STATUS_PREFIX_OVERFLOW))
+ json_object_string_add(json_peer, "state",
+ "Idle (PfxCt)");
+ else
+ json_object_string_add(
+ json_peer, "state",
+ lookup_msg(bgp_status_msg, peer->status,
+ NULL));
+ json_object_int_add(json_peer, "connectionsEstablished",
+ peer->established);
+ json_object_int_add(json_peer, "connectionsDropped",
+ peer->dropped);
}
-
- if (peer->hostname)
- json_object_string_add(json_peer, "hostname",
- peer->hostname);
-
- if (peer->domainname)
- json_object_string_add(json_peer, "domainname",
- peer->domainname);
-
- json_object_int_add(json_peer, "remoteAs", peer->as);
- json_object_int_add(json_peer, "version", 4);
- json_object_int_add(json_peer, "msgRcvd",
- PEER_TOTAL_RX(peer));
- json_object_int_add(json_peer, "msgSent",
- PEER_TOTAL_TX(peer));
-
- json_object_int_add(json_peer, "tableVersion",
- peer->version[afi][safi]);
- json_object_int_add(json_peer, "outq",
- peer->obuf->count);
- json_object_int_add(json_peer, "inq", 0);
- peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
- use_json, json_peer);
-
- /*
- * Adding "pfxRcd" field to match with the corresponding
- * CLI. "prefixReceivedCount" will be deprecated in
- * future.
- */
- json_object_int_add(json_peer, "prefixReceivedCount",
- peer->pcount[afi][pfx_rcd_safi]);
- json_object_int_add(json_peer, "pfxRcd",
- peer->pcount[afi][pfx_rcd_safi]);
-
- paf = peer_af_find(peer, afi, pfx_rcd_safi);
- if (paf && PAF_SUBGRP(paf))
- json_object_int_add(json_peer,
- "pfxSnt",
- (PAF_SUBGRP(paf))->scount);
-
- if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
- json_object_string_add(json_peer, "state",
- "Idle (Admin)");
- else if (peer->afc_recv[afi][safi])
- json_object_string_add(
- json_peer, "state",
- lookup_msg(bgp_status_msg, peer->status,
- NULL));
- else if (CHECK_FLAG(peer->sflags,
- PEER_STATUS_PREFIX_OVERFLOW))
- json_object_string_add(json_peer, "state",
- "Idle (PfxCt)");
- else
- json_object_string_add(
- json_peer, "state",
- lookup_msg(bgp_status_msg, peer->status,
- NULL));
+ /* Avoid creating empty peer dicts in JSON */
+ if (json_peer == NULL)
+ continue;
if (peer->conf_if)
json_object_string_add(json_peer, "idType",
@@ -8145,65 +8412,72 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
else if (peer->su.sa.sa_family == AF_INET6)
json_object_string_add(json_peer, "idType",
"ipv6");
-
json_object_object_add(json_peers, peer->host,
json_peer);
} else {
- memset(dn_flag, '\0', sizeof(dn_flag));
- if (peer_dynamic_neighbor(peer)) {
- dn_count++;
- dn_flag[0] = '*';
- }
-
- if (peer->hostname
- && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
- len = vty_out(vty, "%s%s(%s)", dn_flag,
- peer->hostname, peer->host);
- else
- len = vty_out(vty, "%s%s", dn_flag, peer->host);
-
- /* pad the neighbor column with spaces */
- if (len < max_neighbor_width)
- vty_out(vty, "%*s", max_neighbor_width - len,
- " ");
-
- vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
- peer->as, PEER_TOTAL_RX(peer),
- PEER_TOTAL_TX(peer), peer->version[afi][safi],
- 0, peer->obuf->count,
- peer_uptime(peer->uptime, timebuf,
- BGP_UPTIME_LEN, 0, NULL));
+ if (show_failed &&
+ bgp_has_peer_failed(peer, afi, safi)) {
+ bgp_show_failed_summary(vty, bgp, peer, NULL,
+ max_neighbor_width,
+ use_json);
+ } else if (!show_failed) {
+ memset(dn_flag, '\0', sizeof(dn_flag));
+ if (peer_dynamic_neighbor(peer)) {
+ dn_flag[0] = '*';
+ }
- if (peer->status == Established)
- if (peer->afc_recv[afi][safi])
- vty_out(vty, " %12ld",
- peer->pcount[afi]
- [pfx_rcd_safi]);
- else
- vty_out(vty, " NoNeg");
- else {
- if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
- vty_out(vty, " Idle (Admin)");
- else if (CHECK_FLAG(
- peer->sflags,
- PEER_STATUS_PREFIX_OVERFLOW))
- vty_out(vty, " Idle (PfxCt)");
+ if (peer->hostname
+ && bgp_flag_check(bgp, BGP_FLAG_SHOW_HOSTNAME))
+ len = vty_out(vty, "%s%s(%s)", dn_flag,
+ peer->hostname, peer->host);
else
- vty_out(vty, " %12s",
- lookup_msg(bgp_status_msg,
- peer->status, NULL));
+ len = vty_out(vty, "%s%s", dn_flag, peer->host);
+
+ /* pad the neighbor column with spaces */
+ if (len < max_neighbor_width)
+ vty_out(vty, "%*s", max_neighbor_width - len,
+ " ");
+
+ vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
+ peer->as, PEER_TOTAL_RX(peer),
+ PEER_TOTAL_TX(peer), peer->version[afi][safi],
+ 0, peer->obuf->count,
+ peer_uptime(peer->uptime, timebuf,
+ BGP_UPTIME_LEN, 0, NULL));
+
+ if (peer->status == Established)
+ if (peer->afc_recv[afi][safi])
+ vty_out(vty, " %12ld",
+ peer->pcount[afi]
+ [pfx_rcd_safi]);
+ else
+ vty_out(vty, " NoNeg");
+ else {
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
+ vty_out(vty, " Idle (Admin)");
+ else if (CHECK_FLAG(
+ peer->sflags,
+ PEER_STATUS_PREFIX_OVERFLOW))
+ vty_out(vty, " Idle (PfxCt)");
+ else
+ vty_out(vty, " %12s",
+ lookup_msg(bgp_status_msg,
+ peer->status, NULL));
+ }
+ vty_out(vty, "\n");
}
- vty_out(vty, "\n");
+
}
}
if (use_json) {
json_object_object_add(json, "peers", json_peers);
-
+ json_object_int_add(json, "failedPeers", failed_count);
json_object_int_add(json, "totalPeers", count);
json_object_int_add(json, "dynamicPeers", dn_count);
- bgp_show_bestpath_json(bgp, json);
+ if (!show_failed)
+ bgp_show_bestpath_json(bgp, json);
vty_out(vty, "%s\n", json_object_to_json_string_ext(
json, JSON_C_TO_STRING_PRETTY));
@@ -8213,7 +8487,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
vty_out(vty, "\nTotal number of neighbors %d\n", count);
else {
vty_out(vty, "No %s neighbor is configured\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
}
if (dn_count) {
@@ -8227,7 +8501,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
}
static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
- int safi, bool use_json)
+ int safi, bool show_failed, bool use_json)
{
int is_first = 1;
int afi_wildcard = (afi == AFI_MAX);
@@ -8260,15 +8534,18 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
is_first = 0;
vty_out(vty, "\"%s\":",
- afi_safi_json(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
} else {
vty_out(vty, "\n%s Summary:\n",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
}
}
- bgp_show_summary(vty, bgp, afi, safi, use_json);
+ bgp_show_summary(vty, bgp, afi, safi, show_failed,
+ use_json);
}
safi++;
if (!safi_wildcard)
@@ -8290,7 +8567,8 @@ static void bgp_show_summary_afi_safi(struct vty *vty, struct bgp *bgp, int afi,
}
static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
- safi_t safi, bool use_json)
+ safi_t safi, bool show_failed,
+ bool use_json)
{
struct listnode *node, *nnode;
struct bgp *bgp;
@@ -8318,7 +8596,8 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
? VRF_DEFAULT_NAME
: bgp->name);
}
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
+ use_json);
}
if (use_json)
@@ -8328,13 +8607,14 @@ static void bgp_show_all_instances_summary_vty(struct vty *vty, afi_t afi,
}
int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool use_json)
+ safi_t safi, bool show_failed, bool use_json)
{
struct bgp *bgp;
if (name) {
if (strmatch(name, "all")) {
bgp_show_all_instances_summary_vty(vty, afi, safi,
+ show_failed,
use_json);
return CMD_SUCCESS;
} else {
@@ -8350,7 +8630,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
}
bgp_show_summary_afi_safi(vty, bgp, afi, safi,
- use_json);
+ show_failed, use_json);
return CMD_SUCCESS;
}
}
@@ -8358,7 +8638,8 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
bgp = bgp_get_default();
if (bgp)
- bgp_show_summary_afi_safi(vty, bgp, afi, safi, use_json);
+ bgp_show_summary_afi_safi(vty, bgp, afi, safi, show_failed,
+ use_json);
else {
if (use_json)
vty_out(vty, "{}\n");
@@ -8373,7 +8654,7 @@ int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
/* `show [ip] bgp summary' commands. */
DEFUN (show_ip_bgp_summary,
show_ip_bgp_summary_cmd,
- "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [json]",
+ "show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] summary [failed] [json]",
SHOW_STR
IP_STR
BGP_STR
@@ -8381,11 +8662,13 @@ DEFUN (show_ip_bgp_summary,
BGP_AFI_HELP_STR
BGP_SAFI_WITH_LABEL_HELP_STR
"Summary of BGP neighbor status\n"
+ "Show only sessions not in Established state\n"
JSON_STR)
{
char *vrf = NULL;
afi_t afi = AFI_MAX;
safi_t safi = SAFI_MAX;
+ bool show_failed = false;
int idx = 0;
@@ -8405,79 +8688,20 @@ DEFUN (show_ip_bgp_summary,
argv_find_and_parse_safi(argv, argc, &idx, &safi);
}
+ if (argv_find(argv, argc, "failed", &idx))
+ show_failed = true;
+
bool uj = use_json(argc, argv);
- return bgp_show_summary_vty(vty, vrf, afi, safi, uj);
+ return bgp_show_summary_vty(vty, vrf, afi, safi, show_failed, uj);
}
-const char *afi_safi_print(afi_t afi, safi_t safi)
+const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json)
{
- if (afi == AFI_IP && safi == SAFI_UNICAST)
- return "IPv4 Unicast";
- else if (afi == AFI_IP && safi == SAFI_MULTICAST)
- return "IPv4 Multicast";
- else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
- return "IPv4 Labeled Unicast";
- else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
- return "IPv4 VPN";
- else if (afi == AFI_IP && safi == SAFI_ENCAP)
- return "IPv4 Encap";
- else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
- return "IPv4 Flowspec";
- else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
- return "IPv6 Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
- return "IPv6 Multicast";
- else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
- return "IPv6 Labeled Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
- return "IPv6 VPN";
- else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
- return "IPv6 Encap";
- else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
- return "IPv6 Flowspec";
- else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
- return "L2VPN EVPN";
- else
- return "Unknown";
-}
-
-/*
- * Please note that we have intentionally camelCased
- * the return strings here. So if you want
- * to use this function, please ensure you
- * are doing this within json output
- */
-const char *afi_safi_json(afi_t afi, safi_t safi)
-{
- if (afi == AFI_IP && safi == SAFI_UNICAST)
- return "ipv4Unicast";
- else if (afi == AFI_IP && safi == SAFI_MULTICAST)
- return "ipv4Multicast";
- else if (afi == AFI_IP && safi == SAFI_LABELED_UNICAST)
- return "ipv4LabeledUnicast";
- else if (afi == AFI_IP && safi == SAFI_MPLS_VPN)
- return "ipv4Vpn";
- else if (afi == AFI_IP && safi == SAFI_ENCAP)
- return "ipv4Encap";
- else if (afi == AFI_IP && safi == SAFI_FLOWSPEC)
- return "ipv4Flowspec";
- else if (afi == AFI_IP6 && safi == SAFI_UNICAST)
- return "ipv6Unicast";
- else if (afi == AFI_IP6 && safi == SAFI_MULTICAST)
- return "ipv6Multicast";
- else if (afi == AFI_IP6 && safi == SAFI_LABELED_UNICAST)
- return "ipv6LabeledUnicast";
- else if (afi == AFI_IP6 && safi == SAFI_MPLS_VPN)
- return "ipv6Vpn";
- else if (afi == AFI_IP6 && safi == SAFI_ENCAP)
- return "ipv6Encap";
- else if (afi == AFI_IP6 && safi == SAFI_FLOWSPEC)
- return "ipv6Flowspec";
- else if (afi == AFI_L2VPN && safi == SAFI_EVPN)
- return "l2VpnEvpn";
+ if (for_json)
+ return get_afi_safi_json_str(afi, safi);
else
- return "Unknown";
+ return get_afi_safi_vty_str(afi, safi);
}
/* Show BGP peer's information. */
@@ -8855,14 +9079,14 @@ static void bgp_show_peer_afi(struct vty *vty, struct peer *p, afi_t afi,
"prefixAllowedRestartIntervalMsecs",
p->pmax_restart[afi][safi] * 60000);
}
- json_object_object_add(json_neigh, afi_safi_print(afi, safi),
+ json_object_object_add(json_neigh, get_afi_safi_str(afi, safi, true),
json_addr);
} else {
filter = &p->filter[afi][safi];
vty_out(vty, " For address family: %s\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
if (peer_group_active(p))
vty_out(vty, " %s peer-group member\n",
@@ -9150,8 +9374,6 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
char buf1[PREFIX2STR_BUFFER], buf[SU_ADDRSTRLEN];
char timebuf[BGP_UPTIME_LEN];
char dn_flag[2];
- const char *subcode_str;
- const char *code_str;
afi_t afi;
safi_t safi;
uint16_t i;
@@ -9569,8 +9791,8 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object *json_sub = NULL;
json_sub =
json_object_new_object();
- print_store = afi_safi_print(
- afi, safi);
+ print_store = get_afi_safi_str(
+ afi, safi, true);
if (CHECK_FLAG(
p->af_cap[afi]
@@ -9748,9 +9970,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
[AFI_IP]
[safi],
PEER_CAP_ENHE_AF_RCV)) {
- print_store = afi_safi_print(
+ print_store = get_afi_safi_str(
AFI_IP,
- safi);
+ safi, true);
json_object_string_add(
json_nxt,
print_store,
@@ -9850,8 +10072,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object_object_add(
json_multi,
- afi_safi_print(afi,
- safi),
+ get_afi_safi_str(afi,
+ safi,
+ true),
json_exten);
}
}
@@ -9957,9 +10180,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
restart_af_count++;
json_object_object_add(
json_restart,
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi),
+ safi,
+ true),
json_sub);
}
}
@@ -10018,9 +10242,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_TX_RCV)) {
vty_out(vty,
" %s: TX ",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10029,9 +10254,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_TX_ADV))
vty_out(vty,
"advertised %s",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10061,9 +10287,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_RX_RCV)) {
vty_out(vty,
" %s: RX ",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10072,9 +10299,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ADDPATH_AF_RX_ADV))
vty_out(vty,
"advertised %s",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi));
+ safi,
+ false));
if (CHECK_FLAG(
p->af_cap
@@ -10145,9 +10373,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_CAP_ENHE_AF_RCV))
vty_out(vty,
" %s\n",
- afi_safi_print(
+ get_afi_safi_str(
AFI_IP,
- safi));
+ safi,
+ false));
}
}
@@ -10194,8 +10423,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
|| p->afc_recv[afi][safi]) {
vty_out(vty,
" Address Family %s:",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(
+ afi,
+ safi,
+ false));
if (p->afc_adv[afi][safi])
vty_out(vty,
" advertised");
@@ -10280,9 +10511,10 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
restart_af_count
? ", "
: "",
- afi_safi_print(
+ get_afi_safi_str(
afi,
- safi),
+ safi,
+ false),
CHECK_FLAG(
p->af_cap
[afi]
@@ -10321,8 +10553,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_STATUS_EOR_SEND)) {
json_object_boolean_true_add(
json_grace_send,
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
eor_send_af_count++;
}
}
@@ -10332,8 +10565,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
PEER_STATUS_EOR_RECEIVED)) {
json_object_boolean_true_add(
json_grace_recv,
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ true));
eor_receive_af_count++;
}
}
@@ -10371,8 +10605,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
vty_out(vty, "%s%s",
eor_send_af_count ? ", "
: "",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
eor_send_af_count++;
}
}
@@ -10386,8 +10621,9 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
eor_receive_af_count
? ", "
: "",
- afi_safi_print(afi,
- safi));
+ get_afi_safi_str(afi,
+ safi,
+ false));
eor_receive_af_count++;
}
}
@@ -10573,88 +10809,13 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
(tm->tm_sec * 1000)
+ (tm->tm_min * 60000)
+ (tm->tm_hour * 3600000));
- json_object_string_add(
- json_neigh, "lastResetDueTo",
- peer_down_str[(int)p->last_reset]);
- if (p->last_reset == PEER_DOWN_NOTIFY_SEND
- || p->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
- char errorcodesubcode_hexstr[5];
- char errorcodesubcode_str[256];
-
- code_str = bgp_notify_code_str(p->notify.code);
- subcode_str = bgp_notify_subcode_str(
- p->notify.code, p->notify.subcode);
-
- sprintf(errorcodesubcode_hexstr, "%02X%02X",
- p->notify.code, p->notify.subcode);
- json_object_string_add(json_neigh,
- "lastErrorCodeSubcode",
- errorcodesubcode_hexstr);
- snprintf(errorcodesubcode_str, 255, "%s%s",
- code_str, subcode_str);
- json_object_string_add(json_neigh,
- "lastNotificationReason",
- errorcodesubcode_str);
- if (p->last_reset == PEER_DOWN_NOTIFY_RECEIVED
- && p->notify.code == BGP_NOTIFY_CEASE
- && (p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
- || p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_RESET)
- && p->notify.length) {
- char msgbuf[1024];
- const char *msg_str;
-
- msg_str = bgp_notify_admin_message(
- msgbuf, sizeof(msgbuf),
- (uint8_t *)p->notify.data,
- p->notify.length);
- if (msg_str)
- json_object_string_add(
- json_neigh,
- "lastShutdownDescription",
- msg_str);
- }
- }
+ bgp_show_peer_reset(NULL, p, json_neigh, true);
} else {
vty_out(vty, " Last reset %s, ",
peer_uptime(p->resettime, timebuf,
BGP_UPTIME_LEN, 0, NULL));
- if (p->last_reset == PEER_DOWN_NOTIFY_SEND
- || p->last_reset == PEER_DOWN_NOTIFY_RECEIVED) {
- code_str = bgp_notify_code_str(p->notify.code);
- subcode_str = bgp_notify_subcode_str(
- p->notify.code, p->notify.subcode);
- vty_out(vty, "due to NOTIFICATION %s (%s%s)\n",
- p->last_reset == PEER_DOWN_NOTIFY_SEND
- ? "sent"
- : "received",
- code_str, subcode_str);
- if (p->last_reset == PEER_DOWN_NOTIFY_RECEIVED
- && p->notify.code == BGP_NOTIFY_CEASE
- && (p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN
- || p->notify.subcode
- == BGP_NOTIFY_CEASE_ADMIN_RESET)
- && p->notify.length) {
- char msgbuf[1024];
- const char *msg_str;
-
- msg_str = bgp_notify_admin_message(
- msgbuf, sizeof(msgbuf),
- (uint8_t *)p->notify.data,
- p->notify.length);
- if (msg_str)
- vty_out(vty,
- " Message: \"%s\"\n",
- msg_str);
- }
- } else {
- vty_out(vty, "due to %s\n",
- peer_down_str[(int)p->last_reset]);
- }
-
+ bgp_show_peer_reset(vty, p, NULL, false);
if (p->last_reset_cause_size) {
msg = p->last_reset_cause;
vty_out(vty,
@@ -11319,7 +11480,7 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
/* Provide context for the block */
json_object_string_add(json, "vrf", name ? name : "default");
json_object_string_add(json, "afiSafi",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, true));
if (!CHECK_FLAG(bgp->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
@@ -11400,11 +11561,11 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
BGP_CONFIG_VRF_TO_VRF_IMPORT))
vty_out(vty,
"This VRF is not importing %s routes from any other VRF\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
else {
vty_out(vty,
"This VRF is importing %s routes from the following VRFs:\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
for (ALL_LIST_ELEMENTS_RO(
bgp->vpn_policy[afi].import_vrf,
@@ -11428,11 +11589,11 @@ static int bgp_show_route_leak_vty(struct vty *vty, const char *name,
BGP_CONFIG_VRF_TO_VRF_EXPORT))
vty_out(vty,
"This VRF is not exporting %s routes to any other VRF\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
else {
vty_out(vty,
"This VRF is exporting %s routes to the following VRFs:\n",
- afi_safi_print(afi, safi));
+ get_afi_safi_str(afi, safi, false));
for (ALL_LIST_ELEMENTS_RO(
bgp->vpn_policy[afi].export_vrf,
@@ -11809,7 +11970,7 @@ static int bgp_show_one_peer_group(struct vty *vty, struct peer_group *group)
FOREACH_AFI_SAFI (afi, safi) {
if (conf->afc[afi][safi]) {
af_cfgd = 1;
- vty_out(vty, " %s;", afi_safi_print(afi, safi));
+ vty_out(vty, " %s;", get_afi_safi_str(afi, safi, false));
}
}
if (!af_cfgd)
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index d9df2b4cfe..27b5ea47b2 100644
--- a/bgpd/bgp_vty.h
+++ b/bgpd/bgp_vty.h
@@ -45,8 +45,7 @@ struct bgp;
"Address Family modifier\n"
extern void bgp_vty_init(void);
-extern const char *afi_safi_print(afi_t afi, safi_t safi);
-extern const char *afi_safi_json(afi_t afi, safi_t safi);
+extern const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json);
extern void bgp_config_write_update_delay(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_wpkt_quanta(struct vty *vty, struct bgp *bgp);
extern void bgp_config_write_rpkt_quanta(struct vty *vty, struct bgp *bgp);
@@ -72,7 +71,7 @@ extern int bgp_vty_find_and_parse_afi_safi_bgp(struct vty *vty,
safi_t *safi, struct bgp **bgp,
bool use_json);
extern int bgp_show_summary_vty(struct vty *vty, const char *name, afi_t afi,
- safi_t safi, bool use_json);
+ safi_t safi, bool show_failed, bool use_json);
extern void bgp_vpn_policy_config_write_afi(struct vty *vty, struct bgp *bgp,
afi_t afi);
#endif /* _QUAGGA_BGP_VTY_H */
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index b5f267cc38..5b31fbb3a8 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1582,6 +1582,12 @@ struct peer *peer_create(union sockunion *su, const char *conf_if,
}
active = peer_active(peer);
+ if (!active) {
+ if (peer->su.sa.sa_family == AF_UNSPEC)
+ peer->last_reset = PEER_DOWN_NBR_ADDR;
+ else
+ peer->last_reset = PEER_DOWN_NOAFI_ACTIVATED;
+ }
/* Last read and reset time set */
peer->readtime = peer->resettime = bgp_clock();
@@ -2221,10 +2227,12 @@ int peer_delete(struct peer *peer)
bgp = peer->bgp;
accept_peer = CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
+ bgp_keepalives_off(peer);
bgp_reads_off(peer);
bgp_writes_off(peer);
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_WRITES_ON));
assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_READS_ON));
+ assert(!CHECK_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON));
if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT))
peer_nsf_stop(peer);
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 9e05fd5629..9d45d96987 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -1194,6 +1194,10 @@ struct peer {
#define PEER_DOWN_BFD_DOWN 24 /* BFD down */
#define PEER_DOWN_IF_DOWN 25 /* Interface down */
#define PEER_DOWN_NBR_ADDR_DEL 26 /* Peer address lost */
+#define PEER_DOWN_WAITING_NHT 27 /* Waiting for NHT to resolve */
+#define PEER_DOWN_NBR_ADDR 28 /* Waiting for peer IPv6 IP Addr */
+#define PEER_DOWN_VRF_UNINIT 29 /* Associated VRF is not init yet */
+#define PEER_DOWN_NOAFI_ACTIVATED 30 /* No AFI/SAFI activated for peer */
size_t last_reset_cause_size;
uint8_t last_reset_cause[BGP_MAX_PACKET_SIZE];
@@ -1207,6 +1211,7 @@ struct peer {
#define PEER_RMAP_TYPE_NOSET (1 << 5) /* not allow to set commands */
#define PEER_RMAP_TYPE_IMPORT (1 << 6) /* neighbor route-map import */
#define PEER_RMAP_TYPE_EXPORT (1 << 7) /* neighbor route-map export */
+#define PEER_RMAP_TYPE_AGGREGATE (1 << 8) /* aggregate-address route-map */
/* peer specific BFD information */
struct bfd_info *bfd_info;
@@ -1931,4 +1936,7 @@ extern struct peer *peer_new(struct bgp *bgp);
extern struct peer *peer_lookup_in_view(struct vty *vty, struct bgp *bgp,
const char *ip_str, bool use_json);
+/* Hooks */
+DECLARE_HOOK(peer_status_changed, (struct peer * peer), (peer))
+
#endif /* _QUAGGA_BGPD_H */
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index e7905e5622..0aa102feab 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -880,12 +880,12 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
attr.nexthop.s_addr = nexthop->addr.v4.s_addr;
attr.mp_nexthop_global_in = nexthop->addr.v4;
- attr.mp_nexthop_len = 4;
+ attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
break;
case AF_INET6:
attr.mp_nexthop_global = nexthop->addr.v6;
- attr.mp_nexthop_len = 16;
+ attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;
break;
default:
@@ -1282,8 +1282,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
* since this peer is not on the I/O thread, this lock is not strictly
* necessary, but serves as a reminder to those who may meddle...
*/
- pthread_mutex_lock(&rfd->peer->io_mtx);
- {
+ frr_with_mutex(&rfd->peer->io_mtx) {
// we don't need any I/O related facilities
if (rfd->peer->ibuf)
stream_fifo_free(rfd->peer->ibuf);
@@ -1300,7 +1299,6 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
rfd->peer->obuf_work = NULL;
rfd->peer->ibuf_work = NULL;
}
- pthread_mutex_unlock(&rfd->peer->io_mtx);
{ /* base code assumes have valid host pointer */
char buf[BUFSIZ];
diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c
index 3d8d5bccb0..b97c8c3030 100644
--- a/bgpd/rfapi/vnc_export_bgp.c
+++ b/bgpd/rfapi/vnc_export_bgp.c
@@ -86,13 +86,13 @@ static void encap_attr_export_ce(struct attr *new, struct attr *orig,
switch (use_nexthop->family) {
case AF_INET:
new->nexthop = use_nexthop->u.prefix4;
- new->mp_nexthop_len = 4; /* bytes */
+ new->mp_nexthop_len = BGP_ATTR_NHLEN_IPV4; /* bytes */
new->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
break;
case AF_INET6:
new->mp_nexthop_global = use_nexthop->u.prefix6;
- new->mp_nexthop_len = 16; /* bytes */
+ new->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; /* bytes */
break;
default:
@@ -624,13 +624,13 @@ encap_attr_export(struct attr *new, struct attr *orig,
switch (use_nexthop->family) {
case AF_INET:
new->nexthop = use_nexthop->u.prefix4;
- new->mp_nexthop_len = 4; /* bytes */
+ new->mp_nexthop_len = BGP_ATTR_NHLEN_IPV4; /* bytes */
new->flag |= ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP);
break;
case AF_INET6:
new->mp_nexthop_global = use_nexthop->u.prefix6;
- new->mp_nexthop_len = 16; /* bytes */
+ new->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; /* bytes */
break;
default:
diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c
index 481500dfb4..80a590f56a 100644
--- a/bgpd/rfapi/vnc_zebra.c
+++ b/bgpd/rfapi/vnc_zebra.c
@@ -191,8 +191,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
* is not strictly necessary, but serves as a reminder
* to those who may meddle...
*/
- pthread_mutex_lock(&vncHD1VR.peer->io_mtx);
- {
+ frr_with_mutex(&vncHD1VR.peer->io_mtx) {
// we don't need any I/O related facilities
if (vncHD1VR.peer->ibuf)
stream_fifo_free(vncHD1VR.peer->ibuf);
@@ -209,7 +208,6 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
vncHD1VR.peer->obuf_work = NULL;
vncHD1VR.peer->ibuf_work = NULL;
}
- pthread_mutex_unlock(&vncHD1VR.peer->io_mtx);
/* base code assumes have valid host pointer */
vncHD1VR.peer->host =
diff --git a/bgpd/subdir.am b/bgpd/subdir.am
index d281fe4e59..b338fd4f3d 100644
--- a/bgpd/subdir.am
+++ b/bgpd/subdir.am
@@ -27,6 +27,7 @@ vtysh_scan += \
# can be loaded as DSO - always include for vtysh
vtysh_scan += $(top_srcdir)/bgpd/bgp_rpki.c
+vtysh_scan += $(top_srcdir)/bgpd/bgp_bmp.c
if ENABLE_BGP_VNC
vtysh_scan += \
@@ -42,6 +43,9 @@ endif
if RPKI
module_LTLIBRARIES += bgpd/bgpd_rpki.la
endif
+if BGP_BMP
+module_LTLIBRARIES += bgpd/bgpd_bmp.la
+endif
man8 += $(MANBUILD)/bgpd.8
endif
@@ -129,6 +133,7 @@ noinst_HEADERS += \
bgpd/bgp_damp.h \
bgpd/bgp_debug.h \
bgpd/bgp_dump.h \
+ bgpd/bgp_bmp.h \
bgpd/bgp_ecommunity.h \
bgpd/bgp_encap_tlv.h \
bgpd/bgp_encap_types.h \
@@ -216,6 +221,10 @@ bgpd_bgpd_rpki_la_CFLAGS = $(WERROR) $(RTRLIB_CFLAGS)
bgpd_bgpd_rpki_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
bgpd_bgpd_rpki_la_LIBADD = $(RTRLIB_LIBS)
+bgpd_bgpd_bmp_la_SOURCES = bgpd/bgp_bmp.c
+bgpd_bgpd_bmp_la_LIBADD = lib/libfrrcares.la
+bgpd_bgpd_bmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
bgpd/bgp_evpn_vty_clippy.c: $(CLIPPY_DEPS)
bgpd/bgp_evpn_vty.$(OBJEXT): bgpd/bgp_evpn_vty_clippy.c
bgpd/bgp_vty_clippy.c: $(CLIPPY_DEPS)
@@ -229,3 +238,5 @@ bgpd/bgp_routemap.$(OBJEXT): bgpd/bgp_routemap_clippy.c
bgpd/bgp_rpki_clippy.c: $(CLIPPY_DEPS)
$(AUTOMAKE_DUMMY)bgpd/bgpd_bgpd_rpki_la-bgp_rpki.lo: bgpd/bgp_rpki_clippy.c
$(AUTOMAKE_DUMMY)bgpd/bgpd_rpki_la-bgp_rpki.lo: bgpd/bgp_rpki_clippy.c
+bgpd/bgp_bmp_clippy.c: $(CLIPPY_DEPS)
+bgpd/bgp_bmp.lo: bgpd/bgp_bmp_clippy.c
diff --git a/configure.ac b/configure.ac
index 134c8692d4..6c1b35b5f2 100755
--- a/configure.ac
+++ b/configure.ac
@@ -479,12 +479,14 @@ AC_ARG_ENABLE([staticd],
AS_HELP_STRING([--disable-staticd], [do not build staticd]))
AC_ARG_ENABLE([fabricd],
AS_HELP_STRING([--disable-fabricd], [do not build fabricd]))
-AC_ARG_ENABLE([bgp-announce],
- AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement]))
AC_ARG_ENABLE([vrrpd],
AS_HELP_STRING([--disable-vrrpd], [do not build vrrpd]))
+AC_ARG_ENABLE([bgp-announce],
+ AS_HELP_STRING([--disable-bgp-announce,], [turn off BGP route announcement]))
AC_ARG_ENABLE([bgp-vnc],
AS_HELP_STRING([--disable-bgp-vnc],[turn off BGP VNC support]))
+AC_ARG_ENABLE([bgp-bmp],
+ AS_HELP_STRING([--disable-bgp-bmp],[turn off BGP BMP support]))
AC_ARG_ENABLE([snmp],
AS_HELP_STRING([--enable-snmp], [enable SNMP support for agentx]))
AC_ARG_ENABLE([config_rollbacks],
@@ -1061,6 +1063,7 @@ case "$host_os" in
AC_CHECK_LIB([nsl], [main])
AC_CHECK_LIB([umem], [main])
SOLARIS="solaris"
+ AC_MSG_WARN([--Solaris support is being considered for deprecation, please let us know if you are still using this--])
;;
linux*)
AC_MSG_RESULT([Linux])
@@ -1449,6 +1452,16 @@ if test "x$enable_pcreposix" = "xyes"; then
fi
AC_SUBST([HAVE_LIBPCREPOSIX])
+dnl ------------------
+dnl check C-Ares library
+dnl ------------------
+PKG_CHECK_MODULES([CARES], [libcares], [
+ c_ares_found=true
+],[
+ c_ares_found=false
+])
+AM_CONDITIONAL([CARES], [$c_ares_found])
+
dnl ##########################################################################
dnl test "${enable_clippy_only}" != "yes"
fi
@@ -1518,9 +1531,21 @@ fi
NHRPD=""
case "$host_os" in
linux*)
- if test "${enable_nhrpd}" != "no"; then
- NHRPD="nhrpd"
- fi
+ case "${enable_nhrpd}" in
+ no)
+ ;;
+ yes)
+ if test "$c_ares_found" != "true" ; then
+ AC_MSG_ERROR([nhrpd requires libcares. Please install c-ares and its -dev headers.])
+ fi
+ NHRPD="nhrpd"
+ ;;
+ *)
+ if test "$c_ares_found" = "true" ; then
+ NHRPD="nhrpd"
+ fi
+ ;;
+ esac
;;
*)
if test "${enable_nhrpd}" = "yes"; then
@@ -1554,22 +1579,29 @@ if test "${enable_bgp_vnc}" != "no";then
AC_DEFINE([ENABLE_BGP_VNC], [1], [Enable BGP VNC support])
fi
+bgpd_bmp=false
+case "${enable_bmp}" in
+ no)
+ ;;
+ yes)
+ if test "$c_ares_found" != "true" ; then
+ AC_MSG_ERROR([BMP support requires libcares. Please install c-ares and its -dev headers.])
+ fi
+ bgpd_bmp=true
+ ;;
+ *)
+ if test "$c_ares_found" = "true" ; then
+ bgpd_bmp=true
+ fi
+ ;;
+esac
+
dnl ##########################################################################
dnl LARGE if block
if test "${enable_clippy_only}" != "yes"; then
dnl ##########################################################################
dnl ------------------
-dnl check C-Ares library
-dnl ------------------
-if test "${NHRPD}" != ""; then
- PKG_CHECK_MODULES([CARES], [libcares], ,[
- AC_MSG_ERROR([trying to build nhrpd, but libcares not found. install c-ares and its -dev headers.])
- ])
-fi
-AM_CONDITIONAL([CARES], [test "${NHRPD}" != ""])
-
-dnl ------------------
dnl check Net-SNMP library
dnl ------------------
if test "${enable_snmp}" != "" -a "${enable_snmp}" != "no"; then
@@ -2006,6 +2038,20 @@ if test "${enable_capabilities}" != "no"; then
-o x"${frr_ac_lcaps}" = x"yes"; then
AC_DEFINE([HAVE_CAPABILITIES], [1], [capabilities])
fi
+
+ case "$host_os" in
+ linux*)
+ if test "$frr_ac_lcaps" != "yes"; then
+ AC_MSG_ERROR([libcap and/or its headers were not found. Running FRR without libcap support built in causes a huge performance penalty.])
+ fi
+ ;;
+ esac
+else
+ case "$host_os" in
+ linux*)
+ AC_MSG_WARN([Running FRR without libcap support built in causes a huge performance penalty.])
+ ;;
+ esac
fi
AC_SUBST([LIBCAP])
@@ -2192,6 +2238,7 @@ AC_DEFINE_UNQUOTED([WATCHFRR_SH_PATH], ["${CFG_SBIN%/}/watchfrr.sh"], [path to w
dnl various features
AM_CONDITIONAL([SUPPORT_REALMS], [test "${enable_realms}" = "yes"])
AM_CONDITIONAL([ENABLE_BGP_VNC], [test x${enable_bgp_vnc} != xno])
+AM_CONDITIONAL([BGP_BMP], [$bgpd_bmp])
dnl northbound
AM_CONDITIONAL([SQLITE3], [$SQLITE3])
AM_CONDITIONAL([CONFD], [test "x$enable_confd" != "x"])
diff --git a/debian/frr-pythontools.install b/debian/frr-pythontools.install
index 28140382f6..5f7eaebed5 100644
--- a/debian/frr-pythontools.install
+++ b/debian/frr-pythontools.install
@@ -1 +1,2 @@
usr/lib/frr/frr-reload.py
+usr/lib/frr/generate_support_bundle.py
diff --git a/debian/frr.install b/debian/frr.install
index fe34b23d02..09bddf0fc6 100644
--- a/debian/frr.install
+++ b/debian/frr.install
@@ -10,6 +10,7 @@ usr/lib/frr/watchfrr
usr/lib/frr/zebra
usr/lib/*/frr/modules/zebra_irdp.so
usr/lib/*/frr/modules/zebra_fpm.so
+usr/lib/*/frr/modules/bgpd_bmp.so
usr/share/doc/frr/examples
usr/share/man/
usr/share/yang/
diff --git a/debian/rules b/debian/rules
index a546f38d70..c8550ecb52 100755
--- a/debian/rules
+++ b/debian/rules
@@ -71,6 +71,7 @@ override_dh_auto_install:
dh_auto_install
sed -e '1c #!/usr/bin/python3' -i debian/tmp/usr/lib/frr/frr-reload.py
+ sed -e '1c #!/usr/bin/python3' -i debian/tmp/usr/lib/frr/generate_support_bundle.py
# let dh_systemd_* and dh_installinit do their thing automatically
ifeq ($(filter pkg.frr.nosystemd,$(DEB_BUILD_PROFILES)),)
diff --git a/doc/developer/library.rst b/doc/developer/library.rst
index 7cd493ccc4..a904a4e778 100644
--- a/doc/developer/library.rst
+++ b/doc/developer/library.rst
@@ -11,6 +11,7 @@ Library Facilities (libfrr)
rcu
lists
logging
+ locking
hooks
cli
modules
diff --git a/doc/developer/locking.rst b/doc/developer/locking.rst
new file mode 100644
index 0000000000..aee05aae06
--- /dev/null
+++ b/doc/developer/locking.rst
@@ -0,0 +1,73 @@
+Locking
+=======
+
+FRR ships two small wrappers around ``pthread_mutex_lock()`` /
+``pthread_mutex_unlock()``. Use ``#include "frr_pthread.h"`` to get these
+macros.
+
+.. c:function:: frr_with_mutex(pthread_mutex_t *mutex)
+
+ Begin a C statement block that is executed with the mutex locked. Any
+ exit from the block (``break``, ``return``, ``goto``, end of block) will
+ cause the mutex to be unlocked::
+
+ int somefunction(int option)
+ {
+ frr_with_mutex(&my_mutex) {
+ /* mutex will be locked */
+
+ if (!option)
+ /* mutex will be unlocked before return */
+ return -1;
+
+ if (something(option))
+ /* mutex will be unlocked before goto */
+ goto out_err;
+
+ somethingelse();
+
+ /* mutex will be unlocked at end of block */
+ }
+
+ return 0;
+
+ out_err:
+ somecleanup();
+ return -1;
+ }
+
+ This is a macro that internally uses a ``for`` loop. It is explicitly
+ acceptable to use ``break`` to get out of the block. Even though a single
+ statement works correctly, FRR coding style requires that this macro always
+ be used with a ``{ ... }`` block.
+
+.. c:function:: frr_mutex_lock_autounlock(pthread_mutex_t *mutex)
+
+ Lock mutex and unlock at the end of the current C statement block::
+
+ int somefunction(int option)
+ {
+ frr_mutex_lock_autounlock(&my_mutex);
+ /* mutex will be locked */
+
+ ...
+ if (error)
+ /* mutex will be unlocked before return */
+ return -1;
+ ...
+
+ /* mutex will be unlocked before return */
+ return 0;
+ }
+
+ This is a macro that internally creates a variable with a destructor.
+ When the variable goes out of scope (i.e. the block ends), the mutex is
+ released.
+
+ .. warning::
+
+ This macro should only be used when :c:func:`frr_with_mutex` would
+ result in excessively/weirdly nested code. This generally is an
+ indicator that the code might be trying to do too many things with
+ the lock held. Try any possible avenues to reduce the amount of
+ code covered by the lock and move to :c:func:`frr_with_mutex`.
diff --git a/doc/developer/subdir.am b/doc/developer/subdir.am
index 1fc593e566..557a41c51f 100644
--- a/doc/developer/subdir.am
+++ b/doc/developer/subdir.am
@@ -31,6 +31,7 @@ dev_RSTFILES = \
doc/developer/index.rst \
doc/developer/library.rst \
doc/developer/lists.rst \
+ doc/developer/locking.rst \
doc/developer/logging.rst \
doc/developer/maintainer-release-build.rst \
doc/developer/memtypes.rst \
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index 07c43ac2de..3c6887fbac 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -767,6 +767,28 @@ ways that can be unexpected for the original implementor. As such debugs
ability to turn on/off debugs from the CLI and it is expected that the
developer will use this convention to allow control of their debugs.
+Custom syntax-like block macros
+-------------------------------
+
+FRR uses some macros that behave like the ``for`` or ``if`` C keywords. These
+macros follow these patterns:
+
+- loop-style macros are named ``frr_each_*`` (and ``frr_each``)
+- single run macros are named ``frr_with_*``
+- to avoid confusion, ``frr_with_*`` macros must always use a ``{ ... }``
+ block even if the block only contains one statement. The ``frr_each``
+ constructs are assumed to be well-known enough to use normal ``for`` rules.
+- ``break``, ``return`` and ``goto`` all work correctly. For loop-style
+ macros, ``continue`` works correctly too.
+
+Both the ``each`` and ``with`` keywords are inspired by other
+(higher-level) programming languages that provide these constructs.
+
+There are also some older iteration macros, e.g. ``ALL_LIST_ELEMENTS`` and
+``FOREACH_AFI_SAFI``. These macros in some cases do **not** fulfill the above
+pattern (e.g. ``break`` does not work in ``FOREACH_AFI_SAFI`` because it
+expands to 2 nested loops).
+
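+For example, a ``frr_with_*`` style macro is always written with braces,
+even when the body is a single statement (a sketch using
+:c:func:`frr_with_mutex` from the locking documentation; ``mtx`` and
+``do_something()`` are placeholders)::
+
+   frr_with_mutex(&mtx) {
+           do_something();
+   }
+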
Static Analysis and Sanitizers
------------------------------
Clang/LLVM and GCC come with a variety of tools that can be used to help find
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 92d4126cec..7b3cdf2c4b 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -368,6 +368,16 @@ Administrative Distance Metrics
Sets the administrative distance for a particular route.
+.. _bgp-requires-policy:
+
+Require policy on EBGP
+-------------------------------
+
+.. index:: [no] bgp ebgp-requires-policy
+.. clicmd:: [no] bgp ebgp-requires-policy
+
+ This command requires incoming and outgoing filters to be applied
+ for eBGP sessions. Without the incoming filter, no routes will be
+ accepted. Without the outgoing filter, no routes will be announced.
+
.. _bgp-route-flap-dampening:
Route Flap Dampening
@@ -675,6 +685,11 @@ Route Aggregation-IPv4 Address Family
This command specifies an aggregate address.
+.. index:: aggregate-address A.B.C.D/M route-map NAME
+.. clicmd:: aggregate-address A.B.C.D/M route-map NAME
+
+ Apply a route-map for an aggregated prefix.
+
.. index:: aggregate-address A.B.C.D/M as-set
.. clicmd:: aggregate-address A.B.C.D/M as-set
@@ -689,11 +704,11 @@ Route Aggregation-IPv4 Address Family
.. index:: no aggregate-address A.B.C.D/M
.. clicmd:: no aggregate-address A.B.C.D/M
-
+
This command removes an aggregate address.
- This configuration example setup the aggregate-address under
+ This configuration example sets up the aggregate-address under
ipv4 address-family.
.. code-block:: frr
@@ -703,6 +718,7 @@ Route Aggregation-IPv4 Address Family
aggregate-address 10.0.0.0/8
aggregate-address 20.0.0.0/8 as-set
aggregate-address 40.0.0.0/8 summary-only
+ aggregate-address 50.0.0.0/8 route-map aggr-rmap
exit-address-family
@@ -716,6 +732,11 @@ Route Aggregation-IPv6 Address Family
This command specifies an aggregate address.
+.. index:: aggregate-address X:X::X:X/M route-map NAME
+.. clicmd:: aggregate-address X:X::X:X/M route-map NAME
+
+ Apply a route-map for an aggregated prefix.
+
.. index:: aggregate-address X:X::X:X/M as-set
.. clicmd:: aggregate-address X:X::X:X/M as-set
@@ -734,16 +755,17 @@ Route Aggregation-IPv6 Address Family
This command removes an aggregate address.
- This configuration example setup the aggregate-address under
- ipv4 address-family.
+ This configuration example sets up the aggregate-address under
+ ipv6 address-family.
.. code-block:: frr
router bgp 1
address-family ipv6 unicast
aggregate-address 10::0/64
- aggregate-address 20::0/64 as-set
- aggregate-address 40::0/64 summary-only
+ aggregate-address 20::0/64 as-set
+ aggregate-address 40::0/64 summary-only
+ aggregate-address 50::0/64 route-map aggr-rmap
exit-address-family
.. _bgp-redistribute-to-bgp:
@@ -2251,6 +2273,12 @@ structure is extended with :clicmd:`show bgp [afi] [safi]`.
Show a bgp peer summary for the specified address family, and subsequent
address-family.
+.. index:: show bgp [afi] [safi] summary failed [json]
+.. clicmd:: show bgp [afi] [safi] summary failed [json]
+
+ Show a bgp peer summary for peers that are not successfully exchanging routes
+ for the specified address family, and subsequent address-family.
+
.. index:: show bgp [afi] [safi] neighbor [PEER]
.. clicmd:: show bgp [afi] [safi] neighbor [PEER]
@@ -2305,7 +2333,7 @@ attribute.
Displaying Routes by Large Community Attribute
----------------------------------------------
-The following commands allow displaying routes based on their
+The following commands allow displaying routes based on their
large community attribute.
.. index:: show [ip] bgp <ipv4|ipv6> large-community
@@ -2322,8 +2350,8 @@ large community attribute.
These commands display BGP routes which have the large community attribute.
attribute. When ``LARGE-COMMUNITY`` is specified, BGP routes that match that
- large community are displayed. When `exact-match` is specified, it display
- only routes that have an exact match. When `json` is specified, it display
+ large community are displayed. When `exact-match` is specified, it displays
+ only routes that have an exact match. When `json` is specified, it displays
routes in json format.
.. index:: show [ip] bgp <ipv4|ipv6> large-community-list WORD
@@ -2336,8 +2364,8 @@ large community attribute.
.. clicmd:: show [ip] bgp <ipv4|ipv6> large-community-list WORD json
These commands display BGP routes for the address family specified that
- match the specified large community list. When `exact-match` is specified,
- it displays only routes that have an exact match. When `json` is specified,
+ match the specified large community list. When `exact-match` is specified,
+ it displays only routes that have an exact match. When `json` is specified,
it display routes in json format.
.. _bgp-display-routes-by-as-path:
diff --git a/doc/user/bmp.rst b/doc/user/bmp.rst
new file mode 100644
index 0000000000..061800c14e
--- /dev/null
+++ b/doc/user/bmp.rst
@@ -0,0 +1,170 @@
+.. _bmp:
+
+***
+BMP
+***
+
+:abbr:`BMP` (BGP Monitoring Protocol, :rfc:`7854`) is used to send monitoring
+data from BGP routers to network management entities.
+
+Implementation characteristics
+==============================
+
+The `BMP` implementation in FRR has the following properties:
+
+- only the :rfc:`7854` features are currently implemented. This means protocol
+ version 3 without any extensions. It is not possible to use an older draft
+ protocol version of BMP.
+
+- the following statistics codes are implemented:
+
+ - 0: count of prefixes rejected
+ - 2: count of duplicate prefix withdrawals
+ - 3: count of **prefixes** with loop in cluster id
+ - 4: count of **prefixes** with loop in AS-path
+ - 5: count of **prefixes** with loop in originator
+ - 11: count of updates subjected to :rfc:`7607` "treat as withdrawal"
+ handling due to errors
+ - 65531: *experimental* count of prefixes rejected due to invalid next-hop
+
+ Note that stat items 3, 4 and 5 are specified to count updates, but FRR
+ implements them as prefix-based counters.
+
+- **route mirroring** is fully implemented, however BGP OPEN messages are not
+ currently included in route mirroring messages. Their contents can be
+ extracted from the "peer up" notification for sessions that established
+ successfully. OPEN messages for failed sessions cannot currently be
+ mirrored.
+
+- **route monitoring** is available for IPv4 and IPv6 AFIs, unicast and
+ multicast SAFIs. Other SAFIs (VPN, Labeled-Unicast, Flowspec, etc.) are not
+ currently supported.
+
+- monitoring peers that have BGP **add-path** enabled on the session will
+ result in somewhat unpredictable behaviour. Currently, the outcome is:
+
+ - route mirroring functions as intended, messages are copied verbatim
+ - the add-path ID is never included in route monitoring messages
+ - if multiple paths were received from a peer, an unpredictable path is
+ picked and sent on the BMP session. The selection will differ for
+ pre-policy and post-policy monitoring sessions.
+ - as long as any path is present, something will be advertised on BMP
+ sessions. Only after the last path is gone will a withdrawal be sent on
+ BMP sessions.
+ - updates to additional paths will trigger BMP route monitoring messages.
+ There is no guarantee on consistency regarding which path is sent in these
+ messages.
+
+- monitoring peers with :rfc:`5549` extended next-hops has not been tested.
+
+Starting BMP
+============
+
+BMP is implemented as a loadable module. This means that to use BMP, ``bgpd``
+must be started with the ``-M bmp`` option. It is not possible to enable BMP
+if ``bgpd`` was started without this option.
+
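+For example (a minimal sketch; daemonization and other startup options are
+omitted and depend on the local setup)::
+
+   bgpd -M bmp
+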
+Configuring BMP
+===============
+
+All of FRR's BMP configuration options are located inside the
+:clicmd:`router bgp ASN` block. Configure BGP first before proceeding to BMP
+setup.
+
+There is one option that applies to the BGP instance as a whole:
+
+.. index:: bmp mirror buffer-limit(0-4294967294)
+.. clicmd:: [no] bmp mirror buffer-limit(0-4294967294)
+
+ This sets the maximum amount of memory used for buffering BGP messages
+ (updates, keepalives, ...) for sending in BMP Route Mirroring.
+
+ The buffer is for the entire BGP instance; if multiple BMP targets are
+ configured they reference the same buffer and do not consume additional
+ memory. Queue overhead is included in accounting this memory, so the
+ actual space available for BGP messages is slightly less than the value
+ configured here.
+
+ If the buffer fills up, the oldest messages are removed from the buffer and
+ any BMP sessions where the now-removed messages were still pending have
+ their **entire** queue flushed and a "Mirroring Messages Lost" BMP message
+ is sent.
+
+ BMP Route Monitoring is not affected by this option.
+
+All other configuration is managed per ``bmp targets`` group:
+
+.. index:: bmp targets NAME
+.. clicmd:: [no] bmp targets NAME
+
+ Create/delete a targets group. As implied by the plural name, targets may
+ cover multiple outbound active BMP sessions as well as inbound passive
+ listeners.
+
+ If BMP sessions have the same configuration, putting them in the same
+ ``bmp targets`` will reduce overhead.
+
+BMP session configuration
+-------------------------
+
+Inside a ``bmp targets`` block, the following commands control session
+establishment:
+
+.. index:: bmp connect HOSTNAME port (1-65535) {min-retry MSEC|max-retry MSEC}
+.. clicmd:: [no] bmp connect HOSTNAME port (1-65535) {min-retry MSEC|max-retry MSEC}
+
+ Add/remove an active outbound BMP session. HOSTNAME is resolved via DNS,
+ if multiple addresses are returned they are tried in nondeterministic
+ order. Only one connection will be established even if multiple addresses
+ are returned. ``min-retry`` and ``max-retry`` specify (in milliseconds)
+ bounds for exponential backoff.
+
+.. warning::
+
+ ``ip access-list`` and ``ipv6 access-list`` are checked for outbound
+ connections resulting from ``bmp connect`` statements.
+
+.. index:: bmp listener <X:X::X:X|A.B.C.D> port (1-65535)
+.. clicmd:: [no] bmp listener <X:X::X:X|A.B.C.D> port (1-65535)
+
+ Accept incoming BMP sessions on the specified address and port. You can
+ use ``0.0.0.0`` and ``::`` to listen on all IPv4/IPv6 addresses.
+
+.. clicmd:: [no] ip access-list NAME
+.. clicmd:: [no] ipv6 access-list NAME
+
+ Restrict BMP sessions to the addresses allowed by the respective access
+ lists. The access lists are checked for both passive and active BMP
+ sessions. Changes do not affect currently established sessions.
+
+BMP data feed configuration
+---------------------------
+
+The following commands configure what BMP messages are sent on sessions
+associated with a particular ``bmp targets``:
+
+.. index:: bmp stats [interval (100-86400000)]
+.. clicmd:: [no] bmp stats [interval (100-86400000)]
+
+ Send BMP Statistics (counter) messages at the specified interval (in
+ milliseconds).
+
+.. index:: bmp monitor AFI SAFI <pre-policy|post-policy>
+.. clicmd:: [no] bmp monitor AFI SAFI <pre-policy|post-policy>
+
+ Perform Route Monitoring for the specified AFI and SAFI. Only IPv4 and
+ IPv6 are currently valid for AFI, and only unicast and multicast are valid
+ for SAFI. Other AFI/SAFI combinations may be added in the future.
+
+ All BGP neighbors are included in Route Monitoring. Options to select
+ a subset of BGP sessions may be added in the future.
+
+.. index:: bmp mirror
+.. clicmd:: [no] bmp mirror
+
+ Perform Route Mirroring for all BGP neighbors. Since this provides a
+ direct feed of BGP messages, there are no AFI/SAFI options to be
+ configured.
+
+ All BGP neighbors are included in Route Mirroring. Options to select
+ a subset of BGP sessions may be added in the future.
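+
+Putting the above together, a minimal configuration sketch could look like
+this (the target name, hostname, port, AS number and intervals are
+placeholders):
+
+.. code-block:: frr
+
+   router bgp 64512
+    bmp targets collector
+     bmp connect bmp.example.net port 5000 min-retry 1000 max-retry 10000
+     bmp stats interval 10000
+     bmp monitor ipv4 unicast post-policy
+     bmp mirror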
diff --git a/doc/user/eigrpd.rst b/doc/user/eigrpd.rst
index 330267a8ee..915270a562 100644
--- a/doc/user/eigrpd.rst
+++ b/doc/user/eigrpd.rst
@@ -65,15 +65,16 @@ Certain signals have special meanings to *eigrpd*.
EIGRP Configuration
===================
-.. index:: router eigrp (1-65535)
-.. clicmd:: router eigrp (1-65535)
+.. index:: router eigrp (1-65535) [vrf NAME]
+.. clicmd:: router eigrp (1-65535) [vrf NAME]
The `router eigrp` command is necessary to enable EIGRP. To disable EIGRP,
use the `no router eigrp (1-65535)` command. EIGRP must be enabled before
- carrying out any of the EIGRP commands.
+ carrying out any of the EIGRP commands. Specify ``vrf NAME`` if you want
+ EIGRP to run within the specified VRF.
-.. index:: no router eigrp (1-65535)
-.. clicmd:: no router eigrp (1-65535)
+.. index:: no router eigrp (1-65535) [vrf NAME]
+.. clicmd:: no router eigrp (1-65535) [vrf NAME]
Disable EIGRP.
@@ -189,8 +190,8 @@ How to Announce EIGRP route
Show EIGRP Information
======================
-.. index:: show ip eigrp topology
-.. clicmd:: show ip eigrp topology
+.. index:: show ip eigrp [vrf NAME] topology
+.. clicmd:: show ip eigrp [vrf NAME] topology
Display current EIGRP status.
@@ -207,6 +208,17 @@ Show EIGRP Information
P 10.0.2.0/24, 1 successors, FD is 256256, serno: 0
via Connected, enp0s3
+.. index:: show ip eigrp [vrf NAME] interface
+.. clicmd:: show ip eigrp [vrf NAME] interface
+
+ Display the list of interfaces associated with a particular eigrp
+ instance.
+
+.. index:: show ip eigrp [vrf NAME] neighbor
+.. clicmd:: show ip eigrp [vrf NAME] neighbor
+
+ Display the list of neighbors that have been established within
+ a particular eigrp instance.
EIGRP Debug Commands
====================
diff --git a/doc/user/index.rst b/doc/user/index.rst
index 4e14de6737..6c3b14e062 100644
--- a/doc/user/index.rst
+++ b/doc/user/index.rst
@@ -57,6 +57,7 @@ Protocols
static
vnc
vrrp
+ bmp
########
Appendix
diff --git a/doc/user/ospf_fundamentals.rst b/doc/user/ospf_fundamentals.rst
index 7db822c820..da348b02d2 100644
--- a/doc/user/ospf_fundamentals.rst
+++ b/doc/user/ospf_fundamentals.rst
@@ -24,7 +24,7 @@ Each router thus builds up an :abbr:`LSDB (Link State Database)` of all the
link-state messages. From this collection of LSAs in the LSDB, each router can
then calculate the shortest path to any other router, based on some common
metric, by using an algorithm such as
-`Edgar Djikstra's <http://www.cs.utexas.edu/users/EWD/>`_
+`Edsger Dijkstra's <http://www.cs.utexas.edu/users/EWD/>`_
:abbr:`SPF (Shortest Path First)` algorithm.
.. index:: Link-state routing protocol advantages
diff --git a/doc/user/pbr.rst b/doc/user/pbr.rst
index 6230bf777a..fab4343f50 100644
--- a/doc/user/pbr.rst
+++ b/doc/user/pbr.rst
@@ -91,6 +91,12 @@ end destination.
both v4 and v6 prefixes. This command is used in conjunction of the
:clicmd:`match src-ip PREFIX` command for matching.
+.. clicmd:: match mark (1-4294967295)
+
+ Select the mark to match. This is a Linux-only command; if attempted
+ on another platform, it will be denied. This mark translates to the
+ underlying `ip rule .... fwmark XXXX` command.
+
.. clicmd:: set nexthop-group NAME
Use the nexthop-group NAME as the place to forward packets when the match
diff --git a/doc/user/rpki.rst b/doc/user/rpki.rst
index ca6b46d3cf..dfac10b4f2 100644
--- a/doc/user/rpki.rst
+++ b/doc/user/rpki.rst
@@ -112,31 +112,6 @@ The following commands are independent of a specific cache server.
The default value is 300 seconds.
-.. index:: rpki timeout <1-4,294,967,296>
-.. clicmd:: rpki timeout <1-4,294,967,296>
-
-.. index:: no rpki timeout
-.. clicmd:: no rpki timeout
-
- Set the number of seconds the router waits for the cache reply. If the cache
- server is not replying within this time period, the router deletes all
- received prefix records from the prefix table.
-
- The default value is 600 seconds.
-
-.. index:: rpki initial-synchronisation-timeout <1-4,294,967,296>
-.. clicmd:: rpki initial-synchronisation-timeout <1-4,294,967,296>
-
-.. index:: no rpki initial-synchronisation-timeout
-.. clicmd:: no rpki initial-synchronisation-timeout
-
- Set the number of seconds until the first synchronization with the cache
- server needs to be completed. If the timeout expires, BGP routing is started
- without RPKI. The router will try to establish the cache server connection in
- the background.
-
- The default value is 30 seconds.
-
The following commands configure one or multiple cache servers.
.. index:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] PREFERENCE
diff --git a/doc/user/subdir.am b/doc/user/subdir.am
index 1e4d86c722..0f0a8a0774 100644
--- a/doc/user/subdir.am
+++ b/doc/user/subdir.am
@@ -7,6 +7,7 @@ user_RSTFILES = \
doc/user/ldpd.rst \
doc/user/basic.rst \
doc/user/bgp.rst \
+ doc/user/bmp.rst \
doc/user/bugs.rst \
doc/user/conf.py \
doc/user/eigrpd.rst \
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index 50c518d456..b7283d83de 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -292,6 +292,62 @@ Link Parameters Commands
.. _zebra-vrf:
+Administrative Distance
+=======================
+
+Administrative distance allows FRR to decide which routes should be
+installed in the RIB based upon the originating protocol. The route
+with the lowest admin distance is selected. This is purely a subjective
+decision about ordering, and care has been taken to choose the same
+distances that other routing suites have chosen.
+
++------------+-----------+
+| Protocol | Distance |
++------------+-----------+
+| System | 0 |
++------------+-----------+
+| Kernel | 0 |
++------------+-----------+
+| Connect | 0 |
++------------+-----------+
+| Static | 1 |
++------------+-----------+
+| NHRP | 10 |
++------------+-----------+
+| EBGP | 20 |
++------------+-----------+
+| EIGRP | 90 |
++------------+-----------+
+| BABEL | 100 |
++------------+-----------+
+| OSPF | 110 |
++------------+-----------+
+| ISIS | 115 |
++------------+-----------+
+| OPENFABRIC | 115 |
++------------+-----------+
+| RIP | 120 |
++------------+-----------+
+| Table | 150 |
++------------+-----------+
+| SHARP | 150 |
++------------+-----------+
+| IBGP | 200 |
++------------+-----------+
+| PBR | 200 |
++------------+-----------+
+
+An admin distance of 255 indicates to Zebra that the route should not be
+installed into the Data Plane. Additionally, routes with an admin distance
+of 255 will not be redistributed.
+
+Zebra treats Kernel routes as a special case for the purposes of Admin
+Distance. Upon learning about a route that is not originated by FRR,
+we read the metric value as a uint32_t. The top byte of the value
+is interpreted as the Administrative Distance and the low three bytes
+are read in as the metric. This special case is to facilitate VRF
+default routes.
+
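+As a rough illustration of that split (a standalone sketch, not zebra's
+actual code)::
+
+   #include <stdint.h>
+
+   static void split_kernel_metric(uint32_t raw, uint8_t *distance,
+                                   uint32_t *metric)
+   {
+           *distance = raw >> 24;          /* top byte: admin distance */
+           *metric = raw & 0x00FFFFFFu;    /* low three bytes: metric */
+   }
+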
Virtual Routing and Forwarding
==============================
@@ -374,6 +430,14 @@ commands in relationship to VRF. Here is an extract of some of those commands:
This command will dump the routing tables within the vrf scope. If `vrf all`
is executed, all routing tables will be dumped.
+.. index:: show <ip|ipv6> route summary [vrf VRF] [table TABLENO] [prefix]
+.. clicmd:: show <ip|ipv6> route summary [vrf VRF] [table TABLENO] [prefix]
+
+ This command will dump a summary output of the specified VRF and TABLENO
+ combination. If neither VRF nor TABLENO is specified, FRR defaults to
+ the default vrf and default table. If prefix is specified, the number
+ of prefix routes is dumped.
+
By using the :option:`-n` option, the *Linux network namespace* will be mapped
over the *Zebra* VRF. One nice feature that is possible by handling *Linux
network namespace* is the ability to name default VRF. At startup, *Zebra*
diff --git a/eigrpd/eigrp_cli.c b/eigrpd/eigrp_cli.c
index ba657a7d5d..a93d4c8280 100644
--- a/eigrpd/eigrp_cli.c
+++ b/eigrpd/eigrp_cli.c
@@ -40,17 +40,18 @@
DEFPY_NOSH(
router_eigrp,
router_eigrp_cmd,
- "router eigrp (1-65535)$as",
+ "router eigrp (1-65535)$as [vrf NAME]",
ROUTER_STR
EIGRP_STR
- AS_STR)
+ AS_STR
+ VRF_CMD_HELP_STR)
{
char xpath[XPATH_MAXLEN];
int rv;
snprintf(xpath, sizeof(xpath),
- "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='']",
- as_str);
+ "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='%s']",
+ as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
rv = nb_cli_apply_changes(vty, NULL);
@@ -60,20 +61,21 @@ DEFPY_NOSH(
return rv;
}
-DEFPY_NOSH(
+DEFPY(
no_router_eigrp,
no_router_eigrp_cmd,
- "no router eigrp (1-65535)$as",
+ "no router eigrp (1-65535)$as [vrf NAME]",
NO_STR
ROUTER_STR
EIGRP_STR
- AS_STR)
+ AS_STR
+ VRF_CMD_HELP_STR)
{
char xpath[XPATH_MAXLEN];
snprintf(xpath, sizeof(xpath),
- "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='']",
- as_str);
+ "/frr-eigrpd:eigrpd/instance[asn='%s'][vrf='%s']",
+ as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
return nb_cli_apply_changes(vty, NULL);
@@ -83,8 +85,12 @@ void eigrp_cli_show_header(struct vty *vty, struct lyd_node *dnode,
bool show_defaults)
{
const char *asn = yang_dnode_get_string(dnode, "./asn");
+ const char *vrf = yang_dnode_get_string(dnode, "./vrf");
- vty_out(vty, "router eigrp %s\n", asn);
+ vty_out(vty, "router eigrp %s", asn);
+ if (strcmp(vrf, VRF_DEFAULT_NAME))
+ vty_out(vty, " vrf %s", vrf);
+ vty_out(vty, "\n");
}
void eigrp_cli_show_end_header(struct vty *vty, struct lyd_node *dnode)
diff --git a/eigrpd/eigrp_filter.c b/eigrpd/eigrp_filter.c
index 93eed9452c..9d5d45ca50 100644
--- a/eigrpd/eigrp_filter.c
+++ b/eigrpd/eigrp_filter.c
@@ -65,17 +65,15 @@
void eigrp_distribute_update(struct distribute_ctx *ctx,
struct distribute *dist)
{
+ struct eigrp *e = eigrp_lookup(ctx->vrf->vrf_id);
struct interface *ifp;
struct eigrp_interface *ei = NULL;
struct access_list *alist;
struct prefix_list *plist;
// struct route_map *routemap;
- struct eigrp *e;
/* if no interface address is present, set list to eigrp process struct
*/
- e = eigrp_lookup();
- assert(e != NULL);
/* Check if distribute-list was set for process or interface */
if (!dist->ifname) {
@@ -174,7 +172,7 @@ void eigrp_distribute_update(struct distribute_ctx *ctx,
return;
}
- ifp = if_lookup_by_name(dist->ifname, VRF_DEFAULT);
+ ifp = if_lookup_by_name(dist->ifname, e->vrf_id);
if (ifp == NULL)
return;
@@ -288,7 +286,7 @@ void eigrp_distribute_update_interface(struct interface *ifp)
struct distribute *dist;
struct eigrp *eigrp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(ifp->vrf_id);
if (!eigrp)
return;
dist = distribute_lookup(eigrp->distribute_ctx, ifp->name);
@@ -302,11 +300,13 @@ void eigrp_distribute_update_interface(struct interface *ifp)
*/
void eigrp_distribute_update_all(struct prefix_list *notused)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf;
struct interface *ifp;
- FOR_ALL_INTERFACES (vrf, ifp)
- eigrp_distribute_update_interface(ifp);
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ FOR_ALL_INTERFACES (vrf, ifp)
+ eigrp_distribute_update_interface(ifp);
+ }
}
/*
diff --git a/eigrpd/eigrp_filter.h b/eigrpd/eigrp_filter.h
index 34d00ecc13..03f45cedbf 100644
--- a/eigrpd/eigrp_filter.h
+++ b/eigrpd/eigrp_filter.h
@@ -35,10 +35,10 @@
extern void eigrp_distribute_update(struct distribute_ctx *ctx,
struct distribute *dist);
-extern void eigrp_distribute_update_interface(struct interface *);
-extern void eigrp_distribute_update_all(struct prefix_list *);
-extern void eigrp_distribute_update_all_wrapper(struct access_list *);
-extern int eigrp_distribute_timer_process(struct thread *);
-extern int eigrp_distribute_timer_interface(struct thread *);
+extern void eigrp_distribute_update_interface(struct interface *ifp);
+extern void eigrp_distribute_update_all(struct prefix_list *plist);
+extern void eigrp_distribute_update_all_wrapper(struct access_list *alist);
+extern int eigrp_distribute_timer_process(struct thread *thread);
+extern int eigrp_distribute_timer_interface(struct thread *thread);
#endif /* EIGRPD_EIGRP_FILTER_H_ */
diff --git a/eigrpd/eigrp_fsm.c b/eigrpd/eigrp_fsm.c
index 4d6d73e202..cc6d47f488 100644
--- a/eigrpd/eigrp_fsm.c
+++ b/eigrpd/eigrp_fsm.c
@@ -445,7 +445,7 @@ int eigrp_fsm_event_nq_fcn(struct eigrp_fsm_action_message *msg)
prefix->rdistance = prefix->distance = prefix->fdistance = ne->distance;
prefix->reported_metric = ne->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
@@ -471,7 +471,7 @@ int eigrp_fsm_event_q_fcn(struct eigrp_fsm_action_message *msg)
prefix->state = EIGRP_FSM_STATE_ACTIVE_3;
prefix->rdistance = prefix->distance = prefix->fdistance = ne->distance;
prefix->reported_metric = ne->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
@@ -486,7 +486,7 @@ int eigrp_fsm_event_q_fcn(struct eigrp_fsm_action_message *msg)
int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg)
{
- struct eigrp *eigrp;
+ struct eigrp *eigrp = msg->eigrp;
struct eigrp_prefix_entry *prefix = msg->prefix;
struct eigrp_nexthop_entry *ne = listnode_head(prefix->entries);
@@ -499,13 +499,11 @@ int eigrp_fsm_event_keep_state(struct eigrp_fsm_action_message *msg)
if (msg->packet_type == EIGRP_OPC_QUERY)
eigrp_send_reply(msg->adv_router, prefix);
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
- eigrp = eigrp_lookup();
- assert(eigrp);
listnode_add(eigrp->topology_changes_internalIPV4,
prefix);
}
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
}
if (msg->packet_type == EIGRP_OPC_QUERY)
@@ -536,9 +534,10 @@ int eigrp_fsm_event_lr(struct eigrp_fsm_action_message *msg)
prefix->state = EIGRP_FSM_STATE_PASSIVE;
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
- eigrp_update_topology_table_prefix(eigrp->topology_table, prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
+ eigrp_update_topology_table_prefix(eigrp, eigrp->topology_table,
+ prefix);
return 1;
}
@@ -588,9 +587,10 @@ int eigrp_fsm_event_lr_fcs(struct eigrp_fsm_action_message *msg)
}
prefix->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
- eigrp_topology_update_node_flags(prefix);
- eigrp_update_routing_table(prefix);
- eigrp_update_topology_table_prefix(eigrp->topology_table, prefix);
+ eigrp_topology_update_node_flags(eigrp, prefix);
+ eigrp_update_routing_table(eigrp, prefix);
+ eigrp_update_topology_table_prefix(eigrp, eigrp->topology_table,
+ prefix);
return 1;
}
@@ -612,7 +612,7 @@ int eigrp_fsm_event_lr_fcn(struct eigrp_fsm_action_message *msg)
prefix->rdistance = prefix->distance = best_successor->distance;
prefix->reported_metric = best_successor->total_metric;
- if (eigrp_nbr_count_get()) {
+ if (eigrp_nbr_count_get(eigrp)) {
prefix->req_action |= EIGRP_FSM_NEED_QUERY;
listnode_add(eigrp->topology_changes_internalIPV4, prefix);
} else {
diff --git a/eigrpd/eigrp_hello.c b/eigrpd/eigrp_hello.c
index dacd5caeb5..6f93cd7b3e 100644
--- a/eigrpd/eigrp_hello.c
+++ b/eigrpd/eigrp_hello.c
@@ -147,7 +147,7 @@ eigrp_hello_parameter_decode(struct eigrp_neighbor *nbr,
zlog_info("Neighbor %s (%s) is pending: new adjacency",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* Expedited hello sent */
eigrp_hello_send(nbr->ei, EIGRP_HELLO_NORMAL, NULL);
@@ -167,7 +167,7 @@ eigrp_hello_parameter_decode(struct eigrp_neighbor *nbr,
"Neighbor %s (%s) is down: Interface PEER-TERMINATION received",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_delete(nbr);
return NULL;
} else {
@@ -175,7 +175,7 @@ eigrp_hello_parameter_decode(struct eigrp_neighbor *nbr,
"Neighbor %s (%s) going down: Kvalue mismatch",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_state_set(nbr, EIGRP_NEIGHBOR_DOWN);
}
}
@@ -245,6 +245,7 @@ static void eigrp_sw_version_decode(struct eigrp_neighbor *nbr,
static void eigrp_peer_termination_decode(struct eigrp_neighbor *nbr,
struct eigrp_tlv_hdr_type *tlv)
{
+ struct eigrp *eigrp = nbr->ei->eigrp;
struct TLV_Peer_Termination_type *param =
(struct TLV_Peer_Termination_type *)tlv;
@@ -254,7 +255,7 @@ static void eigrp_peer_termination_decode(struct eigrp_neighbor *nbr,
if (my_ip == received_ip) {
zlog_info("Neighbor %s (%s) is down: Peer Termination received",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
/* delete neighbor */
@@ -330,7 +331,7 @@ void eigrp_hello_receive(struct eigrp *eigrp, struct ip *iph,
if (IS_DEBUG_EIGRP_PACKET(eigrph->opcode - 1, RECV))
zlog_debug("Processing Hello size[%u] int(%s) nbr(%s)", size,
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT),
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id),
inet_ntoa(nbr->src));
size -= EIGRP_HEADER_LEN;
@@ -484,21 +485,15 @@ static uint16_t eigrp_tidlist_encode(struct stream *s)
* Part of conditional receive process
*
*/
-static uint16_t eigrp_sequence_encode(struct stream *s)
+static uint16_t eigrp_sequence_encode(struct eigrp *eigrp, struct stream *s)
{
uint16_t length = EIGRP_TLV_SEQ_BASE_LEN;
- struct eigrp *eigrp;
struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
size_t backup_end, size_end;
int found;
- eigrp = eigrp_lookup();
- if (eigrp == NULL) {
- return 0;
- }
-
// add in the parameters TLV
backup_end = stream_get_endp(s);
stream_putw(s, EIGRP_TLV_SEQ);
@@ -541,15 +536,10 @@ static uint16_t eigrp_sequence_encode(struct stream *s)
* Part of conditional receive process
*
*/
-static uint16_t eigrp_next_sequence_encode(struct stream *s)
+static uint16_t eigrp_next_sequence_encode(struct eigrp *eigrp,
+ struct stream *s)
{
uint16_t length = EIGRP_NEXT_SEQUENCE_TLV_SIZE;
- struct eigrp *eigrp;
-
- eigrp = eigrp_lookup();
- if (eigrp == NULL) {
- return 0;
- }
// add in the parameters TLV
stream_putw(s, EIGRP_TLV_NEXT_MCAST_SEQ);
@@ -659,8 +649,8 @@ static struct eigrp_packet *eigrp_hello_encode(struct eigrp_interface *ei,
length += eigrp_sw_version_encode(ep->s);
if (flags & EIGRP_HELLO_ADD_SEQUENCE) {
- length += eigrp_sequence_encode(ep->s);
- length += eigrp_next_sequence_encode(ep->s);
+ length += eigrp_sequence_encode(ei->eigrp, ep->s);
+ length += eigrp_next_sequence_encode(ei->eigrp, ep->s);
}
// add in the TID list if doing multi-topology
diff --git a/eigrpd/eigrp_interface.c b/eigrpd/eigrp_interface.c
index c52a98ee25..fd1d3f5cb9 100644
--- a/eigrpd/eigrp_interface.c
+++ b/eigrpd/eigrp_interface.c
@@ -206,7 +206,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
eigrp_prefix_entry_add(eigrp->topology_table, pe);
listnode_add(eigrp->topology_changes_internalIPV4, pe);
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
for (ALL_LIST_ELEMENTS(eigrp->eiflist, node, nnode, ei2)) {
eigrp_update_send(ei2);
@@ -218,7 +218,7 @@ int eigrp_if_up(struct eigrp_interface *ei)
struct eigrp_fsm_action_message msg;
ne->prefix = pe;
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
msg.packet_type = EIGRP_OPC_UPDATE;
msg.eigrp = eigrp;
@@ -329,10 +329,7 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
{
struct prefix dest_addr;
struct eigrp_prefix_entry *pe;
- struct eigrp *eigrp = eigrp_lookup();
-
- if (!eigrp)
- return;
+ struct eigrp *eigrp = ei->eigrp;
if (source == INTERFACE_DOWN_BY_VTY) {
THREAD_OFF(ei->t_hello);
@@ -344,7 +341,7 @@ void eigrp_if_free(struct eigrp_interface *ei, int source)
pe = eigrp_topology_table_lookup_ipv4(eigrp->topology_table,
&dest_addr);
if (pe)
- eigrp_prefix_entry_delete(eigrp->topology_table, pe);
+ eigrp_prefix_entry_delete(eigrp, eigrp->topology_table, pe);
eigrp_if_down(ei);
diff --git a/eigrpd/eigrp_main.c b/eigrpd/eigrp_main.c
index 1781a88173..299825dd1b 100644
--- a/eigrpd/eigrp_main.c
+++ b/eigrpd/eigrp_main.c
@@ -65,6 +65,7 @@
#include "eigrpd/eigrp_snmp.h"
#include "eigrpd/eigrp_filter.h"
#include "eigrpd/eigrp_errors.h"
+#include "eigrpd/eigrp_vrf.h"
//#include "eigrpd/eigrp_routemap.h"
/* eigprd privileges */
@@ -182,6 +183,7 @@ int main(int argc, char **argv, char **envp)
master = eigrp_om->master;
eigrp_error_init();
+ eigrp_vrf_init();
vrf_init(NULL, NULL, NULL, NULL, NULL);
/*EIGRPd init*/
diff --git a/eigrpd/eigrp_neighbor.c b/eigrpd/eigrp_neighbor.c
index 66dd5f3419..2ae3997fae 100644
--- a/eigrpd/eigrp_neighbor.c
+++ b/eigrpd/eigrp_neighbor.c
@@ -194,13 +194,12 @@ void eigrp_nbr_delete(struct eigrp_neighbor *nbr)
int holddown_timer_expired(struct thread *thread)
{
- struct eigrp_neighbor *nbr;
-
- nbr = THREAD_ARG(thread);
+ struct eigrp_neighbor *nbr = THREAD_ARG(thread);
+ struct eigrp *eigrp = nbr->ei->eigrp;
zlog_info("Neighbor %s (%s) is down: holding time expired",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
nbr->state = EIGRP_NEIGHBOR_DOWN;
eigrp_nbr_delete(nbr);
@@ -297,19 +296,13 @@ void eigrp_nbr_state_update(struct eigrp_neighbor *nbr)
}
}
-int eigrp_nbr_count_get(void)
+int eigrp_nbr_count_get(struct eigrp *eigrp)
{
struct eigrp_interface *iface;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
- struct eigrp *eigrp = eigrp_lookup();
uint32_t counter;
- if (eigrp == NULL) {
- zlog_debug("EIGRP Routing Process not enabled");
- return 0;
- }
-
counter = 0;
for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, iface)) {
for (ALL_LIST_ELEMENTS(iface->nbrs, node2, nnode2, nbr)) {
@@ -335,20 +328,16 @@ int eigrp_nbr_count_get(void)
*/
void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty)
{
- if (nbr == NULL) {
- flog_err(EC_EIGRP_CONFIG,
- "Nbr Hard restart: Neighbor not specified.");
- return;
- }
+ struct eigrp *eigrp = nbr->ei->eigrp;
zlog_debug("Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
if (vty != NULL) {
vty_time_print(vty, 0);
vty_out(vty, "Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
}
/* send Hello with Peer Termination TLV */
diff --git a/eigrpd/eigrp_neighbor.h b/eigrpd/eigrp_neighbor.h
index 21a8c6a004..1c308fa981 100644
--- a/eigrpd/eigrp_neighbor.h
+++ b/eigrpd/eigrp_neighbor.h
@@ -33,24 +33,25 @@
#define _ZEBRA_EIGRP_NEIGHBOR_H
/* Prototypes */
-extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *,
- struct eigrp_header *, struct ip *);
-extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *);
-extern void eigrp_nbr_delete(struct eigrp_neighbor *);
+extern struct eigrp_neighbor *eigrp_nbr_get(struct eigrp_interface *ei,
+ struct eigrp_header *,
+ struct ip *addr);
+extern struct eigrp_neighbor *eigrp_nbr_new(struct eigrp_interface *ei);
+extern void eigrp_nbr_delete(struct eigrp_neighbor *neigh);
-extern int holddown_timer_expired(struct thread *);
+extern int holddown_timer_expired(struct thread *thread);
-extern int eigrp_neighborship_check(struct eigrp_neighbor *,
- struct TLV_Parameter_Type *);
-extern void eigrp_nbr_state_update(struct eigrp_neighbor *);
-extern void eigrp_nbr_state_set(struct eigrp_neighbor *, uint8_t state);
-extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *);
-extern int eigrp_nbr_count_get(void);
-extern const char *eigrp_nbr_state_str(struct eigrp_neighbor *);
-extern struct eigrp_neighbor *eigrp_nbr_lookup_by_addr(struct eigrp_interface *,
- struct in_addr *);
-extern struct eigrp_neighbor *eigrp_nbr_lookup_by_addr_process(struct eigrp *,
- struct in_addr);
+extern int eigrp_neighborship_check(struct eigrp_neighbor *neigh,
+ struct TLV_Parameter_Type *tlv);
+extern void eigrp_nbr_state_update(struct eigrp_neighbor *neigh);
+extern void eigrp_nbr_state_set(struct eigrp_neighbor *neigh, uint8_t state);
+extern uint8_t eigrp_nbr_state_get(struct eigrp_neighbor *neigh);
+extern int eigrp_nbr_count_get(struct eigrp *eigrp);
+extern const char *eigrp_nbr_state_str(struct eigrp_neighbor *neigh);
+extern struct eigrp_neighbor *
+eigrp_nbr_lookup_by_addr(struct eigrp_interface *ei, struct in_addr *addr);
+extern struct eigrp_neighbor *
+eigrp_nbr_lookup_by_addr_process(struct eigrp *eigrp, struct in_addr addr);
extern void eigrp_nbr_hard_restart(struct eigrp_neighbor *nbr, struct vty *vty);
extern int eigrp_nbr_split_horizon_check(struct eigrp_nexthop_entry *ne,
diff --git a/eigrpd/eigrp_network.c b/eigrpd/eigrp_network.c
index bbb9487b4d..c7ffbf9f0e 100644
--- a/eigrpd/eigrp_network.c
+++ b/eigrpd/eigrp_network.c
@@ -53,7 +53,7 @@ static int eigrp_network_match_iface(const struct prefix *connected_prefix,
static void eigrp_network_run_interface(struct eigrp *, struct prefix *,
struct interface *);
-int eigrp_sock_init(void)
+int eigrp_sock_init(struct vrf *vrf)
{
int eigrp_sock;
int ret;
@@ -61,8 +61,10 @@ int eigrp_sock_init(void)
int hincl = 1;
#endif
- frr_elevate_privs(&eigrpd_privs) {
- eigrp_sock = socket(AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP);
+ frr_with_privs(&eigrpd_privs) {
+ eigrp_sock = vrf_socket(
+ AF_INET, SOCK_RAW, IPPROTO_EIGRPIGP, vrf->vrf_id,
+ vrf->vrf_id != VRF_DEFAULT ? vrf->name : NULL);
if (eigrp_sock < 0) {
zlog_err("eigrp_read_sock_init: socket: %s",
safe_strerror(errno));
@@ -209,7 +211,7 @@ int eigrp_if_drop_allspfrouters(struct eigrp *top, struct prefix *p,
int eigrp_network_set(struct eigrp *eigrp, struct prefix *p)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf = vrf_lookup_by_id(eigrp->vrf_id);
struct route_node *rn;
struct interface *ifp;
@@ -290,6 +292,9 @@ void eigrp_if_update(struct interface *ifp)
* we need to check eac one and add the interface as approperate
*/
for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp)) {
+ if (ifp->vrf_id != eigrp->vrf_id)
+ continue;
+
/* EIGRP must be on and Router-ID must be configured. */
if (eigrp->router_id.s_addr == 0)
continue;
diff --git a/eigrpd/eigrp_network.h b/eigrpd/eigrp_network.h
index b3c76bbecc..7839fc946b 100644
--- a/eigrpd/eigrp_network.h
+++ b/eigrpd/eigrp_network.h
@@ -30,7 +30,7 @@
/* Prototypes */
-extern int eigrp_sock_init(void);
+extern int eigrp_sock_init(struct vrf *vrf);
extern int eigrp_if_ipmulticast(struct eigrp *, struct prefix *, unsigned int);
extern int eigrp_network_set(struct eigrp *eigrp, struct prefix *p);
extern int eigrp_network_unset(struct eigrp *eigrp, struct prefix *p);
diff --git a/eigrpd/eigrp_northbound.c b/eigrpd/eigrp_northbound.c
index 5f0c91809e..4ccce2ebb8 100644
--- a/eigrpd/eigrp_northbound.c
+++ b/eigrpd/eigrp_northbound.c
@@ -79,13 +79,18 @@ static int eigrpd_instance_create(enum nb_event event,
union nb_resource *resource)
{
struct eigrp *eigrp;
+ const char *vrf;
+ vrf_id_t vrfid;
switch (event) {
case NB_EV_VALIDATE:
/* NOTHING */
break;
case NB_EV_PREPARE:
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ vrf = yang_dnode_get_string(dnode, "./vrf");
+ vrfid = vrf_name_to_id(vrf);
+
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"), vrfid);
resource->ptr = eigrp;
break;
case NB_EV_ABORT:
@@ -745,14 +750,17 @@ static int eigrpd_instance_redistribute_create(enum nb_event event,
union nb_resource *resource)
{
struct eigrp_metrics metrics;
+ const char *vrfname;
struct eigrp *eigrp;
uint32_t proto;
+ vrf_id_t vrfid;
switch (event) {
case NB_EV_VALIDATE:
proto = yang_dnode_get_enum(dnode, "./protocol");
- if (vrf_bitmap_check(zclient->redist[AFI_IP][proto],
- VRF_DEFAULT))
+ vrfname = yang_dnode_get_string(dnode, "../vrf");
+ vrfid = vrf_name_to_id(vrfname);
+ if (vrf_bitmap_check(zclient->redist[AFI_IP][proto], vrfid))
return NB_ERR_INCONSISTENCY;
break;
case NB_EV_PREPARE:
@@ -1181,7 +1189,8 @@ static int lib_interface_eigrp_instance_create(enum nb_event event,
break;
}
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"),
+ ifp->vrf_id);
eif = eigrp_interface_lookup(eigrp, ifp->name);
if (eif == NULL)
return NB_ERR_INCONSISTENCY;
@@ -1192,7 +1201,8 @@ static int lib_interface_eigrp_instance_create(enum nb_event event,
break;
case NB_EV_APPLY:
ifp = nb_running_get_entry(dnode, NULL, true);
- eigrp = eigrp_get(yang_dnode_get_string(dnode, "./asn"));
+ eigrp = eigrp_get(yang_dnode_get_uint16(dnode, "./asn"),
+ ifp->vrf_id);
eif = eigrp_interface_lookup(eigrp, ifp->name);
if (eif == NULL)
return NB_ERR_INCONSISTENCY;
diff --git a/eigrpd/eigrp_packet.c b/eigrpd/eigrp_packet.c
index 4efb91e4a0..ba8271d46e 100644
--- a/eigrpd/eigrp_packet.c
+++ b/eigrpd/eigrp_packet.c
@@ -77,11 +77,13 @@ const struct message eigrp_packet_type_str[] = {
static unsigned char zeropad[16] = {0};
/* Forward function reference*/
-static struct stream *eigrp_recv_packet(int, struct interface **,
- struct stream *);
-static int eigrp_verify_header(struct stream *, struct eigrp_interface *,
- struct ip *, struct eigrp_header *);
-static int eigrp_check_network_mask(struct eigrp_interface *, struct in_addr);
+static struct stream *eigrp_recv_packet(struct eigrp *eigrp, int fd,
+ struct interface **ifp,
+ struct stream *s);
+static int eigrp_verify_header(struct stream *s, struct eigrp_interface *ei,
+ struct ip *addr, struct eigrp_header *header);
+static int eigrp_check_network_mask(struct eigrp_interface *ei,
+ struct in_addr mask);
static int eigrp_retrans_count_exceeded(struct eigrp_packet *ep,
struct eigrp_neighbor *nbr)
@@ -495,7 +497,7 @@ int eigrp_read(struct thread *thread)
thread_add_read(master, eigrp_read, eigrp, eigrp->fd, &eigrp->t_read);
stream_reset(eigrp->ibuf);
- if (!(ibuf = eigrp_recv_packet(eigrp->fd, &ifp, eigrp->ibuf))) {
+ if (!(ibuf = eigrp_recv_packet(eigrp, eigrp->fd, &ifp, eigrp->ibuf))) {
/* This raw packet is known to be at least as big as its IP
* header. */
return -1;
@@ -525,7 +527,7 @@ int eigrp_read(struct thread *thread)
ifindex
retrieval but do not. */
c = if_lookup_address((void *)&iph->ip_src, AF_INET,
- VRF_DEFAULT);
+ eigrp->vrf_id);
if (c == NULL)
return 0;
@@ -706,7 +708,8 @@ int eigrp_read(struct thread *thread)
return 0;
}
-static struct stream *eigrp_recv_packet(int fd, struct interface **ifp,
+static struct stream *eigrp_recv_packet(struct eigrp *eigrp,
+ int fd, struct interface **ifp,
struct stream *ibuf)
{
int ret;
@@ -774,7 +777,7 @@ static struct stream *eigrp_recv_packet(int fd, struct interface **ifp,
ifindex = getsockopt_ifindex(AF_INET, &msgh);
- *ifp = if_lookup_by_index(ifindex, VRF_DEFAULT);
+ *ifp = if_lookup_by_index(ifindex, eigrp->vrf_id);
if (ret != ip_len) {
zlog_warn(
diff --git a/eigrpd/eigrp_routemap.c b/eigrpd/eigrp_routemap.c
index bac7494774..d78588644f 100644
--- a/eigrpd/eigrp_routemap.c
+++ b/eigrpd/eigrp_routemap.c
@@ -135,7 +135,8 @@ void eigrp_rmap_update(const char *notused)
static int eigrp_route_match_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
+
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
case RMAP_RULE_MISSING:
@@ -147,6 +148,10 @@ static int eigrp_route_match_add(struct vty *vty, struct route_map_index *index,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Intentionally not handling these cases
+ */
break;
}
@@ -158,7 +163,8 @@ static int eigrp_route_match_delete(struct vty *vty,
struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
+
ret = route_map_delete_match(index, command, arg);
switch (ret) {
case RMAP_RULE_MISSING:
@@ -170,6 +176,10 @@ static int eigrp_route_match_delete(struct vty *vty,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally ignored
+ */
break;
}
@@ -180,7 +190,7 @@ static int eigrp_route_match_delete(struct vty *vty,
static int eigrp_route_set_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_set(index, command, arg);
switch (ret) {
@@ -201,6 +211,10 @@ static int eigrp_route_set_add(struct vty *vty, struct route_map_index *index,
}
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally left blank here
+ */
break;
}
@@ -212,7 +226,7 @@ static int eigrp_route_set_delete(struct vty *vty,
struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_set(index, command, arg);
switch (ret) {
@@ -225,6 +239,10 @@ static int eigrp_route_set_delete(struct vty *vty,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * These cases intentionally not handled
+ */
break;
}
diff --git a/eigrpd/eigrp_structs.h b/eigrpd/eigrp_structs.h
index 1b9186f011..e50858f071 100644
--- a/eigrpd/eigrp_structs.h
+++ b/eigrpd/eigrp_structs.h
@@ -69,6 +69,8 @@ struct eigrp_metrics {
};
struct eigrp {
+ vrf_id_t vrf_id;
+
uint16_t AS; /* Autonomous system number */
uint16_t vrid; /* Virtual Router ID */
uint8_t k_values[6]; /*Array for K values configuration*/
@@ -85,7 +87,7 @@ struct eigrp {
struct list *eiflist; /* eigrp interfaces */
uint8_t passive_interface_default; /* passive-interface default */
- unsigned int fd;
+ int fd;
unsigned int maxsndbuflen;
uint32_t sequence_number; /*Global EIGRP sequence number*/
diff --git a/eigrpd/eigrp_topology.c b/eigrpd/eigrp_topology.c
index e861cdb333..9cc612eaf1 100644
--- a/eigrpd/eigrp_topology.c
+++ b/eigrpd/eigrp_topology.c
@@ -117,9 +117,9 @@ struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void)
/*
* Freeing topology table list
*/
-void eigrp_topology_free(struct route_table *table)
+void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table)
{
- eigrp_topology_delete_all(table);
+ eigrp_topology_delete_all(eigrp, table);
route_table_finish(table);
}
@@ -150,7 +150,8 @@ void eigrp_prefix_entry_add(struct route_table *topology,
/*
* Adding topology entry to topology node
*/
-void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
+void eigrp_nexthop_entry_add(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *node,
struct eigrp_nexthop_entry *entry)
{
struct list *l = list_new();
@@ -161,7 +162,8 @@ void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
listnode_add_sort(node->entries, entry);
entry->prefix = node;
- eigrp_zebra_route_add(node->destination, l, node->fdistance);
+ eigrp_zebra_route_add(eigrp, node->destination,
+ l, node->fdistance);
}
list_delete(&l);
@@ -170,10 +172,9 @@ void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *node,
/*
* Deleting topology node from topology table
*/
-void eigrp_prefix_entry_delete(struct route_table *table,
+void eigrp_prefix_entry_delete(struct eigrp *eigrp, struct route_table *table,
struct eigrp_prefix_entry *pe)
{
- struct eigrp *eigrp = eigrp_lookup();
struct eigrp_nexthop_entry *ne;
struct listnode *node, *nnode;
struct route_node *rn;
@@ -192,10 +193,10 @@ void eigrp_prefix_entry_delete(struct route_table *table,
listnode_delete(eigrp->topology_changes_internalIPV4, pe);
for (ALL_LIST_ELEMENTS(pe->entries, node, nnode, ne))
- eigrp_nexthop_entry_delete(pe, ne);
+ eigrp_nexthop_entry_delete(eigrp, pe, ne);
list_delete(&pe->entries);
list_delete(&pe->rij);
- eigrp_zebra_route_delete(pe->destination);
+ eigrp_zebra_route_delete(eigrp, pe->destination);
prefix_free(pe->destination);
rn->info = NULL;
@@ -207,12 +208,13 @@ void eigrp_prefix_entry_delete(struct route_table *table,
/*
* Deleting topology entry from topology node
*/
-void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *node,
+void eigrp_nexthop_entry_delete(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *node,
struct eigrp_nexthop_entry *entry)
{
if (listnode_lookup(node->entries, entry) != NULL) {
listnode_delete(node->entries, entry);
- eigrp_zebra_route_delete(node->destination);
+ eigrp_zebra_route_delete(eigrp, node->destination);
XFREE(MTYPE_EIGRP_NEXTHOP_ENTRY, entry);
}
}
@@ -220,7 +222,8 @@ void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *node,
/*
* Deleting all nodes from topology table
*/
-void eigrp_topology_delete_all(struct route_table *topology)
+void eigrp_topology_delete_all(struct eigrp *eigrp,
+ struct route_table *topology)
{
struct route_node *rn;
struct eigrp_prefix_entry *pe;
@@ -231,7 +234,7 @@ void eigrp_topology_delete_all(struct route_table *topology)
if (!pe)
continue;
- eigrp_prefix_entry_delete(topology, pe);
+ eigrp_prefix_entry_delete(eigrp, topology, pe);
}
}
@@ -426,17 +429,15 @@ void eigrp_topology_update_all_node_flags(struct eigrp *eigrp)
if (!pe)
continue;
- eigrp_topology_update_node_flags(pe);
+ eigrp_topology_update_node_flags(eigrp, pe);
}
}
-void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *dest)
+void eigrp_topology_update_node_flags(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *dest)
{
struct listnode *node;
struct eigrp_nexthop_entry *entry;
- struct eigrp *eigrp = eigrp_lookup();
-
- assert(eigrp);
for (ALL_LIST_ELEMENTS_RO(dest->entries, node, entry)) {
if (entry->reported_distance < dest->fdistance) {
@@ -464,27 +465,24 @@ void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *dest)
}
}
-void eigrp_update_routing_table(struct eigrp_prefix_entry *prefix)
+void eigrp_update_routing_table(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *prefix)
{
- struct eigrp *eigrp = eigrp_lookup();
struct list *successors;
struct listnode *node;
struct eigrp_nexthop_entry *entry;
- if (!eigrp)
- return;
-
successors = eigrp_topology_get_successor_max(prefix, eigrp->max_paths);
if (successors) {
- eigrp_zebra_route_add(prefix->destination, successors,
+ eigrp_zebra_route_add(eigrp, prefix->destination, successors,
prefix->fdistance);
for (ALL_LIST_ELEMENTS_RO(successors, node, entry))
entry->flags |= EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG;
list_delete(&successors);
} else {
- eigrp_zebra_route_delete(prefix->destination);
+ eigrp_zebra_route_delete(eigrp, prefix->destination);
for (ALL_LIST_ELEMENTS_RO(prefix->entries, node, entry))
entry->flags &= ~EIGRP_NEXTHOP_ENTRY_INTABLE_FLAG;
}
@@ -525,7 +523,8 @@ void eigrp_topology_neighbor_down(struct eigrp *eigrp,
eigrp_update_send_all(eigrp, nbr->ei);
}
-void eigrp_update_topology_table_prefix(struct route_table *table,
+void eigrp_update_topology_table_prefix(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *prefix)
{
struct listnode *node1, *node2;
@@ -533,11 +532,11 @@ void eigrp_update_topology_table_prefix(struct route_table *table,
struct eigrp_nexthop_entry *entry;
for (ALL_LIST_ELEMENTS(prefix->entries, node1, node2, entry)) {
if (entry->distance == EIGRP_MAX_METRIC) {
- eigrp_nexthop_entry_delete(prefix, entry);
+ eigrp_nexthop_entry_delete(eigrp, prefix, entry);
}
}
if (prefix->distance == EIGRP_MAX_METRIC
&& prefix->nt != EIGRP_TOPOLOGY_TYPE_CONNECTED) {
- eigrp_prefix_entry_delete(table, prefix);
+ eigrp_prefix_entry_delete(eigrp, table, prefix);
}
}
diff --git a/eigrpd/eigrp_topology.h b/eigrpd/eigrp_topology.h
index 16bf2261cc..718cece403 100644
--- a/eigrpd/eigrp_topology.h
+++ b/eigrpd/eigrp_topology.h
@@ -37,34 +37,41 @@ extern struct route_table *eigrp_topology_new(void);
extern void eigrp_topology_init(struct route_table *table);
extern struct eigrp_prefix_entry *eigrp_prefix_entry_new(void);
extern struct eigrp_nexthop_entry *eigrp_nexthop_entry_new(void);
-extern void eigrp_topology_free(struct route_table *table);
+extern void eigrp_topology_free(struct eigrp *eigrp, struct route_table *table);
extern void eigrp_prefix_entry_add(struct route_table *table,
struct eigrp_prefix_entry *pe);
-extern void eigrp_nexthop_entry_add(struct eigrp_prefix_entry *,
- struct eigrp_nexthop_entry *);
-extern void eigrp_prefix_entry_delete(struct route_table *table,
+extern void eigrp_nexthop_entry_add(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe,
+ struct eigrp_nexthop_entry *ne);
+extern void eigrp_prefix_entry_delete(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *pe);
-extern void eigrp_nexthop_entry_delete(struct eigrp_prefix_entry *,
- struct eigrp_nexthop_entry *);
-extern void eigrp_topology_delete_all(struct route_table *table);
+extern void eigrp_nexthop_entry_delete(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe,
+ struct eigrp_nexthop_entry *ne);
+extern void eigrp_topology_delete_all(struct eigrp *eigrp,
+ struct route_table *table);
extern struct eigrp_prefix_entry *
eigrp_topology_table_lookup_ipv4(struct route_table *table, struct prefix *p);
-extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *);
+extern struct list *eigrp_topology_get_successor(struct eigrp_prefix_entry *pe);
extern struct list *
eigrp_topology_get_successor_max(struct eigrp_prefix_entry *pe,
unsigned int maxpaths);
extern struct eigrp_nexthop_entry *
-eigrp_prefix_entry_lookup(struct list *, struct eigrp_neighbor *);
-extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *,
- struct eigrp_neighbor *);
-extern void eigrp_topology_update_all_node_flags(struct eigrp *);
-extern void eigrp_topology_update_node_flags(struct eigrp_prefix_entry *);
+eigrp_prefix_entry_lookup(struct list *entries, struct eigrp_neighbor *neigh);
+extern struct list *eigrp_neighbor_prefixes_lookup(struct eigrp *eigrp,
+ struct eigrp_neighbor *n);
+extern void eigrp_topology_update_all_node_flags(struct eigrp *eigrp);
+extern void eigrp_topology_update_node_flags(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe);
extern enum metric_change
-eigrp_topology_update_distance(struct eigrp_fsm_action_message *);
-extern void eigrp_update_routing_table(struct eigrp_prefix_entry *);
-extern void eigrp_topology_neighbor_down(struct eigrp *,
- struct eigrp_neighbor *);
-extern void eigrp_update_topology_table_prefix(struct route_table *table,
+eigrp_topology_update_distance(struct eigrp_fsm_action_message *msg);
+extern void eigrp_update_routing_table(struct eigrp *eigrp,
+ struct eigrp_prefix_entry *pe);
+extern void eigrp_topology_neighbor_down(struct eigrp *eigrp,
+ struct eigrp_neighbor *neigh);
+extern void eigrp_update_topology_table_prefix(struct eigrp *eigrp,
+ struct route_table *table,
struct eigrp_prefix_entry *pe);
#endif
diff --git a/eigrpd/eigrp_update.c b/eigrpd/eigrp_update.c
index 8db4903077..6e2a81e32a 100644
--- a/eigrpd/eigrp_update.c
+++ b/eigrpd/eigrp_update.c
@@ -211,7 +211,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_debug(
"Processing Update size[%u] int(%s) nbr(%s) seq [%u] flags [%0x]",
size,
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT),
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id),
inet_ntoa(nbr->src), nbr->recv_sequence_number, flags);
@@ -221,7 +221,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_info("Neighbor %s (%s) is resync: peer graceful-restart",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* get all prefixes from neighbor from topology table */
nbr_prefixes = eigrp_neighbor_prefixes_lookup(eigrp, nbr);
@@ -233,7 +233,7 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_info("Neighbor %s (%s) is resync: peer graceful-restart",
inet_ntoa(nbr->src),
- ifindex2ifname(nbr->ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(nbr->ei->ifp->ifindex, eigrp->vrf_id));
/* get all prefixes from neighbor from topology table */
nbr_prefixes = eigrp_neighbor_prefixes_lookup(eigrp, nbr);
@@ -282,12 +282,12 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
zlog_info("Neighbor %s (%s) is down: peer restarted",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_nbr_state_set(nbr, EIGRP_NEIGHBOR_PENDING);
zlog_info("Neighbor %s (%s) is pending: new adjacency",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
eigrp_update_send_init(nbr);
}
}
@@ -366,11 +366,11 @@ void eigrp_update_receive(struct eigrp *eigrp, struct ip *iph,
eigrp_prefix_entry_add(eigrp->topology_table,
pe);
- eigrp_nexthop_entry_add(pe, ne);
+ eigrp_nexthop_entry_add(eigrp, pe, ne);
pe->distance = pe->fdistance = pe->rdistance =
ne->distance;
pe->reported_metric = ne->total_metric;
- eigrp_topology_update_node_flags(pe);
+ eigrp_topology_update_node_flags(eigrp, pe);
pe->req_action |= EIGRP_FSM_NEED_UPDATE;
listnode_add(
@@ -965,19 +965,20 @@ void eigrp_update_send_GR(struct eigrp_neighbor *nbr, enum GR_type gr_type,
zlog_info(
"Neighbor %s (%s) is resync: route configuration changed",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex, eigrp->vrf_id));
} else if (gr_type == EIGRP_GR_MANUAL) {
/* Graceful restart was called manually */
zlog_info("Neighbor %s (%s) is resync: manually cleared",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex, eigrp->vrf_id));
if (vty != NULL) {
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is resync: manually cleared\n",
inet_ntoa(nbr->src),
- ifindex2ifname(ei->ifp->ifindex, VRF_DEFAULT));
+ ifindex2ifname(ei->ifp->ifindex,
+ eigrp->vrf_id));
}
}
diff --git a/eigrpd/eigrp_vrf.c b/eigrpd/eigrp_vrf.c
new file mode 100644
index 0000000000..c8c8491056
--- /dev/null
+++ b/eigrpd/eigrp_vrf.c
@@ -0,0 +1,50 @@
+/*
+ * eigrp - vrf code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <zebra.h>
+
+#include "vrf.h"
+
+#include "eigrpd/eigrp_vrf.h"
+
+static int eigrp_vrf_new(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_enable(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_disable(struct vrf *vrf)
+{
+ return 0;
+}
+
+static int eigrp_vrf_delete(struct vrf *vrf)
+{
+ return 0;
+}
+
+void eigrp_vrf_init(void)
+{
+ vrf_init(eigrp_vrf_new, eigrp_vrf_enable,
+ eigrp_vrf_disable, eigrp_vrf_delete, NULL);
+}
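
The four callbacks above are intentionally empty stubs for now; registering them just lets the lib/vrf layer tell eigrpd about VRF create/enable/disable/delete events. As a rough sketch (illustrative only, not part of the diff — the real call site is elsewhere in this commit and not shown here), a daemon registers the hooks once during startup:

	#include "eigrpd/eigrp_vrf.h"

	/* early in daemon initialization, before configuration is read */
	eigrp_vrf_init();
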
diff --git a/eigrpd/eigrp_vrf.h b/eigrpd/eigrp_vrf.h
new file mode 100644
index 0000000000..423a4be551
--- /dev/null
+++ b/eigrpd/eigrp_vrf.h
@@ -0,0 +1,24 @@
+/*
+ * eigrp - vrf code
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ * Donald Sharp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __EIGRP_VRF_H__
+#define __EIGRP_VRF_H__
+
+extern void eigrp_vrf_init(void);
+#endif
diff --git a/eigrpd/eigrp_vty.c b/eigrpd/eigrp_vty.c
index 207622e659..e4a499425b 100644
--- a/eigrpd/eigrp_vty.c
+++ b/eigrpd/eigrp_vty.c
@@ -83,12 +83,31 @@ static void eigrp_vty_display_prefix_entry(struct vty *vty,
}
}
+static struct eigrp *eigrp_vty_get_eigrp(struct vty *vty, const char *vrf_name)
+{
+ struct vrf *vrf;
+
+ if (vrf_name)
+ vrf = vrf_lookup_by_name(vrf_name);
+ else
+ vrf = vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (!vrf) {
+ vty_out(vty, "VRF %s specified does not exist",
+ vrf_name ? vrf_name : VRF_DEFAULT_NAME);
+ return NULL;
+ }
+
+ return eigrp_lookup(vrf->vrf_id);
+}
+
DEFPY (show_ip_eigrp_topology_all,
show_ip_eigrp_topology_all_cmd,
- "show ip eigrp topology [all-links$all]",
+ "show ip eigrp [vrf NAME] topology [all-links$all]",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP topology\n"
"Show all links in topology table\n")
{
@@ -96,7 +115,7 @@ DEFPY (show_ip_eigrp_topology_all,
struct eigrp_prefix_entry *tn;
struct route_node *rn;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -119,10 +138,11 @@ DEFPY (show_ip_eigrp_topology_all,
DEFPY (show_ip_eigrp_topology,
show_ip_eigrp_topology_cmd,
- "show ip eigrp topology <A.B.C.D$address|A.B.C.D/M$prefix>",
+ "show ip eigrp [vrf NAME] topology <A.B.C.D$address|A.B.C.D/M$prefix>",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP topology\n"
"For a specific address\n"
"For a specific prefix\n")
@@ -132,7 +152,7 @@ DEFPY (show_ip_eigrp_topology,
struct route_node *rn;
struct prefix cmp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -167,12 +187,13 @@ DEFPY (show_ip_eigrp_topology,
return CMD_SUCCESS;
}
-DEFUN (show_ip_eigrp_interfaces,
+DEFPY (show_ip_eigrp_interfaces,
show_ip_eigrp_interfaces_cmd,
- "show ip eigrp interfaces [IFNAME] [detail]",
+ "show ip eigrp [vrf NAME] interfaces [IFNAME] [detail]$detail",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP interfaces\n"
"Interface name to look at\n"
"Detailed information\n")
@@ -180,22 +201,13 @@ DEFUN (show_ip_eigrp_interfaces,
struct eigrp_interface *ei;
struct eigrp *eigrp;
struct listnode *node;
- int idx = 0;
- bool detail = false;
- const char *ifname = NULL;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, "EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
- if (argv_find(argv, argc, "IFNAME", &idx))
- ifname = argv[idx]->arg;
-
- if (argv_find(argv, argc, "detail", &idx))
- detail = true;
-
if (!ifname)
show_ip_eigrp_interface_header(vty, eigrp);
@@ -210,12 +222,13 @@ DEFUN (show_ip_eigrp_interfaces,
return CMD_SUCCESS;
}
-DEFUN (show_ip_eigrp_neighbors,
+DEFPY (show_ip_eigrp_neighbors,
show_ip_eigrp_neighbors_cmd,
- "show ip eigrp neighbors [IFNAME] [detail]",
+ "show ip eigrp [vrf NAME] neighbors [IFNAME] [detail]$detail",
SHOW_STR
IP_STR
"IP-EIGRP show commands\n"
+ VRF_CMD_HELP_STR
"IP-EIGRP neighbors\n"
"Interface to show on\n"
"Detailed Information\n")
@@ -224,21 +237,13 @@ DEFUN (show_ip_eigrp_neighbors,
struct eigrp_interface *ei;
struct listnode *node, *node2, *nnode2;
struct eigrp_neighbor *nbr;
- bool detail = false;
- int idx = 0;
- const char *ifname = NULL;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
- if (argv_find(argv, argc, "IFNAME", &idx))
- ifname = argv[idx]->arg;
-
- detail = (argv_find(argv, argc, "detail", &idx));
-
show_ip_eigrp_neighbor_header(vty, eigrp);
for (ALL_LIST_ELEMENTS_RO(eigrp->eiflist, node, ei)) {
@@ -246,7 +251,7 @@ DEFUN (show_ip_eigrp_neighbors,
for (ALL_LIST_ELEMENTS(ei->nbrs, node2, nnode2, nbr)) {
if (detail || (nbr->state == EIGRP_NEIGHBOR_UP))
show_ip_eigrp_neighbor_sub(vty, nbr,
- detail);
+ !!detail);
}
}
}
@@ -257,12 +262,13 @@ DEFUN (show_ip_eigrp_neighbors,
/*
* Execute hard restart for all neighbors
*/
-DEFUN (clear_ip_eigrp_neighbors,
+DEFPY (clear_ip_eigrp_neighbors,
clear_ip_eigrp_neighbors_cmd,
- "clear ip eigrp neighbors",
+ "clear ip eigrp [vrf NAME] neighbors",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n")
{
struct eigrp *eigrp;
@@ -271,7 +277,7 @@ DEFUN (clear_ip_eigrp_neighbors,
struct eigrp_neighbor *nbr;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -289,13 +295,13 @@ DEFUN (clear_ip_eigrp_neighbors,
"Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
@@ -311,12 +317,13 @@ DEFUN (clear_ip_eigrp_neighbors,
/*
* Execute hard restart for all neighbors on interface
*/
-DEFUN (clear_ip_eigrp_neighbors_int,
+DEFPY (clear_ip_eigrp_neighbors_int,
clear_ip_eigrp_neighbors_int_cmd,
- "clear ip eigrp neighbors IFNAME",
+ "clear ip eigrp [vrf NAME] neighbors IFNAME",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Interface's name\n")
{
@@ -324,20 +331,18 @@ DEFUN (clear_ip_eigrp_neighbors_int,
struct eigrp_interface *ei;
struct listnode *node2, *nnode2;
struct eigrp_neighbor *nbr;
- int idx = 0;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
/* lookup interface by specified name */
- argv_find(argv, argc, "IFNAME", &idx);
- ei = eigrp_if_lookup_by_name(eigrp, argv[idx]->arg);
+ ei = eigrp_if_lookup_by_name(eigrp, ifname);
if (ei == NULL) {
- vty_out(vty, " Interface (%s) doesn't exist\n", argv[idx]->arg);
+ vty_out(vty, " Interface (%s) doesn't exist\n", ifname);
return CMD_WARNING;
}
@@ -350,13 +355,13 @@ DEFUN (clear_ip_eigrp_neighbors_int,
zlog_debug("Neighbor %s (%s) is down: manually cleared",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
vty_time_print(vty, 0);
vty_out(vty,
"Neighbor %s (%s) is down: manually cleared\n",
inet_ntoa(nbr->src),
ifindex2ifname(nbr->ei->ifp->ifindex,
- VRF_DEFAULT));
+ eigrp->vrf_id));
/* set neighbor to DOWN */
nbr->state = EIGRP_NEIGHBOR_DOWN;
@@ -371,26 +376,21 @@ DEFUN (clear_ip_eigrp_neighbors_int,
/*
* Execute hard restart for neighbor specified by IP
*/
-DEFUN (clear_ip_eigrp_neighbors_IP,
+DEFPY (clear_ip_eigrp_neighbors_IP,
clear_ip_eigrp_neighbors_IP_cmd,
- "clear ip eigrp neighbors A.B.C.D",
+ "clear ip eigrp [vrf NAME] neighbors A.B.C.D$nbr_addr",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"IP-EIGRP neighbor address\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
- struct in_addr nbr_addr;
-
- if (!inet_aton(argv[4]->arg, &nbr_addr)) {
- vty_out(vty, "Unable to parse %s", argv[4]->arg);
- return CMD_WARNING;
- }
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -414,19 +414,20 @@ DEFUN (clear_ip_eigrp_neighbors_IP,
/*
* Execute graceful restart for all neighbors
*/
-DEFUN (clear_ip_eigrp_neighbors_soft,
+DEFPY (clear_ip_eigrp_neighbors_soft,
clear_ip_eigrp_neighbors_soft_cmd,
- "clear ip eigrp neighbors soft",
+ "clear ip eigrp [vrf NAME] neighbors soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Resync with peers without adjacency reset\n")
{
struct eigrp *eigrp;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
@@ -441,12 +442,13 @@ DEFUN (clear_ip_eigrp_neighbors_soft,
/*
* Execute graceful restart for all neighbors on interface
*/
-DEFUN (clear_ip_eigrp_neighbors_int_soft,
+DEFPY (clear_ip_eigrp_neighbors_int_soft,
clear_ip_eigrp_neighbors_int_soft_cmd,
- "clear ip eigrp neighbors IFNAME soft",
+ "clear ip eigrp [vrf NAME] neighbors IFNAME soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"Interface's name\n"
"Resync with peer without adjacency reset\n")
@@ -455,14 +457,14 @@ DEFUN (clear_ip_eigrp_neighbors_int_soft,
struct eigrp_interface *ei;
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
}
/* lookup interface by specified name */
- ei = eigrp_if_lookup_by_name(eigrp, argv[4]->arg);
+ ei = eigrp_if_lookup_by_name(eigrp, ifname);
if (ei == NULL) {
vty_out(vty, " Interface (%s) doesn't exist\n", argv[4]->arg);
return CMD_WARNING;
@@ -476,27 +478,23 @@ DEFUN (clear_ip_eigrp_neighbors_int_soft,
/*
* Execute graceful restart for neighbor specified by IP
*/
-DEFUN (clear_ip_eigrp_neighbors_IP_soft,
+DEFPY (clear_ip_eigrp_neighbors_IP_soft,
clear_ip_eigrp_neighbors_IP_soft_cmd,
- "clear ip eigrp neighbors A.B.C.D soft",
+ "clear ip eigrp [vrf NAME] neighbors A.B.C.D$nbr_addr soft",
CLEAR_STR
IP_STR
"Clear IP-EIGRP\n"
+ VRF_CMD_HELP_STR
"Clear IP-EIGRP neighbors\n"
"IP-EIGRP neighbor address\n"
"Resync with peer without adjacency reset\n")
{
struct eigrp *eigrp;
struct eigrp_neighbor *nbr;
- struct in_addr nbr_addr;
- if (!inet_aton(argv[4]->arg, &nbr_addr)) {
- vty_out(vty, "Unable to parse: %s", argv[4]->arg);
- return CMD_WARNING;
- }
/* Check if eigrp process is enabled */
- eigrp = eigrp_lookup();
+ eigrp = eigrp_vty_get_eigrp(vty, vrf);
if (eigrp == NULL) {
vty_out(vty, " EIGRP Routing Process not enabled\n");
return CMD_SUCCESS;
diff --git a/eigrpd/eigrp_zebra.c b/eigrpd/eigrp_zebra.c
index 0a74e86263..63cbe84ef2 100644
--- a/eigrpd/eigrp_zebra.c
+++ b/eigrpd/eigrp_zebra.c
@@ -59,7 +59,8 @@ static int eigrp_interface_address_add(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_address_delete(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_state_up(ZAPI_CALLBACK_ARGS);
static int eigrp_interface_state_down(ZAPI_CALLBACK_ARGS);
-static struct interface *zebra_interface_if_lookup(struct stream *);
+static struct interface *zebra_interface_if_lookup(struct stream *,
+ vrf_id_t vrf_id);
static int eigrp_zebra_read_route(ZAPI_CALLBACK_ARGS);
@@ -79,7 +80,7 @@ static int eigrp_router_id_update_zebra(ZAPI_CALLBACK_ARGS)
router_id_zebra = router_id.u.prefix4;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp != NULL)
eigrp_router_id_update(eigrp);
@@ -137,7 +138,7 @@ static int eigrp_zebra_read_route(ZAPI_CALLBACK_ARGS)
if (IPV4_NET127(ntohl(api.prefix.u.prefix4.s_addr)))
return 0;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp == NULL)
return 0;
@@ -257,7 +258,7 @@ static int eigrp_interface_state_up(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp;
- ifp = zebra_interface_if_lookup(zclient->ibuf);
+ ifp = zebra_interface_if_lookup(zclient->ibuf, vrf_id);
if (ifp == NULL)
return 0;
@@ -328,7 +329,8 @@ static int eigrp_interface_state_down(ZAPI_CALLBACK_ARGS)
return 0;
}
-static struct interface *zebra_interface_if_lookup(struct stream *s)
+static struct interface *zebra_interface_if_lookup(struct stream *s,
+ vrf_id_t vrf_id)
{
char ifname_tmp[INTERFACE_NAMSIZ];
@@ -336,11 +338,11 @@ static struct interface *zebra_interface_if_lookup(struct stream *s)
stream_get(ifname_tmp, s, INTERFACE_NAMSIZ);
/* And look it up. */
- return if_lookup_by_name(ifname_tmp, VRF_DEFAULT);
+ return if_lookup_by_name(ifname_tmp, vrf_id);
}
-void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
- uint32_t distance)
+void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p,
+ struct list *successors, uint32_t distance)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
@@ -352,7 +354,7 @@ void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
return;
memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
+ api.vrf_id = eigrp->vrf_id;
api.type = ZEBRA_ROUTE_EIGRP;
api.safi = SAFI_UNICAST;
api.metric = distance;
@@ -366,7 +368,7 @@ void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
if (count >= MULTIPATH_NUM)
break;
api_nh = &api.nexthops[count];
- api_nh->vrf_id = VRF_DEFAULT;
+ api_nh->vrf_id = eigrp->vrf_id;
if (te->adv_router->src.s_addr) {
api_nh->gate.ipv4 = te->adv_router->src;
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
@@ -388,7 +390,7 @@ void eigrp_zebra_route_add(struct prefix *p, struct list *successors,
zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
}
-void eigrp_zebra_route_delete(struct prefix *p)
+void eigrp_zebra_route_delete(struct eigrp *eigrp, struct prefix *p)
{
struct zapi_route api;
@@ -396,7 +398,7 @@ void eigrp_zebra_route_delete(struct prefix *p)
return;
memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
+ api.vrf_id = eigrp->vrf_id;
api.type = ZEBRA_ROUTE_EIGRP;
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, p, sizeof(*p));
@@ -411,20 +413,20 @@ void eigrp_zebra_route_delete(struct prefix *p)
return;
}
-int eigrp_is_type_redistributed(int type)
+static int eigrp_is_type_redistributed(int type, vrf_id_t vrf_id)
{
return ((DEFAULT_ROUTE_TYPE(type))
? vrf_bitmap_check(zclient->default_information[AFI_IP],
- VRF_DEFAULT)
+ vrf_id)
: vrf_bitmap_check(zclient->redist[AFI_IP][type],
- VRF_DEFAULT));
+ vrf_id));
}
int eigrp_redistribute_set(struct eigrp *eigrp, int type,
struct eigrp_metrics metric)
{
- if (eigrp_is_type_redistributed(type)) {
+ if (eigrp_is_type_redistributed(type, eigrp->vrf_id)) {
if (eigrp_metrics_is_same(metric, eigrp->dmetric[type])) {
eigrp->dmetric[type] = metric;
}
@@ -443,7 +445,7 @@ int eigrp_redistribute_set(struct eigrp *eigrp, int type,
eigrp->dmetric[type] = metric;
zclient_redistribute(ZEBRA_REDISTRIBUTE_ADD, zclient, AFI_IP, type, 0,
- VRF_DEFAULT);
+ eigrp->vrf_id);
++eigrp->redistribute;
@@ -453,10 +455,10 @@ int eigrp_redistribute_set(struct eigrp *eigrp, int type,
int eigrp_redistribute_unset(struct eigrp *eigrp, int type)
{
- if (eigrp_is_type_redistributed(type)) {
+ if (eigrp_is_type_redistributed(type, eigrp->vrf_id)) {
memset(&eigrp->dmetric[type], 0, sizeof(struct eigrp_metrics));
zclient_redistribute(ZEBRA_REDISTRIBUTE_DELETE, zclient, AFI_IP,
- type, 0, VRF_DEFAULT);
+ type, 0, eigrp->vrf_id);
--eigrp->redistribute;
}
diff --git a/eigrpd/eigrp_zebra.h b/eigrpd/eigrp_zebra.h
index 86b337cfe6..112f2584ce 100644
--- a/eigrpd/eigrp_zebra.h
+++ b/eigrpd/eigrp_zebra.h
@@ -33,11 +33,10 @@
extern void eigrp_zebra_init(void);
-extern void eigrp_zebra_route_add(struct prefix *, struct list *,
- uint32_t distance);
-extern void eigrp_zebra_route_delete(struct prefix *);
+extern void eigrp_zebra_route_add(struct eigrp *eigrp, struct prefix *p,
+ struct list *successors, uint32_t distance);
+extern void eigrp_zebra_route_delete(struct eigrp *eigrp, struct prefix *);
extern int eigrp_redistribute_set(struct eigrp *, int, struct eigrp_metrics);
extern int eigrp_redistribute_unset(struct eigrp *, int);
-extern int eigrp_is_type_redistributed(int);
#endif /* _ZEBRA_EIGRP_ZEBRA_H_ */
diff --git a/eigrpd/eigrpd.c b/eigrpd/eigrpd.c
index 93f8b6f90e..e93dc0cbfa 100644
--- a/eigrpd/eigrpd.c
+++ b/eigrpd/eigrpd.c
@@ -64,8 +64,6 @@ static struct eigrp_master eigrp_master;
struct eigrp_master *eigrp_om;
-static struct eigrp *eigrp_new(const char *);
-
extern struct zclient *zclient;
extern struct in_addr router_id_zebra;
@@ -95,7 +93,7 @@ extern struct in_addr router_id_zebra;
*/
void eigrp_router_id_update(struct eigrp *eigrp)
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
+ struct vrf *vrf = vrf_lookup_by_id(eigrp->vrf_id);
struct interface *ifp;
struct in_addr router_id, router_id_old;
@@ -136,14 +134,14 @@ void eigrp_master_init(void)
}
/* Allocate new eigrp structure. */
-static struct eigrp *eigrp_new(const char *AS)
+static struct eigrp *eigrp_new(uint16_t as, vrf_id_t vrf_id)
{
struct eigrp *eigrp = XCALLOC(MTYPE_EIGRP_TOP, sizeof(struct eigrp));
- int eigrp_socket;
/* init information relevant to peers */
+ eigrp->vrf_id = vrf_id;
eigrp->vrid = 0;
- eigrp->AS = atoi(AS);
+ eigrp->AS = as;
eigrp->router_id.s_addr = 0;
eigrp->router_id_static.s_addr = 0;
eigrp->sequence_number = 1;
@@ -161,14 +159,15 @@ static struct eigrp *eigrp_new(const char *AS)
eigrp->passive_interface_default = EIGRP_IF_ACTIVE;
eigrp->networks = eigrp_topology_new();
- if ((eigrp_socket = eigrp_sock_init()) < 0) {
+ eigrp->fd = eigrp_sock_init(vrf_lookup_by_id(vrf_id));
+
+ if (eigrp->fd < 0) {
flog_err_sys(
EC_LIB_SOCKET,
"eigrp_new: fatal error: eigrp_sock_init was unable to open a socket");
exit(1);
}
- eigrp->fd = eigrp_socket;
eigrp->maxsndbuflen = getsockopt_so_sendbuf(eigrp->fd);
eigrp->ibuf = stream_new(EIGRP_PACKET_MAX_LEN + 1);
@@ -200,16 +199,15 @@ static struct eigrp *eigrp_new(const char *AS)
eigrp->routemap[EIGRP_FILTER_OUT] = NULL;
/* Distribute list install. */
- eigrp->distribute_ctx = distribute_list_ctx_create(
- vrf_lookup_by_id(VRF_DEFAULT));
+ eigrp->distribute_ctx =
+ distribute_list_ctx_create(vrf_lookup_by_id(eigrp->vrf_id));
distribute_list_add_hook(eigrp->distribute_ctx,
eigrp_distribute_update);
distribute_list_delete_hook(eigrp->distribute_ctx,
eigrp_distribute_update);
/*
- eigrp->if_rmap_ctx = if_rmap_ctx_create(
- VRF_DEFAULT_NAME);
+ eigrp->if_rmap_ctx = if_rmap_ctx_create(eigrp->vrf_id);
if_rmap_hook_add (eigrp_if_rmap_update);
if_rmap_hook_delete (eigrp_if_rmap_update);
*/
@@ -217,13 +215,13 @@ static struct eigrp *eigrp_new(const char *AS)
return eigrp;
}
-struct eigrp *eigrp_get(const char *AS)
+struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id)
{
struct eigrp *eigrp;
- eigrp = eigrp_lookup();
+ eigrp = eigrp_lookup(vrf_id);
if (eigrp == NULL) {
- eigrp = eigrp_new(AS);
+ eigrp = eigrp_new(as, vrf_id);
listnode_add(eigrp_om->eigrp, eigrp);
}
@@ -285,7 +283,7 @@ void eigrp_finish_final(struct eigrp *eigrp)
list_delete(&eigrp->eiflist);
list_delete(&eigrp->oi_write_q);
- eigrp_topology_free(eigrp->topology_table);
+ eigrp_topology_free(eigrp, eigrp->topology_table);
eigrp_nbr_delete(eigrp->neighbor_self);
@@ -300,10 +298,14 @@ void eigrp_finish_final(struct eigrp *eigrp)
}
/*Look for existing eigrp process*/
-struct eigrp *eigrp_lookup(void)
+struct eigrp *eigrp_lookup(vrf_id_t vrf_id)
{
- if (listcount(eigrp_om->eigrp) == 0)
- return NULL;
+ struct eigrp *eigrp;
+ struct listnode *node, *nnode;
+
+ for (ALL_LIST_ELEMENTS(eigrp_om->eigrp, node, nnode, eigrp))
+ if (eigrp->vrf_id == vrf_id)
+ return eigrp;
- return listgetdata(listhead(eigrp_om->eigrp));
+ return NULL;
}
diff --git a/eigrpd/eigrpd.h b/eigrpd/eigrpd.h
index 3ef3a9c0cc..6b4d45d1fc 100644
--- a/eigrpd/eigrpd.h
+++ b/eigrpd/eigrpd.h
@@ -48,8 +48,8 @@ extern void eigrp_master_init(void);
extern void eigrp_terminate(void);
extern void eigrp_finish_final(struct eigrp *);
extern void eigrp_finish(struct eigrp *);
-extern struct eigrp *eigrp_get(const char *);
-extern struct eigrp *eigrp_lookup(void);
+extern struct eigrp *eigrp_get(uint16_t as, vrf_id_t vrf_id);
+extern struct eigrp *eigrp_lookup(vrf_id_t vrf_id);
extern void eigrp_router_id_update(struct eigrp *);
/* eigrp_cli.c */
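
A brief usage sketch of the reworked API (illustrative, not from the commit; `vrf` and `as` stand in for values parsed from the CLI): show/clear paths look the instance up read-only, while the configuration path creates it on demand for the given VRF.

	/* show/clear paths: read-only lookup, may return NULL */
	struct eigrp *eigrp = eigrp_lookup(vrf->vrf_id);
	if (!eigrp)
		vty_out(vty, " EIGRP Routing Process not enabled\n");

	/* config path: returns the existing instance for this VRF or
	 * allocates a new one bound to it */
	eigrp = eigrp_get(as, vrf->vrf_id);
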
diff --git a/eigrpd/subdir.am b/eigrpd/subdir.am
index cc46766586..5a65c654e9 100644
--- a/eigrpd/subdir.am
+++ b/eigrpd/subdir.am
@@ -35,6 +35,7 @@ eigrpd_libeigrp_a_SOURCES = \
eigrpd/eigrp_snmp.c \
eigrpd/eigrp_topology.c \
eigrpd/eigrp_update.c \
+ eigrpd/eigrp_vrf.c \
eigrpd/eigrp_vty.c \
eigrpd/eigrp_zebra.c \
eigrpd/eigrpd.c \
@@ -66,6 +67,7 @@ noinst_HEADERS += \
eigrpd/eigrp_packet.h \
eigrpd/eigrp_snmp.h \
eigrpd/eigrp_structs.h \
+ eigrpd/eigrp_vrf.h \
eigrpd/eigrp_vty.h \
eigrpd/eigrp_zebra.h \
# end
diff --git a/isisd/isis_bpf.c b/isisd/isis_bpf.c
index 4e9aef47ad..d6b85b2fa3 100644
--- a/isisd/isis_bpf.c
+++ b/isisd/isis_bpf.c
@@ -187,7 +187,7 @@ int isis_sock_init(struct isis_circuit *circuit)
{
int retval = ISIS_OK;
- frr_elevate_privs(&isisd_privs) {
+ frr_with_privs(&isisd_privs) {
retval = open_bpf_dev(circuit);
diff --git a/isisd/isis_dlpi.c b/isisd/isis_dlpi.c
index a96dd93804..7d3dfcb01e 100644
--- a/isisd/isis_dlpi.c
+++ b/isisd/isis_dlpi.c
@@ -467,7 +467,7 @@ int isis_sock_init(struct isis_circuit *circuit)
{
int retval = ISIS_OK;
- frr_elevate_privs(&isisd_privs) {
+ frr_with_privs(&isisd_privs) {
retval = open_dlpi_dev(circuit);
diff --git a/isisd/isis_memory.c b/isisd/isis_memory.c
index 7d1ad6b049..2725459767 100644
--- a/isisd/isis_memory.c
+++ b/isisd/isis_memory.c
@@ -39,7 +39,6 @@ DEFINE_MTYPE(ISISD, ISIS_SPFTREE, "ISIS SPFtree")
DEFINE_MTYPE(ISISD, ISIS_VERTEX, "ISIS vertex")
DEFINE_MTYPE(ISISD, ISIS_ROUTE_INFO, "ISIS route info")
DEFINE_MTYPE(ISISD, ISIS_NEXTHOP, "ISIS nexthop")
-DEFINE_MTYPE(ISISD, ISIS_NEXTHOP6, "ISIS nexthop6")
DEFINE_MTYPE(ISISD, ISIS_DICT, "ISIS dictionary")
DEFINE_MTYPE(ISISD, ISIS_DICT_NODE, "ISIS dictionary node")
DEFINE_MTYPE(ISISD, ISIS_EXT_ROUTE, "ISIS redistributed route")
diff --git a/isisd/isis_memory.h b/isisd/isis_memory.h
index 4078c7a671..e672340e84 100644
--- a/isisd/isis_memory.h
+++ b/isisd/isis_memory.h
@@ -38,7 +38,6 @@ DECLARE_MTYPE(ISIS_SPFTREE)
DECLARE_MTYPE(ISIS_VERTEX)
DECLARE_MTYPE(ISIS_ROUTE_INFO)
DECLARE_MTYPE(ISIS_NEXTHOP)
-DECLARE_MTYPE(ISIS_NEXTHOP6)
DECLARE_MTYPE(ISIS_DICT)
DECLARE_MTYPE(ISIS_DICT_NODE)
DECLARE_MTYPE(ISIS_EXT_ROUTE)
diff --git a/isisd/isis_northbound.c b/isisd/isis_northbound.c
index 0982a468a6..c1b630eb2d 100644
--- a/isisd/isis_northbound.c
+++ b/isisd/isis_northbound.c
@@ -1524,29 +1524,64 @@ static int lib_interface_isis_create(enum nb_event event,
struct interface *ifp;
struct isis_circuit *circuit;
const char *area_tag = yang_dnode_get_string(dnode, "./area-tag");
+ uint32_t min_mtu, actual_mtu;
- if (event != NB_EV_APPLY)
- return NB_OK;
+ switch (event) {
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_VALIDATE:
+ /* check if interface mtu is sufficient. If the area has not
+ * been created yet, assume default MTU for the area
+ */
+ ifp = nb_running_get_entry(dnode, NULL, true);
+ /* zebra might not know yet about the MTU - nothing we can do */
+ if (ifp->mtu == 0)
+ break;
+ actual_mtu =
+ if_is_broadcast(ifp) ? ifp->mtu - LLC_LEN : ifp->mtu;
+ area = isis_area_lookup(area_tag);
+ if (area)
+ min_mtu = area->lsp_mtu;
+ else
+#ifndef FABRICD
+ min_mtu = yang_get_default_uint16(
+ "/frr-isisd:isis/instance/lsp/mtu");
+#else
+ min_mtu = DEFAULT_LSP_MTU;
+#endif /* ifndef FABRICD */
+ if (actual_mtu < min_mtu) {
+ flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE,
+ "Interface %s has MTU %" PRIu32
+ ", minimum MTU for the area is %" PRIu32 "",
+ ifp->name, actual_mtu, min_mtu);
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_APPLY:
+ area = isis_area_lookup(area_tag);
+ /* The area should have already be created. We are
+ * setting the priority of the global isis area creation
+ * slightly lower, so it should be executed first, but I
+ * cannot rely on that so here I have to check.
+ */
+ if (!area) {
+ flog_err(
+ EC_LIB_NB_CB_CONFIG_APPLY,
+ "%s: attempt to create circuit for area %s before the area has been created",
+ __func__, area_tag);
+ abort();
+ }
- area = isis_area_lookup(area_tag);
- /* The area should have already be created. We are
- * setting the priority of the global isis area creation
- * slightly lower, so it should be executed first, but I
- * cannot rely on that so here I have to check.
- */
- if (!area) {
- flog_err(
- EC_LIB_NB_CB_CONFIG_APPLY,
- "%s: attempt to create circuit for area %s before the area has been created",
- __func__, area_tag);
- abort();
+ ifp = nb_running_get_entry(dnode, NULL, true);
+ circuit = isis_circuit_create(area, ifp);
+ assert(circuit
+ && (circuit->state == C_STATE_CONF
+ || circuit->state == C_STATE_UP));
+ nb_running_set_entry(dnode, circuit);
+ break;
}
- ifp = nb_running_get_entry(dnode, NULL, true);
- circuit = isis_circuit_create(area, ifp);
- assert(circuit->state == C_STATE_CONF || circuit->state == C_STATE_UP);
- nb_running_set_entry(dnode, circuit);
-
return NB_OK;
}
@@ -1561,21 +1596,8 @@ static int lib_interface_isis_destroy(enum nb_event event,
circuit = nb_running_unset_entry(dnode);
if (!circuit)
return NB_ERR_INCONSISTENCY;
- /* delete circuit through csm changes */
- switch (circuit->state) {
- case C_STATE_UP:
- isis_csm_state_change(IF_DOWN_FROM_Z, circuit,
- circuit->interface);
- isis_csm_state_change(ISIS_DISABLE, circuit, circuit->area);
- break;
- case C_STATE_CONF:
+ if (circuit->state == C_STATE_UP || circuit->state == C_STATE_CONF)
isis_csm_state_change(ISIS_DISABLE, circuit, circuit->area);
- break;
- case C_STATE_INIT:
- isis_csm_state_change(IF_DOWN_FROM_Z, circuit,
- circuit->interface);
- break;
- }
return NB_OK;
}
diff --git a/isisd/isis_pfpacket.c b/isisd/isis_pfpacket.c
index ea66e6950e..69ac3fc555 100644
--- a/isisd/isis_pfpacket.c
+++ b/isisd/isis_pfpacket.c
@@ -183,7 +183,7 @@ int isis_sock_init(struct isis_circuit *circuit)
{
int retval = ISIS_OK;
- frr_elevate_privs(&isisd_privs) {
+ frr_with_privs(&isisd_privs) {
retval = open_packet_socket(circuit);
diff --git a/isisd/isis_route.c b/isisd/isis_route.c
index 281eaf11bc..636a63e290 100644
--- a/isisd/isis_route.c
+++ b/isisd/isis_route.c
@@ -28,6 +28,7 @@
#include "linklist.h"
#include "vty.h"
#include "log.h"
+#include "lib_errors.h"
#include "memory.h"
#include "prefix.h"
#include "hash.h"
@@ -48,26 +49,25 @@
#include "isis_route.h"
#include "isis_zebra.h"
-static struct isis_nexthop *isis_nexthop_create(struct in_addr *ip,
+static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
+ union g_addr *ip, ifindex_t ifindex);
+
+static struct isis_nexthop *isis_nexthop_create(int family, union g_addr *ip,
ifindex_t ifindex)
{
- struct listnode *node;
struct isis_nexthop *nexthop;
- for (ALL_LIST_ELEMENTS_RO(isis->nexthops, node, nexthop)) {
- if (nexthop->ifindex != ifindex)
- continue;
- if (ip && memcmp(&nexthop->ip, ip, sizeof(struct in_addr)) != 0)
- continue;
-
+ nexthop = nexthoplookup(isis->nexthops, family, ip, ifindex);
+ if (nexthop) {
nexthop->lock++;
return nexthop;
}
nexthop = XCALLOC(MTYPE_ISIS_NEXTHOP, sizeof(struct isis_nexthop));
+ nexthop->family = family;
nexthop->ifindex = ifindex;
- memcpy(&nexthop->ip, ip, sizeof(struct in_addr));
+ nexthop->ip = *ip;
listnode_add(isis->nexthops, nexthop);
nexthop->lock++;
@@ -85,116 +85,79 @@ static void isis_nexthop_delete(struct isis_nexthop *nexthop)
return;
}
-static int nexthoplookup(struct list *nexthops, struct in_addr *ip,
- ifindex_t ifindex)
+static struct isis_nexthop *nexthoplookup(struct list *nexthops, int family,
+ union g_addr *ip, ifindex_t ifindex)
{
struct listnode *node;
struct isis_nexthop *nh;
for (ALL_LIST_ELEMENTS_RO(nexthops, node, nh)) {
- if (!(memcmp(ip, &nh->ip, sizeof(struct in_addr)))
- && ifindex == nh->ifindex)
- return 1;
- }
-
- return 0;
-}
-
-static struct isis_nexthop6 *isis_nexthop6_new(struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct isis_nexthop6 *nexthop6;
-
- nexthop6 = XCALLOC(MTYPE_ISIS_NEXTHOP6, sizeof(struct isis_nexthop6));
-
- nexthop6->ifindex = ifindex;
- memcpy(&nexthop6->ip6, ip6, sizeof(struct in6_addr));
- nexthop6->lock++;
-
- return nexthop6;
-}
-
-static struct isis_nexthop6 *isis_nexthop6_create(struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct listnode *node;
- struct isis_nexthop6 *nexthop6;
-
- for (ALL_LIST_ELEMENTS_RO(isis->nexthops6, node, nexthop6)) {
- if (nexthop6->ifindex != ifindex)
+ if (nh->family != family)
continue;
- if (ip6
- && memcmp(&nexthop6->ip6, ip6, sizeof(struct in6_addr))
- != 0)
+ if (nh->ifindex != ifindex)
continue;
- nexthop6->lock++;
- return nexthop6;
- }
-
- nexthop6 = isis_nexthop6_new(ip6, ifindex);
-
- return nexthop6;
-}
-
-static void isis_nexthop6_delete(struct isis_nexthop6 *nexthop6)
-{
-
- nexthop6->lock--;
- if (nexthop6->lock == 0) {
- listnode_delete(isis->nexthops6, nexthop6);
- XFREE(MTYPE_ISIS_NEXTHOP6, nexthop6);
- }
-
- return;
-}
-
-static int nexthop6lookup(struct list *nexthops6, struct in6_addr *ip6,
- ifindex_t ifindex)
-{
- struct listnode *node;
- struct isis_nexthop6 *nh6;
+ switch (family) {
+ case AF_INET:
+ if (IPV4_ADDR_CMP(&nh->ip.ipv4, &ip->ipv4))
+ continue;
+ break;
+ case AF_INET6:
+ if (IPV6_ADDR_CMP(&nh->ip.ipv6, &ip->ipv6))
+ continue;
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ family);
+ exit(1);
+ }
- for (ALL_LIST_ELEMENTS_RO(nexthops6, node, nh6)) {
- if (!(memcmp(ip6, &nh6->ip6, sizeof(struct in6_addr)))
- && ifindex == nh6->ifindex)
- return 1;
+ return nh;
}
- return 0;
+ return NULL;
}
-static void adjinfo2nexthop(struct list *nexthops, struct isis_adjacency *adj)
+static void adjinfo2nexthop(int family, struct list *nexthops,
+ struct isis_adjacency *adj)
{
struct isis_nexthop *nh;
-
- for (unsigned int i = 0; i < adj->ipv4_address_count; i++) {
- struct in_addr *ipv4_addr = &adj->ipv4_addresses[i];
- if (!nexthoplookup(nexthops, ipv4_addr,
- adj->circuit->interface->ifindex)) {
- nh = isis_nexthop_create(
- ipv4_addr, adj->circuit->interface->ifindex);
- nh->router_address = adj->router_address;
- listnode_add(nexthops, nh);
- return;
+ union g_addr ip = {};
+
+ switch (family) {
+ case AF_INET:
+ for (unsigned int i = 0; i < adj->ipv4_address_count; i++) {
+ ip.ipv4 = adj->ipv4_addresses[i];
+
+ if (!nexthoplookup(nexthops, AF_INET, &ip,
+ adj->circuit->interface->ifindex)) {
+ nh = isis_nexthop_create(
+ AF_INET, &ip,
+ adj->circuit->interface->ifindex);
+ listnode_add(nexthops, nh);
+ break;
+ }
}
- }
-}
-
-static void adjinfo2nexthop6(struct list *nexthops6, struct isis_adjacency *adj)
-{
- struct isis_nexthop6 *nh6;
-
- for (unsigned int i = 0; i < adj->ipv6_address_count; i++) {
- struct in6_addr *ipv6_addr = &adj->ipv6_addresses[i];
- if (!nexthop6lookup(nexthops6, ipv6_addr,
- adj->circuit->interface->ifindex)) {
- nh6 = isis_nexthop6_create(
- ipv6_addr, adj->circuit->interface->ifindex);
- nh6->router_address6 = adj->router_address6;
- listnode_add(nexthops6, nh6);
- return;
+ break;
+ case AF_INET6:
+ for (unsigned int i = 0; i < adj->ipv6_address_count; i++) {
+ ip.ipv6 = adj->ipv6_addresses[i];
+
+ if (!nexthoplookup(nexthops, AF_INET6, &ip,
+ adj->circuit->interface->ifindex)) {
+ nh = isis_nexthop_create(
+ AF_INET6, &ip,
+ adj->circuit->interface->ifindex);
+ listnode_add(nexthops, nh);
+ break;
+ }
}
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT, "%s: unknown address family [%d]",
+ __func__, family);
+ exit(1);
}
}
@@ -210,35 +173,32 @@ static struct isis_route_info *isis_route_info_new(struct prefix *prefix,
rinfo = XCALLOC(MTYPE_ISIS_ROUTE_INFO, sizeof(struct isis_route_info));
- if (prefix->family == AF_INET) {
- rinfo->nexthops = list_new();
- for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
- /* check for force resync this route */
- if (CHECK_FLAG(adj->circuit->flags,
- ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
- SET_FLAG(rinfo->flag,
- ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
- /* update neighbor router address */
+ rinfo->nexthops = list_new();
+ for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
+ /* check for force resync this route */
+ if (CHECK_FLAG(adj->circuit->flags,
+ ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
+ SET_FLAG(rinfo->flag, ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
+
+ /* update neighbor router address */
+ switch (prefix->family) {
+ case AF_INET:
if (depth == 2 && prefix->prefixlen == 32)
adj->router_address = prefix->u.prefix4;
- adjinfo2nexthop(rinfo->nexthops, adj);
- }
- }
- if (prefix->family == AF_INET6) {
- rinfo->nexthops6 = list_new();
- for (ALL_LIST_ELEMENTS_RO(adjacencies, node, adj)) {
- /* check for force resync this route */
- if (CHECK_FLAG(adj->circuit->flags,
- ISIS_CIRCUIT_FLAPPED_AFTER_SPF))
- SET_FLAG(rinfo->flag,
- ISIS_ROUTE_FLAG_ZEBRA_RESYNC);
- /* update neighbor router address */
+ break;
+ case AF_INET6:
if (depth == 2 && prefix->prefixlen == 128
&& (!src_p || !src_p->prefixlen)) {
adj->router_address6 = prefix->u.prefix6;
}
- adjinfo2nexthop6(rinfo->nexthops6, adj);
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ prefix->family);
+ exit(1);
}
+ adjinfo2nexthop(prefix->family, rinfo->nexthops, adj);
}
rinfo->cost = cost;
@@ -255,12 +215,6 @@ static void isis_route_info_delete(struct isis_route_info *route_info)
list_delete(&route_info->nexthops);
}
- if (route_info->nexthops6) {
- route_info->nexthops6->del =
- (void (*)(void *))isis_nexthop6_delete;
- list_delete(&route_info->nexthops6);
- }
-
XFREE(MTYPE_ISIS_ROUTE_INFO, route_info);
}
@@ -280,7 +234,6 @@ static int isis_route_info_same(struct isis_route_info *new,
{
struct listnode *node;
struct isis_nexthop *nexthop;
- struct isis_nexthop6 *nexthop6;
if (!CHECK_FLAG(old->flag, ISIS_ROUTE_FLAG_ZEBRA_SYNCED))
return 0;
@@ -291,31 +244,15 @@ static int isis_route_info_same(struct isis_route_info *new,
if (!isis_route_info_same_attrib(new, old))
return 0;
- if (family == AF_INET) {
- for (ALL_LIST_ELEMENTS_RO(new->nexthops, node, nexthop))
- if (nexthoplookup(old->nexthops, &nexthop->ip,
- nexthop->ifindex)
- == 0)
- return 0;
-
- for (ALL_LIST_ELEMENTS_RO(old->nexthops, node, nexthop))
- if (nexthoplookup(new->nexthops, &nexthop->ip,
- nexthop->ifindex)
- == 0)
- return 0;
- } else if (family == AF_INET6) {
- for (ALL_LIST_ELEMENTS_RO(new->nexthops6, node, nexthop6))
- if (nexthop6lookup(old->nexthops6, &nexthop6->ip6,
- nexthop6->ifindex)
- == 0)
- return 0;
-
- for (ALL_LIST_ELEMENTS_RO(old->nexthops6, node, nexthop6))
- if (nexthop6lookup(new->nexthops6, &nexthop6->ip6,
- nexthop6->ifindex)
- == 0)
- return 0;
- }
+ for (ALL_LIST_ELEMENTS_RO(new->nexthops, node, nexthop))
+ if (!nexthoplookup(old->nexthops, nexthop->family, &nexthop->ip,
+ nexthop->ifindex))
+ return 0;
+
+ for (ALL_LIST_ELEMENTS_RO(old->nexthops, node, nexthop))
+ if (!nexthoplookup(new->nexthops, nexthop->family, &nexthop->ip,
+ nexthop->ifindex))
+ return 0;
return 1;
}
diff --git a/isisd/isis_route.h b/isisd/isis_route.h
index 9d6858586b..a20a7e038f 100644
--- a/isisd/isis_route.h
+++ b/isisd/isis_route.h
@@ -25,17 +25,12 @@
#ifndef _ZEBRA_ISIS_ROUTE_H
#define _ZEBRA_ISIS_ROUTE_H
-struct isis_nexthop6 {
- ifindex_t ifindex;
- struct in6_addr ip6;
- struct in6_addr router_address6;
- unsigned int lock;
-};
+#include "lib/nexthop.h"
struct isis_nexthop {
ifindex_t ifindex;
- struct in_addr ip;
- struct in_addr router_address;
+ int family;
+ union g_addr ip;
unsigned int lock;
};
@@ -47,7 +42,6 @@ struct isis_route_info {
uint32_t cost;
uint32_t depth;
struct list *nexthops;
- struct list *nexthops6;
};
struct isis_route_info *isis_route_create(struct prefix *prefix,
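
With the separate isis_nexthop6 type gone, both address families share one constructor keyed by family plus a union g_addr; an illustrative fragment (the address and `ifindex` are placeholders, not values from the commit):

	union g_addr ip = {};

	inet_pton(AF_INET6, "fe80::1", &ip.ipv6);

	/* re-uses (and re-locks) an entry found via nexthoplookup(),
	 * otherwise allocates a fresh one on isis->nexthops */
	struct isis_nexthop *nh = isis_nexthop_create(AF_INET6, &ip, ifindex);
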
diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c
index 488dfedae4..ee253c7a31 100644
--- a/isisd/isis_tlvs.c
+++ b/isisd/isis_tlvs.c
@@ -22,7 +22,9 @@
*/
#include <zebra.h>
+#ifdef CRYPTO_INTERNAL
#include "md5.h"
+#endif
#include "memory.h"
#include "stream.h"
#include "sbuf.h"
@@ -2770,8 +2772,16 @@ static void update_auth_hmac_md5(struct isis_auth *auth, struct stream *s,
safe_auth_md5(s, &checksum, &rem_lifetime);
memset(STREAM_DATA(s) + auth->offset, 0, 16);
+#ifdef CRYPTO_OPENSSL
+ uint8_t *result = (uint8_t *)HMAC(EVP_md5(), auth->passwd,
+ auth->plength, STREAM_DATA(s),
+ stream_get_endp(s), NULL, NULL);
+
+ memcpy(digest, result, 16);
+#elif CRYPTO_INTERNAL
hmac_md5(STREAM_DATA(s), stream_get_endp(s), auth->passwd,
auth->plength, digest);
+#endif
memcpy(auth->value, digest, 16);
memcpy(STREAM_DATA(s) + auth->offset, digest, 16);
@@ -3310,8 +3320,16 @@ static bool auth_validator_hmac_md5(struct isis_passwd *passwd,
safe_auth_md5(stream, &checksum, &rem_lifetime);
memset(STREAM_DATA(stream) + auth->offset, 0, 16);
+#ifdef CRYPTO_OPENSSL
+ uint8_t *result = (uint8_t *)HMAC(EVP_md5(), passwd->passwd,
+ passwd->len, STREAM_DATA(stream),
+ stream_get_endp(stream), NULL, NULL);
+
+ memcpy(digest, result, 16);
+#elif CRYPTO_INTERNAL
hmac_md5(STREAM_DATA(stream), stream_get_endp(stream), passwd->passwd,
passwd->len, digest);
+#endif
memcpy(STREAM_DATA(stream) + auth->offset, auth->value, 16);
bool rv = !memcmp(digest, auth->value, 16);
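
Both hunks use OpenSSL's one-shot HMAC() helper when CRYPTO_OPENSSL is defined; a standalone sketch of the same computation (`passwd`, `passwd_len`, `pdu` and `pdu_len` are placeholder names):

	#include <openssl/evp.h>
	#include <openssl/hmac.h>

	uint8_t digest[16];

	/* HMAC-MD5 keyed with the circuit password; passing NULL for the
	 * output buffer and length is fine, MD5 digests are always 16 bytes */
	uint8_t *result = (uint8_t *)HMAC(EVP_md5(), passwd, passwd_len,
					  pdu, pdu_len, NULL, NULL);
	memcpy(digest, result, 16);
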
diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c
index e2ef934696..e8481a558b 100644
--- a/isisd/isis_zebra.c
+++ b/isisd/isis_zebra.c
@@ -27,6 +27,7 @@
#include "command.h"
#include "memory.h"
#include "log.h"
+#include "lib_errors.h"
#include "if.h"
#include "network.h"
#include "prefix.h"
@@ -225,7 +226,6 @@ static void isis_zebra_route_add_route(struct prefix *prefix,
struct zapi_route api;
struct zapi_nexthop *api_nh;
struct isis_nexthop *nexthop;
- struct isis_nexthop6 *nexthop6;
struct listnode *node;
int count = 0;
@@ -250,47 +250,41 @@ static void isis_zebra_route_add_route(struct prefix *prefix,
#endif
/* Nexthops */
- switch (prefix->family) {
- case AF_INET:
- for (ALL_LIST_ELEMENTS_RO(route_info->nexthops, node,
- nexthop)) {
- if (count >= MULTIPATH_NUM)
- break;
- api_nh = &api.nexthops[count];
- if (fabricd)
- api_nh->onlink = true;
- api_nh->vrf_id = VRF_DEFAULT;
+ for (ALL_LIST_ELEMENTS_RO(route_info->nexthops, node, nexthop)) {
+ if (count >= MULTIPATH_NUM)
+ break;
+ api_nh = &api.nexthops[count];
+ if (fabricd)
+ api_nh->onlink = true;
+ api_nh->vrf_id = VRF_DEFAULT;
+
+ switch (nexthop->family) {
+ case AF_INET:
/* FIXME: can it be ? */
- if (nexthop->ip.s_addr != INADDR_ANY) {
+ if (nexthop->ip.ipv4.s_addr != INADDR_ANY) {
api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- api_nh->gate.ipv4 = nexthop->ip;
+ api_nh->gate.ipv4 = nexthop->ip.ipv4;
} else {
api_nh->type = NEXTHOP_TYPE_IFINDEX;
}
- api_nh->ifindex = nexthop->ifindex;
- count++;
- }
- break;
- case AF_INET6:
- for (ALL_LIST_ELEMENTS_RO(route_info->nexthops6, node,
- nexthop6)) {
- if (count >= MULTIPATH_NUM)
- break;
- if (!IN6_IS_ADDR_LINKLOCAL(&nexthop6->ip6)
- && !IN6_IS_ADDR_UNSPECIFIED(&nexthop6->ip6)) {
+ break;
+ case AF_INET6:
+ if (!IN6_IS_ADDR_LINKLOCAL(&nexthop->ip.ipv6)
+ && !IN6_IS_ADDR_UNSPECIFIED(&nexthop->ip.ipv6)) {
continue;
}
-
- api_nh = &api.nexthops[count];
- if (fabricd)
- api_nh->onlink = true;
- api_nh->vrf_id = VRF_DEFAULT;
- api_nh->gate.ipv6 = nexthop6->ip6;
- api_nh->ifindex = nexthop6->ifindex;
+ api_nh->gate.ipv6 = nexthop->ip.ipv6;
api_nh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
- count++;
+ break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown address family [%d]", __func__,
+ nexthop->family);
+ exit(1);
}
- break;
+
+ api_nh->ifindex = nexthop->ifindex;
+ count++;
}
if (!count)
return;
diff --git a/isisd/isisd.c b/isisd/isisd.c
index bee3b6deb5..67f557ab50 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -88,7 +88,6 @@ void isis_new(unsigned long process_id)
isis->init_circ_list = list_new();
isis->uptime = time(NULL);
isis->nexthops = list_new();
- isis->nexthops6 = list_new();
dyn_cache_init();
/*
* uncomment the next line for full debugs
diff --git a/isisd/isisd.h b/isisd/isisd.h
index f8486ae0d6..393b1d67c7 100644
--- a/isisd/isisd.h
+++ b/isisd/isisd.h
@@ -69,8 +69,7 @@ struct isis {
uint32_t router_id; /* Router ID from zebra */
struct list *area_list; /* list of IS-IS areas */
struct list *init_circ_list;
- struct list *nexthops; /* IPv4 next hops from this IS */
- struct list *nexthops6; /* IPv6 next hops from this IS */
+ struct list *nexthops; /* IP next hops from this IS */
uint8_t max_area_addrs; /* maximumAreaAdresses */
struct area_addr *man_area_addrs; /* manualAreaAddresses */
uint32_t debugs; /* bitmap for debug */
diff --git a/ldpd/l2vpn.c b/ldpd/l2vpn.c
index 7f2e396a7f..b234e3ebe3 100644
--- a/ldpd/l2vpn.c
+++ b/ldpd/l2vpn.c
@@ -249,7 +249,7 @@ l2vpn_pw_init(struct l2vpn_pw *pw)
l2vpn_pw_fec(pw, &fec);
lde_kernel_insert(&fec, AF_INET, (union ldpd_addr*)&pw->lsr_id, 0, 0,
- 0, (void *)pw);
+ 0, 0, (void *)pw);
lde_kernel_update(&fec);
}
@@ -260,7 +260,7 @@ l2vpn_pw_exit(struct l2vpn_pw *pw)
struct zapi_pw zpw;
l2vpn_pw_fec(pw, &fec);
- lde_kernel_remove(&fec, AF_INET, (union ldpd_addr*)&pw->lsr_id, 0, 0);
+ lde_kernel_remove(&fec, AF_INET, (union ldpd_addr*)&pw->lsr_id, 0, 0, 0);
lde_kernel_update(&fec);
pw2zpw(pw, &zpw);
@@ -433,7 +433,7 @@ l2vpn_recv_pw_status(struct lde_nbr *ln, struct notify_msg *nm)
if (pw == NULL)
return;
- fnh = fec_nh_find(fn, AF_INET, (union ldpd_addr *)&ln->id, 0, 0);
+ fnh = fec_nh_find(fn, AF_INET, (union ldpd_addr *)&ln->id, 0, 0, 0);
if (fnh == NULL)
return;
@@ -482,7 +482,7 @@ l2vpn_recv_pw_status_wcard(struct lde_nbr *ln, struct notify_msg *nm)
}
fnh = fec_nh_find(fn, AF_INET, (union ldpd_addr *)&ln->id,
- 0, 0);
+ 0, 0, 0);
if (fnh == NULL)
continue;
diff --git a/ldpd/lde.c b/ldpd/lde.c
index ac680b47a9..006d27f6ab 100644
--- a/ldpd/lde.c
+++ b/ldpd/lde.c
@@ -520,7 +520,8 @@ lde_dispatch_parent(struct thread *thread)
switch (imsg.hdr.type) {
case IMSG_NETWORK_ADD:
lde_kernel_insert(&fec, kr->af, &kr->nexthop,
- kr->ifindex, kr->priority,
+ kr->ifindex, kr->route_type,
+ kr->route_instance,
kr->flags & F_CONNECTED, NULL);
break;
case IMSG_NETWORK_UPDATE:
@@ -747,7 +748,8 @@ lde_send_change_klabel(struct fec_node *fn, struct fec_nh *fnh)
kr.ifindex = fnh->ifindex;
kr.local_label = fn->local_label;
kr.remote_label = fnh->remote_label;
- kr.priority = fnh->priority;
+ kr.route_type = fnh->route_type;
+ kr.route_instance = fnh->route_instance;
lde_imsg_compose_parent(IMSG_KLABEL_CHANGE, 0, &kr,
sizeof(kr));
@@ -761,7 +763,8 @@ lde_send_change_klabel(struct fec_node *fn, struct fec_nh *fnh)
kr.ifindex = fnh->ifindex;
kr.local_label = fn->local_label;
kr.remote_label = fnh->remote_label;
- kr.priority = fnh->priority;
+ kr.route_type = fnh->route_type;
+ kr.route_instance = fnh->route_instance;
lde_imsg_compose_parent(IMSG_KLABEL_CHANGE, 0, &kr,
sizeof(kr));
@@ -798,7 +801,8 @@ lde_send_delete_klabel(struct fec_node *fn, struct fec_nh *fnh)
kr.ifindex = fnh->ifindex;
kr.local_label = fn->local_label;
kr.remote_label = fnh->remote_label;
- kr.priority = fnh->priority;
+ kr.route_type = fnh->route_type;
+ kr.route_instance = fnh->route_instance;
lde_imsg_compose_parent(IMSG_KLABEL_DELETE, 0, &kr,
sizeof(kr));
@@ -812,7 +816,8 @@ lde_send_delete_klabel(struct fec_node *fn, struct fec_nh *fnh)
kr.ifindex = fnh->ifindex;
kr.local_label = fn->local_label;
kr.remote_label = fnh->remote_label;
- kr.priority = fnh->priority;
+ kr.route_type = fnh->route_type;
+ kr.route_instance = fnh->route_instance;
lde_imsg_compose_parent(IMSG_KLABEL_DELETE, 0, &kr,
sizeof(kr));
diff --git a/ldpd/lde.h b/ldpd/lde.h
index 0a7d0a58fe..ce466c16b9 100644
--- a/ldpd/lde.h
+++ b/ldpd/lde.h
@@ -108,7 +108,8 @@ struct fec_nh {
union ldpd_addr nexthop;
ifindex_t ifindex;
uint32_t remote_label;
- uint8_t priority;
+ uint8_t route_type;
+ unsigned short route_instance;
uint8_t flags;
};
#define F_FEC_NH_NEW 0x01
@@ -193,11 +194,11 @@ void rt_dump(pid_t);
void fec_snap(struct lde_nbr *);
void fec_tree_clear(void);
struct fec_nh *fec_nh_find(struct fec_node *, int, union ldpd_addr *,
- ifindex_t, uint8_t);
+ ifindex_t, uint8_t, unsigned short);
void lde_kernel_insert(struct fec *, int, union ldpd_addr *,
- ifindex_t, uint8_t, int, void *);
+ ifindex_t, uint8_t, unsigned short, int, void *);
void lde_kernel_remove(struct fec *, int, union ldpd_addr *,
- ifindex_t, uint8_t);
+ ifindex_t, uint8_t, unsigned short);
void lde_kernel_update(struct fec *);
void lde_check_mapping(struct map *, struct lde_nbr *);
void lde_check_request(struct map *, struct lde_nbr *);
diff --git a/ldpd/lde_lib.c b/ldpd/lde_lib.c
index 0957a5455e..eb1a6d9434 100644
--- a/ldpd/lde_lib.c
+++ b/ldpd/lde_lib.c
@@ -31,7 +31,7 @@ static int lde_nbr_is_nexthop(struct fec_node *,
static void fec_free(void *);
static struct fec_node *fec_add(struct fec *fec);
static struct fec_nh *fec_nh_add(struct fec_node *, int, union ldpd_addr *,
- ifindex_t, uint8_t);
+ ifindex_t, uint8_t, unsigned short);
static void fec_nh_del(struct fec_nh *);
RB_GENERATE(fec_tree, fec, entry, fec_compare)
@@ -275,7 +275,7 @@ fec_add(struct fec *fec)
struct fec_nh *
fec_nh_find(struct fec_node *fn, int af, union ldpd_addr *nexthop,
- ifindex_t ifindex, uint8_t priority)
+ ifindex_t ifindex, uint8_t route_type, unsigned short route_instance)
{
struct fec_nh *fnh;
@@ -283,7 +283,8 @@ fec_nh_find(struct fec_node *fn, int af, union ldpd_addr *nexthop,
if (fnh->af == af &&
ldp_addrcmp(af, &fnh->nexthop, nexthop) == 0 &&
fnh->ifindex == ifindex &&
- fnh->priority == priority)
+ fnh->route_type == route_type &&
+ fnh->route_instance == route_instance)
return (fnh);
return (NULL);
@@ -291,7 +292,7 @@ fec_nh_find(struct fec_node *fn, int af, union ldpd_addr *nexthop,
static struct fec_nh *
fec_nh_add(struct fec_node *fn, int af, union ldpd_addr *nexthop,
- ifindex_t ifindex, uint8_t priority)
+ ifindex_t ifindex, uint8_t route_type, unsigned short route_instance)
{
struct fec_nh *fnh;
@@ -303,7 +304,8 @@ fec_nh_add(struct fec_node *fn, int af, union ldpd_addr *nexthop,
fnh->nexthop = *nexthop;
fnh->ifindex = ifindex;
fnh->remote_label = NO_LABEL;
- fnh->priority = priority;
+ fnh->route_type = route_type;
+ fnh->route_instance = route_instance;
LIST_INSERT_HEAD(&fn->nexthops, fnh, entry);
return (fnh);
@@ -318,7 +320,8 @@ fec_nh_del(struct fec_nh *fnh)
void
lde_kernel_insert(struct fec *fec, int af, union ldpd_addr *nexthop,
- ifindex_t ifindex, uint8_t priority, int connected, void *data)
+ ifindex_t ifindex, uint8_t route_type, unsigned short route_instance,
+ int connected, void *data)
{
struct fec_node *fn;
struct fec_nh *fnh;
@@ -329,9 +332,10 @@ lde_kernel_insert(struct fec *fec, int af, union ldpd_addr *nexthop,
if (data)
fn->data = data;
- fnh = fec_nh_find(fn, af, nexthop, ifindex, priority);
+ fnh = fec_nh_find(fn, af, nexthop, ifindex, route_type, route_instance);
if (fnh == NULL)
- fnh = fec_nh_add(fn, af, nexthop, ifindex, priority);
+ fnh = fec_nh_add(fn, af, nexthop, ifindex, route_type,
+ route_instance);
fnh->flags |= F_FEC_NH_NEW;
if (connected)
fnh->flags |= F_FEC_NH_CONNECTED;
@@ -339,7 +343,7 @@ lde_kernel_insert(struct fec *fec, int af, union ldpd_addr *nexthop,
void
lde_kernel_remove(struct fec *fec, int af, union ldpd_addr *nexthop,
- ifindex_t ifindex, uint8_t priority)
+ ifindex_t ifindex, uint8_t route_type, unsigned short route_instance)
{
struct fec_node *fn;
struct fec_nh *fnh;
@@ -348,7 +352,7 @@ lde_kernel_remove(struct fec *fec, int af, union ldpd_addr *nexthop,
if (fn == NULL)
/* route lost */
return;
- fnh = fec_nh_find(fn, af, nexthop, ifindex, priority);
+ fnh = fec_nh_find(fn, af, nexthop, ifindex, route_type, route_instance);
if (fnh == NULL)
/* route lost */
return;
diff --git a/ldpd/ldp_zebra.c b/ldpd/ldp_zebra.c
index 35a7d944d3..884ae159be 100644
--- a/ldpd/ldp_zebra.c
+++ b/ldpd/ldp_zebra.c
@@ -37,7 +37,7 @@
static void ifp2kif(struct interface *, struct kif *);
static void ifc2kaddr(struct interface *, struct connected *,
struct kaddr *);
-static int zebra_send_mpls_labels(int, struct kroute *);
+static int ldp_zebra_send_mpls_labels(int, struct kroute *);
static int ldp_router_id_update(ZAPI_CALLBACK_ARGS);
static int ldp_interface_add(ZAPI_CALLBACK_ARGS);
static int ldp_interface_delete(ZAPI_CALLBACK_ARGS);
@@ -106,9 +106,10 @@ pw2zpw(struct l2vpn_pw *pw, struct zapi_pw *zpw)
}
static int
-zebra_send_mpls_labels(int cmd, struct kroute *kr)
+ldp_zebra_send_mpls_labels(int cmd, struct kroute *kr)
{
- struct stream *s;
+ struct zapi_labels zl = {};
+ struct zapi_nexthop_label *znh;
if (kr->local_label < MPLS_LABEL_RESERVED_MAX ||
kr->remote_label == NO_LABEL)
@@ -120,48 +121,65 @@ zebra_send_mpls_labels(int cmd, struct kroute *kr)
log_label(kr->local_label), log_label(kr->remote_label),
(cmd == ZEBRA_MPLS_LABELS_ADD) ? "add" : "delete");
- /* Reset stream. */
- s = zclient->obuf;
- stream_reset(s);
+ zl.type = ZEBRA_LSP_LDP;
+ zl.local_label = kr->local_label;
- zclient_create_header(s, cmd, VRF_DEFAULT);
- stream_putc(s, ZEBRA_LSP_LDP);
- stream_putl(s, kr->af);
+ /* Set prefix. */
+ SET_FLAG(zl.message, ZAPI_LABELS_FTN);
+ zl.route.prefix.family = kr->af;
switch (kr->af) {
case AF_INET:
- stream_put_in_addr(s, &kr->prefix.v4);
- stream_putc(s, kr->prefixlen);
- stream_put_in_addr(s, &kr->nexthop.v4);
+ zl.route.prefix.u.prefix4 = kr->prefix.v4;
break;
case AF_INET6:
- stream_write(s, (uint8_t *)&kr->prefix.v6, 16);
- stream_putc(s, kr->prefixlen);
- stream_write(s, (uint8_t *)&kr->nexthop.v6, 16);
+ zl.route.prefix.u.prefix6 = kr->prefix.v6;
break;
default:
- fatalx("kr_change: unknown af");
+ fatalx("ldp_zebra_send_mpls_labels: unknown af");
}
- stream_putl(s, kr->ifindex);
- stream_putc(s, kr->priority);
- stream_putl(s, kr->local_label);
- stream_putl(s, kr->remote_label);
+ zl.route.prefix.prefixlen = kr->prefixlen;
+ zl.route.type = kr->route_type;
+ zl.route.instance = kr->route_instance;
- /* Put length at the first point of the stream. */
- stream_putw_at(s, 0, stream_get_endp(s));
+ /* Set nexthop. */
+ zl.nexthop_num = 1;
+ znh = &zl.nexthops[0];
+ switch (kr->af) {
+ case AF_INET:
+ znh->family = AF_INET;
+ znh->address.ipv4 = kr->nexthop.v4;
+ if (kr->ifindex)
+ znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ else
+ znh->type = NEXTHOP_TYPE_IPV4;
+ break;
+ case AF_INET6:
+ znh->family = AF_INET6;
+ znh->address.ipv6 = kr->nexthop.v6;
+ if (kr->ifindex)
+ znh->type = NEXTHOP_TYPE_IPV6_IFINDEX;
+ else
+ znh->type = NEXTHOP_TYPE_IPV6;
+ break;
+ default:
+ break;
+ }
+ znh->ifindex = kr->ifindex;
+ znh->label = kr->remote_label;
- return (zclient_send_message(zclient));
+ return zebra_send_mpls_labels(zclient, cmd, &zl);
}
int
kr_change(struct kroute *kr)
{
- return (zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_ADD, kr));
+ return (ldp_zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_ADD, kr));
}
int
kr_delete(struct kroute *kr)
{
- return (zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_DELETE, kr));
+ return (ldp_zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_DELETE, kr));
}
int
@@ -407,7 +425,8 @@ ldp_zebra_read_route(ZAPI_CALLBACK_ARGS)
break;
}
kr.prefixlen = api.prefix.prefixlen;
- kr.priority = api.distance;
+ kr.route_type = api.type;
+ kr.route_instance = api.instance;
switch (api.type) {
case ZEBRA_ROUTE_CONNECT:
diff --git a/ldpd/ldpd.h b/ldpd/ldpd.h
index 9113505581..bd7562e5ad 100644
--- a/ldpd/ldpd.h
+++ b/ldpd/ldpd.h
@@ -543,7 +543,8 @@ struct kroute {
uint32_t local_label;
uint32_t remote_label;
unsigned short ifindex;
- uint8_t priority;
+ uint8_t route_type;
+ unsigned short route_instance;
uint16_t flags;
};
diff --git a/ldpd/socket.c b/ldpd/socket.c
index b31db2c7bc..8706d03c6f 100644
--- a/ldpd/socket.c
+++ b/ldpd/socket.c
@@ -79,7 +79,7 @@ ldp_create_socket(int af, enum socket_type type)
sock_set_bindany(fd, 1);
break;
}
- frr_elevate_privs(&ldpd_privs) {
+ frr_with_privs(&ldpd_privs) {
if (sock_set_reuse(fd, 1) == -1) {
close(fd);
return (-1);
@@ -254,7 +254,7 @@ int
sock_set_bindany(int fd, int enable)
{
#ifdef HAVE_SO_BINDANY
- frr_elevate_privs(&ldpd_privs) {
+ frr_with_privs(&ldpd_privs) {
if (setsockopt(fd, SOL_SOCKET, SO_BINDANY, &enable,
sizeof(int)) < 0) {
log_warn("%s: error setting SO_BINDANY", __func__);
@@ -269,7 +269,7 @@ sock_set_bindany(int fd, int enable)
}
return (0);
#elif defined(IP_BINDANY)
- frr_elevate_privs(&ldpd_privs) {
+ frr_with_privs(&ldpd_privs) {
if (setsockopt(fd, IPPROTO_IP, IP_BINDANY, &enable, sizeof(int))
< 0) {
log_warn("%s: error setting IP_BINDANY", __func__);
@@ -304,7 +304,7 @@ sock_set_md5sig(int fd, int af, union ldpd_addr *addr, const char *password)
#if HAVE_DECL_TCP_MD5SIG
addr2sa(af, addr, 0, &su);
- frr_elevate_privs(&ldpe_privs) {
+ frr_with_privs(&ldpe_privs) {
ret = sockopt_tcp_signature(fd, &su, password);
save_errno = errno;
}
diff --git a/lib/command.c b/lib/command.c
index 9dabc2af7e..eecca2a5f5 100644
--- a/lib/command.c
+++ b/lib/command.c
@@ -151,6 +151,7 @@ const char *node_names[] = {
"bfd peer", /* BFD_PEER_NODE */
"openfabric", // OPENFABRIC_NODE
"vrrp", /* VRRP_NODE */
+ "bmp", /* BMP_NODE */
};
/* clang-format on */
@@ -975,6 +976,7 @@ enum node_type node_parent(enum node_type node)
case BGP_IPV6M_NODE:
case BGP_EVPN_NODE:
case BGP_IPV6L_NODE:
+ case BMP_NODE:
ret = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
@@ -1059,8 +1061,10 @@ static int cmd_execute_command_real(vector vline, enum cmd_filter_type filter,
vty->num_cfg_changes = 0;
memset(&vty->cfg_changes, 0, sizeof(vty->cfg_changes));
- /* Regenerate candidate configuration. */
- if (frr_get_cli_mode() == FRR_CLI_CLASSIC)
+ /* Regenerate candidate configuration if necessary. */
+ if (frr_get_cli_mode() == FRR_CLI_CLASSIC
+ && running_config->version
+ > vty->candidate_config->version)
nb_config_replace(vty->candidate_config,
running_config, true);
}
@@ -1491,6 +1495,7 @@ void cmd_exit(struct vty *vty)
case BGP_IPV6M_NODE:
case BGP_EVPN_NODE:
case BGP_IPV6L_NODE:
+ case BMP_NODE:
vty->node = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
@@ -2708,15 +2713,66 @@ DEFUN (no_banner_motd,
DEFUN(find,
find_cmd,
- "find COMMAND...",
- "Find CLI command containing text\n"
- "Text to search for\n")
+ "find REGEX",
+ "Find CLI command matching a regular expression\n"
+ "Search pattern (POSIX regex)\n")
{
- char *text = argv_concat(argv, argc, 1);
+ char *pattern = argv[1]->arg;
const struct cmd_node *node;
const struct cmd_element *cli;
vector clis;
+ regex_t exp = {};
+
+ int cr = regcomp(&exp, pattern, REG_NOSUB | REG_EXTENDED);
+
+ if (cr != 0) {
+ switch (cr) {
+ case REG_BADBR:
+ vty_out(vty, "%% Invalid {...} expression\n");
+ break;
+ case REG_BADRPT:
+ vty_out(vty, "%% Bad repetition operator\n");
+ break;
+ case REG_BADPAT:
+ vty_out(vty, "%% Regex syntax error\n");
+ break;
+ case REG_ECOLLATE:
+ vty_out(vty, "%% Invalid collating element\n");
+ break;
+ case REG_ECTYPE:
+ vty_out(vty, "%% Invalid character class name\n");
+ break;
+ case REG_EESCAPE:
+ vty_out(vty,
+ "%% Regex ended with escape character (\\)\n");
+ break;
+ case REG_ESUBREG:
+ vty_out(vty,
+ "%% Invalid number in \\digit construction\n");
+ break;
+ case REG_EBRACK:
+ vty_out(vty, "%% Unbalanced square brackets\n");
+ break;
+ case REG_EPAREN:
+ vty_out(vty, "%% Unbalanced parentheses\n");
+ break;
+ case REG_EBRACE:
+ vty_out(vty, "%% Unbalanced braces\n");
+ break;
+ case REG_ERANGE:
+ vty_out(vty,
+ "%% Invalid endpoint in range expression\n");
+ break;
+ case REG_ESPACE:
+ vty_out(vty, "%% Failed to compile (out of memory)\n");
+ break;
+ }
+
+ goto done;
+ }
+
+
for (unsigned int i = 0; i < vector_active(cmdvec); i++) {
node = vector_slot(cmdvec, i);
if (!node)
@@ -2724,14 +2780,15 @@ DEFUN(find,
clis = node->cmd_vector;
for (unsigned int j = 0; j < vector_active(clis); j++) {
cli = vector_slot(clis, j);
- if (strcasestr(cli->string, text))
+
+ if (regexec(&exp, cli->string, 0, NULL, 0) == 0)
vty_out(vty, " (%s) %s\n",
node_names[node->node], cli->string);
}
}
- XFREE(MTYPE_TMP, text);
-
+done:
+ regfree(&exp);
return CMD_SUCCESS;
}
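For illustration, a hypothetical vtysh session with the regex-based find (hostname and output abbreviated; the listing format follows the vty_out() call above):

frr# find ^show version
 (view) show version

Plain strings without regex metacharacters still behave as substring searches, though matching is now case-sensitive rather than the previous strcasestr() behaviour.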
diff --git a/lib/command.h b/lib/command.h
index 8dc35a0fdc..137d3748ae 100644
--- a/lib/command.h
+++ b/lib/command.h
@@ -159,6 +159,7 @@ enum node_type {
BFD_PEER_NODE, /* BFD peer configuration mode. */
OPENFABRIC_NODE, /* OpenFabric router configuration node */
VRRP_NODE, /* VRRP node */
+ BMP_NODE, /* BMP config under router bgp */
NODE_TYPE_MAX, /* maximum */
};
diff --git a/lib/compiler.h b/lib/compiler.h
index 6700ca9e8b..e430925e69 100644
--- a/lib/compiler.h
+++ b/lib/compiler.h
@@ -121,6 +121,49 @@ extern "C" {
#define macro_inline static inline __attribute__((unused))
#define macro_pure static inline __attribute__((unused, pure))
+
+/* variadic macros, use like:
+ * #define V_0() ...
+ * #define V_1(x) ...
+ * #define V(...) MACRO_VARIANT(V, ##__VA_ARGS__)(__VA_ARGS__)
+ */
+#define _MACRO_VARIANT(A0,A1,A2,A3,A4,A5,A6,A7,A8,A9,A10, N, ...) N
+
+#define _CONCAT2(a, b) a ## b
+#define _CONCAT(a, b) _CONCAT2(a,b)
+
+#define MACRO_VARIANT(NAME, ...) \
+ _CONCAT(NAME, _MACRO_VARIANT(0, ##__VA_ARGS__, \
+ _10, _9, _8, _7, _6, _5, _4, _3, _2, _1, _0))
+
+#define NAMECTR(name) _CONCAT(name, __COUNTER__)
+
+/* per-arg repeat macros, use like:
+ * #define PERARG(n) ...n...
+ * #define FOO(...) MACRO_REPEAT(PERARG, ##__VA_ARGS__)
+ */
+
+#define _MACRO_REPEAT_0(NAME)
+#define _MACRO_REPEAT_1(NAME, A1) \
+ NAME(A1)
+#define _MACRO_REPEAT_2(NAME, A1, A2) \
+ NAME(A1) NAME(A2)
+#define _MACRO_REPEAT_3(NAME, A1, A2, A3) \
+ NAME(A1) NAME(A2) NAME(A3)
+#define _MACRO_REPEAT_4(NAME, A1, A2, A3, A4) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4)
+#define _MACRO_REPEAT_5(NAME, A1, A2, A3, A4, A5) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5)
+#define _MACRO_REPEAT_6(NAME, A1, A2, A3, A4, A5, A6) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5) NAME(A6)
+#define _MACRO_REPEAT_7(NAME, A1, A2, A3, A4, A5, A6, A7) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5) NAME(A6) NAME(A7)
+#define _MACRO_REPEAT_8(NAME, A1, A2, A3, A4, A5, A6, A7, A8) \
+ NAME(A1) NAME(A2) NAME(A3) NAME(A4) NAME(A5) NAME(A6) NAME(A7) NAME(A8)
+
+#define MACRO_REPEAT(NAME, ...) \
+ MACRO_VARIANT(_MACRO_REPEAT, ##__VA_ARGS__)(NAME, ##__VA_ARGS__)
+
/*
* for warnings on macros, put in the macro content like this:
* #define MACRO BLA CPP_WARN("MACRO has been deprecated")
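A minimal sketch of the arity dispatch these helpers provide; the SUM macros below are hypothetical, only MACRO_VARIANT comes from this header:

#define SUM_0()      0
#define SUM_1(a)     (a)
#define SUM_2(a, b)  ((a) + (b))
#define SUM(...)     MACRO_VARIANT(SUM, ##__VA_ARGS__)(__VA_ARGS__)

int zero  = SUM();      /* expands to SUM_0()     */
int three = SUM(1, 2);  /* expands to SUM_2(1, 2) */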
diff --git a/lib/ferr.c b/lib/ferr.c
index fd5fb50172..ccf63dea17 100644
--- a/lib/ferr.c
+++ b/lib/ferr.c
@@ -33,6 +33,7 @@
#include "command.h"
#include "json.h"
#include "linklist.h"
+#include "frr_pthread.h"
DEFINE_MTYPE_STATIC(LIB, ERRINFO, "error information")
@@ -83,14 +84,12 @@ void log_ref_add(struct log_ref *ref)
{
uint32_t i = 0;
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
while (ref[i].code != END_FERR) {
hash_get(refs, &ref[i], hash_alloc_intern);
i++;
}
}
- pthread_mutex_unlock(&refs_mtx);
}
struct log_ref *log_ref_get(uint32_t code)
@@ -99,11 +98,9 @@ struct log_ref *log_ref_get(uint32_t code)
struct log_ref *ref;
holder.code = code;
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
ref = hash_lookup(refs, &holder);
}
- pthread_mutex_unlock(&refs_mtx);
return ref;
}
@@ -118,11 +115,9 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
if (json)
top = json_object_new_object();
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
errlist = code ? list_new() : hash_to_list(refs);
}
- pthread_mutex_unlock(&refs_mtx);
if (code) {
ref = log_ref_get(code);
@@ -189,23 +184,19 @@ DEFUN_NOSH(show_error_code,
void log_ref_init(void)
{
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
refs = hash_create(ferr_hash_key, ferr_hash_cmp,
"Error Reference Texts");
}
- pthread_mutex_unlock(&refs_mtx);
}
void log_ref_fini(void)
{
- pthread_mutex_lock(&refs_mtx);
- {
+ frr_with_mutex(&refs_mtx) {
hash_clean(refs, NULL);
hash_free(refs);
refs = NULL;
}
- pthread_mutex_unlock(&refs_mtx);
}
void log_ref_vty_init(void)
diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c
index bdb6c2a397..21dfc9256f 100644
--- a/lib/frr_pthread.c
+++ b/lib/frr_pthread.c
@@ -49,21 +49,17 @@ static struct list *frr_pthread_list;
void frr_pthread_init(void)
{
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
frr_pthread_list = list_new();
frr_pthread_list->del = (void (*)(void *))&frr_pthread_destroy;
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
}
void frr_pthread_finish(void)
{
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
list_delete(&frr_pthread_list);
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
}
struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
@@ -94,11 +90,9 @@ struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
pthread_mutex_init(fpt->running_cond_mtx, NULL);
pthread_cond_init(fpt->running_cond, NULL);
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
listnode_add(frr_pthread_list, fpt);
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
return fpt;
}
@@ -162,23 +156,19 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)
void frr_pthread_wait_running(struct frr_pthread *fpt)
{
- pthread_mutex_lock(fpt->running_cond_mtx);
- {
+ frr_with_mutex(fpt->running_cond_mtx) {
while (!fpt->running)
pthread_cond_wait(fpt->running_cond,
fpt->running_cond_mtx);
}
- pthread_mutex_unlock(fpt->running_cond_mtx);
}
void frr_pthread_notify_running(struct frr_pthread *fpt)
{
- pthread_mutex_lock(fpt->running_cond_mtx);
- {
+ frr_with_mutex(fpt->running_cond_mtx) {
fpt->running = true;
pthread_cond_signal(fpt->running_cond);
}
- pthread_mutex_unlock(fpt->running_cond_mtx);
}
int frr_pthread_stop(struct frr_pthread *fpt, void **result)
@@ -190,14 +180,12 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)
void frr_pthread_stop_all(void)
{
- pthread_mutex_lock(&frr_pthread_list_mtx);
- {
+ frr_with_mutex(&frr_pthread_list_mtx) {
struct listnode *n;
struct frr_pthread *fpt;
for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt))
frr_pthread_stop(fpt, NULL);
}
- pthread_mutex_unlock(&frr_pthread_list_mtx);
}
/*
diff --git a/lib/frr_pthread.h b/lib/frr_pthread.h
index 6096a50370..f70c8a0db4 100644
--- a/lib/frr_pthread.h
+++ b/lib/frr_pthread.h
@@ -215,6 +215,54 @@ void frr_pthread_stop_all(void);
#define pthread_condattr_setclock(A, B)
#endif
+/* mutex auto-lock/unlock */
+
+/* variant 1:
+ * (for short blocks, multiple mutexes supported)
+ * break & return can be used for aborting the block
+ *
+ * frr_with_mutex(&mtx, &mtx2) {
+ * if (error)
+ * break;
+ * ...
+ * }
+ */
+#define _frr_with_mutex(mutex) \
+ *NAMECTR(_mtx_) __attribute__(( \
+ unused, cleanup(_frr_mtx_unlock))) = _frr_mtx_lock(mutex), \
+ /* end */
+
+#define frr_with_mutex(...) \
+ for (pthread_mutex_t MACRO_REPEAT(_frr_with_mutex, ##__VA_ARGS__) \
+ *_once = NULL; _once == NULL; _once = (void *)1) \
+ /* end */
+
+/* variant 2:
+ * (more suitable for long blocks, no extra indentation)
+ *
+ * frr_mutex_lock_autounlock(&mtx);
+ * ...
+ */
+#define frr_mutex_lock_autounlock(mutex) \
+ pthread_mutex_t *NAMECTR(_mtx_) \
+ __attribute__((unused, cleanup(_frr_mtx_unlock))) = \
+ _frr_mtx_lock(mutex) \
+ /* end */
+
+static inline pthread_mutex_t *_frr_mtx_lock(pthread_mutex_t *mutex)
+{
+ pthread_mutex_lock(mutex);
+ return mutex;
+}
+
+static inline void _frr_mtx_unlock(pthread_mutex_t **mutex)
+{
+ if (!*mutex)
+ return;
+ pthread_mutex_unlock(*mutex);
+ *mutex = NULL;
+}
+
#ifdef __cplusplus
}
#endif
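To show how the two locking variants read at a call site, a minimal sketch under assumed names (counter_mtx and the counter itself are hypothetical):

static pthread_mutex_t counter_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned long counter;

void counter_bump(void)
{
	/* variant 1: unlocks at the closing brace, also on break/return */
	frr_with_mutex(&counter_mtx) {
		counter++;
	}
}

unsigned long counter_get(void)
{
	/* variant 2: stays locked until the function returns */
	frr_mutex_lock_autounlock(&counter_mtx);

	return counter;
}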
diff --git a/lib/frrcu.c b/lib/frrcu.c
index 7e6475b648..54626f909d 100644
--- a/lib/frrcu.c
+++ b/lib/frrcu.c
@@ -55,7 +55,6 @@
#include "atomlist.h"
DEFINE_MTYPE_STATIC(LIB, RCU_THREAD, "RCU thread")
-DEFINE_MTYPE_STATIC(LIB, RCU_NEXT, "RCU sequence barrier")
DECLARE_ATOMLIST(rcu_heads, struct rcu_head, head)
@@ -226,7 +225,7 @@ static void rcu_bump(void)
{
struct rcu_next *rn;
- rn = XMALLOC(MTYPE_RCU_NEXT, sizeof(*rn));
+ rn = XMALLOC(MTYPE_RCU_THREAD, sizeof(*rn));
/* note: each RCUA_NEXT item corresponds to exactly one seqno bump.
* This means we don't need to communicate which seqno is which
@@ -269,7 +268,7 @@ static void rcu_bump(void)
* "last item is being deleted - start over" case, and then we may end
* up accessing old RCU queue items that are already free'd.
*/
- rcu_free_internal(MTYPE_RCU_NEXT, rn, head_free);
+ rcu_free_internal(MTYPE_RCU_THREAD, rn, head_free);
/* Only allow the RCU sweeper to run after these 2 items are queued.
*
diff --git a/lib/hash.c b/lib/hash.c
index 9d9d39702e..7f8a237047 100644
--- a/lib/hash.c
+++ b/lib/hash.c
@@ -28,6 +28,7 @@
#include "vty.h"
#include "command.h"
#include "libfrr.h"
+#include "frr_pthread.h"
DEFINE_MTYPE_STATIC(LIB, HASH, "Hash")
DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket")
@@ -54,14 +55,12 @@ struct hash *hash_create_size(unsigned int size,
hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
hash->stats.empty = hash->size;
- pthread_mutex_lock(&_hashes_mtx);
- {
+ frr_with_mutex(&_hashes_mtx) {
if (!_hashes)
_hashes = list_new();
listnode_add(_hashes, hash);
}
- pthread_mutex_unlock(&_hashes_mtx);
return hash;
}
@@ -311,8 +310,7 @@ struct list *hash_to_list(struct hash *hash)
void hash_free(struct hash *hash)
{
- pthread_mutex_lock(&_hashes_mtx);
- {
+ frr_with_mutex(&_hashes_mtx) {
if (_hashes) {
listnode_delete(_hashes, hash);
if (_hashes->count == 0) {
@@ -320,7 +318,6 @@ void hash_free(struct hash *hash)
}
}
}
- pthread_mutex_unlock(&_hashes_mtx);
XFREE(MTYPE_HASH, hash->name);
diff --git a/lib/log.c b/lib/log.c
index 51a0ddd6b7..c777868736 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -31,6 +31,7 @@
#include "lib_errors.h"
#include "lib/hook.h"
#include "printfrr.h"
+#include "frr_pthread.h"
#ifndef SUNOS_5
#include <sys/un.h>
@@ -83,89 +84,70 @@ static int zlog_filter_lookup(const char *lookup)
void zlog_filter_clear(void)
{
- pthread_mutex_lock(&loglock);
- zlog_filter_count = 0;
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zlog_filter_count = 0;
+ }
}
int zlog_filter_add(const char *filter)
{
- pthread_mutex_lock(&loglock);
+ frr_with_mutex(&loglock) {
+ if (zlog_filter_count >= ZLOG_FILTERS_MAX)
+ return 1;
- int ret = 0;
+ if (zlog_filter_lookup(filter) != -1)
+ /* Filter already present */
+ return -1;
- if (zlog_filter_count >= ZLOG_FILTERS_MAX) {
- ret = 1;
- goto done;
- }
+ strlcpy(zlog_filters[zlog_filter_count], filter,
+ sizeof(zlog_filters[0]));
- if (zlog_filter_lookup(filter) != -1) {
- /* Filter already present */
- ret = -1;
- goto done;
- }
+ if (zlog_filters[zlog_filter_count][0] == '\0')
+ /* Filter was either empty or didn't get copied
+ * correctly
+ */
+ return -1;
- strlcpy(zlog_filters[zlog_filter_count], filter,
- sizeof(zlog_filters[0]));
-
- if (zlog_filters[zlog_filter_count][0] == '\0') {
- /* Filter was either empty or didn't get copied correctly */
- ret = -1;
- goto done;
+ zlog_filter_count++;
}
-
- zlog_filter_count++;
-
-done:
- pthread_mutex_unlock(&loglock);
- return ret;
+ return 0;
}
int zlog_filter_del(const char *filter)
{
- pthread_mutex_lock(&loglock);
-
- int found_idx = zlog_filter_lookup(filter);
- int last_idx = zlog_filter_count - 1;
- int ret = 0;
+ frr_with_mutex(&loglock) {
+ int found_idx = zlog_filter_lookup(filter);
+ int last_idx = zlog_filter_count - 1;
- if (found_idx == -1) {
- /* Didn't find the filter to delete */
- ret = -1;
- goto done;
- }
-
- /* Adjust the filter array */
- memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
- (last_idx - found_idx) * sizeof(zlog_filters[0]));
+ if (found_idx == -1)
+ /* Didn't find the filter to delete */
+ return -1;
- zlog_filter_count--;
+ /* Adjust the filter array */
+ memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
+ (last_idx - found_idx) * sizeof(zlog_filters[0]));
-done:
- pthread_mutex_unlock(&loglock);
- return ret;
+ zlog_filter_count--;
+ }
+ return 0;
}
/* Dump all filters to buffer, delimited by new line */
int zlog_filter_dump(char *buf, size_t max_size)
{
- pthread_mutex_lock(&loglock);
-
- int ret = 0;
int len = 0;
- for (int i = 0; i < zlog_filter_count; i++) {
- ret = snprintf(buf + len, max_size - len, " %s\n",
- zlog_filters[i]);
- len += ret;
- if ((ret < 0) || ((size_t)len >= max_size)) {
- len = -1;
- goto done;
+ frr_with_mutex(&loglock) {
+ for (int i = 0; i < zlog_filter_count; i++) {
+ int ret;
+ ret = snprintf(buf + len, max_size - len, " %s\n",
+ zlog_filters[i]);
+ len += ret;
+ if ((ret < 0) || ((size_t)len >= max_size))
+ return -1;
}
}
-done:
- pthread_mutex_unlock(&loglock);
return len;
}
@@ -363,7 +345,7 @@ search:
/* va_list version of zlog. */
void vzlog(int priority, const char *format, va_list args)
{
- pthread_mutex_lock(&loglock);
+ frr_mutex_lock_autounlock(&loglock);
char proto_str[32] = "";
int original_errno = errno;
@@ -430,36 +412,31 @@ out:
if (msg != buf)
XFREE(MTYPE_TMP, msg);
errno = original_errno;
- pthread_mutex_unlock(&loglock);
}
int vzlog_test(int priority)
{
- pthread_mutex_lock(&loglock);
-
- int ret = 0;
+ frr_mutex_lock_autounlock(&loglock);
struct zlog *zl = zlog_default;
/* When zlog_default is also NULL, use stderr for logging. */
if (zl == NULL)
- ret = 1;
+ return 1;
/* Syslog output */
else if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG])
- ret = 1;
+ return 1;
/* File output. */
else if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp)
- ret = 1;
+ return 1;
/* stdout output. */
else if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT])
- ret = 1;
+ return 1;
/* Terminal monitor. */
else if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR])
- ret = 1;
-
- pthread_mutex_unlock(&loglock);
+ return 1;
- return ret;
+ return 0;
}
/*
@@ -870,9 +847,9 @@ void openzlog(const char *progname, const char *protoname,
openlog(progname, syslog_flags, zl->facility);
- pthread_mutex_lock(&loglock);
- zlog_default = zl;
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zlog_default = zl;
+ }
#ifdef HAVE_GLIBC_BACKTRACE
/* work around backtrace() using lazily resolved dynamically linked
@@ -889,7 +866,8 @@ void openzlog(const char *progname, const char *protoname,
void closezlog(void)
{
- pthread_mutex_lock(&loglock);
+ frr_mutex_lock_autounlock(&loglock);
+
struct zlog *zl = zlog_default;
closelog();
@@ -901,15 +879,14 @@ void closezlog(void)
XFREE(MTYPE_ZLOG, zl);
zlog_default = NULL;
- pthread_mutex_unlock(&loglock);
}
/* Called from command.c. */
void zlog_set_level(zlog_dest_t dest, int log_level)
{
- pthread_mutex_lock(&loglock);
- zlog_default->maxlvl[dest] = log_level;
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zlog_default->maxlvl[dest] = log_level;
+ }
}
int zlog_set_file(const char *filename, int log_level)
@@ -929,15 +906,15 @@ int zlog_set_file(const char *filename, int log_level)
if (fp == NULL) {
ret = 0;
} else {
- pthread_mutex_lock(&loglock);
- zl = zlog_default;
-
- /* Set flags. */
- zl->filename = XSTRDUP(MTYPE_ZLOG, filename);
- zl->maxlvl[ZLOG_DEST_FILE] = log_level;
- zl->fp = fp;
- logfile_fd = fileno(fp);
- pthread_mutex_unlock(&loglock);
+ frr_with_mutex(&loglock) {
+ zl = zlog_default;
+
+ /* Set flags. */
+ zl->filename = XSTRDUP(MTYPE_ZLOG, filename);
+ zl->maxlvl[ZLOG_DEST_FILE] = log_level;
+ zl->fp = fp;
+ logfile_fd = fileno(fp);
+ }
}
return ret;
@@ -946,7 +923,7 @@ int zlog_set_file(const char *filename, int log_level)
/* Reset opend file. */
int zlog_reset_file(void)
{
- pthread_mutex_lock(&loglock);
+ frr_mutex_lock_autounlock(&loglock);
struct zlog *zl = zlog_default;
@@ -959,8 +936,6 @@ int zlog_reset_file(void)
XFREE(MTYPE_ZLOG, zl->filename);
zl->filename = NULL;
- pthread_mutex_unlock(&loglock);
-
return 1;
}
@@ -1066,6 +1041,7 @@ static const struct zebra_desc_table command_types[] = {
DESC_ENTRY(ZEBRA_INTERFACE_LINK_PARAMS),
DESC_ENTRY(ZEBRA_MPLS_LABELS_ADD),
DESC_ENTRY(ZEBRA_MPLS_LABELS_DELETE),
+ DESC_ENTRY(ZEBRA_MPLS_LABELS_REPLACE),
DESC_ENTRY(ZEBRA_IPMR_ROUTE_STATS),
DESC_ENTRY(ZEBRA_LABEL_MANAGER_CONNECT),
DESC_ENTRY(ZEBRA_LABEL_MANAGER_CONNECT_ASYNC),
diff --git a/lib/monotime.h b/lib/monotime.h
index ca27c45dc6..e246f177de 100644
--- a/lib/monotime.h
+++ b/lib/monotime.h
@@ -84,6 +84,20 @@ static inline int64_t monotime_until(const struct timeval *ref,
return (int64_t)tv.tv_sec * 1000000LL + tv.tv_usec;
}
+static inline time_t monotime_to_realtime(const struct timeval *mono,
+ struct timeval *realout)
+{
+ struct timeval delta, real;
+
+ monotime_since(mono, &delta);
+ gettimeofday(&real, NULL);
+
+ timersub(&real, &delta, &real);
+ if (realout)
+ *realout = real;
+ return real.tv_sec;
+}
+
/* Char buffer size for time-to-string api */
#define MONOTIME_STRLEN 32
diff --git a/lib/mpls.h b/lib/mpls.h
index d7b56c47bd..472ee9bc46 100644
--- a/lib/mpls.h
+++ b/lib/mpls.h
@@ -125,7 +125,7 @@ enum lsp_types_t {
ZEBRA_LSP_STATIC = 1, /* Static LSP. */
ZEBRA_LSP_LDP = 2, /* LDP LSP. */
ZEBRA_LSP_BGP = 3, /* BGP LSP. */
- ZEBRA_LSP_SR = 4, /* Segment Routing LSP. */
+ ZEBRA_LSP_OSPF_SR = 4, /* OSPF Segment Routing LSP. */
ZEBRA_LSP_SHARP = 5, /* Identifier for test protocol */
};
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 0984c1a168..cf5bed3d62 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -65,9 +65,8 @@ static int _nexthop_labels_cmp(const struct nexthop *nh1,
return memcmp(nhl1->label, nhl2->label, nhl1->num_labels);
}
-static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
- const union g_addr *addr1,
- const union g_addr *addr2)
+int nexthop_g_addr_cmp(enum nexthop_types_t type, const union g_addr *addr1,
+ const union g_addr *addr2)
{
int ret = 0;
@@ -92,13 +91,13 @@ static int _nexthop_g_addr_cmp(enum nexthop_types_t type,
static int _nexthop_gateway_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
{
- return _nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
+ return nexthop_g_addr_cmp(nh1->type, &nh1->gate, &nh2->gate);
}
static int _nexthop_source_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
{
- return _nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
+ return nexthop_g_addr_cmp(nh1->type, &nh1->src, &nh2->src);
}
static int _nexthop_cmp_no_labels(const struct nexthop *next1,
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 20401cd581..9dd5fc6fd3 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -142,6 +142,9 @@ extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
extern bool nexthop_same_no_labels(const struct nexthop *nh1,
const struct nexthop *nh2);
extern int nexthop_cmp(const struct nexthop *nh1, const struct nexthop *nh2);
+extern int nexthop_g_addr_cmp(enum nexthop_types_t type,
+ const union g_addr *addr1,
+ const union g_addr *addr2);
extern const char *nexthop_type_to_str(enum nexthop_types_t nh_type);
extern bool nexthop_labels_match(const struct nexthop *nh1,
diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c
index 5602018b30..abe2096cec 100644
--- a/lib/nexthop_group.c
+++ b/lib/nexthop_group.c
@@ -60,6 +60,16 @@ nexthop_group_cmd_compare(const struct nexthop_group_cmd *nhgc1,
return strcmp(nhgc1->name, nhgc2->name);
}
+static struct nexthop *nexthop_group_tail(const struct nexthop_group *nhg)
+{
+ struct nexthop *nexthop = nhg->nexthop;
+
+ while (nexthop && nexthop->next)
+ nexthop = nexthop->next;
+
+ return nexthop;
+}
+
uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg)
{
struct nexthop *nhop;
@@ -129,7 +139,20 @@ void _nexthop_add(struct nexthop **target, struct nexthop *nexthop)
void _nexthop_group_add_sorted(struct nexthop_group *nhg,
struct nexthop *nexthop)
{
- struct nexthop *position, *prev;
+ struct nexthop *position, *prev, *tail;
+
+ /* Try to just append to the end first;
+ * this trusts that the group is already sorted.
+ */
+
+ tail = nexthop_group_tail(nhg);
+
+ if (tail && (nexthop_cmp(tail, nexthop) < 0)) {
+ tail->next = nexthop;
+ nexthop->prev = tail;
+
+ return;
+ }
for (position = nhg->nexthop, prev = NULL; position;
prev = position, position = position->next) {
diff --git a/lib/northbound.c b/lib/northbound.c
index 48b450e969..a814f23e14 100644
--- a/lib/northbound.c
+++ b/lib/northbound.c
@@ -26,6 +26,7 @@
#include "command.h"
#include "debug.h"
#include "db.h"
+#include "frr_pthread.h"
#include "northbound.h"
#include "northbound_cli.h"
#include "northbound_db.h"
@@ -723,8 +724,7 @@ int nb_running_lock(enum nb_client client, const void *user)
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (!running_config_mgmt_lock.locked) {
running_config_mgmt_lock.locked = true;
running_config_mgmt_lock.owner_client = client;
@@ -732,7 +732,6 @@ int nb_running_lock(enum nb_client client, const void *user)
ret = 0;
}
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
@@ -741,8 +740,7 @@ int nb_running_unlock(enum nb_client client, const void *user)
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (running_config_mgmt_lock.locked
&& running_config_mgmt_lock.owner_client == client
&& running_config_mgmt_lock.owner_user == user) {
@@ -752,7 +750,6 @@ int nb_running_unlock(enum nb_client client, const void *user)
ret = 0;
}
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
@@ -761,14 +758,12 @@ int nb_running_lock_check(enum nb_client client, const void *user)
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (!running_config_mgmt_lock.locked
|| (running_config_mgmt_lock.owner_client == client
&& running_config_mgmt_lock.owner_user == user))
ret = 0;
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
diff --git a/lib/northbound_sysrepo.c b/lib/northbound_sysrepo.c
index 44a55137f8..77183282ba 100644
--- a/lib/northbound_sysrepo.c
+++ b/lib/northbound_sysrepo.c
@@ -425,7 +425,7 @@ static int frr_sr_state_cb(const char *xpath, sr_val_t **values,
exit:
list_delete(&elements);
*values = NULL;
- values_cnt = 0;
+ *values_cnt = 0;
return SR_ERR_OK;
}
diff --git a/lib/prefix.c b/lib/prefix.c
index ad8dea273e..35b679ab90 100644
--- a/lib/prefix.c
+++ b/lib/prefix.c
@@ -441,7 +441,7 @@ void prefix_hexdump(const struct prefix *p)
zlog_hexdump(p, sizeof(struct prefix));
}
-int is_zero_mac(struct ethaddr *mac)
+int is_zero_mac(const struct ethaddr *mac)
{
int i = 0;
diff --git a/lib/prefix.h b/lib/prefix.h
index e338140f1a..24c146e022 100644
--- a/lib/prefix.h
+++ b/lib/prefix.h
@@ -470,7 +470,7 @@ extern void masklen2ip6(const int, struct in6_addr *);
extern const char *inet6_ntoa(struct in6_addr);
-extern int is_zero_mac(struct ethaddr *mac);
+extern int is_zero_mac(const struct ethaddr *mac);
extern int prefix_str2mac(const char *str, struct ethaddr *mac);
extern char *prefix_mac2str(const struct ethaddr *mac, char *buf, int size);
diff --git a/lib/privs.c b/lib/privs.c
index a3314c6c3c..09efedf684 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -24,6 +24,7 @@
#include "log.h"
#include "privs.h"
#include "memory.h"
+#include "frr_pthread.h"
#include "lib_errors.h"
#include "lib/queue.h"
@@ -760,8 +761,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
* Serialize 'raise' operations; particularly important for
* OSes where privs are process-wide.
*/
- pthread_mutex_lock(&(privs->mutex));
- {
+ frr_with_mutex(&(privs->mutex)) {
/* Locate ref-counting object to use */
refs = get_privs_refs(privs);
@@ -775,7 +775,6 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
refs->raised_in_funcname = funcname;
}
}
- pthread_mutex_unlock(&(privs->mutex));
return privs;
}
@@ -791,8 +790,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
/* Serialize 'lower privs' operation - particularly important
* when OS privs are process-wide.
*/
- pthread_mutex_lock(&(*privs)->mutex);
- {
+ frr_with_mutex(&(*privs)->mutex) {
refs = get_privs_refs(*privs);
if (--(refs->refcount) == 0) {
@@ -806,7 +804,6 @@ void _zprivs_lower(struct zebra_privs_t **privs)
refs->raised_in_funcname = NULL;
}
}
- pthread_mutex_unlock(&(*privs)->mutex);
*privs = NULL;
}
diff --git a/lib/privs.h b/lib/privs.h
index 2b0b44b3f2..db5707d675 100644
--- a/lib/privs.h
+++ b/lib/privs.h
@@ -109,16 +109,16 @@ extern void zprivs_get_ids(struct zprivs_ids_t *);
/*
* Wrapper around zprivs, to be used as:
- * frr_elevate_privs(&privs) {
+ * frr_with_privs(&privs) {
* ... code ...
* if (error)
* break; -- break can be used to get out of the block
* ... code ...
* }
*
- * The argument to frr_elevate_privs() can be NULL to leave privileges as-is
+ * The argument to frr_with_privs() can be NULL to leave privileges as-is
* (mostly useful for conditional privilege-raising, i.e.:)
- * frr_elevate_privs(cond ? &privs : NULL) {}
+ * frr_with_privs(cond ? &privs : NULL) {}
*
* NB: The code block is always executed, regardless of whether privileges
* could be raised or not, or whether NULL was given or not. This is fully
@@ -138,7 +138,7 @@ extern struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
const char *funcname);
extern void _zprivs_lower(struct zebra_privs_t **privs);
-#define frr_elevate_privs(privs) \
+#define frr_with_privs(privs) \
for (struct zebra_privs_t *_once = NULL, \
*_privs __attribute__( \
(unused, cleanup(_zprivs_lower))) = \
diff --git a/lib/pullwr.c b/lib/pullwr.c
new file mode 100644
index 0000000000..0c326f29d4
--- /dev/null
+++ b/lib/pullwr.c
@@ -0,0 +1,275 @@
+/*
+ * Pull-driven write event handler
+ * Copyright (C) 2019 David Lamparter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "zebra.h"
+
+#include "pullwr.h"
+#include "memory.h"
+#include "monotime.h"
+
+/* defaults */
+#define PULLWR_THRESH 16384 /* size at which we start to call write() */
+#define PULLWR_MAXSPIN 2500 /* max µs to spend grabbing more data */
+
+struct pullwr {
+ int fd;
+ struct thread_master *tm;
+ /* writer == NULL <=> we're idle */
+ struct thread *writer;
+
+ void *arg;
+ void (*fill)(void *, struct pullwr *);
+ void (*err)(void *, struct pullwr *, bool);
+
+ /* ring buffer (although it's "un-ringed" on resizing, it WILL wrap
+ * around if data is trickling in while keeping it at a constant size)
+ */
+ size_t bufsz, valid, pos;
+ uint64_t total_written;
+ char *buffer;
+
+ size_t thresh; /* PULLWR_THRESH */
+ int64_t maxspin; /* PULLWR_MAXSPIN */
+};
+
+DEFINE_MTYPE_STATIC(LIB, PULLWR_HEAD, "pull-driven write controller")
+DEFINE_MTYPE_STATIC(LIB, PULLWR_BUF, "pull-driven write buffer")
+
+static int pullwr_run(struct thread *t);
+
+struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
+ void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool))
+{
+ struct pullwr *pullwr;
+
+ pullwr = XCALLOC(MTYPE_PULLWR_HEAD, sizeof(*pullwr));
+ pullwr->fd = fd;
+ pullwr->tm = tm;
+ pullwr->arg = arg;
+ pullwr->fill = fill;
+ pullwr->err = err;
+
+ pullwr->thresh = PULLWR_THRESH;
+ pullwr->maxspin = PULLWR_MAXSPIN;
+
+ return pullwr;
+}
+
+void pullwr_del(struct pullwr *pullwr)
+{
+ THREAD_OFF(pullwr->writer);
+
+ XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
+ XFREE(MTYPE_PULLWR_HEAD, pullwr);
+}
+
+void pullwr_cfg(struct pullwr *pullwr, int64_t max_spin_usec,
+ size_t write_threshold)
+{
+ pullwr->maxspin = max_spin_usec ?: PULLWR_MAXSPIN;
+ pullwr->thresh = write_threshold ?: PULLWR_THRESH;
+}
+
+void pullwr_bump(struct pullwr *pullwr)
+{
+ if (pullwr->writer)
+ return;
+
+ thread_add_timer(pullwr->tm, pullwr_run, pullwr, 0, &pullwr->writer);
+}
+
+static size_t pullwr_iov(struct pullwr *pullwr, struct iovec *iov)
+{
+ size_t len1;
+
+ if (pullwr->valid == 0)
+ return 0;
+
+ if (pullwr->pos + pullwr->valid <= pullwr->bufsz) {
+ iov[0].iov_base = pullwr->buffer + pullwr->pos;
+ iov[0].iov_len = pullwr->valid;
+ return 1;
+ }
+
+ len1 = pullwr->bufsz - pullwr->pos;
+
+ iov[0].iov_base = pullwr->buffer + pullwr->pos;
+ iov[0].iov_len = len1;
+ iov[1].iov_base = pullwr->buffer;
+ iov[1].iov_len = pullwr->valid - len1;
+ return 2;
+}
+
+static void pullwr_resize(struct pullwr *pullwr, size_t need)
+{
+ struct iovec iov[2];
+ size_t niov, newsize;
+ char *newbuf;
+
+ /* the buffer is maintained at pullwr->thresh * 2 since we'll be
+ * trying to fill it as long as it's anywhere below pullwr->thresh.
+ * That means we frequently end up a little short of it and then write
+ * something that goes over the threshold. So, just use double.
+ */
+ if (need) {
+ /* resize up */
+ if (pullwr->bufsz - pullwr->valid >= need)
+ return;
+
+ newsize = MAX((pullwr->valid + need) * 2, pullwr->thresh * 2);
+ newbuf = XMALLOC(MTYPE_PULLWR_BUF, newsize);
+ } else if (!pullwr->valid) {
+ /* resize down, buffer empty */
+ newsize = 0;
+ newbuf = NULL;
+ } else {
+ /* resize down */
+ if (pullwr->bufsz - pullwr->valid < pullwr->thresh)
+ return;
+ newsize = MAX(pullwr->valid, pullwr->thresh * 2);
+ newbuf = XMALLOC(MTYPE_PULLWR_BUF, newsize);
+ }
+
+ niov = pullwr_iov(pullwr, iov);
+ if (niov >= 1) {
+ memcpy(newbuf, iov[0].iov_base, iov[0].iov_len);
+ if (niov >= 2)
+ memcpy(newbuf + iov[0].iov_len,
+ iov[1].iov_base, iov[1].iov_len);
+ }
+
+ XFREE(MTYPE_PULLWR_BUF, pullwr->buffer);
+ pullwr->buffer = newbuf;
+ pullwr->bufsz = newsize;
+ pullwr->pos = 0;
+}
+
+void pullwr_write(struct pullwr *pullwr, const void *data, size_t len)
+{
+ pullwr_resize(pullwr, len);
+
+ if (pullwr->pos + pullwr->valid > pullwr->bufsz) {
+ size_t pos;
+
+ pos = (pullwr->pos + pullwr->valid) % pullwr->bufsz;
+ memcpy(pullwr->buffer + pos, data, len);
+ } else {
+ size_t max1, len1;
+ max1 = pullwr->bufsz - (pullwr->pos + pullwr->valid);
+ max1 = MIN(max1, len);
+
+ memcpy(pullwr->buffer + pullwr->pos + pullwr->valid,
+ data, max1);
+ len1 = len - max1;
+
+ if (len1)
+ memcpy(pullwr->buffer, (char *)data + max1, len1);
+
+ }
+ pullwr->valid += len;
+
+ pullwr_bump(pullwr);
+}
+
+static int pullwr_run(struct thread *t)
+{
+ struct pullwr *pullwr = THREAD_ARG(t);
+ struct iovec iov[2];
+ size_t niov, lastvalid;
+ ssize_t nwr;
+ struct timeval t0;
+ bool maxspun = false;
+
+ monotime(&t0);
+
+ do {
+ lastvalid = pullwr->valid - 1;
+ while (pullwr->valid < pullwr->thresh
+ && pullwr->valid != lastvalid
+ && !maxspun) {
+ lastvalid = pullwr->valid;
+ pullwr->fill(pullwr->arg, pullwr);
+
+ /* check after doing at least one fill() call so we
+ * don't spin without making progress on slow boxes
+ */
+ if (!maxspun && monotime_since(&t0, NULL)
+ >= pullwr->maxspin)
+ maxspun = true;
+ }
+
+ if (pullwr->valid == 0) {
+ /* we made a fill() call above that didn't feed any
+ * data in, and we have nothing more queued, so we go
+ * into idle, i.e. we don't call thread_add_write()
+ */
+ pullwr_resize(pullwr, 0);
+ return 0;
+ }
+
+ niov = pullwr_iov(pullwr, iov);
+ assert(niov);
+
+ nwr = writev(pullwr->fd, iov, niov);
+ if (nwr < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+ pullwr->err(pullwr->arg, pullwr, false);
+ return 0;
+ }
+
+ if (nwr == 0) {
+ pullwr->err(pullwr->arg, pullwr, true);
+ return 0;
+ }
+
+ pullwr->total_written += nwr;
+ pullwr->valid -= nwr;
+ pullwr->pos += nwr;
+ pullwr->pos %= pullwr->bufsz;
+ } while (pullwr->valid == 0 && !maxspun);
+ /* pullwr->valid != 0 implies we did an incomplete write, i.e. the socket
+ * is full and we wait until it's available for writing again.
+ */
+
+ thread_add_write(pullwr->tm, pullwr_run, pullwr, pullwr->fd,
+ &pullwr->writer);
+
+ /* if we hit the time limit, just keep the buffer, we'll probably need
+ * it anyway & another run is already coming up.
+ */
+ if (!maxspun)
+ pullwr_resize(pullwr, 0);
+ return 0;
+}
+
+void pullwr_stats(struct pullwr *pullwr, uint64_t *total_written,
+ size_t *pending, size_t *kernel_pending)
+{
+ int tmp;
+
+ *total_written = pullwr->total_written;
+ *pending = pullwr->valid;
+
+ if (ioctl(pullwr->fd, TIOCOUTQ, &tmp) != 0)
+ tmp = 0;
+ *kernel_pending = tmp;
+}
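A possible consumer of pullwr_stats(), e.g. from a daemon's "show" handler; vty and conn->pw are hypothetical, and PRIu64 needs <inttypes.h>:

uint64_t total;
size_t pending, kernel_pending;

pullwr_stats(conn->pw, &total, &pending, &kernel_pending);
vty_out(vty, "%" PRIu64 " bytes written, %zu buffered, %zu in kernel queue\n",
	total, pending, kernel_pending);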
diff --git a/lib/pullwr.h b/lib/pullwr.h
new file mode 100644
index 0000000000..601eac1b79
--- /dev/null
+++ b/lib/pullwr.h
@@ -0,0 +1,110 @@
+/*
+ * Pull-driven write event handler
+ * Copyright (C) 2019 David Lamparter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _WRITEPOLL_H
+#define _WRITEPOLL_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "thread.h"
+#include "stream.h"
+
+struct pullwr;
+
+/* This is a "pull-driven" write event handler. Instead of having some buffer
+ * or being driven by the availability of data, it triggers on the space being
+ * available on the socket for data to be written on and then calls fill() to
+ * get data to be sent.
+ *
+ * pullwr_* maintains an "idle" vs. "active" state, going into idle when a
+ * fill() call completes without feeding more data into it. The overall
+ * semantics are:
+ * - to put data out, call pullwr_write(). This is possible from both inside
+ * fill() callbacks or anywhere else. Doing so puts the pullwr into
+ * active state.
+ * - in active state, the fill() callback will be called and should feed more
+ * data in. It should NOT loop to push out more than one "unit" of data;
+ * the pullwr code handles this by calling fill() until it has enough data.
+ * - if there's nothing more to be sent, fill() returns without doing anything
+ * and pullwr goes into idle state after flushing all buffered data out.
+ * - when new data becomes available, pullwr_bump() should be called to put
+ * the pullwr back into active mode so it will collect data from fill(),
+ * or you can directly call pullwr_write().
+ * - only calling pullwr_write() from within fill() is the cleanest way of
+ * doing things.
+ *
+ * When the err() callback is called, the pullwr should be considered unusable
+ * and released with pullwr_del(). This can be done from inside the callback,
+ * the pullwr code holds no more references on it when calling err().
+ */
+extern struct pullwr *_pullwr_new(struct thread_master *tm, int fd,
+ void *arg,
+ void (*fill)(void *, struct pullwr *),
+ void (*err)(void *, struct pullwr *, bool eof));
+extern void pullwr_del(struct pullwr *pullwr);
+
+/* type-checking wrapper. makes sure fill() and err() take a first argument
+ * whose type is identical to the type of arg.
+ * => use "void fill(struct mystruct *arg, ...)" - no "void *arg"
+ */
+#define pullwr_new(tm, fd, arg, fill, err) ({ \
+ void (*fill_typechk)(typeof(arg), struct pullwr *) = fill; \
+ void (*err_typechk)(typeof(arg), struct pullwr *, bool) = err; \
+ _pullwr_new(tm, fd, arg, (void *)fill_typechk, (void *)err_typechk); \
+})
+
+/* max_spin_usec is the time after which the pullwr event handler will stop
+ * trying to get more data from fill() and yield control back to the
+ * thread_master. It does reschedule itself to continue later; this is
+ * only to make sure we don't freeze the entire process if we're piping a
+ * lot of data to a local endpoint that reads quickly (i.e. no backpressure)
+ *
+ * default: 2500 (2.5 ms)
+ *
+ * write_threshold is the amount of data buffered from fill() calls at which
+ * the pullwr code starts calling write(). But this is not a "limit".
+ * pullwr will keep poking fill() for more data until
+ * (a) max_spin_usec is reached; fill() will be called again later after
+ * returning to the thread_master to give other events a chance to run
+ * (b) fill() returns without pushing any data onto the pullwr with
+ * pullwr_write(), so fill() will NOT be called again until a call to
+ * pullwr_bump() or pullwr_write() comes in.
+ *
+ * default: 16384 (16 kB)
+ *
+ * passing 0 for either value (or not calling it at all) uses the default.
+ */
+extern void pullwr_cfg(struct pullwr *pullwr, int64_t max_spin_usec,
+ size_t write_threshold);
+
+extern void pullwr_bump(struct pullwr *pullwr);
+extern void pullwr_write(struct pullwr *pullwr,
+ const void *data, size_t len);
+
+static inline void pullwr_write_stream(struct pullwr *pullwr,
+ struct stream *s)
+{
+ pullwr_write(pullwr, s->data, stream_get_endp(s));
+}
+
+extern void pullwr_stats(struct pullwr *pullwr, uint64_t *total_written,
+ size_t *pending, size_t *kernel_pending);
+
+#endif /* _WRITEPOLL_H */
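To make the pull-driven flow concrete, a sketch of a consumer under assumed names (struct conn, conn_dequeue() and the thread_master are hypothetical; only the pullwr_* and stream calls come from the library):

struct conn {
	struct pullwr *pw;
	/* ... queue of outgoing struct stream * ... */
};

static void conn_fill(struct conn *c, struct pullwr *pw)
{
	/* hand over at most one queued message per call; returning
	 * without writing lets the pullwr go idle
	 */
	struct stream *s = conn_dequeue(c);	/* hypothetical */

	if (!s)
		return;
	pullwr_write_stream(pw, s);
	stream_free(s);
}

static void conn_err(struct conn *c, struct pullwr *pw, bool eof)
{
	/* the pullwr is unusable now; release it and clean up */
	pullwr_del(pw);
	c->pw = NULL;
}

void conn_start(struct conn *c, struct thread_master *tm, int fd)
{
	c->pw = pullwr_new(tm, fd, c, conn_fill, conn_err);
	pullwr_bump(c->pw);	/* start pulling if data is already queued */
}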
diff --git a/lib/routemap.c b/lib/routemap.c
index eca02e8366..fc15183bf9 100644
--- a/lib/routemap.c
+++ b/lib/routemap.c
@@ -474,7 +474,7 @@ int generic_match_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg,
route_map_event_t type)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_match(index, command, arg, type);
switch (ret) {
@@ -493,6 +493,11 @@ int generic_match_add(struct vty *vty, struct route_map_index *index,
frr_protonameinst);
return CMD_WARNING_CONFIG_FAILED;
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here move along
+ */
+ break;
}
return CMD_SUCCESS;
@@ -502,7 +507,7 @@ int generic_match_delete(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg,
route_map_event_t type)
{
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
@@ -537,6 +542,11 @@ int generic_match_delete(struct vty *vty, struct route_map_index *index,
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
@@ -548,7 +558,7 @@ int generic_match_delete(struct vty *vty, struct route_map_index *index,
int generic_set_add(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_add_set(index, command, arg);
switch (ret) {
@@ -563,6 +573,7 @@ int generic_set_add(struct vty *vty, struct route_map_index *index,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
@@ -572,7 +583,7 @@ int generic_set_add(struct vty *vty, struct route_map_index *index,
int generic_set_delete(struct vty *vty, struct route_map_index *index,
const char *command, const char *arg)
{
- int ret;
+ enum rmap_compile_rets ret;
ret = route_map_delete_set(index, command, arg);
switch (ret) {
@@ -587,6 +598,7 @@ int generic_set_delete(struct vty *vty, struct route_map_index *index,
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
@@ -1388,8 +1400,10 @@ static route_map_event_t get_route_map_delete_event(route_map_event_t type)
}
/* Add match statement to route map. */
-int route_map_add_match(struct route_map_index *index, const char *match_name,
- const char *match_arg, route_map_event_t type)
+enum rmap_compile_rets route_map_add_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg,
+ route_map_event_t type)
{
struct route_map_rule *rule;
struct route_map_rule *next;
@@ -1464,15 +1478,16 @@ int route_map_add_match(struct route_map_index *index, const char *match_name,
}
/* Delete specified route match rule. */
-int route_map_delete_match(struct route_map_index *index,
- const char *match_name, const char *match_arg)
+enum rmap_compile_rets route_map_delete_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg)
{
struct route_map_rule *rule;
struct route_map_rule_cmd *cmd;
cmd = route_map_lookup_match(match_name);
if (cmd == NULL)
- return 1;
+ return RMAP_RULE_MISSING;
for (rule = index->match_list.head; rule; rule = rule->next)
if (rule->cmd == cmd && (rulecmp(rule->rule_str, match_arg) == 0
@@ -1485,15 +1500,16 @@ int route_map_delete_match(struct route_map_index *index,
index->map->name,
RMAP_EVENT_CALL_ADDED);
}
- return 0;
+ return RMAP_COMPILE_SUCCESS;
}
/* Can't find matched rule. */
- return 1;
+ return RMAP_RULE_MISSING;
}
/* Add route-map set statement to the route map. */
-int route_map_add_set(struct route_map_index *index, const char *set_name,
- const char *set_arg)
+enum rmap_compile_rets route_map_add_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg)
{
struct route_map_rule *rule;
struct route_map_rule *next;
@@ -1543,15 +1559,16 @@ int route_map_add_set(struct route_map_index *index, const char *set_name,
}
/* Delete route map set rule. */
-int route_map_delete_set(struct route_map_index *index, const char *set_name,
- const char *set_arg)
+enum rmap_compile_rets route_map_delete_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg)
{
struct route_map_rule *rule;
struct route_map_rule_cmd *cmd;
cmd = route_map_lookup_set(set_name);
if (cmd == NULL)
- return 1;
+ return RMAP_RULE_MISSING;
for (rule = index->set_list.head; rule; rule = rule->next)
if ((rule->cmd == cmd) && (rulecmp(rule->rule_str, set_arg) == 0
@@ -1564,10 +1581,10 @@ int route_map_delete_set(struct route_map_index *index, const char *set_name,
index->map->name,
RMAP_EVENT_CALL_ADDED);
}
- return 0;
+ return RMAP_COMPILE_SUCCESS;
}
/* Can't find matched rule. */
- return 1;
+ return RMAP_RULE_MISSING;
}
static enum route_map_cmd_result_t
diff --git a/lib/routemap.h b/lib/routemap.h
index f9ad0f64a9..40525987e9 100644
--- a/lib/routemap.h
+++ b/lib/routemap.h
@@ -126,7 +126,7 @@ struct route_map_rule_cmd {
};
/* Route map apply error. */
-enum {
+enum rmap_compile_rets {
RMAP_COMPILE_SUCCESS,
/* Route map rule is missing. */
@@ -220,25 +220,28 @@ extern void route_map_init(void);
extern void route_map_finish(void);
/* Add match statement to route map. */
-extern int route_map_add_match(struct route_map_index *index,
- const char *match_name, const char *match_arg,
- route_map_event_t type);
+extern enum rmap_compile_rets route_map_add_match(struct route_map_index *index,
+ const char *match_name,
+ const char *match_arg,
+ route_map_event_t type);
/* Delete specified route match rule. */
-extern int route_map_delete_match(struct route_map_index *index,
- const char *match_name,
- const char *match_arg);
+extern enum rmap_compile_rets
+route_map_delete_match(struct route_map_index *index,
+ const char *match_name, const char *match_arg);
extern const char *route_map_get_match_arg(struct route_map_index *index,
const char *match_name);
/* Add route-map set statement to the route map. */
-extern int route_map_add_set(struct route_map_index *index,
- const char *set_name, const char *set_arg);
+extern enum rmap_compile_rets route_map_add_set(struct route_map_index *index,
+ const char *set_name,
+ const char *set_arg);
/* Delete route map set rule. */
-extern int route_map_delete_set(struct route_map_index *index,
- const char *set_name, const char *set_arg);
+extern enum rmap_compile_rets
+route_map_delete_set(struct route_map_index *index,
+ const char *set_name, const char *set_arg);
/* Install rule command to the match list. */
extern void route_map_install_match(struct route_map_rule_cmd *cmd);
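With the return type now a named enum, callers are expected to handle every result explicitly; a sketch (the index, match name and argument are placeholders):

enum rmap_compile_rets ret;

ret = route_map_add_match(index, "ip address", "10",
			  RMAP_EVENT_FILTER_ADDED);
switch (ret) {
case RMAP_COMPILE_SUCCESS:
case RMAP_DUPLICATE_RULE:
	break;
case RMAP_RULE_MISSING:
case RMAP_COMPILE_ERROR:
	/* report the failure to the operator */
	break;
}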
diff --git a/lib/stream.c b/lib/stream.c
index dfd13ca186..2e1a0193a2 100644
--- a/lib/stream.c
+++ b/lib/stream.c
@@ -28,6 +28,7 @@
#include "network.h"
#include "prefix.h"
#include "log.h"
+#include "frr_pthread.h"
#include "lib_errors.h"
DEFINE_MTYPE_STATIC(LIB, STREAM, "Stream")
@@ -1136,11 +1137,9 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
{
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
stream_fifo_push(fifo, s);
}
- pthread_mutex_unlock(&fifo->mtx);
}
/* Delete first stream from fifo. */
@@ -1170,11 +1169,9 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
{
struct stream *ret;
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
ret = stream_fifo_pop(fifo);
}
- pthread_mutex_unlock(&fifo->mtx);
return ret;
}
@@ -1188,11 +1185,9 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
{
struct stream *ret;
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
ret = stream_fifo_head(fifo);
}
- pthread_mutex_unlock(&fifo->mtx);
return ret;
}
@@ -1212,11 +1207,9 @@ void stream_fifo_clean(struct stream_fifo *fifo)
void stream_fifo_clean_safe(struct stream_fifo *fifo)
{
- pthread_mutex_lock(&fifo->mtx);
- {
+ frr_with_mutex(&fifo->mtx) {
stream_fifo_clean(fifo);
}
- pthread_mutex_unlock(&fifo->mtx);
}
size_t stream_fifo_count_safe(struct stream_fifo *fifo)
diff --git a/lib/subdir.am b/lib/subdir.am
index 2be7537bcc..e0f1352380 100644
--- a/lib/subdir.am
+++ b/lib/subdir.am
@@ -65,6 +65,7 @@ lib_libfrr_la_SOURCES = \
lib/prefix.c \
lib/privs.c \
lib/ptm_lib.c \
+ lib/pullwr.c \
lib/qobj.c \
lib/ringbuf.c \
lib/routemap.c \
@@ -203,6 +204,7 @@ pkginclude_HEADERS += \
lib/printfrr.h \
lib/privs.h \
lib/ptm_lib.h \
+ lib/pullwr.h \
lib/pw.h \
lib/qobj.h \
lib/queue.h \
diff --git a/lib/thread.c b/lib/thread.c
index 943b849ebf..90c794e88d 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -33,6 +33,7 @@
#include "network.h"
#include "jhash.h"
#include "frratomic.h"
+#include "frr_pthread.h"
#include "lib_errors.h"
DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
@@ -173,8 +174,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
tmp.funcname = "TOTAL";
tmp.types = filter;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
@@ -206,7 +206,6 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out(vty, "\n");
}
}
- pthread_mutex_unlock(&masters_mtx);
vty_out(vty, "\n");
vty_out(vty, "Total thread statistics\n");
@@ -240,11 +239,9 @@ static void cpu_record_clear(uint8_t filter)
struct thread_master *m;
struct listnode *ln;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- pthread_mutex_lock(&m->mtx);
- {
+ frr_with_mutex(&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
hash_iterate(
m->cpu_record,
@@ -252,10 +249,8 @@ static void cpu_record_clear(uint8_t filter)
void *))cpu_record_hash_clear,
args);
}
- pthread_mutex_unlock(&m->mtx);
}
}
- pthread_mutex_unlock(&masters_mtx);
}
static uint8_t parse_filter(const char *filterstr)
@@ -370,13 +365,11 @@ DEFUN (show_thread_poll,
struct listnode *node;
struct thread_master *m;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
show_thread_poll_helper(vty, m);
}
}
- pthread_mutex_unlock(&masters_mtx);
return CMD_SUCCESS;
}
@@ -487,26 +480,22 @@ struct thread_master *thread_master_create(const char *name)
sizeof(struct pollfd) * rv->handler.pfdsize);
/* add to list of threadmasters */
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
if (!masters)
masters = list_new();
listnode_add(masters, rv);
}
- pthread_mutex_unlock(&masters_mtx);
return rv;
}
void thread_master_set_name(struct thread_master *master, const char *name)
{
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
XFREE(MTYPE_THREAD_MASTER, master->name);
master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
}
- pthread_mutex_unlock(&master->mtx);
}
#define THREAD_UNUSED_DEPTH 10
@@ -569,13 +558,11 @@ static void thread_array_free(struct thread_master *m,
*/
void thread_master_free_unused(struct thread_master *m)
{
- pthread_mutex_lock(&m->mtx);
- {
+ frr_with_mutex(&m->mtx) {
struct thread *t;
while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
}
- pthread_mutex_unlock(&m->mtx);
}
/* Stop thread scheduler. */
@@ -583,14 +570,12 @@ void thread_master_free(struct thread_master *m)
{
struct thread *t;
- pthread_mutex_lock(&masters_mtx);
- {
+ frr_with_mutex(&masters_mtx) {
listnode_delete(masters, m);
if (masters->count == 0) {
list_delete(&masters);
}
}
- pthread_mutex_unlock(&masters_mtx);
thread_array_free(m, m->read);
thread_array_free(m, m->write);
@@ -621,11 +606,9 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
{
int64_t remain;
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
}
- pthread_mutex_unlock(&thread->mtx);
return remain < 0 ? 0 : remain;
}
@@ -642,11 +625,9 @@ unsigned long thread_timer_remain_second(struct thread *thread)
struct timeval thread_timer_remain(struct thread *thread)
{
struct timeval remain;
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
- pthread_mutex_unlock(&thread->mtx);
return remain;
}
@@ -770,14 +751,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
struct thread **thread_array;
assert(fd >= 0 && fd < m->fd_limit);
- pthread_mutex_lock(&m->mtx);
- {
- if (t_ptr
- && *t_ptr) // thread is already scheduled; don't reschedule
- {
- pthread_mutex_unlock(&m->mtx);
- return NULL;
- }
+ frr_with_mutex(&m->mtx) {
+ if (t_ptr && *t_ptr)
+ // thread is already scheduled; don't reschedule
+ break;
/* default to a new pollfd */
nfds_t queuepos = m->handler.pfdcount;
@@ -817,12 +794,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
m->handler.pfdcount++;
if (thread) {
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->u.fd = fd;
thread_array[thread->u.fd] = thread;
}
- pthread_mutex_unlock(&thread->mtx);
if (t_ptr) {
*t_ptr = thread;
@@ -832,7 +807,6 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
AWAKEN(m);
}
- pthread_mutex_unlock(&m->mtx);
return thread;
}
@@ -850,19 +824,14 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
assert(type == THREAD_TIMER);
assert(time_relative);
- pthread_mutex_lock(&m->mtx);
- {
- if (t_ptr
- && *t_ptr) // thread is already scheduled; don't reschedule
- {
- pthread_mutex_unlock(&m->mtx);
+ frr_with_mutex(&m->mtx) {
+ if (t_ptr && *t_ptr)
+ // thread is already scheduled; don't reschedule
return NULL;
- }
thread = thread_get(m, type, func, arg, debugargpass);
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
monotime(&thread->u.sands);
timeradd(&thread->u.sands, time_relative,
&thread->u.sands);
@@ -872,11 +841,9 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
thread->ref = t_ptr;
}
}
- pthread_mutex_unlock(&thread->mtx);
AWAKEN(m);
}
- pthread_mutex_unlock(&m->mtx);
return thread;
}
@@ -933,26 +900,20 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
void *arg, int val,
struct thread **t_ptr, debugargdef)
{
- struct thread *thread;
+ struct thread *thread = NULL;
assert(m != NULL);
- pthread_mutex_lock(&m->mtx);
- {
- if (t_ptr
- && *t_ptr) // thread is already scheduled; don't reschedule
- {
- pthread_mutex_unlock(&m->mtx);
- return NULL;
- }
+ frr_with_mutex(&m->mtx) {
+ if (t_ptr && *t_ptr)
+ // thread is already scheduled; don't reschedule
+ break;
thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->u.val = val;
thread_list_add_tail(&m->event, thread);
}
- pthread_mutex_unlock(&thread->mtx);
if (t_ptr) {
*t_ptr = thread;
@@ -961,7 +922,6 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
AWAKEN(m);
}
- pthread_mutex_unlock(&m->mtx);
return thread;
}
@@ -1143,15 +1103,13 @@ void thread_cancel_event(struct thread_master *master, void *arg)
{
assert(master->owner == pthread_self());
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->eventobj = arg;
listnode_add(master->cancel_req, cr);
do_thread_cancel(master);
}
- pthread_mutex_unlock(&master->mtx);
}
/**
@@ -1167,15 +1125,13 @@ void thread_cancel(struct thread *thread)
assert(master->owner == pthread_self());
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = thread;
listnode_add(master->cancel_req, cr);
do_thread_cancel(master);
}
- pthread_mutex_unlock(&master->mtx);
}
/**
@@ -1208,8 +1164,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
assert(!(thread && eventobj) && (thread || eventobj));
assert(master->owner != pthread_self());
- pthread_mutex_lock(&master->mtx);
- {
+ frr_with_mutex(&master->mtx) {
master->canceled = false;
if (thread) {
@@ -1228,7 +1183,6 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
while (!master->canceled)
pthread_cond_wait(&master->cancel_cond, &master->mtx);
}
- pthread_mutex_unlock(&master->mtx);
}
/* ------------------------------------------------------------------------- */
@@ -1527,22 +1481,18 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
int thread_should_yield(struct thread *thread)
{
int result;
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
}
- pthread_mutex_unlock(&thread->mtx);
return result;
}
void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->yield = yield_time;
}
- pthread_mutex_unlock(&thread->mtx);
}
void thread_getrusage(RUSAGE_T *r)
@@ -1637,20 +1587,16 @@ void funcname_thread_execute(struct thread_master *m,
struct thread *thread;
/* Get or allocate new thread to execute. */
- pthread_mutex_lock(&m->mtx);
- {
+ frr_with_mutex(&m->mtx) {
thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
/* Set its event value. */
- pthread_mutex_lock(&thread->mtx);
- {
+ frr_with_mutex(&thread->mtx) {
thread->add_type = THREAD_EXECUTE;
thread->u.val = val;
thread->ref = &thread;
}
- pthread_mutex_unlock(&thread->mtx);
}
- pthread_mutex_unlock(&m->mtx);
/* Execute thread doing all accounting. */
thread_call(thread);
diff --git a/lib/vrf.c b/lib/vrf.c
index 7d8e227c6f..4c6df797ad 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -760,7 +760,7 @@ DEFUN_NOSH (vrf_netns,
if (!pathname)
return CMD_WARNING_CONFIG_FAILED;
- frr_elevate_privs(vrf_daemon_privs) {
+ frr_with_privs(vrf_daemon_privs) {
ret = vrf_netns_handler_create(vty, vrf, pathname,
NS_UNKNOWN, NS_UNKNOWN);
}
diff --git a/lib/vty.c b/lib/vty.c
index c1535802cf..deb9391bd5 100644
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -348,6 +348,15 @@ void vty_hello(struct vty *vty)
vty_out(vty, "MOTD file not found\n");
} else if (host.motd)
vty_out(vty, "%s", host.motd);
+
+#if CONFDATE > 20200901
+ CPP_NOTICE("Please remove solaris code from system as it is deprecated");
+#endif
+#ifdef SUNOS_5
+ zlog_warn("If you are using FRR on Solaris, the FRR developers would love to hear from you\n");
+ zlog_warn("Please send email to dev@lists.frrouting.org about this message\n");
+ zlog_warn("We are considering deprecating Solaris and want to find users of Solaris systems\n");
+#endif
}
/* Put out prompt and wait input from user. */
diff --git a/lib/yang_wrappers.c b/lib/yang_wrappers.c
index 0558383823..cd776f333d 100644
--- a/lib/yang_wrappers.c
+++ b/lib/yang_wrappers.c
@@ -1000,3 +1000,56 @@ void yang_get_default_ipv6p(union prefixptr var, const char *xpath_fmt, ...)
value = yang_get_default_value(xpath);
yang_str2ipv6p(value, var);
}
+
+/*
+ * Derived type: ip.
+ */
+void yang_str2ip(const char *value, struct ipaddr *ip)
+{
+ (void)str2ipaddr(value, ip);
+}
+
+struct yang_data *yang_data_new_ip(const char *xpath, const struct ipaddr *addr)
+{
+ size_t sz = IS_IPADDR_V4(addr) ? INET_ADDRSTRLEN : INET6_ADDRSTRLEN;
+ char value_str[sz];
+
+ ipaddr2str(addr, value_str, sizeof(value_str));
+ return yang_data_new(xpath, value_str);
+}
+
+void yang_dnode_get_ip(struct ipaddr *addr, const struct lyd_node *dnode,
+ const char *xpath_fmt, ...)
+{
+ const struct lyd_node_leaf_list *dleaf;
+
+ assert(dnode);
+ if (xpath_fmt) {
+ va_list ap;
+ char xpath[XPATH_MAXLEN];
+
+ va_start(ap, xpath_fmt);
+ vsnprintf(xpath, sizeof(xpath), xpath_fmt, ap);
+ va_end(ap);
+ dnode = yang_dnode_get(dnode, xpath);
+ YANG_DNODE_GET_ASSERT(dnode, xpath);
+ }
+
+ dleaf = (const struct lyd_node_leaf_list *)dnode;
+ assert(dleaf->value_type == LY_TYPE_STRING);
+ (void)str2ipaddr(dleaf->value_str, addr);
+}
+
+void yang_get_default_ip(struct ipaddr *var, const char *xpath_fmt, ...)
+{
+ char xpath[XPATH_MAXLEN];
+ const char *value;
+ va_list ap;
+
+ va_start(ap, xpath_fmt);
+ vsnprintf(xpath, sizeof(xpath), xpath_fmt, ap);
+ va_end(ap);
+
+ value = yang_get_default_value(xpath);
+ yang_str2ip(value, var);
+}
diff --git a/lib/yang_wrappers.h b/lib/yang_wrappers.h
index 5203a033ad..ab7abe173c 100644
--- a/lib/yang_wrappers.h
+++ b/lib/yang_wrappers.h
@@ -154,4 +154,12 @@ extern void yang_dnode_get_ipv6p(union prefixptr prefix,
extern void yang_get_default_ipv6p(union prefixptr var, const char *xpath_fmt,
...);
+/* ip */
+extern void yang_str2ip(const char *value, struct ipaddr *addr);
+extern struct yang_data *yang_data_new_ip(const char *xpath,
+ const struct ipaddr *addr);
+extern void yang_dnode_get_ip(struct ipaddr *addr, const struct lyd_node *dnode,
+ const char *xpath_fmt, ...);
+extern void yang_get_default_ip(struct ipaddr *var, const char *xpath_fmt, ...);
+
#endif /* _FRR_NORTHBOUND_WRAPPERS_H_ */
diff --git a/lib/zclient.c b/lib/zclient.c
index 2d79d9b3c5..92a495ac61 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -766,6 +766,92 @@ int zclient_route_send(uint8_t cmd, struct zclient *zclient,
return zclient_send_message(zclient);
}
+static int zapi_nexthop_labels_cmp(const struct zapi_nexthop *next1,
+ const struct zapi_nexthop *next2)
+{
+ if (next1->label_num > next2->label_num)
+ return 1;
+
+ if (next1->label_num < next2->label_num)
+ return -1;
+
+ return memcmp(next1->labels, next2->labels, next1->label_num);
+}
+
+static int zapi_nexthop_cmp_no_labels(const struct zapi_nexthop *next1,
+ const struct zapi_nexthop *next2)
+{
+ int ret = 0;
+
+ if (next1->vrf_id < next2->vrf_id)
+ return -1;
+
+ if (next1->vrf_id > next2->vrf_id)
+ return 1;
+
+ if (next1->type < next2->type)
+ return -1;
+
+ if (next1->type > next2->type)
+ return 1;
+
+ switch (next1->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV6:
+ ret = nexthop_g_addr_cmp(next1->type, &next1->gate,
+ &next2->gate);
+ if (ret != 0)
+ return ret;
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ ret = nexthop_g_addr_cmp(next1->type, &next1->gate,
+ &next2->gate);
+ if (ret != 0)
+ return ret;
+ /* Intentional Fall-Through */
+ case NEXTHOP_TYPE_IFINDEX:
+ if (next1->ifindex < next2->ifindex)
+ return -1;
+
+ if (next1->ifindex > next2->ifindex)
+ return 1;
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ if (next1->bh_type < next2->bh_type)
+ return -1;
+
+ if (next1->bh_type > next2->bh_type)
+ return 1;
+ break;
+ }
+
+ return 0;
+}
+
+static int zapi_nexthop_cmp(const void *item1, const void *item2)
+{
+ int ret = 0;
+
+ const struct zapi_nexthop *next1 = item1;
+ const struct zapi_nexthop *next2 = item2;
+
+ ret = zapi_nexthop_cmp_no_labels(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = zapi_nexthop_labels_cmp(next1, next2);
+
+ return ret;
+}
+
+static void zapi_nexthop_group_sort(struct zapi_nexthop *nh_grp,
+ uint16_t nexthop_num)
+{
+ qsort(nh_grp, nexthop_num, sizeof(struct zapi_nexthop),
+ &zapi_nexthop_cmp);
+}
+
int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
{
struct zapi_nexthop *api_nh;
@@ -821,6 +907,8 @@ int zapi_route_encode(uint8_t cmd, struct stream *s, struct zapi_route *api)
return -1;
}
+ zapi_nexthop_group_sort(api->nexthops, api->nexthop_num);
+
stream_putw(s, api->nexthop_num);
for (i = 0; i < api->nexthop_num; i++) {
@@ -2363,6 +2451,143 @@ int tm_release_table_chunk(struct zclient *zclient, uint32_t start,
return zclient_send_message(zclient);
}
+int zebra_send_mpls_labels(struct zclient *zclient, int cmd,
+ struct zapi_labels *zl)
+{
+ if (zapi_labels_encode(zclient->obuf, cmd, zl) < 0)
+ return -1;
+ return zclient_send_message(zclient);
+}
+
+int zapi_labels_encode(struct stream *s, int cmd, struct zapi_labels *zl)
+{
+ struct zapi_nexthop_label *znh;
+
+ stream_reset(s);
+
+ zclient_create_header(s, cmd, VRF_DEFAULT);
+ stream_putc(s, zl->message);
+ stream_putc(s, zl->type);
+ stream_putl(s, zl->local_label);
+
+ if (CHECK_FLAG(zl->message, ZAPI_LABELS_FTN)) {
+ stream_putw(s, zl->route.prefix.family);
+ stream_put_prefix(s, &zl->route.prefix);
+ stream_putc(s, zl->route.type);
+ stream_putw(s, zl->route.instance);
+ }
+
+ if (zl->nexthop_num > MULTIPATH_NUM) {
+ flog_err(
+ EC_LIB_ZAPI_ENCODE,
+ "%s: label %u: can't encode %u nexthops (maximum is %u)",
+ __func__, zl->local_label, zl->nexthop_num,
+ MULTIPATH_NUM);
+ return -1;
+ }
+ stream_putw(s, zl->nexthop_num);
+
+ for (int i = 0; i < zl->nexthop_num; i++) {
+ znh = &zl->nexthops[i];
+
+ stream_putc(s, znh->type);
+ stream_putw(s, znh->family);
+ switch (znh->family) {
+ case AF_INET:
+ stream_put_in_addr(s, &znh->address.ipv4);
+ break;
+ case AF_INET6:
+ stream_write(s, (uint8_t *)&znh->address.ipv6, 16);
+ break;
+ default:
+ break;
+ }
+ stream_putl(s, znh->ifindex);
+ stream_putl(s, znh->label);
+ }
+
+ /* Put length at the first point of the stream. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ return 0;
+}
+
+int zapi_labels_decode(struct stream *s, struct zapi_labels *zl)
+{
+ struct zapi_nexthop_label *znh;
+
+ memset(zl, 0, sizeof(*zl));
+
+ /* Get data. */
+ STREAM_GETC(s, zl->message);
+ STREAM_GETC(s, zl->type);
+ STREAM_GETL(s, zl->local_label);
+
+ if (CHECK_FLAG(zl->message, ZAPI_LABELS_FTN)) {
+ size_t psize;
+
+ STREAM_GETW(s, zl->route.prefix.family);
+ STREAM_GETC(s, zl->route.prefix.prefixlen);
+
+ psize = PSIZE(zl->route.prefix.prefixlen);
+ switch (zl->route.prefix.family) {
+ case AF_INET:
+ if (zl->route.prefix.prefixlen > IPV4_MAX_BITLEN) {
+ zlog_debug(
+ "%s: Specified prefix length %d is greater than a v4 address can support",
+ __PRETTY_FUNCTION__,
+ zl->route.prefix.prefixlen);
+ return -1;
+ }
+ STREAM_GET(&zl->route.prefix.u.prefix4.s_addr, s,
+ psize);
+ break;
+ case AF_INET6:
+ if (zl->route.prefix.prefixlen > IPV6_MAX_BITLEN) {
+ zlog_debug(
+ "%s: Specified prefix length %d is greater than a v6 address can support",
+ __PRETTY_FUNCTION__,
+ zl->route.prefix.prefixlen);
+ return -1;
+ }
+ STREAM_GET(&zl->route.prefix.u.prefix6, s, psize);
+ break;
+ default:
+ flog_err(EC_LIB_ZAPI_ENCODE,
+ "%s: Specified family %u is not v4 or v6",
+ __PRETTY_FUNCTION__, zl->route.prefix.family);
+ return -1;
+ }
+
+ STREAM_GETC(s, zl->route.type);
+ STREAM_GETW(s, zl->route.instance);
+ }
+
+ STREAM_GETW(s, zl->nexthop_num);
+ for (int i = 0; i < zl->nexthop_num; i++) {
+ znh = &zl->nexthops[i];
+
+ STREAM_GETC(s, znh->type);
+ STREAM_GETW(s, znh->family);
+ switch (znh->family) {
+ case AF_INET:
+ STREAM_GET(&znh->address.ipv4.s_addr, s,
+ IPV4_MAX_BYTELEN);
+ break;
+ case AF_INET6:
+ STREAM_GET(&znh->address.ipv6, s, 16);
+ break;
+ default:
+ break;
+ }
+ STREAM_GETL(s, znh->ifindex);
+ STREAM_GETL(s, znh->label);
+ }
+
+ return 0;
+stream_failure:
+ return -1;
+}
int zebra_send_pw(struct zclient *zclient, int command, struct zapi_pw *pw)
{
diff --git a/lib/zclient.h b/lib/zclient.h
index 81e454d192..eb3c97b111 100644
--- a/lib/zclient.h
+++ b/lib/zclient.h
@@ -126,6 +126,7 @@ typedef enum {
ZEBRA_INTERFACE_LINK_PARAMS,
ZEBRA_MPLS_LABELS_ADD,
ZEBRA_MPLS_LABELS_DELETE,
+ ZEBRA_MPLS_LABELS_REPLACE,
ZEBRA_IPMR_ROUTE_STATS,
ZEBRA_LABEL_MANAGER_CONNECT,
ZEBRA_LABEL_MANAGER_CONNECT_ASYNC,
@@ -395,6 +396,28 @@ struct zapi_route {
uint32_t tableid;
};
+struct zapi_nexthop_label {
+ enum nexthop_types_t type;
+ int family;
+ union g_addr address;
+ ifindex_t ifindex;
+ mpls_label_t label;
+};
+
+struct zapi_labels {
+ uint8_t message;
+#define ZAPI_LABELS_FTN 0x01
+ enum lsp_types_t type;
+ mpls_label_t local_label;
+ struct {
+ struct prefix prefix;
+ uint8_t type;
+ unsigned short instance;
+ } route;
+ uint16_t nexthop_num;
+ struct zapi_nexthop_label nexthops[MULTIPATH_NUM];
+};
+
struct zapi_pw {
char ifname[IF_NAMESIZE];
ifindex_t ifindex;
@@ -625,6 +648,12 @@ extern int tm_get_table_chunk(struct zclient *zclient, uint32_t chunk_size,
extern int tm_release_table_chunk(struct zclient *zclient, uint32_t start,
uint32_t end);
+extern int zebra_send_mpls_labels(struct zclient *zclient, int cmd,
+ struct zapi_labels *zl);
+extern int zapi_labels_encode(struct stream *s, int cmd,
+ struct zapi_labels *zl);
+extern int zapi_labels_decode(struct stream *s, struct zapi_labels *zl);
+
extern int zebra_send_pw(struct zclient *zclient, int command,
struct zapi_pw *pw);
extern void zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS, struct zapi_pw_status *pw);
diff --git a/lib/zebra.h b/lib/zebra.h
index 789a93a3c4..b17ef700b4 100644
--- a/lib/zebra.h
+++ b/lib/zebra.h
@@ -136,6 +136,7 @@ typedef unsigned char uint8_t;
#ifdef CRYPTO_OPENSSL
#include <openssl/evp.h>
+#include <openssl/hmac.h>
#endif
#include "openbsd-tree.h"
diff --git a/ospf6d/ospf6_asbr.c b/ospf6d/ospf6_asbr.c
index 33b9f71b5f..4d1c085081 100644
--- a/ospf6d/ospf6_asbr.c
+++ b/ospf6d/ospf6_asbr.c
@@ -1581,7 +1581,7 @@ static struct route_map_rule_cmd ospf6_routemap_rule_set_tag_cmd = {
route_map_rule_tag_free,
};
-static int route_map_command_status(struct vty *vty, int ret)
+static int route_map_command_status(struct vty *vty, enum rmap_compile_rets ret)
{
switch (ret) {
case RMAP_RULE_MISSING:
@@ -1593,6 +1593,7 @@ static int route_map_command_status(struct vty *vty, int ret)
return CMD_WARNING_CONFIG_FAILED;
break;
case RMAP_COMPILE_SUCCESS:
+ case RMAP_DUPLICATE_RULE:
break;
}
@@ -1610,8 +1611,10 @@ DEFUN (ospf6_routemap_set_metric_type,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_external = 2;
- int ret = route_map_add_set(route_map_index, "metric-type",
- argv[idx_external]->arg);
+ enum rmap_compile_rets ret = route_map_add_set(route_map_index,
+ "metric-type",
+ argv[idx_external]->arg);
+
return route_map_command_status(vty, ret);
}
@@ -1627,7 +1630,9 @@ DEFUN (ospf6_routemap_no_set_metric_type,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
char *ext = (argc == 4) ? argv[3]->text : NULL;
- int ret = route_map_delete_set(route_map_index, "metric-type", ext);
+ enum rmap_compile_rets ret = route_map_delete_set(route_map_index,
+ "metric-type", ext);
+
return route_map_command_status(vty, ret);
}
@@ -1641,8 +1646,10 @@ DEFUN (ospf6_routemap_set_forwarding,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_ipv6 = 2;
- int ret = route_map_add_set(route_map_index, "forwarding-address",
- argv[idx_ipv6]->arg);
+ enum rmap_compile_rets ret = route_map_add_set(route_map_index,
+ "forwarding-address",
+ argv[idx_ipv6]->arg);
+
return route_map_command_status(vty, ret);
}
@@ -1657,8 +1664,10 @@ DEFUN (ospf6_routemap_no_set_forwarding,
{
VTY_DECLVAR_CONTEXT(route_map_index, route_map_index);
int idx_ipv6 = 3;
- int ret = route_map_delete_set(route_map_index, "forwarding-address",
- argv[idx_ipv6]->arg);
+ enum rmap_compile_rets ret = route_map_delete_set(route_map_index,
+ "forwarding-address",
+ argv[idx_ipv6]->arg);
+
return route_map_command_status(vty, ret);
}
diff --git a/ospf6d/ospf6_network.c b/ospf6d/ospf6_network.c
index 625ad884f2..9a18680b8b 100644
--- a/ospf6d/ospf6_network.c
+++ b/ospf6d/ospf6_network.c
@@ -85,7 +85,7 @@ void ospf6_serv_close(void)
/* Make ospf6d's server socket. */
int ospf6_serv_sock(void)
{
- frr_elevate_privs(&ospf6d_privs) {
+ frr_with_privs(&ospf6d_privs) {
ospf6_sock = socket(AF_INET6, SOCK_RAW, IPPROTO_OSPFIGP);
if (ospf6_sock < 0) {
diff --git a/ospfd/ospf_interface.c b/ospfd/ospf_interface.c
index ce1604a5b1..3877708708 100644
--- a/ospfd/ospf_interface.c
+++ b/ospfd/ospf_interface.c
@@ -225,12 +225,14 @@ struct ospf_interface *ospf_if_new(struct ospf *ospf, struct interface *ifp,
{
struct ospf_interface *oi;
- if ((oi = ospf_if_table_lookup(ifp, p)) == NULL) {
- oi = XCALLOC(MTYPE_OSPF_IF, sizeof(struct ospf_interface));
- memset(oi, 0, sizeof(struct ospf_interface));
- } else
+ oi = ospf_if_table_lookup(ifp, p);
+ if (oi)
return oi;
+ oi = XCALLOC(MTYPE_OSPF_IF, sizeof(struct ospf_interface));
+
+ oi->obuf = ospf_fifo_new();
+
/* Set zebra interface pointer. */
oi->ifp = ifp;
oi->address = p;
@@ -264,8 +266,6 @@ struct ospf_interface *ospf_if_new(struct ospf *ospf, struct interface *ifp,
oi->ospf = ospf;
- ospf_if_stream_set(oi);
-
QOBJ_REG(oi, ospf_interface);
if (IS_DEBUG_OSPF_EVENT)
@@ -325,8 +325,7 @@ void ospf_if_free(struct ospf_interface *oi)
{
ospf_if_down(oi);
- if (oi->obuf)
- ospf_fifo_free(oi->obuf);
+ ospf_fifo_free(oi->obuf);
assert(oi->state == ISM_Down);
@@ -490,29 +489,20 @@ static void ospf_if_reset_stats(struct ospf_interface *oi)
oi->ls_ack_in = oi->ls_ack_out = 0;
}
-void ospf_if_stream_set(struct ospf_interface *oi)
-{
- /* set output fifo queue. */
- if (oi->obuf == NULL)
- oi->obuf = ospf_fifo_new();
-}
-
void ospf_if_stream_unset(struct ospf_interface *oi)
{
struct ospf *ospf = oi->ospf;
- if (oi->obuf) {
- /* flush the interface packet queue */
- ospf_fifo_flush(oi->obuf);
- /*reset protocol stats */
- ospf_if_reset_stats(oi);
-
- if (oi->on_write_q) {
- listnode_delete(ospf->oi_write_q, oi);
- if (list_isempty(ospf->oi_write_q))
- OSPF_TIMER_OFF(ospf->t_write);
- oi->on_write_q = 0;
- }
+ /* flush the interface packet queue */
+ ospf_fifo_flush(oi->obuf);
+ /*reset protocol stats */
+ ospf_if_reset_stats(oi);
+
+ if (oi->on_write_q) {
+ listnode_delete(ospf->oi_write_q, oi);
+ if (list_isempty(ospf->oi_write_q))
+ OSPF_TIMER_OFF(ospf->t_write);
+ oi->on_write_q = 0;
}
}
@@ -903,8 +893,6 @@ struct ospf_interface *ospf_vl_new(struct ospf *ospf,
ospf_area_add_if(voi->area, voi);
- ospf_if_stream_set(voi);
-
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("ospf_vl_new(): Stop");
return voi;
diff --git a/ospfd/ospf_interface.h b/ospfd/ospf_interface.h
index b88d405875..0c903954d3 100644
--- a/ospfd/ospf_interface.h
+++ b/ospfd/ospf_interface.h
@@ -285,7 +285,6 @@ extern void ospf_if_update_params(struct interface *, struct in_addr);
extern int ospf_if_new_hook(struct interface *);
extern void ospf_if_init(void);
-extern void ospf_if_stream_set(struct ospf_interface *);
extern void ospf_if_stream_unset(struct ospf_interface *);
extern void ospf_if_reset_variables(struct ospf_interface *);
extern int ospf_if_is_enable(struct ospf_interface *);
diff --git a/ospfd/ospf_ism.h b/ospfd/ospf_ism.h
index 5ae99ab320..8d21403695 100644
--- a/ospfd/ospf_ism.h
+++ b/ospfd/ospf_ism.h
@@ -53,8 +53,9 @@
listnode_add((O)->oi_write_q, oi); \
oi->on_write_q = 1; \
} \
- thread_add_write(master, ospf_write, (O), (O)->fd, \
- &(O)->t_write); \
+ if (!list_isempty((O)->oi_write_q)) \
+ thread_add_write(master, ospf_write, (O), (O)->fd, \
+ &(O)->t_write); \
} while (0)
/* Macro for OSPF ISM timer turn on. */
diff --git a/ospfd/ospf_network.c b/ospfd/ospf_network.c
index 1415a6e8b7..b8e2dac70e 100644
--- a/ospfd/ospf_network.c
+++ b/ospfd/ospf_network.c
@@ -190,7 +190,7 @@ int ospf_sock_init(struct ospf *ospf)
/* silently return since VRF is not ready */
return -1;
}
- frr_elevate_privs(&ospfd_privs) {
+ frr_with_privs(&ospfd_privs) {
ospf_sock = vrf_socket(AF_INET, SOCK_RAW, IPPROTO_OSPFIGP,
ospf->vrf_id, ospf->name);
if (ospf_sock < 0) {
diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c
index 62b0444796..a508ae657f 100644
--- a/ospfd/ospf_packet.c
+++ b/ospfd/ospf_packet.c
@@ -132,7 +132,7 @@ static int ospf_auth_type(struct ospf_interface *oi)
return auth_type;
}
-struct ospf_packet *ospf_packet_new(size_t size)
+static struct ospf_packet *ospf_packet_new(size_t size)
{
struct ospf_packet *new;
@@ -231,22 +231,8 @@ void ospf_fifo_free(struct ospf_fifo *fifo)
XFREE(MTYPE_OSPF_FIFO, fifo);
}
-void ospf_packet_add(struct ospf_interface *oi, struct ospf_packet *op)
+static void ospf_packet_add(struct ospf_interface *oi, struct ospf_packet *op)
{
- if (!oi->obuf) {
- flog_err(
- EC_OSPF_PKT_PROCESS,
- "ospf_packet_add(interface %s in state %d [%s], packet type %s, "
- "destination %s) called with NULL obuf, ignoring "
- "(please report this bug)!\n",
- IF_NAME(oi), oi->state,
- lookup_msg(ospf_ism_state_msg, oi->state, NULL),
- lookup_msg(ospf_packet_type_str,
- stream_getc_from(op->s, 1), NULL),
- inet_ntoa(op->dst));
- return;
- }
-
/* Add packet to end of queue. */
ospf_fifo_push(oi->obuf, op);
@@ -257,20 +243,6 @@ void ospf_packet_add(struct ospf_interface *oi, struct ospf_packet *op)
static void ospf_packet_add_top(struct ospf_interface *oi,
struct ospf_packet *op)
{
- if (!oi->obuf) {
- flog_err(
- EC_OSPF_PKT_PROCESS,
- "ospf_packet_add(interface %s in state %d [%s], packet type %s, "
- "destination %s) called with NULL obuf, ignoring "
- "(please report this bug)!\n",
- IF_NAME(oi), oi->state,
- lookup_msg(ospf_ism_state_msg, oi->state, NULL),
- lookup_msg(ospf_packet_type_str,
- stream_getc_from(op->s, 1), NULL),
- inet_ntoa(op->dst));
- return;
- }
-
/* Add packet to head of queue. */
ospf_fifo_push_head(oi->obuf, op);
@@ -278,7 +250,7 @@ static void ospf_packet_add_top(struct ospf_interface *oi,
/* ospf_fifo_debug (oi->obuf); */
}
-void ospf_packet_delete(struct ospf_interface *oi)
+static void ospf_packet_delete(struct ospf_interface *oi)
{
struct ospf_packet *op;
@@ -288,7 +260,7 @@ void ospf_packet_delete(struct ospf_interface *oi)
ospf_packet_free(op);
}
-struct ospf_packet *ospf_packet_dup(struct ospf_packet *op)
+static struct ospf_packet *ospf_packet_dup(struct ospf_packet *op)
{
struct ospf_packet *new;
@@ -698,12 +670,9 @@ static int ospf_write(struct thread *thread)
return -1;
}
- ospf->t_write = NULL;
-
node = listhead(ospf->oi_write_q);
assert(node);
oi = listgetdata(node);
- assert(oi);
#ifdef WANT_OSPF_WRITE_FRAGMENT
/* seed ipid static with low order bits of time */
@@ -906,9 +875,7 @@ static int ospf_write(struct thread *thread)
/* Setup to service from the head of the queue again */
if (!list_isempty(ospf->oi_write_q)) {
node = listhead(ospf->oi_write_q);
- assert(node);
oi = listgetdata(node);
- assert(oi);
}
}
@@ -1395,11 +1362,12 @@ static void ospf_db_desc(struct ip *iph, struct ospf_header *ospfh,
case NSM_Down:
case NSM_Attempt:
case NSM_TwoWay:
- flog_warn(
- EC_OSPF_PACKET,
- "Packet[DD]: Neighbor %s state is %s, packet discarded.",
- inet_ntoa(nbr->router_id),
- lookup_msg(ospf_nsm_state_msg, nbr->state, NULL));
+ if (CHECK_FLAG(oi->ospf->config, OSPF_LOG_ADJACENCY_DETAIL))
+ zlog_info(
+ "Packet[DD]: Neighbor %s state is %s, packet discarded.",
+ inet_ntoa(nbr->router_id),
+ lookup_msg(ospf_nsm_state_msg, nbr->state,
+ NULL));
break;
case NSM_Init:
OSPF_NSM_EVENT_EXECUTE(nbr, NSM_TwoWayReceived);
@@ -4062,6 +4030,23 @@ static void ospf_ls_upd_queue_send(struct ospf_interface *oi,
oi->on_write_q = 1;
}
ospf_write(&os_packet_thd);
+ /*
+ * We call ospf_write() here directly with a dummy
+ * thread. Imagine that oi_a is already enqueued and
+ * the write thread (t_write) has been turned on.
+ * This function is then called for oi_b, so the
+ * oi_write_q holds both oi_a and oi_b; ospf_write
+ * drains the packets for both and removes them
+ * from the queue. When the real t_write thread
+ * later runs, it would find an empty oi_write_q
+ * and hit an assert. So make sure t_write is
+ * actually turned off here.
+ */
+ if (list_isempty(oi->ospf->oi_write_q))
+ OSPF_TIMER_OFF(oi->ospf->t_write);
} else {
/* Hook thread to write packet. */
OSPF_ISM_WRITE_ON(oi->ospf);
diff --git a/ospfd/ospf_packet.h b/ospfd/ospf_packet.h
index a508727968..5a3e029f2e 100644
--- a/ospfd/ospf_packet.h
+++ b/ospfd/ospf_packet.h
@@ -131,8 +131,6 @@ struct ospf_ls_update {
#define IS_SET_DD_ALL(X) ((X) & OSPF_DD_FLAG_ALL)
/* Prototypes. */
-extern void ospf_output_forward(struct stream *, int);
-extern struct ospf_packet *ospf_packet_new(size_t);
extern void ospf_packet_free(struct ospf_packet *);
extern struct ospf_fifo *ospf_fifo_new(void);
extern void ospf_fifo_push(struct ospf_fifo *, struct ospf_packet *);
@@ -140,10 +138,6 @@ extern struct ospf_packet *ospf_fifo_pop(struct ospf_fifo *);
extern struct ospf_packet *ospf_fifo_head(struct ospf_fifo *);
extern void ospf_fifo_flush(struct ospf_fifo *);
extern void ospf_fifo_free(struct ospf_fifo *);
-extern void ospf_packet_add(struct ospf_interface *, struct ospf_packet *);
-extern void ospf_packet_delete(struct ospf_interface *);
-extern struct stream *ospf_stream_dup(struct stream *);
-extern struct ospf_packet *ospf_packet_dup(struct ospf_packet *);
extern int ospf_read(struct thread *);
extern void ospf_hello_send(struct ospf_interface *);
diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c
index 6947393a60..ff2039bec8 100644
--- a/ospfd/ospf_sr.c
+++ b/ospfd/ospf_sr.c
@@ -608,26 +608,8 @@ static int compute_prefix_nhlfe(struct sr_prefix *srp)
/* Send MPLS Label entry to Zebra for installation or deletion */
static int ospf_zebra_send_mpls_labels(int cmd, struct sr_nhlfe nhlfe)
{
- struct stream *s;
-
- /* Reset stream. */
- s = zclient->obuf;
- stream_reset(s);
-
- zclient_create_header(s, cmd, VRF_DEFAULT);
- stream_putc(s, ZEBRA_LSP_SR);
- /* OSPF Segment Routing currently support only IPv4 */
- stream_putl(s, nhlfe.prefv4.family);
- stream_put_in_addr(s, &nhlfe.prefv4.prefix);
- stream_putc(s, nhlfe.prefv4.prefixlen);
- stream_put_in_addr(s, &nhlfe.nexthop);
- stream_putl(s, nhlfe.ifindex);
- stream_putc(s, OSPF_SR_PRIORITY_DEFAULT);
- stream_putl(s, nhlfe.label_in);
- stream_putl(s, nhlfe.label_out);
-
- /* Put length at the first point of the stream. */
- stream_putw_at(s, 0, stream_get_endp(s));
+ struct zapi_labels zl = {};
+ struct zapi_nexthop_label *znh;
if (IS_DEBUG_OSPF_SR)
zlog_debug(" |- %s LSP %u/%u for %s/%u via %u",
@@ -636,70 +618,39 @@ static int ospf_zebra_send_mpls_labels(int cmd, struct sr_nhlfe nhlfe)
inet_ntoa(nhlfe.prefv4.prefix),
nhlfe.prefv4.prefixlen, nhlfe.ifindex);
- return zclient_send_message(zclient);
-}
-
-/* Request zebra to install/remove FEC in FIB */
-static int ospf_zebra_send_mpls_ftn(int cmd, struct sr_nhlfe nhlfe)
-{
- struct zapi_route api;
- struct zapi_nexthop *api_nh;
-
- /* Support only IPv4 */
- if (nhlfe.prefv4.family != AF_INET)
- return -1;
-
- memset(&api, 0, sizeof(api));
- api.vrf_id = VRF_DEFAULT;
- api.type = ZEBRA_ROUTE_OSPF;
- api.safi = SAFI_UNICAST;
- memcpy(&api.prefix, &nhlfe.prefv4, sizeof(struct prefix_ipv4));
-
- if (cmd == ZEBRA_ROUTE_ADD) {
- /* Metric value. */
- SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
- api.metric = OSPF_SR_DEFAULT_METRIC;
- /* Nexthop */
- SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
- api_nh = &api.nexthops[0];
- IPV4_ADDR_COPY(&api_nh->gate.ipv4, &nhlfe.nexthop);
- api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
- api_nh->ifindex = nhlfe.ifindex;
- /* MPLS labels */
- SET_FLAG(api.message, ZAPI_MESSAGE_LABEL);
- api_nh->labels[0] = nhlfe.label_out;
- api_nh->label_num = 1;
- api_nh->vrf_id = VRF_DEFAULT;
- api.nexthop_num = 1;
- }
-
- if (IS_DEBUG_OSPF_SR)
- zlog_debug(" |- %s FEC %u for %s/%u via %u",
- cmd == ZEBRA_ROUTE_ADD ? "Add" : "Delete",
- nhlfe.label_out, inet_ntoa(nhlfe.prefv4.prefix),
- nhlfe.prefv4.prefixlen, nhlfe.ifindex);
-
- return zclient_route_send(cmd, zclient, &api);
+ zl.type = ZEBRA_LSP_OSPF_SR;
+ zl.local_label = nhlfe.label_in;
+
+ SET_FLAG(zl.message, ZAPI_LABELS_FTN);
+ zl.route.prefix.family = nhlfe.prefv4.family;
+ zl.route.prefix.prefixlen = nhlfe.prefv4.prefixlen;
+ zl.route.prefix.u.prefix4 = nhlfe.prefv4.prefix;
+ zl.route.type = ZEBRA_ROUTE_OSPF;
+ zl.route.instance = 0;
+
+ zl.nexthop_num = 1;
+ znh = &zl.nexthops[0];
+ znh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ znh->family = AF_INET;
+ znh->address.ipv4 = nhlfe.nexthop;
+ znh->ifindex = nhlfe.ifindex;
+ znh->label = nhlfe.label_out;
+
+ return zebra_send_mpls_labels(zclient, cmd, &zl);
}
/* Add new NHLFE entry for SID */
static inline void add_sid_nhlfe(struct sr_nhlfe nhlfe)
{
- if ((nhlfe.label_in != 0) && (nhlfe.label_out != 0)) {
+ if ((nhlfe.label_in != 0) && (nhlfe.label_out != 0))
ospf_zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_ADD, nhlfe);
- if (nhlfe.label_out != MPLS_LABEL_IMPLICIT_NULL)
- ospf_zebra_send_mpls_ftn(ZEBRA_ROUTE_ADD, nhlfe);
- }
}
/* Remove NHLFE entry for SID */
static inline void del_sid_nhlfe(struct sr_nhlfe nhlfe)
{
- if ((nhlfe.label_in != 0) && (nhlfe.label_out != 0)) {
+ if ((nhlfe.label_in != 0) && (nhlfe.label_out != 0))
ospf_zebra_send_mpls_labels(ZEBRA_MPLS_LABELS_DELETE, nhlfe);
- if (nhlfe.label_out != MPLS_LABEL_IMPLICIT_NULL)
- ospf_zebra_send_mpls_ftn(ZEBRA_ROUTE_DELETE, nhlfe);
- }
}
/* Update NHLFE entry for SID */
diff --git a/ospfd/ospf_sr.h b/ospfd/ospf_sr.h
index 4d3f5f441a..df923e970f 100644
--- a/ospfd/ospf_sr.h
+++ b/ospfd/ospf_sr.h
@@ -27,9 +27,6 @@
#ifndef _FRR_OSPF_SR_H
#define _FRR_OSPF_SR_H
-/* Default Route priority for OSPF Segment Routing */
-#define OSPF_SR_PRIORITY_DEFAULT 10
-
/* macros and constants for segment routing */
#define SET_RANGE_SIZE_MASK 0xffffff00
#define GET_RANGE_SIZE_MASK 0x00ffffff
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index 8f91f84a41..f4de255877 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -2097,7 +2097,7 @@ static int ospf_vrf_enable(struct vrf *vrf)
old_vrf_id);
if (old_vrf_id != ospf->vrf_id) {
- frr_elevate_privs(&ospfd_privs) {
+ frr_with_privs(&ospfd_privs) {
/* stop zebra redist to us for old vrf */
zclient_send_dereg_requests(zclient,
old_vrf_id);
diff --git a/pbrd/pbr_map.c b/pbrd/pbr_map.c
index 5e67990d5e..1a8461c6c1 100644
--- a/pbrd/pbr_map.c
+++ b/pbrd/pbr_map.c
@@ -316,7 +316,7 @@ struct pbr_map_sequence *pbrms_get(const char *name, uint32_t seqno)
pbrms->ruleno = pbr_nht_get_next_rule(seqno);
pbrms->parent = pbrm;
pbrms->reason =
- PBR_MAP_INVALID_SRCDST |
+ PBR_MAP_INVALID_EMPTY |
PBR_MAP_INVALID_NO_NEXTHOPS;
QOBJ_REG(pbrms, pbr_map_sequence);
@@ -350,10 +350,10 @@ pbr_map_sequence_check_nexthops_valid(struct pbr_map_sequence *pbrms)
}
}
-static void pbr_map_sequence_check_src_dst_valid(struct pbr_map_sequence *pbrms)
+static void pbr_map_sequence_check_not_empty(struct pbr_map_sequence *pbrms)
{
- if (!pbrms->src && !pbrms->dst)
- pbrms->reason |= PBR_MAP_INVALID_SRCDST;
+ if (!pbrms->src && !pbrms->dst && !pbrms->mark)
+ pbrms->reason |= PBR_MAP_INVALID_EMPTY;
}
/*
@@ -364,7 +364,7 @@ static void pbr_map_sequence_check_valid(struct pbr_map_sequence *pbrms)
{
pbr_map_sequence_check_nexthops_valid(pbrms);
- pbr_map_sequence_check_src_dst_valid(pbrms);
+ pbr_map_sequence_check_not_empty(pbrms);
}
static bool pbr_map_check_valid_internal(struct pbr_map *pbrm)
diff --git a/pbrd/pbr_map.h b/pbrd/pbr_map.h
index 945f76bb2b..112acfe44e 100644
--- a/pbrd/pbr_map.h
+++ b/pbrd/pbr_map.h
@@ -87,6 +87,7 @@ struct pbr_map_sequence {
*/
struct prefix *src;
struct prefix *dst;
+ uint32_t mark;
/*
* Family of the src/dst. Needed when deleting since we clear them
@@ -126,7 +127,7 @@ struct pbr_map_sequence {
#define PBR_MAP_INVALID_NEXTHOP (1 << 1)
#define PBR_MAP_INVALID_NO_NEXTHOPS (1 << 2)
#define PBR_MAP_INVALID_BOTH_NHANDGRP (1 << 3)
-#define PBR_MAP_INVALID_SRCDST (1 << 4)
+#define PBR_MAP_INVALID_EMPTY (1 << 4)
uint64_t reason;
QOBJ_FIELDS
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 95f38563b1..5e7addc9d2 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -172,6 +172,33 @@ DEFPY(pbr_map_match_dst, pbr_map_match_dst_cmd,
return CMD_SUCCESS;
}
+DEFPY(pbr_map_match_mark, pbr_map_match_mark_cmd,
+ "[no] match mark (1-4294967295)$mark",
+ NO_STR
+ "Match the rest of the command\n"
+ "Choose the mark value to use\n"
+ "mark\n")
+{
+ struct pbr_map_sequence *pbrms = VTY_GET_CONTEXT(pbr_map_sequence);
+
+#ifndef GNU_LINUX
+ vty_out(vty, "pbr marks are not supported on this platform");
+ return CMD_WARNING_CONFIG_FAILED;
+#endif
+
+ if (!no) {
+ if (pbrms->mark == (uint32_t) mark)
+ return CMD_SUCCESS;
+ pbrms->mark = (uint32_t) mark;
+ } else {
+ pbrms->mark = 0;
+ }
+
+ pbr_map_check(pbrms);
+
+ return CMD_SUCCESS;
+ }
+
DEFPY(pbr_map_nexthop_group, pbr_map_nexthop_group_cmd,
"[no] set nexthop-group NHGNAME$name",
NO_STR
@@ -453,6 +480,8 @@ DEFPY (show_pbr_map,
vty_out(vty, "\tDST Match: %s\n",
prefix2str(pbrms->dst, buf,
sizeof(buf)));
+ if (pbrms->mark)
+ vty_out(vty, "\tMARK Match: %u\n", pbrms->mark);
if (pbrms->nhgrp_name) {
vty_out(vty,
@@ -632,6 +661,9 @@ static int pbr_vty_map_config_write_sequence(struct vty *vty,
vty_out(vty, " match dst-ip %s\n",
prefix2str(pbrms->dst, buff, sizeof(buff)));
+ if (pbrms->mark)
+ vty_out(vty, " match mark %u\n", pbrms->mark);
+
if (pbrms->nhgrp_name)
vty_out(vty, " set nexthop-group %s\n", pbrms->nhgrp_name);
@@ -704,6 +736,7 @@ void pbr_vty_init(void)
install_element(INTERFACE_NODE, &pbr_policy_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_src_cmd);
install_element(PBRMAP_NODE, &pbr_map_match_dst_cmd);
+ install_element(PBRMAP_NODE, &pbr_map_match_mark_cmd);
install_element(PBRMAP_NODE, &pbr_map_nexthop_group_cmd);
install_element(PBRMAP_NODE, &pbr_map_nexthop_cmd);
install_element(VIEW_NODE, &show_pbr_cmd);
diff --git a/pbrd/pbr_zebra.c b/pbrd/pbr_zebra.c
index 466a9a13ae..d74d0fcd23 100644
--- a/pbrd/pbr_zebra.c
+++ b/pbrd/pbr_zebra.c
@@ -526,7 +526,7 @@ static void pbr_encode_pbr_map_sequence(struct stream *s,
stream_putw(s, 0); /* src port */
pbr_encode_pbr_map_sequence_prefix(s, pbrms->dst, family);
stream_putw(s, 0); /* dst port */
- stream_putl(s, 0); /* fwmark */
+ stream_putl(s, pbrms->mark);
if (pbrms->nhgrp_name)
stream_putl(s, pbr_nht_get_table(pbrms->nhgrp_name));
else if (pbrms->nhg)
diff --git a/pimd/pim_assert.c b/pimd/pim_assert.c
index 3aa2a92241..53ab22754c 100644
--- a/pimd/pim_assert.c
+++ b/pimd/pim_assert.c
@@ -259,11 +259,11 @@ int pim_assert_recv(struct interface *ifp, struct pim_neighbor *neigh,
curr += offset;
curr_size -= offset;
- if (curr_size != 8) {
+ if (curr_size < 8) {
char src_str[INET_ADDRSTRLEN];
pim_inet4_dump("<src?>", src_addr, src_str, sizeof(src_str));
zlog_warn(
- "%s: preference/metric size is not 8: size=%d from %s on interface %s",
+ "%s: preference/metric size is less than 8 bytes: size=%d from %s on interface %s",
__PRETTY_FUNCTION__, curr_size, src_str, ifp->name);
return -3;
}
diff --git a/pimd/pim_mroute.c b/pimd/pim_mroute.c
index 1c66007fbb..f7f4b54aea 100644
--- a/pimd/pim_mroute.c
+++ b/pimd/pim_mroute.c
@@ -57,7 +57,7 @@ static int pim_mroute_set(struct pim_instance *pim, int enable)
* We need to create the VRF table for the pim mroute_socket
*/
if (pim->vrf_id != VRF_DEFAULT) {
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
data = pim->vrf->data.l.table_id;
err = setsockopt(pim->mroute_socket, IPPROTO_IP,
@@ -75,7 +75,7 @@ static int pim_mroute_set(struct pim_instance *pim, int enable)
}
}
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
opt = enable ? MRT_INIT : MRT_DONE;
/*
* *BSD *cares* about what value we pass down
@@ -735,7 +735,7 @@ int pim_mroute_socket_enable(struct pim_instance *pim)
{
int fd;
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c
index b1f7cfd2c6..22eb8bc7b4 100644
--- a/pimd/pim_msdp_socket.c
+++ b/pimd/pim_msdp_socket.c
@@ -175,7 +175,7 @@ int pim_msdp_sock_listen(struct pim_instance *pim)
}
}
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
/* bind to well known TCP port */
rc = bind(sock, (struct sockaddr *)&sin, socklen);
}
diff --git a/pimd/pim_sock.c b/pimd/pim_sock.c
index c4538a4ac5..82255cd3b0 100644
--- a/pimd/pim_sock.c
+++ b/pimd/pim_sock.c
@@ -46,7 +46,7 @@ int pim_socket_raw(int protocol)
{
int fd;
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
fd = socket(AF_INET, SOCK_RAW, protocol);
@@ -65,7 +65,7 @@ void pim_socket_ip_hdr(int fd)
{
const int on = 1;
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
if (setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)))
zlog_err("%s: Could not turn on IP_HDRINCL option: %s",
@@ -83,7 +83,7 @@ int pim_socket_bind(int fd, struct interface *ifp)
int ret = 0;
#ifdef SO_BINDTODEVICE
- frr_elevate_privs(&pimd_privs) {
+ frr_with_privs(&pimd_privs) {
ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifp->name,
strlen(ifp->name));
diff --git a/pimd/pim_vty.c b/pimd/pim_vty.c
index d1935195df..1c4ecf299f 100644
--- a/pimd/pim_vty.c
+++ b/pimd/pim_vty.c
@@ -81,7 +81,7 @@ int pim_debug_config_write(struct vty *vty)
++writes;
}
- if (PIM_DEBUG_MROUTE_DETAIL) {
+ if (PIM_DEBUG_MROUTE_DETAIL_ONLY) {
vty_out(vty, "debug mroute detail\n");
++writes;
}
@@ -107,7 +107,7 @@ int pim_debug_config_write(struct vty *vty)
vty_out(vty, "debug pim trace\n");
++writes;
}
- if (PIM_DEBUG_PIM_TRACE_DETAIL) {
+ if (PIM_DEBUG_PIM_TRACE_DETAIL_ONLY) {
vty_out(vty, "debug pim trace detail\n");
++writes;
}
diff --git a/pimd/pimd.h b/pimd/pimd.h
index 175936e0a7..3b83d3b6c7 100644
--- a/pimd/pimd.h
+++ b/pimd/pimd.h
@@ -163,6 +163,8 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DEBUG_PIM_TRACE (router->debugs & PIM_MASK_PIM_TRACE)
#define PIM_DEBUG_PIM_TRACE_DETAIL \
(router->debugs & (PIM_MASK_PIM_TRACE_DETAIL | PIM_MASK_PIM_TRACE))
+#define PIM_DEBUG_PIM_TRACE_DETAIL_ONLY \
+ (router->debugs & PIM_MASK_PIM_TRACE_DETAIL)
#define PIM_DEBUG_IGMP_EVENTS (router->debugs & PIM_MASK_IGMP_EVENTS)
#define PIM_DEBUG_IGMP_PACKETS (router->debugs & PIM_MASK_IGMP_PACKETS)
#define PIM_DEBUG_IGMP_TRACE (router->debugs & PIM_MASK_IGMP_TRACE)
@@ -173,6 +175,7 @@ extern uint8_t qpim_ecmp_rebalance_enable;
#define PIM_DEBUG_MROUTE (router->debugs & PIM_MASK_MROUTE)
#define PIM_DEBUG_MROUTE_DETAIL \
(router->debugs & (PIM_MASK_MROUTE_DETAIL | PIM_MASK_MROUTE))
+#define PIM_DEBUG_MROUTE_DETAIL_ONLY (router->debugs & PIM_MASK_MROUTE_DETAIL)
#define PIM_DEBUG_PIM_HELLO (router->debugs & PIM_MASK_PIM_HELLO)
#define PIM_DEBUG_PIM_J_P (router->debugs & PIM_MASK_PIM_J_P)
#define PIM_DEBUG_PIM_REG (router->debugs & PIM_MASK_PIM_REG)
diff --git a/python/clidef.py b/python/clidef.py
index bc2f5caebf..baa6ed52b2 100644
--- a/python/clidef.py
+++ b/python/clidef.py
@@ -351,6 +351,7 @@ if __name__ == '__main__':
macros = Macros()
macros.load('lib/route_types.h')
macros.load(os.path.join(basepath, 'lib/command.h'))
+ macros.load(os.path.join(basepath, 'bgpd/bgp_vty.h'))
# sigh :(
macros['PROTO_REDIST_STR'] = 'FRR_REDIST_STR_ISISD'
diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in
index 014cae02ee..b3f9ac7630 100644
--- a/redhat/frr.spec.in
+++ b/redhat/frr.spec.in
@@ -634,6 +634,7 @@ fi
%{_libdir}/frr/modules/bgpd_rpki.so
%endif
%{_libdir}/frr/modules/zebra_irdp.so
+%{_libdir}/frr/modules/bgpd_bmp.so
%{_bindir}/*
%config(noreplace) %{configdir}/[!v]*.conf*
%config(noreplace) %attr(750,%{frr_user},%{frr_user}) %{configdir}/daemons
@@ -655,6 +656,9 @@ fi
%files pythontools
+%{_sbindir}/generate_support_bundle.py
+%{_sbindir}/generate_support_bundle.pyc
+%{_sbindir}/generate_support_bundle.pyo
%{_sbindir}/frr-reload.py
%{_sbindir}/frr-reload.pyc
%{_sbindir}/frr-reload.pyo
diff --git a/ripd/ripd.c b/ripd/ripd.c
index c1c2824f2a..cd2ddb1eba 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -1395,7 +1395,7 @@ int rip_create_socket(struct vrf *vrf)
/* Make datagram socket. */
if (vrf->vrf_id != VRF_DEFAULT)
vrf_dev = vrf->name;
- frr_elevate_privs(&ripd_privs) {
+ frr_with_privs(&ripd_privs) {
sock = vrf_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP, vrf->vrf_id,
vrf_dev);
if (sock < 0) {
@@ -1415,7 +1415,7 @@ int rip_create_socket(struct vrf *vrf)
#endif
setsockopt_so_recvbuf(sock, RIP_UDP_RCV_BUF);
- frr_elevate_privs(&ripd_privs) {
+ frr_with_privs(&ripd_privs) {
if ((ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr)))
< 0) {
zlog_err("%s: Can't bind socket %d to %s port %d: %s",
diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c
index 49ed13a2c2..9ed9dc28fe 100644
--- a/ripngd/ripng_interface.c
+++ b/ripngd/ripng_interface.c
@@ -75,7 +75,7 @@ static int ripng_multicast_join(struct interface *ifp, int sock)
* While this is bogus, privs are available and easy to use
* for this call as a workaround.
*/
- frr_elevate_privs(&ripngd_privs) {
+ frr_with_privs(&ripngd_privs) {
ret = setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
(char *)&mreq, sizeof(mreq));
diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c
index 786d5c7aa1..120f46f0da 100644
--- a/ripngd/ripngd.c
+++ b/ripngd/ripngd.c
@@ -120,8 +120,7 @@ int ripng_make_socket(struct vrf *vrf)
/* Make datagram socket. */
if (vrf->vrf_id != VRF_DEFAULT)
vrf_dev = vrf->name;
- frr_elevate_privs(&ripngd_privs)
- {
+ frr_with_privs(&ripngd_privs) {
sock = vrf_socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP,
vrf->vrf_id, vrf_dev);
if (sock < 0) {
@@ -160,7 +159,7 @@ int ripng_make_socket(struct vrf *vrf)
#endif /* SIN6_LEN */
ripaddr.sin6_port = htons(RIPNG_PORT_DEFAULT);
- frr_elevate_privs(&ripngd_privs) {
+ frr_with_privs(&ripngd_privs) {
ret = bind(sock, (struct sockaddr *)&ripaddr, sizeof(ripaddr));
if (ret < 0) {
zlog_err("Can't bind ripng socket: %s.",
diff --git a/staticd/static_routes.c b/staticd/static_routes.c
index 34f58a98e2..a8a5ca523a 100644
--- a/staticd/static_routes.c
+++ b/staticd/static_routes.c
@@ -100,7 +100,8 @@ int static_add_route(afi_t afi, safi_t safi, uint8_t type, struct prefix *p,
&& IPV4_ADDR_SAME(&gate->ipv4, &si->addr.ipv4))
|| (afi == AFI_IP6
&& IPV6_ADDR_SAME(gate, &si->addr.ipv6))))
- && (!strcmp(ifname ? ifname : "", si->ifname))) {
+ && (!strcmp(ifname ? ifname : "", si->ifname))
+ && nh_svrf->vrf->vrf_id == si->nh_vrf_id) {
if ((distance == si->distance) && (tag == si->tag)
&& (table_id == si->table_id)
&& !memcmp(&si->snh_label, snh_label,
diff --git a/staticd/static_vty.c b/staticd/static_vty.c
index 3a9e4e8fa4..f926e1d9cd 100644
--- a/staticd/static_vty.c
+++ b/staticd/static_vty.c
@@ -35,7 +35,6 @@
#ifndef VTYSH_EXTRACT_PL
#include "staticd/static_vty_clippy.c"
#endif
-
static struct static_vrf *static_vty_get_unknown_vrf(struct vty *vty,
const char *vrf_name)
{
@@ -479,6 +478,23 @@ static int static_route_leak(
return CMD_WARNING_CONFIG_FAILED;
}
gatep = &gate;
+
+ if (afi == AFI_IP && !negate) {
+ if (if_lookup_exact_address(&gatep->ipv4, AF_INET,
+ svrf->vrf->vrf_id))
+ if (vty)
+ vty_out(vty,
+ "%% Warning!! Local connected address is configured as Gateway IP(%s)\n",
+ gate_str);
+ } else if (afi == AFI_IP6 && !negate) {
+ if (if_lookup_exact_address(&gatep->ipv6, AF_INET6,
+ svrf->vrf->vrf_id))
+ if (vty)
+ vty_out(vty,
+ "%% Warning!! Local connected address is configured as Gateway IPv6(%s)\n",
+ gate_str);
+ }
+
}
if (gate_str == NULL && ifname == NULL)
diff --git a/staticd/static_zebra.c b/staticd/static_zebra.c
index 13c04259d7..1965c2968e 100644
--- a/staticd/static_zebra.c
+++ b/staticd/static_zebra.c
@@ -130,6 +130,7 @@ static int interface_state_up(ZAPI_CALLBACK_ARGS)
/* Install any static reliant on this interface coming up */
static_install_intf_nh(ifp);
+ static_ifindex_update(ifp, true);
}
return 0;
@@ -137,7 +138,12 @@ static int interface_state_up(ZAPI_CALLBACK_ARGS)
static int interface_state_down(ZAPI_CALLBACK_ARGS)
{
- zebra_interface_state_read(zclient->ibuf, vrf_id);
+ struct interface *ifp;
+
+ ifp = zebra_interface_state_read(zclient->ibuf, vrf_id);
+
+ if (ifp)
+ static_ifindex_update(ifp, false);
return 0;
}
@@ -194,6 +200,25 @@ struct static_nht_data {
uint8_t nh_num;
};
+/* API to check whether the configured nexthop address is
+ * one of the local connected addresses in the given VRF.
+ */
+static bool
+static_nexthop_is_local(vrf_id_t vrfid, struct prefix *addr, int family)
+{
+ if (family == AF_INET) {
+ if (if_lookup_exact_address(&addr->u.prefix4,
+ AF_INET,
+ vrfid))
+ return true;
+ } else if (family == AF_INET6) {
+ if (if_lookup_exact_address(&addr->u.prefix6,
+ AF_INET6,
+ vrfid))
+ return true;
+ }
+ return false;
+}
static int static_zebra_nexthop_update(ZAPI_CALLBACK_ARGS)
{
struct static_nht_data *nhtd, lookup;
@@ -208,6 +233,12 @@ static int static_zebra_nexthop_update(ZAPI_CALLBACK_ARGS)
if (nhr.prefix.family == AF_INET6)
afi = AFI_IP6;
+ if (nhr.type == ZEBRA_ROUTE_CONNECT) {
+ if (static_nexthop_is_local(vrf_id, &nhr.prefix,
+ nhr.prefix.family))
+ nhr.nexthop_num = 0;
+ }
+
memset(&lookup, 0, sizeof(lookup));
lookup.nh = &nhr.prefix;
lookup.nh_vrf_id = vrf_id;
diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c
index 78016dc9ce..8e1b62ac15 100644
--- a/tests/bgpd/test_peer_attr.c
+++ b/tests/bgpd/test_peer_attr.c
@@ -1170,7 +1170,7 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
@@ -1237,7 +1237,7 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
@@ -1285,7 +1285,7 @@ static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
/* Test Preparation: Switch and activate address-family. */
if (!is_attr_type_global(pa->type)) {
test_log(test, "prepare: switch address-family to [%s]",
- afi_safi_print(pa->afi, pa->safi));
+ get_afi_safi_str(pa->afi, pa->safi, false));
test_execute(test, "address-family %s %s",
str_from_afi(pa->afi), str_from_safi(pa->safi));
test_execute(test, "neighbor %s activate", g->name);
diff --git a/tests/lib/test_privs.c b/tests/lib/test_privs.c
index fc3d908661..de638bc67a 100644
--- a/tests/lib/test_privs.c
+++ b/tests/lib/test_privs.c
@@ -113,7 +113,7 @@ int main(int argc, char **argv)
((test_privs.current_state() == ZPRIVS_RAISED) ? "Raised" : "Lowered")
printf("%s\n", PRIV_STATE());
- frr_elevate_privs(&test_privs) {
+ frr_with_privs(&test_privs) {
printf("%s\n", PRIV_STATE());
}
@@ -125,7 +125,7 @@ int main(int argc, char **argv)
/* but these should continue to work... */
printf("%s\n", PRIV_STATE());
- frr_elevate_privs(&test_privs) {
+ frr_with_privs(&test_privs) {
printf("%s\n", PRIV_STATE());
}
diff --git a/tests/topotests/Dockerfile b/tests/topotests/Dockerfile
index ea6fa4b9e0..fc6d6df530 100644
--- a/tests/topotests/Dockerfile
+++ b/tests/topotests/Dockerfile
@@ -19,6 +19,7 @@ RUN export DEBIAN_FRONTEND=noninteractive \
libpython-dev \
libreadline-dev \
libc-ares-dev \
+ libcap-dev \
man \
mininet \
pkg-config \
@@ -40,12 +41,12 @@ RUN export DEBIAN_FRONTEND=noninteractive \
pytest
RUN cd /tmp \
- && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-1/Ubuntu-18.04-x86_64-Packages/libyang-dev_0.16.46_amd64.deb \
+ && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/Debian-AMD64-Packages/libyang-dev_0.16.105-1_amd64.deb \
-O libyang-dev.deb \
- && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-1/Ubuntu-18.04-x86_64-Packages/libyang_0.16.46_amd64.deb \
+ && wget -q https://ci1.netdef.org/artifact/LIBYANG-YANGRELEASE/shared/build-10/Debian-AMD64-Packages/libyang0.16_0.16.105-1_amd64.deb \
-O libyang.deb \
- && echo "039252cc66eb254a97e160b1c325af669470cde8a02d73ec9f7b920ed3c7997c libyang.deb" | sha256sum -c - \
- && echo "e7e2d5bfc7b33b3218df8bef404432970f9b4ad10d6dbbdcb0e0be2babbb68e9 libyang-dev.deb" | sha256sum -c - \
+ && echo "34bef017e527a590020185f05dc39203bdf1c86223e0d990839623ec629d8598 libyang.deb" | sha256sum -c - \
+ && echo "fe9cc6e3b173ca56ef49428c281e96bf76c0f910aa75cf85098076411484e8f4 libyang-dev.deb" | sha256sum -c - \
&& dpkg -i libyang*.deb \
&& rm libyang*.deb
diff --git a/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref b/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref
index 0a20231371..a7d6fe11a6 100644
--- a/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref
+++ b/tests/topotests/all-protocol-startup/r1/ipv4_routes.ref
@@ -15,6 +15,7 @@ S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX
S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, XX:XX:XX
S>* 4.5.6.13/32 [1/0] unreachable (blackhole), XX:XX:XX
S>* 4.5.6.14/32 [1/0] unreachable (blackhole), XX:XX:XX
+S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, XX:XX:XX
S>* 4.5.6.7/32 [1/0] unreachable (blackhole), XX:XX:XX
S>* 4.5.6.8/32 [1/0] unreachable (blackhole), XX:XX:XX
S>* 4.5.6.9/32 [1/0] unreachable (ICMP unreachable), XX:XX:XX
diff --git a/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref b/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref
index 6e3e9c87c1..d5bc16a2bf 100644
--- a/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref
+++ b/tests/topotests/all-protocol-startup/r1/ipv6_routes.ref
@@ -20,9 +20,10 @@ C * fe80::/64 is directly connected, r1-eth7, XX:XX:XX
C * fe80::/64 is directly connected, r1-eth8, XX:XX:XX
C * fe80::/64 is directly connected, r1-eth9, XX:XX:XX
O fc00:0:0:4::/64 [110/10] is directly connected, r1-eth4, XX:XX:XX
-S>* 4:5::/32 [1/0] is directly connected, r1-eth0, XX:XX:XX
S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX
S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX
+S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, XX:XX:XX
+S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, XX:XX:XX
S>* 4:5::6:7/128 [1/0] unreachable (blackhole), XX:XX:XX
S>* 4:5::6:8/128 [1/0] unreachable (blackhole), XX:XX:XX
S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), XX:XX:XX
diff --git a/tests/topotests/all-protocol-startup/r1/zebra.conf b/tests/topotests/all-protocol-startup/r1/zebra.conf
index c621593ef7..85c8676964 100644
--- a/tests/topotests/all-protocol-startup/r1/zebra.conf
+++ b/tests/topotests/all-protocol-startup/r1/zebra.conf
@@ -20,7 +20,12 @@ ip route 4.5.6.11/32 192.168.0.2 r1-eth0
ipv6 route 4:5::6:11/128 fc00:0:0:0::2 r1-eth0
# Create ifname routes
ip route 4.5.6.12/32 r1-eth0
-ipv6 route 4:5::6:12/32 r1-eth0
+ipv6 route 4:5::6:12/128 r1-eth0
+# Create a route that has a large admin distance.
+# An admin distance of 255 should be accepted
+# by zebra but not installed.
+ip route 4.5.6.15/32 192.168.0.2 255
+ipv6 route 4:5::6:15/128 fc00:0:0:0::2 255
!
interface r1-eth0
description to sw0 - no routing protocol
diff --git a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
index 095ebe3344..e99111d90b 100755
--- a/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
+++ b/tests/topotests/bgp-basic-functionality-topo1/test_bgp_basic_functionality.py
@@ -375,7 +375,7 @@ def test_static_routes(request):
# Verifying RIB routes
dut = 'r3'
protocol = 'bgp'
- next_hop = '10.0.0.2'
+ next_hop = ['10.0.0.2', '10.0.0.5']
result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop,
protocol=protocol)
assert result is True, "Testcase {} :Failed \n Error: {}". \
diff --git a/tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json b/tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json
new file mode 100755
index 0000000000..50797f130a
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/ebgp_ecmp_topo2.json
@@ -0,0 +1,664 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
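
(For orientation: a minimal sketch, not part of the patch, of how the topology JSON above can be read to derive the expected ECMP fan-out. It assumes only the Python json module and the same file name the tests use; the r3 stanza defines 32 parallel r2 links and "maximum_paths"/"ebgp" of 32, so up to 32 next-hops per prefix are expected.)

    import json

    # Load the eBGP ECMP topology definition (same file name as used by the test).
    with open("ebgp_ecmp_topo2.json") as f:
        topo = json.load(f)

    # Each parallel r2 link on r3 contributes one candidate BGP next-hop.
    r3_links = [l for l in topo["routers"]["r3"]["links"] if l.startswith("r2-link")]
    max_paths = topo["routers"]["r3"]["bgp"]["address_family"]["ipv4"]["unicast"][
        "maximum_paths"]["ebgp"]

    print(len(r3_links), max_paths)  # expected: 32 32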
diff --git a/tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json b/tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json
new file mode 100755
index 0000000000..9010b8c273
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/ibgp_ecmp_topo2.json
@@ -0,0 +1,674 @@
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "next_hop_self": true
+ },
+ "r2-link3": {
+ "next_hop_self": true
+ },
+ "r2-link4": {
+ "next_hop_self": true
+ },
+ "r2-link5": {
+ "next_hop_self": true
+ },
+ "r2-link6": {
+ "next_hop_self": true
+ },
+ "r2-link7": {
+ "next_hop_self": true
+ },
+ "r2-link8": {
+ "next_hop_self": true
+ },
+ "r2-link9": {
+ "next_hop_self": true
+ },
+ "r2-link10": {
+ "next_hop_self": true
+ },
+ "r2-link11": {
+ "next_hop_self": true
+ },
+ "r2-link12": {
+ "next_hop_self": true
+ },
+ "r2-link13": {
+ "next_hop_self": true
+ },
+ "r2-link14": {
+ "next_hop_self": true
+ },
+ "r2-link15": {
+ "next_hop_self": true
+ },
+ "r2-link16": {
+ "next_hop_self": true
+ },
+ "r2-link17": {
+ "next_hop_self": true
+ },
+ "r2-link18": {
+ "next_hop_self": true
+ },
+ "r2-link19": {
+ "next_hop_self": true
+ },
+ "r2-link20": {
+ "next_hop_self": true
+ },
+ "r2-link21": {
+ "next_hop_self": true
+ },
+ "r2-link22": {
+ "next_hop_self": true
+ },
+ "r2-link23": {
+ "next_hop_self": true
+ },
+ "r2-link24": {
+ "next_hop_self": true
+ },
+ "r2-link25": {
+ "next_hop_self": true
+ },
+ "r2-link26": {
+ "next_hop_self": true
+ },
+ "r2-link27": {
+ "next_hop_self": true
+ },
+ "r2-link28": {
+ "next_hop_self": true
+ },
+ "r2-link29": {
+ "next_hop_self": true
+ },
+ "r2-link30": {
+ "next_hop_self": true
+ },
+ "r2-link31": {
+ "next_hop_self": true
+ },
+ "r2-link32": {
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link3": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link4": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link5": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link6": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link7": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link8": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link9": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link10": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link11": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link12": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link13": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link14": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link15": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link16": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link17": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link18": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link19": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link20": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link21": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link22": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link23": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link24": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link25": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link26": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link27": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link28": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link29": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link30": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link31": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link32": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 32
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {},
+ "r3-link2": {},
+ "r3-link3": {},
+ "r3-link4": {},
+ "r3-link5": {},
+ "r3-link6": {},
+ "r3-link7": {},
+ "r3-link8": {},
+ "r3-link9": {},
+ "r3-link10": {},
+ "r3-link11": {},
+ "r3-link12": {},
+ "r3-link13": {},
+ "r3-link14": {},
+ "r3-link15": {},
+ "r3-link16": {},
+ "r3-link17": {},
+ "r3-link18": {},
+ "r3-link19": {},
+ "r3-link20": {},
+ "r3-link21": {},
+ "r3-link22": {},
+ "r3-link23": {},
+ "r3-link24": {},
+ "r3-link25": {},
+ "r3-link26": {},
+ "r3-link27": {},
+ "r3-link28": {},
+ "r3-link29": {},
+ "r3-link30": {},
+ "r3-link31": {},
+ "r3-link32": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
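
(A hedged companion check for the iBGP variant above, illustrative only and under the same assumptions as the previous sketch: all three routers share AS 100, so the r2-r3 sessions are iBGP and the ECMP limit is configured under the "ibgp" key.)

    import json

    with open("ibgp_ecmp_topo2.json") as f:
        topo = json.load(f)

    # One common AS means the parallel sessions are iBGP ...
    asns = {r["bgp"]["local_as"] for r in topo["routers"].values()}
    # ... and the ECMP limit is therefore read from "maximum_paths"/"ibgp".
    ibgp_paths = topo["routers"]["r3"]["bgp"]["address_family"]["ipv4"]["unicast"][
        "maximum_paths"]["ibgp"]

    print(asns, ibgp_paths)  # expected: {'100'} 32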
diff --git a/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
new file mode 100755
index 0000000000..4b9f419bf2
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/test_ebgp_ecmp_topo2.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+The following tests cover ECMP functionality on eBGP:
+1. Verify routes are installed as per the maximum-paths configuration (8/16/32).
+2. Disable/shut selected next-hop interfaces and verify the remaining next-hops
+   are installed in the RIB of the DUT; enable the interfaces and verify the RIB count.
+3. Verify the BGP table and RIB in the DUT after clearing BGP routes and neighbors.
+4. Verify routes are cleared from the BGP and RIB tables of the DUT when the
+   redistribute static configuration is removed.
+5. Shut BGP neighbors one by one and verify the BGP and routing tables are
+   updated accordingly in the DUT.
+6. Delete static routes and verify the routes are cleared from the BGP table and RIB
+   of the DUT.
+7. Verify routes are cleared from the BGP and RIB tables of the DUT when the
+   advertise network configuration is removed.
+"""
+import os
+import sys
+import time
+import json
+import pytest
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer,
+ verify_rib, create_static_routes, check_address_types,
+ interface_status, reset_config_on_routers
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ebgp_ecmp_topo2.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+INTF_LIST_R3 = []
+INTF_LIST_R2 = []
+NETWORK = {"ipv4": "11.0.20.1/32", "ipv6": "1::/64"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global NEXT_HOPS, INTF_LIST_R3, INTF_LIST_R2, TEST_STATIC
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+    # API call to verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ link_data = [val for links, val in
+ topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links]
+ for adt in ADDR_TYPES:
+ NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
+ if adt == "ipv4":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ elif adt == "ipv6":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+
+ INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
+
+ link_data = [val for links, val in
+ topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links]
+ INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"],
+ "next_hop": NEXT_HOP_IP["ipv4"]
+ },
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": NEXT_HOP_IP["ipv6"]
+ }
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"]}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Advertising networks %s %s from router %s",
+ NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
+def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
+ """
+ Verify routes installed as per maximum-paths
+ configuration (8/16/32).
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": ecmp_num,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ebgp": ecmp_num,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_after_clear_bgp(request):
+ """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Clear bgp
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_redistribute_static(request):
+ """ Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Remove redistribute static")
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3 are deleted", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ logger.info("Enable redistribute static")
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_shut_bgp_neighbor(request):
+ """
+    Disable/shut selected next-hop interfaces and verify the remaining next-hops
+    are installed in the RIB of the DUT; enable the interfaces and verify the RIB count.
+
+    Shut BGP neighbors one by one and verify the BGP and routing tables are
+    updated accordingly in the DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ logger.info(INTF_LIST_R2)
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for intf_num in range(len(INTF_LIST_R2)+1, 16):
+ intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": [intf_val],
+ "status": "down"
+ }
+ }
+ logger.info("Shutting down neighbor interface {} on r2".
+ format(intf_val))
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ if intf_num + 16 < 32:
+ check_hops = NEXT_HOPS[addr_type]
+ else:
+ check_hops = []
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=check_hops,
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": INTF_LIST_R2,
+ "status": "up"
+ }
+ }
+
+ logger.info("Enabling all neighbor interface {} on r2")
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_static_route(request):
+ """
+    Delete static routes and verify the routes are cleared from the BGP table
+    and RIB of the DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ logger.info("Remove static routes")
+ result = create_static_routes(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3 are removed", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_2,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ for addr_type in ADDR_TYPES:
+ # Enable static routes
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Enable static route")
+ result = create_static_routes(tgen, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+def test_ecmp_remove_nw_advertise(request):
+ """
+ Verify routes are cleared from BGP and RIB table of DUT,
+ when advertise network configuration is removed
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv4"],
+ "delete": True
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv6"],
+ "delete": True
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Withdraw advertised networks")
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
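
(Side note, not part of the patch: the interface-shutdown loop in test_ecmp_shut_bgp_neighbor above is meant to bring the 32 r2-side interfaces down in slices and to expect next-hops on r3 only while some interfaces remain up. A minimal sketch of that batching logic follows; the interface names and the 10.0.x.2 next-hop scheme are hypothetical placeholders mirroring the test's globals, not the real addresses assigned by the topology builder.)

    # Illustrative only: shut interfaces in batches of 16 and track the
    # next-hops that should still be verifiable in the RIB.
    INTF_LIST_R2 = ["r2-eth%d" % i for i in range(1, 33)]   # hypothetical names
    NEXT_HOPS = ["10.0.%d.2" % i for i in range(1, 33)]      # hypothetical addresses

    batch = 16
    for start in range(0, len(INTF_LIST_R2), batch):
        down = INTF_LIST_R2[: start + batch]      # interfaces shut so far
        remaining = NEXT_HOPS[len(down):]         # next-hops still expected in the RIB
        print(len(down), "interfaces down ->", len(remaining), "next-hops expected")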
diff --git a/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
new file mode 100755
index 0000000000..a9f18ed1fa
--- /dev/null
+++ b/tests/topotests/bgp-ecmp-topo2/test_ibgp_ecmp_topo2.py
@@ -0,0 +1,813 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+The following tests cover ECMP functionality on iBGP:
+1. Verify routes are installed as per the maximum-paths configuration (8/16/32).
+2. Disable/shut selected next-hop interfaces and verify the remaining next-hops
+   are installed in the RIB of the DUT; enable the interfaces and verify the RIB count.
+3. Verify the BGP table and RIB in the DUT after clearing BGP routes and neighbors.
+4. Verify routes are cleared from the BGP and RIB tables of the DUT when the
+   redistribute static configuration is removed.
+5. Shut BGP neighbors one by one and verify the BGP and routing tables are
+   updated accordingly in the DUT.
+6. Delete static routes and verify the routes are cleared from the BGP table and RIB
+   of the DUT.
+7. Verify routes are cleared from the BGP and RIB tables of the DUT when the
+   advertise network configuration is removed.
+"""
+import os
+import sys
+import time
+import json
+import pytest
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, '../../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+
+from lib.common_config import (
+ start_topology, write_test_header,
+ write_test_footer,
+ verify_rib, create_static_routes, check_address_types,
+ interface_status, reset_config_on_routers
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence, create_router_bgp,
+ clear_bgp_and_verify)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ibgp_ecmp_topo2.json".format(CWD)
+
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+INTF_LIST_R3 = []
+INTF_LIST_R2 = []
+NETWORK = {"ipv4": "11.0.20.1/32", "ipv6": "1::/64"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global NEXT_HOPS, INTF_LIST_R3, INTF_LIST_R2, TEST_STATIC
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # tgen.mininet_cli()
+    # API call to verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ for addr_type in ADDR_TYPES:
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
+ " {}".format(BGP_CONVERGENCE))
+
+ link_data = [val for links, val in
+ topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links]
+ for adt in ADDR_TYPES:
+ NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
+ if adt == "ipv4":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ elif adt == "ipv6":
+ NEXT_HOPS[adt] = sorted(
+ NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+
+ INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
+
+ link_data = [val for links, val in
+ topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links]
+ INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
+ INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"],
+ "next_hop": NEXT_HOP_IP["ipv4"]
+ },
+ {
+ "network": NETWORK["ipv6"],
+ "next_hop": NEXT_HOP_IP["ipv6"]
+ }
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"]}
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Advertising networks %s %s from router %s",
+ NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
+@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
+def test_modify_ecmp_max_paths(request, ecmp_num, test_type):
+ """
+ Verify routes installed as per maximum-paths
+ configuration (8/16/32).
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ input_dict = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": ecmp_num,
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": ecmp_num,
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
+ result = create_router_bgp(tgen, topo, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_after_clear_bgp(request):
+ """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ # Clear bgp
+ result = clear_bgp_and_verify(tgen, topo, dut)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_redistribute_static(request):
+ """ Verify routes are cleared from BGP and RIB table of DUT when
+ redistribute static configuration is removed."""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static",
+ "delete": True
+
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Remove redistribute static")
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3 are deleted", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ logger.info("Enable redistribute static")
+ input_dict_2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{
+ "redist_type": "static"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_shut_bgp_neighbor(request):
+ """ Shut BGP neigbors one by one and verify BGP and routing table updated
+ accordingly in DUT """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ logger.info(INTF_LIST_R2)
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for intf_num in range(len(INTF_LIST_R2)+1, 16):
+ intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": [intf_val],
+ "status": "down"
+ }
+ }
+ logger.info("Shutting down neighbor interface {} on r2".
+ format(intf_val))
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ if intf_num + 16 < 32:
+ check_hops = NEXT_HOPS[addr_type]
+ else:
+ check_hops = []
+
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=check_hops,
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_1 = {
+ "r2": {
+ "interface_list": INTF_LIST_R2,
+ "status": "up"
+ }
+ }
+
+ logger.info("Enabling all neighbor interface {} on r2")
+ result = interface_status(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_static_route(request):
+ """
+    Delete static routes and verify the routes are cleared from the BGP table
+    and RIB of the DUT.
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+
+ static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict_1 = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict_1,
+ next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict_2 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ "delete": True
+ }
+ ]
+ }
+ }
+
+ logger.info("Remove static routes")
+ result = create_static_routes(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3 are removed", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_2,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ for addr_type in ADDR_TYPES:
+ # Enable static routes
+ input_dict_4 = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type],
+ "next_hop": NEXT_HOP_IP[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Enable static route")
+ result = create_static_routes(tgen, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ecmp_remove_nw_advertise(request):
+ """
+ Verify routes are cleared from BGP and RIB table of DUT,
+ when advertise network configuration is removed
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ input_dict_3 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv4"],
+ "delete": True
+ }]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{
+ "network": NETWORK["ipv6"],
+ "delete": True
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ logger.info("Withdraw advertised networks")
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=[], protocol=protocol, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Routes still" \
+ " present in RIB".format(tc_name)
+
+ static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
+ for addr_type in ADDR_TYPES:
+ input_dict = {
+ "r3": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type]
+ }
+ ]
+ }
+ }
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(tgen, addr_type, dut, input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
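
(The hunks that follow standardize the negative-check pattern used with verify_rib(..., expected=False): when routes must be absent, the helper is expected to return something other than True, and the assertion message reports that routes are still present. A hedged illustration of that pattern, where verify_rib is a simplified stand-in and not the real lib.common_config helper.)

    def verify_rib(tgen, addr_type, dut, input_dict, protocol=None, expected=True):
        # Stand-in (assumption): returns True when the routes are found in the
        # RIB, otherwise an error string describing what is missing.
        return "Routes not found in RIB of r4"

    tc_name = "test_ip_prefix_lists_out_permit"
    result = verify_rib(None, "ipv4", "r4", {}, protocol="bgp", expected=False)
    # Negative check: the route must NOT be installed, so anything but True passes;
    # if it were True, the message explains that routes are still present.
    assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
        " present in RIB".format(tc_name)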
diff --git a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
index 2b9c411ff2..9f92b4b290 100755
--- a/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
+++ b/tests/topotests/bgp-path-attributes-topo1/test_bgp_path_attributes.py
@@ -219,7 +219,8 @@ def test_next_hop_attribute(request):
dut = "r1"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Configure next-hop-self to bgp neighbor
input_dict_1 = {
@@ -484,7 +485,7 @@ def test_localpref_attribute(request):
"neighbor": {
"r1": {
"dest_link": {
- "r3": {
+ "r2": {
"route_maps": [
{"name": "RMAP_LOCAL_PREF",
"direction": "in"}
@@ -499,6 +500,7 @@ def test_localpref_attribute(request):
}
}
}
+
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result)
diff --git a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
index d3892e9d07..b8975997ea 100755
--- a/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
+++ b/tests/topotests/bgp-prefix-list-topo1/test_prefix_lists.py
@@ -386,8 +386,8 @@ def test_ip_prefix_lists_out_permit(request):
tc_name, result)
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -497,8 +497,8 @@ def test_ip_prefix_lists_in_deny_and_permit_any(request):
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -542,7 +542,6 @@ def test_delete_prefix_lists(request):
result = verify_prefix_lists(tgen, input_dict_2)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result)
- logger.info(result)
# Delete prefix list
input_dict_2 = {
@@ -714,9 +713,8 @@ def test_ip_prefix_lists_out_deny_and_permit_any(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -859,8 +857,8 @@ def test_modify_prefix_lists_in_permit_to_deny(request):
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -972,8 +970,8 @@ def test_modify_prefix_lists_in_deny_to_permit(request):
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
@@ -1152,8 +1150,8 @@ def test_modify_prefix_lists_out_permit_to_deny(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
@@ -1265,8 +1263,8 @@ def test_modify_prefix_lists_out_deny_to_permit(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
@@ -1439,8 +1437,8 @@ def test_ip_prefix_lists_implicit_deny(request):
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
+ " present in RIB".format(tc_name)
write_test_footer(tc_name)
diff --git a/tests/topotests/bgp_aggregate-address_route-map/__init__.py b/tests/topotests/bgp_aggregate-address_route-map/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_aggregate-address_route-map/__init__.py
diff --git a/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf b/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf
new file mode 100644
index 0000000000..ef34817bb1
--- /dev/null
+++ b/tests/topotests/bgp_aggregate-address_route-map/r1/bgpd.conf
@@ -0,0 +1,10 @@
+router bgp 65000
+ neighbor 192.168.255.2 remote-as 65001
+ address-family ipv4 unicast
+ redistribute connected
+ aggregate-address 172.16.255.0/24 route-map aggr-rmap
+ exit-address-family
+!
+route-map aggr-rmap permit 10
+ set metric 123
+!
diff --git a/tests/topotests/bgp_aggregate-address_route-map/r1/zebra.conf b/tests/topotests/bgp_aggregate-address_route-map/r1/zebra.conf
new file mode 100644
index 0000000000..0a283c06d5
--- /dev/null
+++ b/tests/topotests/bgp_aggregate-address_route-map/r1/zebra.conf
@@ -0,0 +1,9 @@
+!
+interface lo
+ ip address 172.16.255.254/32
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf b/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf
new file mode 100644
index 0000000000..73d4d0aeea
--- /dev/null
+++ b/tests/topotests/bgp_aggregate-address_route-map/r2/bgpd.conf
@@ -0,0 +1,4 @@
+router bgp 65001
+ neighbor 192.168.255.1 remote-as 65000
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_aggregate-address_route-map/r2/zebra.conf b/tests/topotests/bgp_aggregate-address_route-map/r2/zebra.conf
new file mode 100644
index 0000000000..606c17bec9
--- /dev/null
+++ b/tests/topotests/bgp_aggregate-address_route-map/r2/zebra.conf
@@ -0,0 +1,6 @@
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py
new file mode 100644
index 0000000000..d6753e9b23
--- /dev/null
+++ b/tests/topotests/bgp_aggregate-address_route-map/test_bgp_aggregate-address_route-map.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+#
+# bgp_aggregate-address_route-map.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2019 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+bgp_aggregate-address_route-map.py:
+
+Test if the following commands work:
+router bgp 65031
+ address-family ipv4 unicast
+ aggregate-address 192.168.255.0/24 route-map aggr-rmap
+
+route-map aggr-rmap permit 10
+ set metric 123
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+
+ for routern in range(1, 3):
+ tgen.add_router('r{}'.format(routern))
+
+ switch = tgen.add_switch('s1')
+ switch.add_link(tgen.gears['r1'])
+ switch.add_link(tgen.gears['r2'])
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.iteritems(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP,
+ os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ )
+
+ tgen.start_router()
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+def test_bgp_aggregate_address_route_map():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears['r2']
+
+ def _bgp_converge(router):
+ output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
+ expected = {
+ '192.168.255.1': {
+ 'bgpState': 'Established',
+ 'addressFamilyInfo': {
+ 'ipv4Unicast': {
+ 'acceptedPrefixCounter': 3
+ }
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ def _bgp_aggregate_address_has_metric(router):
+ output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json"))
+ expected = {
+ 'paths': [
+ {
+ 'med': 123
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge, router)
+ success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+
+ assert result is None, 'Failed to see bgp convergence in "{}"'.format(router)
+
+ test_func = functools.partial(_bgp_aggregate_address_has_metric, router)
+ success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+
+ assert result is None, 'Failed to see applied metric for aggregated prefix in "{}"'.format(router)
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
index de6c35ba8f..ed350ebfeb 100644
--- a/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
+++ b/tests/topotests/bgp_comm-list_delete/test_bgp_comm-list_delete.py
@@ -88,7 +88,7 @@ def test_bgp_maximum_prefix_invalid():
while True:
output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
if output['192.168.255.1']['bgpState'] == 'Established':
- if output['192.168.255.1']['addressFamilyInfo']['IPv4 Unicast']['acceptedPrefixCounter'] == 2:
+ if output['192.168.255.1']['addressFamilyInfo']['ipv4Unicast']['acceptedPrefixCounter'] == 2:
return True
def _bgp_comm_list_delete(router):
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py
index 7b3a883afa..1317a510d1 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_direct/scripts/adjacencies.py
@@ -2,16 +2,16 @@ from lutil import luCommand
luCommand('ce1','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
luCommand('ce2','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
luCommand('ce3','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
-luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',90)
-luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
-luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
+luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up')
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
+luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
+luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
diff --git a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py
index 5674120b9c..c2b0cf9e7a 100644
--- a/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py
+++ b/tests/topotests/bgp_l3vpn_to_bgp_vrf/scripts/adjacencies.py
@@ -6,10 +6,10 @@ luCommand('ce4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','Adjacencie
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',300)
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','pass','Core adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
+luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
+luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0.* 00:0','pass','All adjacencies up')
diff --git a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py
index 1f53791f6a..6fbe4ff1c0 100644
--- a/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py
+++ b/tests/topotests/bgp_rfapi_basic_sanity/scripts/adjacencies.py
@@ -1,10 +1,10 @@
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',30)
-luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
-luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
-luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','pass','All adjacencies up')
+luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
+luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
+luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
+luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping')
#luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
diff --git a/tests/topotests/docker/inner/compile_frr.sh b/tests/topotests/docker/inner/compile_frr.sh
index 2d72082c1e..dee0ec8118 100755
--- a/tests/topotests/docker/inner/compile_frr.sh
+++ b/tests/topotests/docker/inner/compile_frr.sh
@@ -84,6 +84,7 @@ if [ ! -e Makefile ]; then
--enable-static-bin \
--enable-static \
--enable-shared \
+ --enable-dev-build \
--with-moduledir=/usr/lib/frr/modules \
--prefix=/usr \
--localstatedir=/var/run/frr \
diff --git a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
index b794b96a63..cd069aaec5 100755
--- a/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
+++ b/tests/topotests/example-topojson-test/test_topo_json_single_link_loopback/test_example_topojson.py
@@ -178,7 +178,7 @@ def test_static_routes(request):
# Static routes are created as part of initial configuration,
# verifying RIB
dut = 'r3'
- next_hop = '10.0.0.1'
+ next_hop = ['10.0.0.1', '10.0.0.5']
input_dict = {
"r1": {
"static_routes": [
diff --git a/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref-1 b/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref-1
deleted file mode 100644
index f244122f1a..0000000000
--- a/tests/topotests/ldp-topo1/r1/ip_mpls_route.ref-1
+++ /dev/null
@@ -1,5 +0,0 @@
-xx as to xx via inet 10.0.1.2 dev r1-eth0 proto xx
-xx as to xx via inet 10.0.1.2 dev r1-eth0 proto xx
-xx via inet 10.0.1.2 dev r1-eth0 proto xx
-xx via inet 10.0.1.2 dev r1-eth0 proto xx
-xx via inet 10.0.1.2 dev r1-eth0 proto xx
diff --git a/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref-1 b/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref-1
deleted file mode 100644
index ff99ff9866..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_ipv4_route.ref-1
+++ /dev/null
@@ -1,7 +0,0 @@
-O 1.1.1.1/32 [110/0] is directly connected, lo
-O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0
-O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx
-O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx
-O 10.0.1.0/24 [110/10] is directly connected, r1-eth0
-O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0
-O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_binding.ref-1 b/tests/topotests/ldp-topo1/r1/show_mpls_ldp_binding.ref-1
deleted file mode 100644
index ff72a1c0b7..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_binding.ref-1
+++ /dev/null
@@ -1,42 +0,0 @@
-1.1.1.1/32
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
-2.2.2.2/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
-3.3.3.3/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
-4.4.4.4/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
-10.0.1.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
-10.0.2.0/24
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
-10.0.3.0/24
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_discovery.ref-1 b/tests/topotests/ldp-topo1/r1/show_mpls_ldp_discovery.ref-1
deleted file mode 100644
index 38522e162e..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_discovery.ref-1
+++ /dev/null
@@ -1,7 +0,0 @@
-Local LDP Identifier: 1.1.1.1:0
-Discovery Sources:
- Interfaces:
- r1-eth0: xmit/recv
- LDP Id: 2.2.2.2:0, Transport address: 2.2.2.2
- Hold time: 15 sec
- Targeted Hellos:
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_interface.ref-1 b/tests/topotests/ldp-topo1/r1/show_mpls_ldp_interface.ref-1
deleted file mode 100644
index 0fb15d2da7..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_interface.ref-1
+++ /dev/null
@@ -1,2 +0,0 @@
-AF Interface State Uptime Hello Timers ac
-ipv4 r1-eth0 ACTIVE xx:xx:xx 5/15 1
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_neighbor.ref-1 b/tests/topotests/ldp-topo1/r1/show_mpls_ldp_neighbor.ref-1
deleted file mode 100644
index 3df98bfae5..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_mpls_ldp_neighbor.ref-1
+++ /dev/null
@@ -1,8 +0,0 @@
-Peer LDP Identifier: 2.2.2.2:0
- TCP connection: 1.1.1.1:xxx - 2.2.2.2:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r1-eth0
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_table.ref b/tests/topotests/ldp-topo1/r1/show_mpls_table.ref
index 61cb9eec82..7e24359af3 100644
--- a/tests/topotests/ldp-topo1/r1/show_mpls_table.ref
+++ b/tests/topotests/ldp-topo1/r1/show_mpls_table.ref
@@ -1,8 +1,8 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.1.2 XX
- XX LDP 10.0.1.2 XX
- XX LDP 10.0.1.2 implicit-null
- XX LDP 10.0.1.2 implicit-null
- XX LDP 10.0.1.2 implicit-null
+ Inbound Label Type Nexthop Outbound Label
+ -----------------------------------------------
+ XX LDP 10.0.1.2 XX
+ XX LDP 10.0.1.2 XX
+ XX LDP 10.0.1.2 implicit-null
+ XX LDP 10.0.1.2 implicit-null
+ XX LDP 10.0.1.2 implicit-null
+
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_table.ref-1 b/tests/topotests/ldp-topo1/r1/show_mpls_table.ref-1
deleted file mode 100644
index 912a082019..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_mpls_table.ref-1
+++ /dev/null
@@ -1,8 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.1.2 3
- XX LDP 10.0.1.2 3
- XX LDP 10.0.1.2 3
- XX LDP 10.0.1.2 XX
- XX LDP 10.0.1.2 XX
diff --git a/tests/topotests/ldp-topo1/r1/show_mpls_table.ref-no-impl-null b/tests/topotests/ldp-topo1/r1/show_mpls_table.ref-no-impl-null
deleted file mode 100644
index 912a082019..0000000000
--- a/tests/topotests/ldp-topo1/r1/show_mpls_table.ref-no-impl-null
+++ /dev/null
@@ -1,8 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.1.2 3
- XX LDP 10.0.1.2 3
- XX LDP 10.0.1.2 3
- XX LDP 10.0.1.2 XX
- XX LDP 10.0.1.2 XX
diff --git a/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref-1 b/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref-1
deleted file mode 100644
index eaec2f16b9..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_ipv4_route.ref-1
+++ /dev/null
@@ -1,7 +0,0 @@
-O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0
-O 2.2.2.2/32 [110/0] is directly connected, lo
-O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1
-O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r2-eth1
-O 10.0.1.0/24 [110/10] is directly connected, r2-eth0
-O 10.0.2.0/24 [110/10] is directly connected, r2-eth1
-O 10.0.3.0/24 [110/10] is directly connected, r2-eth2
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_binding.ref-1 b/tests/topotests/ldp-topo1/r2/show_mpls_ldp_binding.ref-1
deleted file mode 100644
index 54ee39080a..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_binding.ref-1
+++ /dev/null
@@ -1,56 +0,0 @@
-1.1.1.1/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 imp-null
- 3.3.3.3 xxx
- 4.4.4.4 xxx
-2.2.2.2/32
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 xxx
- 3.3.3.3 xxx
- 4.4.4.4 xxx
-3.3.3.3/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 xxx
- 3.3.3.3 imp-null
- 4.4.4.4 xxx
-4.4.4.4/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 xxx
- 3.3.3.3 xxx
- 4.4.4.4 imp-null
-10.0.1.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 imp-null
- 3.3.3.3 xxx
- 4.4.4.4 xxx
-10.0.2.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 xxx
- 3.3.3.3 imp-null
- 4.4.4.4 imp-null
-10.0.3.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 1.1.1.1 xxx
- 3.3.3.3 imp-null
- 4.4.4.4 xxx
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_discovery.ref-1 b/tests/topotests/ldp-topo1/r2/show_mpls_ldp_discovery.ref-1
deleted file mode 100644
index b1bebd7c46..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_discovery.ref-1
+++ /dev/null
@@ -1,12 +0,0 @@
-Local LDP Identifier: 2.2.2.2:0
-Discovery Sources:
- Interfaces:
- r2-eth0: xmit/recv
- LDP Id: 1.1.1.1:0, Transport address: 1.1.1.1
- Hold time: 15 sec
- r2-eth1: xmit/recv
- LDP Id: 3.3.3.3:0, Transport address: 3.3.3.3
- Hold time: 15 sec
- LDP Id: 4.4.4.4:0, Transport address: 4.4.4.4
- Hold time: 15 sec
- Targeted Hellos:
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_interface.ref-1 b/tests/topotests/ldp-topo1/r2/show_mpls_ldp_interface.ref-1
deleted file mode 100644
index f9fc98408c..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_interface.ref-1
+++ /dev/null
@@ -1,3 +0,0 @@
-AF Interface State Uptime Hello Timers ac
-ipv4 r2-eth0 ACTIVE xx:xx:xx 5/15 1
-ipv4 r2-eth1 ACTIVE xx:xx:xx 5/15 2
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_neighbor.ref-1 b/tests/topotests/ldp-topo1/r2/show_mpls_ldp_neighbor.ref-1
deleted file mode 100644
index a70e2f48c6..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_mpls_ldp_neighbor.ref-1
+++ /dev/null
@@ -1,26 +0,0 @@
-Peer LDP Identifier: 1.1.1.1:0
- TCP connection: 2.2.2.2:xxx - 1.1.1.1:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r2-eth0
-
-Peer LDP Identifier: 3.3.3.3:0
- TCP connection: 2.2.2.2:xxx - 3.3.3.3:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r2-eth1
-
-Peer LDP Identifier: 4.4.4.4:0
- TCP connection: 2.2.2.2:xxx - 4.4.4.4:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r2-eth1
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_table.ref b/tests/topotests/ldp-topo1/r2/show_mpls_table.ref
index 46420ccd11..df05a6b31a 100644
--- a/tests/topotests/ldp-topo1/r2/show_mpls_table.ref
+++ b/tests/topotests/ldp-topo1/r2/show_mpls_table.ref
@@ -1,7 +1,7 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.1.1 implicit-null
- XX LDP 10.0.2.3 implicit-null
- XX LDP 10.0.2.4 implicit-null
- XX LDP 10.0.3.3 implicit-null
+ Inbound Label Type Nexthop Outbound Label
+ -----------------------------------------------
+ XX LDP 10.0.1.1 implicit-null
+ XX LDP 10.0.2.3 implicit-null
+ XX LDP 10.0.2.4 implicit-null
+ XX LDP 10.0.3.3 implicit-null
+
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_table.ref-1 b/tests/topotests/ldp-topo1/r2/show_mpls_table.ref-1
deleted file mode 100644
index ba244e76ec..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_mpls_table.ref-1
+++ /dev/null
@@ -1,7 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.1.1 3
- XX LDP 10.0.2.3 3
- XX LDP 10.0.2.4 3
- XX LDP 10.0.3.3 3
diff --git a/tests/topotests/ldp-topo1/r2/show_mpls_table.ref-no-impl-null b/tests/topotests/ldp-topo1/r2/show_mpls_table.ref-no-impl-null
deleted file mode 100644
index ba244e76ec..0000000000
--- a/tests/topotests/ldp-topo1/r2/show_mpls_table.ref-no-impl-null
+++ /dev/null
@@ -1,7 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.1.1 3
- XX LDP 10.0.2.3 3
- XX LDP 10.0.2.4 3
- XX LDP 10.0.3.3 3
diff --git a/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref-1 b/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref-1
deleted file mode 100644
index c8a29400b2..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_ipv4_route.ref-1
+++ /dev/null
@@ -1,7 +0,0 @@
-O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx
-O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0
-O 3.3.3.3/32 [110/0] is directly connected, lo
-O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0
-O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0
-O 10.0.2.0/24 [110/10] is directly connected, r3-eth0
-O 10.0.3.0/24 [110/10] is directly connected, r3-eth1
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_binding.ref-1 b/tests/topotests/ldp-topo1/r3/show_mpls_ldp_binding.ref-1
deleted file mode 100644
index e04d2b7e4a..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_binding.ref-1
+++ /dev/null
@@ -1,49 +0,0 @@
-1.1.1.1/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
- 4.4.4.4 xxx
-2.2.2.2/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 4.4.4.4 xxx
-3.3.3.3/32
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
- 4.4.4.4 xxx
-4.4.4.4/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
- 4.4.4.4 imp-null
-10.0.1.0/24
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 4.4.4.4 xxx
-10.0.2.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 4.4.4.4 imp-null
-10.0.3.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 4.4.4.4 xxx
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_discovery.ref-1 b/tests/topotests/ldp-topo1/r3/show_mpls_ldp_discovery.ref-1
deleted file mode 100644
index 5e299fff9c..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_discovery.ref-1
+++ /dev/null
@@ -1,9 +0,0 @@
-Local LDP Identifier: 3.3.3.3:0
-Discovery Sources:
- Interfaces:
- r3-eth0: xmit/recv
- LDP Id: 2.2.2.2:0, Transport address: 2.2.2.2
- Hold time: 15 sec
- LDP Id: 4.4.4.4:0, Transport address: 4.4.4.4
- Hold time: 15 sec
- Targeted Hellos:
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_interface.ref-1 b/tests/topotests/ldp-topo1/r3/show_mpls_ldp_interface.ref-1
deleted file mode 100644
index 243811e3a9..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_interface.ref-1
+++ /dev/null
@@ -1,2 +0,0 @@
-AF Interface State Uptime Hello Timers ac
-ipv4 r3-eth0 ACTIVE xx:xx:xx 5/15 2
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_neighbor.ref-1 b/tests/topotests/ldp-topo1/r3/show_mpls_ldp_neighbor.ref-1
deleted file mode 100644
index ee1983ac29..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_mpls_ldp_neighbor.ref-1
+++ /dev/null
@@ -1,17 +0,0 @@
-Peer LDP Identifier: 2.2.2.2:0
- TCP connection: 3.3.3.3:xxx - 2.2.2.2:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r3-eth0
-
-Peer LDP Identifier: 4.4.4.4:0
- TCP connection: 3.3.3.3:xxx - 4.4.4.4:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r3-eth0
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_table.ref b/tests/topotests/ldp-topo1/r3/show_mpls_table.ref
index c367f240f4..3978895613 100644
--- a/tests/topotests/ldp-topo1/r3/show_mpls_table.ref
+++ b/tests/topotests/ldp-topo1/r3/show_mpls_table.ref
@@ -1,10 +1,10 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.2.2 XX
- XX LDP 10.0.2.2 implicit-null
- XX LDP 10.0.2.2 implicit-null
- XX LDP 10.0.2.4 implicit-null
- XX LDP 10.0.3.2 XX
- XX LDP 10.0.3.2 implicit-null
- XX LDP 10.0.3.2 implicit-null
+ Inbound Label Type Nexthop Outbound Label
+ -----------------------------------------------
+ XX LDP 10.0.2.2 XX
+ XX LDP 10.0.2.2 implicit-null
+ XX LDP 10.0.2.2 implicit-null
+ XX LDP 10.0.2.4 implicit-null
+ XX LDP 10.0.3.2 XX
+ XX LDP 10.0.3.2 implicit-null
+ XX LDP 10.0.3.2 implicit-null
+
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_table.ref-1 b/tests/topotests/ldp-topo1/r3/show_mpls_table.ref-1
deleted file mode 100644
index 9198969bd5..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_mpls_table.ref-1
+++ /dev/null
@@ -1,10 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 XX
- XX LDP 10.0.2.4 3
- XX LDP 10.0.3.2 3
- XX LDP 10.0.3.2 3
- XX LDP 10.0.3.2 XX
diff --git a/tests/topotests/ldp-topo1/r3/show_mpls_table.ref-no-impl-null b/tests/topotests/ldp-topo1/r3/show_mpls_table.ref-no-impl-null
deleted file mode 100644
index 9198969bd5..0000000000
--- a/tests/topotests/ldp-topo1/r3/show_mpls_table.ref-no-impl-null
+++ /dev/null
@@ -1,10 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 XX
- XX LDP 10.0.2.4 3
- XX LDP 10.0.3.2 3
- XX LDP 10.0.3.2 3
- XX LDP 10.0.3.2 XX
diff --git a/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref-1 b/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref-1
deleted file mode 100644
index df2a2b585f..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_ipv4_route.ref-1
+++ /dev/null
@@ -1,7 +0,0 @@
-O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx
-O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0
-O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0
-O 4.4.4.4/32 [110/0] is directly connected, lo
-O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0
-O 10.0.2.0/24 [110/10] is directly connected, r4-eth0
-O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_binding.ref-1 b/tests/topotests/ldp-topo1/r4/show_mpls_ldp_binding.ref-1
deleted file mode 100644
index 3d55805d7c..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_binding.ref-1
+++ /dev/null
@@ -1,49 +0,0 @@
-1.1.1.1/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
- 3.3.3.3 xxx
-2.2.2.2/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 3.3.3.3 xxx
-3.3.3.3/32
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
- 3.3.3.3 imp-null
-4.4.4.4/32
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 xxx
- 3.3.3.3 xxx
-10.0.1.0/24
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 3.3.3.3 xxx
-10.0.2.0/24
- Local binding: label: imp-null
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 3.3.3.3 imp-null
-10.0.3.0/24
- Local binding: label: xxx
- Remote bindings:
- Peer Label
- ----------------- ---------
- 2.2.2.2 imp-null
- 3.3.3.3 imp-null
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_discovery.ref-1 b/tests/topotests/ldp-topo1/r4/show_mpls_ldp_discovery.ref-1
deleted file mode 100644
index 3ebddd606a..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_discovery.ref-1
+++ /dev/null
@@ -1,9 +0,0 @@
-Local LDP Identifier: 4.4.4.4:0
-Discovery Sources:
- Interfaces:
- r4-eth0: xmit/recv
- LDP Id: 2.2.2.2:0, Transport address: 2.2.2.2
- Hold time: 15 sec
- LDP Id: 3.3.3.3:0, Transport address: 3.3.3.3
- Hold time: 15 sec
- Targeted Hellos:
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_interface.ref-1 b/tests/topotests/ldp-topo1/r4/show_mpls_ldp_interface.ref-1
deleted file mode 100644
index dd57656f15..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_interface.ref-1
+++ /dev/null
@@ -1,2 +0,0 @@
-AF Interface State Uptime Hello Timers ac
-ipv4 r4-eth0 ACTIVE xx:xx:xx 5/15 2
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_neighbor.ref-1 b/tests/topotests/ldp-topo1/r4/show_mpls_ldp_neighbor.ref-1
deleted file mode 100644
index fb0e7d7dfa..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_mpls_ldp_neighbor.ref-1
+++ /dev/null
@@ -1,17 +0,0 @@
-Peer LDP Identifier: 2.2.2.2:0
- TCP connection: 4.4.4.4:xxx - 2.2.2.2:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r4-eth0
-
-Peer LDP Identifier: 3.3.3.3:0
- TCP connection: 4.4.4.4:xxx - 3.3.3.3:xxx
- Session Holdtime: 180 sec
- State: OPERATIONAL; Downstream-Unsolicited
- Up time: xx:xx:xx
- LDP Discovery Sources:
- IPv4:
- Interface: r4-eth0
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_table.ref b/tests/topotests/ldp-topo1/r4/show_mpls_table.ref
index 9f86cd67cc..174dcebd4d 100644
--- a/tests/topotests/ldp-topo1/r4/show_mpls_table.ref
+++ b/tests/topotests/ldp-topo1/r4/show_mpls_table.ref
@@ -1,9 +1,9 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.2.2 XX
- XX LDP 10.0.2.2 implicit-null
- XX LDP 10.0.2.2 implicit-null
- XX LDP 10.0.2.2 implicit-null
- XX LDP 10.0.2.3 implicit-null
- XX LDP 10.0.2.3 implicit-null
+ Inbound Label Type Nexthop Outbound Label
+ -----------------------------------------------
+ XX LDP 10.0.2.2 XX
+ XX LDP 10.0.2.2 implicit-null
+ XX LDP 10.0.2.2 implicit-null
+ XX LDP 10.0.2.2 implicit-null
+ XX LDP 10.0.2.3 implicit-null
+ XX LDP 10.0.2.3 implicit-null
+
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_table.ref-1 b/tests/topotests/ldp-topo1/r4/show_mpls_table.ref-1
deleted file mode 100644
index b8cf5a2702..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_mpls_table.ref-1
+++ /dev/null
@@ -1,9 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 XX
- XX LDP 10.0.2.3 3
- XX LDP 10.0.2.3 3
diff --git a/tests/topotests/ldp-topo1/r4/show_mpls_table.ref-no-impl-null b/tests/topotests/ldp-topo1/r4/show_mpls_table.ref-no-impl-null
deleted file mode 100644
index b8cf5a2702..0000000000
--- a/tests/topotests/ldp-topo1/r4/show_mpls_table.ref-no-impl-null
+++ /dev/null
@@ -1,9 +0,0 @@
- Inbound Outbound
- Label Type Nexthop Label
--------- ------- --------------- --------
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 3
- XX LDP 10.0.2.2 XX
- XX LDP 10.0.2.3 3
- XX LDP 10.0.2.3 3
diff --git a/tests/topotests/ldp-topo1/test_ldp_topo1.py b/tests/topotests/ldp-topo1/test_ldp_topo1.py
index 409a5f54c8..f02f4c4e21 100755
--- a/tests/topotests/ldp-topo1/test_ldp_topo1.py
+++ b/tests/topotests/ldp-topo1/test_ldp_topo1.py
@@ -77,12 +77,6 @@ from lib import topotest
fatal_error = ""
-# Expected version of CLI Output - Appendix to filename
-# empty string = current, latest output (default)
-# "-1" ... "-NNN" previous versions (incrementing with each version)
-cli_version = ""
-
-
#####################################################
##
## Network Topology Definition
@@ -164,7 +158,6 @@ def teardown_module(module):
def test_router_running():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -179,35 +172,12 @@ def test_router_running():
fatal_error = net['r%s' % i].checkRouterRunning()
assert fatal_error == "", fatal_error
- # Detect CLI Version
- # At this time, there are only 2 possible outputs, so simple check
- output = net['r1'].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip()
-
- # Check if old or new format of CLI Output. Default is to current format
- #
- # Old (v1) output looks like this:
- # Local LDP Identifier: 1.1.1.1:0
- # Discovery Sources:
- # Interfaces:
- # r1-eth0: xmit/recv
- # LDP Id: 2.2.2.2:0, Transport address: 2.2.2.2
- # Hold time: 15 sec
- # Targeted Hellos:
- #
- # Current (v0) output looks like this:
- # AF ID Type Source Holdtime
- # ipv4 2.2.2.2 Link r1-eth0 15
- pattern = re.compile("^Local LDP Identifier.*")
- if pattern.match(output):
- cli_version = "-1"
-
# For debugging after starting FRR/Quagga daemons, uncomment the next line
# CLI(net)
def test_mpls_interfaces():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -220,7 +190,7 @@ def test_mpls_interfaces():
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_interface.ref%s' % (thisDir, i, cli_version)
+ refTableFile = '%s/r%s/show_mpls_ldp_interface.ref' % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
@@ -263,7 +233,6 @@ def test_mpls_interfaces():
def test_mpls_ldp_neighbor_establish():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -279,22 +248,22 @@ def test_mpls_ldp_neighbor_establish():
# Look for any node not yet converged
for i in range(1, 5):
established = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip()
- if cli_version != "-1":
- # On current version, we need to make sure they all turn to OPERATIONAL on all lines
- #
- lines = ('\n'.join(established.splitlines()) + '\n').splitlines(1)
- # Check all lines to be either table header (starting with ^AF or show OPERATIONAL)
- header = r'^AF.*'
- operational = r'^ip.*OPERATIONAL.*'
- found_operational = 0
- for j in range(1, len(lines)):
- if (not re.search(header, lines[j])) and (not re.search(operational, lines[j])):
- established = "" # Empty string shows NOT established
- if re.search(operational, lines[j]):
- found_operational += 1
- if found_operational < 1:
- # Need at least one operational neighbor
+
+ # Make sure every neighbor has turned OPERATIONAL on all lines
+ #
+ lines = ('\n'.join(established.splitlines()) + '\n').splitlines(1)
+ # Every line must be either the table header (starting with AF) or show OPERATIONAL
+ header = r'^AF.*'
+ operational = r'^ip.*OPERATIONAL.*'
+ found_operational = 0
+ for j in range(1, len(lines)):
+ if (not re.search(header, lines[j])) and (not re.search(operational, lines[j])):
established = "" # Empty string shows NOT established
+ if re.search(operational, lines[j]):
+ found_operational += 1
+ if found_operational < 1:
+ # Need at least one operational neighbor
+ established = "" # Empty string shows NOT established
if not established:
print('Waiting for r%s' %i)
sys.stdout.flush()
@@ -326,7 +295,6 @@ def test_mpls_ldp_neighbor_establish():
def test_mpls_ldp_discovery():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -339,7 +307,7 @@ def test_mpls_ldp_discovery():
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_discovery.ref%s' % (thisDir, i, cli_version)
+ refTableFile = '%s/r%s/show_mpls_ldp_discovery.ref' % (thisDir, i)
if os.path.isfile(refTableFile):
# Actual output from router
actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip()
@@ -381,7 +349,6 @@ def test_mpls_ldp_discovery():
def test_mpls_ldp_neighbor():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -394,7 +361,7 @@ def test_mpls_ldp_neighbor():
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_neighbor.ref%s' % (thisDir, i, cli_version)
+ refTableFile = '%s/r%s/show_mpls_ldp_neighbor.ref' % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
@@ -405,17 +372,8 @@ def test_mpls_ldp_neighbor():
actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip()
# Mask out changing parts in output
- if cli_version == "-1":
- # Mask out Timer in Uptime
- actual = re.sub(r"Up time: [0-9][0-9]:[0-9][0-9]:[0-9][0-9]", "Up time: xx:xx:xx", actual)
- # Mask out Port numbers in TCP connection
- actual = re.sub(r"TCP connection: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]):[0-9]+ - ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]):[0-9]+",
- r"TCP connection: \1:xxx - \2:xxx", actual)
- else:
- # Current Version
- #
- # Mask out Timer in Uptime
- actual = re.sub(r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", r"\1xx:xx:xx", actual)
+ # Mask out Timer in Uptime
+ actual = re.sub(r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", r"\1xx:xx:xx", actual)
# Fix newlines (make them all the same)
actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
@@ -446,7 +404,6 @@ def test_mpls_ldp_neighbor():
def test_mpls_ldp_binding():
global fatal_error
global net
- global cli_version
# Skip this test for now until proper sorting of the output
# is implemented
@@ -463,7 +420,7 @@ def test_mpls_ldp_binding():
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_binding.ref%s' % (thisDir, i, cli_version)
+ refTableFile = '%s/r%s/show_mpls_ldp_binding.ref' % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
@@ -474,16 +431,9 @@ def test_mpls_ldp_binding():
actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp binding" 2> /dev/null').rstrip()
# Mask out changing parts in output
- if cli_version == "-1":
- # Mask out label
- actual = re.sub(r"label: [0-9]+", "label: xxx", actual)
- actual = re.sub(r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+[ ]+)[0-9]+", r"\1xxx", actual)
- else:
- # Current Version
- #
- # Mask out label
- actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual)
- actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual)
+ # Mask out label
+ actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual)
+ actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual)
# Fix newlines (make them all the same)
actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
@@ -527,7 +477,6 @@ def test_mpls_ldp_binding():
def test_zebra_ipv4_routingTable():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -540,7 +489,7 @@ def test_zebra_ipv4_routingTable():
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_ipv4_route.ref%s' % (thisDir, i, cli_version)
+ refTableFile = '%s/r%s/show_ipv4_route.ref' % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
@@ -562,9 +511,6 @@ def test_zebra_ipv4_routingTable():
# now fix newlines of expected (make them all the same)
expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
- # Add missing comma before label (for old version)
- actual = re.sub(r"([0-9]) label ", r"\1, label ", actual)
-
# Fix newlines (make them all the same)
actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
@@ -594,7 +540,6 @@ def test_zebra_ipv4_routingTable():
def test_mpls_table():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
@@ -607,23 +552,16 @@ def test_mpls_table():
print("******************************************\n")
failures = 0
- version = cli_version
- if (version == ""):
- # check for new output without implicit-null
- output = net['r1'].cmd('vtysh -c "show mpls table" 2> /dev/null').rstrip()
- if 'LDP 10.0.1.2 3' in output:
- version = "-no-impl-null"
-
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_table.ref%s' % (thisDir, i, version)
+ refTableFile = '%s/r%s/show_mpls_table.ref' % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
- expected = open(refTableFile).read().rstrip()
+ expected = open(refTableFile).read()
# Fix newlines (make them all the same)
expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls table" 2> /dev/null').rstrip()
+ actual = net['r%s' % i].cmd('vtysh -c "show mpls table" 2> /dev/null')
# Fix inconsistent Label numbers at beginning of line
actual = re.sub(r"(\s+)[0-9]+(\s+LDP)", r"\1XX\2", actual)
@@ -672,7 +610,6 @@ def test_mpls_table():
def test_linux_mpls_routes():
global fatal_error
global net
- global cli_version
# Skip if previous fatal error condition is raised
if (fatal_error != ""):
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 2613f45f1c..c05e14a95e 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -32,7 +32,8 @@ from lib.common_config import (create_common_configuration,
load_config_to_router,
check_address_types,
generate_ips,
- find_interface_with_greater_ip)
+ find_interface_with_greater_ip,
+ run_frr_cmd, retry)
BGP_CONVERGENCE_TIMEOUT = 10
@@ -116,8 +117,8 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
logger.debug("Router %s: 'bgp' not present in input_dict", router)
continue
- result = __create_bgp_global(tgen, input_dict, router, build)
- if result is True:
+ data_all_bgp = __create_bgp_global(tgen, input_dict, router, build)
+ if data_all_bgp:
bgp_data = input_dict[router]["bgp"]
bgp_addr_data = bgp_data.setdefault("address_family", {})
@@ -134,8 +135,18 @@ def create_router_bgp(tgen, topo, input_dict=None, build=False):
or ipv6_data.setdefault("unicast", {}) else False
if neigh_unicast:
- result = __create_bgp_unicast_neighbor(
- tgen, topo, input_dict, router, build)
+ data_all_bgp = __create_bgp_unicast_neighbor(
+ tgen, topo, input_dict, router,
+ config_data=data_all_bgp)
+
+ try:
+ result = create_common_configuration(tgen, router, data_all_bgp,
+ "bgp", build)
+ except InvalidCLIError:
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
logger.debug("Exiting lib API: create_router_bgp()")
return result
@@ -157,77 +168,66 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
True or False
"""
- result = False
logger.debug("Entering lib API: __create_bgp_global()")
- try:
-
- bgp_data = input_dict[router]["bgp"]
- del_bgp_action = bgp_data.setdefault("delete", False)
- if del_bgp_action:
- config_data = ["no router bgp"]
- result = create_common_configuration(tgen, router, config_data,
- "bgp", build=build)
- return result
- config_data = []
+ bgp_data = input_dict[router]["bgp"]
+ del_bgp_action = bgp_data.setdefault("delete", False)
+ if del_bgp_action:
+ config_data = ["no router bgp"]
- if "local_as" not in bgp_data and build:
- logger.error("Router %s: 'local_as' not present in input_dict"
- "for BGP", router)
- return False
+ return config_data
- local_as = bgp_data.setdefault("local_as", "")
- cmd = "router bgp {}".format(local_as)
- vrf_id = bgp_data.setdefault("vrf", None)
- if vrf_id:
- cmd = "{} vrf {}".format(cmd, vrf_id)
-
- config_data.append(cmd)
+ config_data = []
- router_id = bgp_data.setdefault("router_id", None)
- del_router_id = bgp_data.setdefault("del_router_id", False)
- if del_router_id:
- config_data.append("no bgp router-id")
- if router_id:
- config_data.append("bgp router-id {}".format(
- router_id))
+ if "local_as" not in bgp_data and build:
+ logger.error("Router %s: 'local_as' not present in input_dict"
+ "for BGP", router)
+ return False
- aggregate_address = bgp_data.setdefault("aggregate_address",
- {})
- if aggregate_address:
- network = aggregate_address.setdefault("network", None)
- if not network:
- logger.error("Router %s: 'network' not present in "
- "input_dict for BGP", router)
- else:
- cmd = "aggregate-address {}".format(network)
+ local_as = bgp_data.setdefault("local_as", "")
+ cmd = "router bgp {}".format(local_as)
+ vrf_id = bgp_data.setdefault("vrf", None)
+ if vrf_id:
+ cmd = "{} vrf {}".format(cmd, vrf_id)
+
+ config_data.append(cmd)
+
+ router_id = bgp_data.setdefault("router_id", None)
+ del_router_id = bgp_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no bgp router-id")
+ if router_id:
+ config_data.append("bgp router-id {}".format(
+ router_id))
+
+ aggregate_address = bgp_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ network = aggregate_address.setdefault("network", None)
+ if not network:
+ logger.error("Router %s: 'network' not present in "
+ "input_dict for BGP", router)
+ else:
+ cmd = "aggregate-address {}".format(network)
- as_set = aggregate_address.setdefault("as_set", False)
- summary = aggregate_address.setdefault("summary", False)
- del_action = aggregate_address.setdefault("delete", False)
- if as_set:
- cmd = "{} {}".format(cmd, "as-set")
- if summary:
- cmd = "{} {}".format(cmd, "summary")
+ as_set = aggregate_address.setdefault("as_set", False)
+ summary = aggregate_address.setdefault("summary", False)
+ del_action = aggregate_address.setdefault("delete", False)
+ if as_set:
+ cmd = "{} {}".format(cmd, "as-set")
+ if summary:
+ cmd = "{} {}".format(cmd, "summary")
- if del_action:
- cmd = "no {}".format(cmd)
+ if del_action:
+ cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ config_data.append(cmd)
- result = create_common_configuration(tgen, router, config_data,
- "bgp", build=build)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
-
- logger.debug("Exiting lib API: create_bgp_global()")
- return result
+ return config_data
-def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
+def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router,
+ config_data=None):
"""
Helper API to create configuration for address-family unicast
@@ -240,124 +240,118 @@ def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, build=False):
* `build` : Only for initial setup phase this is set as True.
"""
- result = False
logger.debug("Entering lib API: __create_bgp_unicast_neighbor()")
- try:
- config_data = ["router bgp"]
- bgp_data = input_dict[router]["bgp"]["address_family"]
- for addr_type, addr_dict in bgp_data.iteritems():
- if not addr_dict:
- continue
+ add_neigh = True
+ if "router bgp "in config_data:
+ add_neigh = False
+ bgp_data = input_dict[router]["bgp"]["address_family"]
- if not check_address_types(addr_type):
- continue
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict:
+ continue
+ if not check_address_types(addr_type):
+ continue
+
+ addr_data = addr_dict["unicast"]
+ if addr_data:
config_data.append("address-family {} unicast".format(
addr_type
))
- addr_data = addr_dict["unicast"]
- advertise_network = addr_data.setdefault("advertise_networks",
- [])
- for advertise_network_dict in advertise_network:
- network = advertise_network_dict["network"]
- if type(network) is not list:
- network = [network]
-
- if "no_of_network" in advertise_network_dict:
- no_of_network = advertise_network_dict["no_of_network"]
- else:
- no_of_network = 1
-
- del_action = advertise_network_dict.setdefault("delete",
- False)
+ advertise_network = addr_data.setdefault("advertise_networks",
+ [])
+ for advertise_network_dict in advertise_network:
+ network = advertise_network_dict["network"]
+ if type(network) is not list:
+ network = [network]
+
+ if "no_of_network" in advertise_network_dict:
+ no_of_network = advertise_network_dict["no_of_network"]
+ else:
+ no_of_network = 1
- # Generating IPs for verification
- prefix = str(
- ipaddr.IPNetwork(unicode(network[0])).prefixlen)
- network_list = generate_ips(network, no_of_network)
- for ip in network_list:
- ip = str(ipaddr.IPNetwork(unicode(ip)).network)
+ del_action = advertise_network_dict.setdefault("delete",
+ False)
- cmd = "network {}/{}\n".format(ip, prefix)
- if del_action:
- cmd = "no {}".format(cmd)
+ # Generating IPs for verification
+ prefix = str(
+ ipaddr.IPNetwork(unicode(network[0])).prefixlen)
+ network_list = generate_ips(network, no_of_network)
+ for ip in network_list:
+ ip = str(ipaddr.IPNetwork(unicode(ip)).network)
- config_data.append(cmd)
+ cmd = "network {}/{}".format(ip, prefix)
+ if del_action:
+ cmd = "no {}".format(cmd)
- max_paths = addr_data.setdefault("maximum_paths", {})
- if max_paths:
- ibgp = max_paths.setdefault("ibgp", None)
- ebgp = max_paths.setdefault("ebgp", None)
- if ibgp:
- config_data.append("maximum-paths ibgp {}".format(
- ibgp
- ))
- if ebgp:
- config_data.append("maximum-paths {}".format(
- ebgp
- ))
-
- aggregate_address = addr_data.setdefault("aggregate_address",
- {})
- if aggregate_address:
- ip = aggregate_address("network", None)
- attribute = aggregate_address("attribute", None)
- if ip:
- cmd = "aggregate-address {}".format(ip)
- if attribute:
- cmd = "{} {}".format(cmd, attribute)
+ config_data.append(cmd)
- config_data.append(cmd)
+ max_paths = addr_data.setdefault("maximum_paths", {})
+ if max_paths:
+ ibgp = max_paths.setdefault("ibgp", None)
+ ebgp = max_paths.setdefault("ebgp", None)
+ if ibgp:
+ config_data.append("maximum-paths ibgp {}".format(
+ ibgp
+ ))
+ if ebgp:
+ config_data.append("maximum-paths {}".format(
+ ebgp
+ ))
+
+ aggregate_address = addr_data.setdefault("aggregate_address",
+ {})
+ if aggregate_address:
+ ip = aggregate_address.setdefault("network", None)
+ attribute = aggregate_address.setdefault("attribute", None)
+ if ip:
+ cmd = "aggregate-address {}".format(ip)
+ if attribute:
+ cmd = "{} {}".format(cmd, attribute)
- redistribute_data = addr_data.setdefault("redistribute", {})
- if redistribute_data:
- for redistribute in redistribute_data:
- if "redist_type" not in redistribute:
- logger.error("Router %s: 'redist_type' not present in "
- "input_dict", router)
- else:
- cmd = "redistribute {}".format(
- redistribute["redist_type"])
- redist_attr = redistribute.setdefault("attribute",
- None)
- if redist_attr:
- cmd = "{} {}".format(cmd, redist_attr)
- del_action = redistribute.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ config_data.append(cmd)
- if "neighbor" in addr_data:
- neigh_data = __create_bgp_neighbor(topo, input_dict,
- router, addr_type)
- config_data.extend(neigh_data)
+ redistribute_data = addr_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.error("Router %s: 'redist_type' not present in "
+ "input_dict", router)
+ else:
+ cmd = "redistribute {}".format(
+ redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute",
+ None)
+ if redist_attr:
+ cmd = "{} {}".format(cmd, redist_attr)
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- for addr_type, addr_dict in bgp_data.iteritems():
- if not addr_dict or not check_address_types(addr_type):
- continue
+ if "neighbor" in addr_data:
+ neigh_data = __create_bgp_neighbor(topo, input_dict,
+ router, addr_type, add_neigh)
+ config_data.extend(neigh_data)
- addr_data = addr_dict["unicast"]
- if "neighbor" in addr_data:
- neigh_addr_data = __create_bgp_unicast_address_family(
- topo, input_dict, router, addr_type)
+ for addr_type, addr_dict in bgp_data.iteritems():
+ if not addr_dict or not check_address_types(addr_type):
+ continue
- config_data.extend(neigh_addr_data)
+ addr_data = addr_dict["unicast"]
+ if "neighbor" in addr_data:
+ neigh_addr_data = __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type, add_neigh)
- result = create_common_configuration(tgen, router, config_data,
- None, build=build)
+ config_data.extend(neigh_addr_data)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
- return result
+ return config_data
-def __create_bgp_neighbor(topo, input_dict, router, addr_type):
+def __create_bgp_neighbor(topo, input_dict, router, addr_type, add_neigh=True):
"""
Helper API to create neighbor specific configuration
@@ -391,7 +385,8 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type):
neigh_cxt = "neighbor {}".format(ip_addr)
- config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
+ if add_neigh:
+ config_data.append("{} remote-as {}".format(neigh_cxt, remote_as))
if addr_type == "ipv6":
config_data.append("address-family ipv6 unicast")
config_data.append("{} activate".format(neigh_cxt))
@@ -429,7 +424,8 @@ def __create_bgp_neighbor(topo, input_dict, router, addr_type):
return config_data
-def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
+def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type,
+ add_neigh=True):
"""
API prints bgp global config to bgp_json file.
@@ -531,6 +527,7 @@ def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type):
#############################################
# Verification APIs
#############################################
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_router_id(tgen, topo, input_dict):
"""
Running command "show ip bgp json" for DUT and reading router-id
@@ -565,7 +562,7 @@ def verify_router_id(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_router_id()")
+ logger.debug("Entering lib API: verify_router_id()")
for router in input_dict.keys():
if router not in tgen.routers():
continue
@@ -576,9 +573,9 @@ def verify_router_id(tgen, topo, input_dict):
"del_router_id", False)
logger.info("Checking router %s router-id", router)
- show_bgp_json = rnode.vtysh_cmd("show ip bgp json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
- router_id_out = show_bgp_json["routerId"]
+ router_id_out = show_bgp_json["ipv4Unicast"]["routerId"]
router_id_out = ipaddr.IPv4Address(unicode(router_id_out))
# Once router-id is deleted, highest interface ip should become
@@ -598,100 +595,84 @@ def verify_router_id(tgen, topo, input_dict):
router_id_out)
return errormsg
- logger.info("Exiting lib API: verify_router_id()")
+ logger.debug("Exiting lib API: verify_router_id()")
return True
+@retry(attempts=20, wait=2, return_is_str=True)
def verify_bgp_convergence(tgen, topo):
"""
API will verify if BGP is converged within the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
state is established,
-
Parameters
----------
* `tgen`: topogen object
* `topo`: input json file data
* `addr_type`: ip_type, ipv4/ipv6
-
Usage
-----
# To verify if BGP is converged for all the routers used in
topology
results = verify_bgp_convergence(tgen, topo, "ipv4")
-
Returns
-------
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_confergence()")
+ logger.debug("Entering lib API: verify_bgp_convergence()")
for router, rnode in tgen.routers().iteritems():
- logger.info("Verifying BGP Convergence on router %s:", router)
-
- for retry in range(1, 11):
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
- isjson=True)
- # Verifying output dictionary show_bgp_json is empty or not
- if not bool(show_bgp_json):
- errormsg = "BGP is not running"
- return errormsg
+ logger.info("Verifying BGP Convergence on router %s", router)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
+ isjson=True)
+ # Verifying output dictionary show_bgp_json is empty or not
+ if not bool(show_bgp_json):
+ errormsg = "BGP is not running"
+ return errormsg
- # To find neighbor ip type
+ # To find neighbor ip type
+ bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
total_peer = 0
- bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
- for addr_type in bgp_addr_type.keys():
- if not check_address_types(addr_type):
- continue
-
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- for bgp_neighbor in bgp_neighbors:
- total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
-
- for addr_type in bgp_addr_type.keys():
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
-
- no_of_peer = 0
- for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
- for dest_link in peer_data["dest_link"].keys():
- data = topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- neighbor_ip = \
- data[dest_link][addr_type].split("/")[0]
- if addr_type == "ipv4":
- ipv4_data = show_bgp_json["ipv4Unicast"][
- "peers"]
- nh_state = ipv4_data[neighbor_ip]["state"]
- else:
- ipv6_data = show_bgp_json["ipv6Unicast"][
- "peers"]
- nh_state = ipv6_data[neighbor_ip]["state"]
-
- if nh_state == "Established":
- no_of_peer += 1
- if no_of_peer == total_peer:
- logger.info("BGP is Converged for router %s", router)
- break
- else:
- logger.warning("BGP is not yet Converged for router %s",
- router)
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on"
- " router %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- show_bgp_summary = rnode.vtysh_cmd("show bgp summary")
- errormsg = "TIMEOUT!! BGP is not converged in {} " \
- "seconds for router {} \n {}".format(
- BGP_CONVERGENCE_TIMEOUT, router,
- show_bgp_summary)
- return errormsg
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ for bgp_neighbor in bgp_neighbors:
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
+
+ for addr_type in bgp_addr_type.keys():
+ if not check_address_types(addr_type):
+ continue
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
+
+ no_of_peer = 0
+ for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ neighbor_ip = \
+ data[dest_link][addr_type].split("/")[0]
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json["ipv4Unicast"][
+ "peers"]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json["ipv6Unicast"][
+ "peers"]
+ nh_state = ipv6_data[neighbor_ip]["state"]
+
+ if nh_state == "Established":
+ no_of_peer += 1
+ if no_of_peer == total_peer:
+ logger.info("BGP is Converged for router %s", router)
+ else:
+ errormsg = "BGP is not converged for router {}".format(
+ router)
+ return errormsg
- logger.info("Exiting API: verify_bgp_confergence()")
+ logger.debug("Exiting API: verify_bgp_convergence()")
return True
@@ -723,7 +704,7 @@ def modify_as_number(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: modify_as_number()")
+ logger.debug("Entering lib API: modify_as_number()")
try:
new_topo = deepcopy(topo["routers"])
@@ -757,11 +738,12 @@ def modify_as_number(tgen, topo, input_dict):
logger.error(errormsg)
return errormsg
- logger.info("Exiting lib API: modify_as_number()")
+ logger.debug("Exiting lib API: modify_as_number()")
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_as_numbers(tgen, topo, input_dict):
"""
This API is to verify AS numbers for given DUT by running
@@ -791,7 +773,7 @@ def verify_as_numbers(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_as_numbers()")
+ logger.debug("Entering lib API: verify_as_numbers()")
for router in input_dict.keys():
if router not in tgen.routers():
continue
@@ -800,7 +782,7 @@ def verify_as_numbers(tgen, topo, input_dict):
logger.info("Verifying AS numbers for dut %s:", router)
- show_ip_bgp_neighbor_json = rnode.vtysh_cmd(
+ show_ip_bgp_neighbor_json = run_frr_cmd(rnode,
"show ip bgp neighbor json", isjson=True)
local_as = input_dict[router]["bgp"]["local_as"]
bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
@@ -846,7 +828,7 @@ def verify_as_numbers(tgen, topo, input_dict):
"neighbor %s, found expected: %s",
router, bgp_neighbor, remote_as)
- logger.info("Exiting lib API: verify_AS_numbers()")
+ logger.debug("Exiting lib API: verify_AS_numbers()")
return True
@@ -873,7 +855,7 @@ def clear_bgp_and_verify(tgen, topo, router):
errormsg(str) or True
"""
- logger.info("Entering lib API: clear_bgp_and_verify()")
+ logger.debug("Entering lib API: clear_bgp_and_verify()")
if router not in tgen.routers():
return False
@@ -882,21 +864,15 @@ def clear_bgp_and_verify(tgen, topo, router):
peer_uptime_before_clear_bgp = {}
# Verifying BGP convergence before bgp clear command
- for retry in range(1, 11):
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
- " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
- return errormsg
-
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ for retry in range(31):
+ sleeptime = 3
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
- logger.info(show_bgp_json)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
@@ -950,33 +926,33 @@ def clear_bgp_and_verify(tgen, topo, router):
" clear", router)
break
else:
- logger.warning("BGP is not yet Converged for router %s "
- "before bgp clear", router)
+ logger.info("BGP is not yet Converged for router %s "
+ "before bgp clear", router)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
+ " router {}".format(router)
+ return errormsg
logger.info(peer_uptime_before_clear_bgp)
# Clearing BGP
logger.info("Clearing BGP neighborship for router %s..", router)
for addr_type in bgp_addr_type.keys():
if addr_type == "ipv4":
- rnode.vtysh_cmd("clear ip bgp *")
+ run_frr_cmd(rnode, "clear ip bgp *")
elif addr_type == "ipv6":
- rnode.vtysh_cmd("clear bgp ipv6 *")
+ run_frr_cmd(rnode, "clear bgp ipv6 *")
peer_uptime_after_clear_bgp = {}
# Verifying BGP convergence after bgp clear command
- for retry in range(1, 11):
- sleeptime = 2 * retry
- if sleeptime <= BGP_CONVERGENCE_TIMEOUT:
- # Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
- sleep(sleeptime)
- else:
- errormsg = "TIMEOUT!! BGP is not converged in {} seconds for" \
- " router {}".format(BGP_CONVERGENCE_TIMEOUT, router)
- return errormsg
+ for retry in range(31):
+ sleeptime = 3
+ # Waiting for BGP to converge
+ logger.info("Waiting for %s sec for BGP to converge on router"
+ " %s...", sleeptime, router)
+ sleep(sleeptime)
+
- show_bgp_json = rnode.vtysh_cmd("show bgp summary json",
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
@@ -1028,9 +1004,12 @@ def clear_bgp_and_verify(tgen, topo, router):
router)
break
else:
- logger.warning("BGP is not yet Converged for router %s after"
- " bgp clear", router)
-
+ logger.info("BGP is not yet Converged for router %s after"
+ " bgp clear", router)
+ else:
+ errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
+ " router {}".format(router)
+ return errormsg
logger.info(peer_uptime_after_clear_bgp)
# Comparing peerUptimeEstablishedEpoch dictionaries
if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
@@ -1041,7 +1020,7 @@ def clear_bgp_and_verify(tgen, topo, router):
" {}".format(router)
return errormsg
- logger.info("Exiting lib API: clear_bgp_and_verify()")
+ logger.debug("Exiting lib API: clear_bgp_and_verify()")
return True
@@ -1077,7 +1056,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Entering lib API: verify_bgp_timers_and_functionality()")
sleep(5)
router_list = tgen.routers()
for router in input_dict.keys():
@@ -1090,7 +1069,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
router)
show_ip_bgp_neighbor_json = \
- rnode.vtysh_cmd("show ip bgp neighbor json", isjson=True)
+ run_frr_cmd(rnode, "show ip bgp neighbor json", isjson=True)
bgp_addr_type = input_dict[router]["bgp"]["address_family"]
@@ -1178,7 +1157,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
sleep(keepalivetimer)
sleep(2)
show_bgp_json = \
- rnode.vtysh_cmd("show bgp summary json",
+ run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
if addr_type == "ipv4":
@@ -1192,17 +1171,13 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
(holddowntimer - keepalivetimer):
if nh_state != "Established":
errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}\n" \
- "show_bgp_json: \n {} ".format(
- timer, bgp_neighbor,
- show_bgp_json)
+ "down in {} sec for neighbor {}" \
+ .format(timer, bgp_neighbor)
return errormsg
else:
logger.info("BGP neighborship is intact in %s"
- " sec for neighbor %s \n "
- "show_bgp_json : \n %s",
- timer, bgp_neighbor,
- show_bgp_json)
+ " sec for neighbor %s",
+ timer, bgp_neighbor)
####################
# Shutting down peer interface and verifying that BGP
@@ -1229,7 +1204,7 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
sleep(keepalivetimer)
sleep(2)
show_bgp_json = \
- rnode.vtysh_cmd("show bgp summary json",
+ run_frr_cmd(rnode, "show bgp summary json",
isjson=True)
if addr_type == "ipv4":
@@ -1242,22 +1217,19 @@ def verify_bgp_timers_and_functionality(tgen, topo, input_dict):
if timer == holddowntimer:
if nh_state == "Established":
errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}\n" \
- "show_bgp_json: \n {} ".format(
- timer, bgp_neighbor,
- show_bgp_json)
+ "down in {} sec for neighbor {}" \
+ .format(timer, bgp_neighbor)
return errormsg
else:
logger.info("BGP neighborship has gone down in"
- " %s sec for neighbor %s \n"
- "show_bgp_json : \n %s",
- timer, bgp_neighbor,
- show_bgp_json)
+ " %s sec for neighbor %s",
+ timer, bgp_neighbor)
- logger.info("Exiting lib API: verify_bgp_timers_and_functionality()")
+ logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()")
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
attribute):
"""
@@ -1319,7 +1291,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
sleep(2)
logger.info("Verifying router %s RIB for best path:", router)
- sh_ip_bgp_json = rnode.vtysh_cmd(command, isjson=True)
+ sh_ip_bgp_json = run_frr_cmd(rnode, command, isjson=True)
for route_val in input_dict.values():
net_data = route_val["bgp"]["address_family"]["ipv4"]["unicast"]
@@ -1380,7 +1352,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
else:
command = "show ipv6 route json"
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
@@ -1417,6 +1389,7 @@ def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
return True
+@retry(attempts=3, wait=2, return_is_str=True)
def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
attribute):
"""
@@ -1451,7 +1424,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_best_path_as_per_admin_distance()")
+ logger.debug("Entering lib API: verify_best_path_as_per_admin_distance()")
router_list = tgen.routers()
if router not in router_list:
return False
@@ -1490,7 +1463,7 @@ def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
compare = "LOWEST"
# Show ip route
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 75880cfd28..f2d33f94ae 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -23,24 +23,32 @@ from datetime import datetime
from time import sleep
from subprocess import call
from subprocess import STDOUT as SUB_STDOUT
+from subprocess import PIPE as SUB_PIPE
+from subprocess import Popen
+from functools import wraps
+from re import search as re_search
+
import StringIO
import os
import ConfigParser
import traceback
import socket
import ipaddr
+import re
from lib import topotest
from functools import partial
from lib.topolog import logger, logger_config
from lib.topogen import TopoRouter
+from lib.topotest import interface_set_status
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
ERROR_LIST = ["Malformed", "Failure", "Unknown"]
+ROUTER_LIST = []
####
CD = os.path.dirname(os.path.realpath(__file__))
@@ -142,6 +150,35 @@ class InvalidCLIError(Exception):
pass
+def run_frr_cmd(rnode, cmd, isjson=False):
+ """
+ Execute frr show commands in privileged mode
+
+ * `rnode`: router node on which the command needs to be executed
+ * `cmd`: Command to be executed on frr
+ * `isjson`: If command is to get json data or not
+
+ :return str:
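+
+ Usage
+ -----
+ # Illustrative example; rnode is a router node from tgen.routers()
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)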
+ """
+
+ if cmd:
+ ret_data = rnode.vtysh_cmd(cmd, isjson=isjson)
+
+ if True:
+ if isjson:
+ logger.debug(ret_data)
+ print_data = rnode.vtysh_cmd(cmd.rstrip("json"), isjson=False)
+ else:
+ print_data = ret_data
+
+ logger.info('Output for command [ %s] on router %s:\n%s',
+ cmd.rstrip("json"), rnode.name, print_data)
+ return ret_data
+
+ else:
+ raise InvalidCLIError('No actual cmd passed')
+
+
def create_common_configuration(tgen, router, data, config_type=None,
build=False):
"""
@@ -186,6 +223,7 @@ def create_common_configuration(tgen, router, data, config_type=None,
frr_cfg_fd.write(config_map[config_type])
for line in data:
frr_cfg_fd.write("{} \n".format(str(line)))
+ frr_cfg_fd.write("\n")
except IOError as err:
logger.error("Unable to open FRR Config File. error(%s): %s" %
@@ -215,10 +253,13 @@ def reset_config_on_routers(tgen, routerName=None):
logger.debug("Entering API: reset_config_on_routers")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
+ for rname in ROUTER_LIST:
if routerName and routerName != rname:
continue
+ router = router_list[rname]
+ logger.info("Configuring router %s to initial test configuration",
+ rname)
cfg = router.run("vtysh -c 'show running'")
fname = "{}/{}/frr.sav".format(TMPDIR, rname)
dname = "{}/{}/delta.conf".format(TMPDIR, rname)
@@ -235,16 +276,35 @@ def reset_config_on_routers(tgen, routerName=None):
f.close()
- command = "/usr/lib/frr/frr-reload.py --input {}/{}/frr.sav" \
- " --test {}/{}/frr_json_initial.conf > {}". \
- format(TMPDIR, rname, TMPDIR, rname, dname)
- result = call(command, shell=True, stderr=SUB_STDOUT)
+ run_cfg_file = "{}/{}/frr.sav".format(TMPDIR, rname)
+ init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
+ command = "/usr/lib/frr/frr-reload.py --input {} --test {} > {}". \
+ format(run_cfg_file, init_cfg_file, dname)
+ result = call(command, shell=True, stderr=SUB_STDOUT,
+ stdout=SUB_PIPE)
# Assert if command fail
if result > 0:
- errormsg = ("Command:{} is failed due to non-zero exit"
- " code".format(command))
- return errormsg
+ logger.error("Delta file creation failed. Command executed %s",
+ command)
+ with open(run_cfg_file, 'r') as fd:
+ logger.info('Running configuration saved in %s is:\n%s',
+ run_cfg_file, fd.read())
+ with open(init_cfg_file, 'r') as fd:
+ logger.info('Test configuration saved in %s is:\n%s',
+ init_cfg_file, fd.read())
+
+ err_cmd = ['/usr/bin/vtysh', '-m', '-f', run_cfg_file]
+ result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE)
+ output = result.communicate()
+ for out_data in output:
+ temp_data = out_data.decode('utf-8').lower()
+ for out_err in ERROR_LIST:
+ if out_err.lower() in temp_data:
+ logger.error("Found errors while validating data in"
+ " %s", run_cfg_file)
+ raise InvalidCLIError(out_data)
+ raise InvalidCLIError("Unknown error in %s", output)
f = open(dname, "r")
delta = StringIO.StringIO()
@@ -264,7 +324,7 @@ def reset_config_on_routers(tgen, routerName=None):
delta.write("end\n")
output = router.vtysh_multicmd(delta.getvalue(),
pretty_output=False)
- logger.info("New configuration for router {}:".format(rname))
+
delta.close()
delta = StringIO.StringIO()
cfg = router.run("vtysh -c 'show running'")
@@ -276,6 +336,8 @@ def reset_config_on_routers(tgen, routerName=None):
# Router current configuration to log file or console if
# "show_router_config" is defined in "pytest.ini"
if show_router_config:
+ logger.info("Configuration on router {} after config reset:".
+ format(rname))
logger.info(delta.getvalue())
delta.close()
@@ -297,34 +359,39 @@ def load_config_to_router(tgen, routerName, save_bkup=False):
logger.debug("Entering API: load_config_to_router")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
- if rname == routerName:
- try:
- frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
- frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
- FRRCFG_BKUP_FILE)
- with open(frr_cfg_file, "r") as cfg:
- data = cfg.read()
- if save_bkup:
- with open(frr_cfg_bkup, "w") as bkup:
- bkup.write(data)
-
- output = router.vtysh_multicmd(data, pretty_output=False)
- for out_err in ERROR_LIST:
- if out_err.lower() in output.lower():
- raise InvalidCLIError("%s" % output)
- except IOError as err:
- errormsg = ("Unable to open config File. error(%s):"
- " %s", (err.errno, err.strerror))
- return errormsg
+ for rname in ROUTER_LIST:
+ if routerName and routerName != rname:
+ continue
- logger.info("New configuration for router {}:".format(rname))
- new_config = router.run("vtysh -c 'show running'")
+ router = router_list[rname]
+ try:
+ frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
+ frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
+ FRRCFG_BKUP_FILE)
+ with open(frr_cfg_file, "r+") as cfg:
+ data = cfg.read()
+ logger.info("Applying following configuration on router"
+ " {}:\n{}".format(rname, data))
+ if save_bkup:
+ with open(frr_cfg_bkup, "w") as bkup:
+ bkup.write(data)
+
+ output = router.vtysh_multicmd(data, pretty_output=False)
+ for out_err in ERROR_LIST:
+ if out_err.lower() in output.lower():
+ raise InvalidCLIError("%s" % output)
+
+ cfg.truncate(0)
+ except IOError as err:
+ errormsg = ("Unable to open config File. error(%s):"
+ " %s", (err.errno, err.strerror))
+ return errormsg
- # Router current configuration to log file or console if
- # "show_router_config" is defined in "pytest.ini"
- if show_router_config:
- logger.info(new_config)
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ new_config = router.run("vtysh -c 'show running'")
+ logger.info(new_config)
logger.debug("Exting API: load_config_to_router")
return True
@@ -337,21 +404,25 @@ def start_topology(tgen):
* `tgen` : topogen object
"""
- global TMPDIR
+ global TMPDIR, ROUTER_LIST
# Starting topology
tgen.start_topology()
# Starting deamons
+
router_list = tgen.routers()
+ ROUTER_LIST = sorted(router_list.keys(),
+ key=lambda x: int(re_search('\d+', x).group(0)))
TMPDIR = os.path.join(LOGDIR, tgen.modname)
- for rname, router in router_list.iteritems():
+ router_list = tgen.routers()
+ for rname in ROUTER_LIST:
+ router = router_list[rname]
try:
os.chdir(TMPDIR)
- # Creating rouer named dir and empty zebra.conf bgpd.conf files
+ # Creating router named dir and empty zebra.conf bgpd.conf files
# inside the current directory
-
if os.path.isdir('{}'.format(rname)):
os.system("rm -rf {}".format(rname))
os.mkdir('{}'.format(rname))
@@ -371,13 +442,11 @@ def start_topology(tgen):
router.load_config(
TopoRouter.RD_ZEBRA,
'{}/{}/zebra.conf'.format(TMPDIR, rname)
- # os.path.join(tmpdir, '{}/zebra.conf'.format(rname))
)
# Loading empty bgpd.conf file to router, to start the bgp deamon
router.load_config(
TopoRouter.RD_BGP,
'{}/{}/bgpd.conf'.format(TMPDIR, rname)
- # os.path.join(tmpdir, '{}/bgpd.conf'.format(rname))
)
# Starting routers
@@ -446,27 +515,31 @@ def validate_ip_address(ip_address):
" address" % ip_address)
-def check_address_types(addr_type):
+def check_address_types(addr_type=None):
"""
Checks environment variable set and compares with the current address type
"""
- global ADDRESS_TYPES
- if ADDRESS_TYPES is None:
- ADDRESS_TYPES = "dual"
-
- if ADDRESS_TYPES == "dual":
- ADDRESS_TYPES = ["ipv4", "ipv6"]
- elif ADDRESS_TYPES == "ipv4":
- ADDRESS_TYPES = ["ipv4"]
- elif ADDRESS_TYPES == "ipv6":
- ADDRESS_TYPES = ["ipv6"]
-
- if addr_type not in ADDRESS_TYPES:
+
+ addr_types_env = os.environ.get("ADDRESS_TYPES")
+ if not addr_types_env:
+ addr_types_env = "dual"
+
+ if addr_types_env == "dual":
+ addr_types = ["ipv4", "ipv6"]
+ elif addr_types_env == "ipv4":
+ addr_types = ["ipv4"]
+ elif addr_types_env == "ipv6":
+ addr_types = ["ipv6"]
+
+ if addr_type is None:
+ return addr_types
+
+ if addr_type not in addr_types:
logger.error("{} not in supported/configured address types {}".
- format(addr_type, ADDRESS_TYPES))
+ format(addr_type, addr_types))
return False
- return ADDRESS_TYPES
+ return True
def generate_ips(network, no_of_ips):
@@ -548,7 +621,7 @@ def write_test_header(tc_name):
""" Display message at beginning of test case"""
count = 20
logger.info("*"*(len(tc_name)+count))
- logger.info("START -> Testcase : %s", tc_name)
+ logger.info("START -> Testcase : %s" % tc_name)
logger.info("*"*(len(tc_name)+count))
@@ -556,10 +629,169 @@ def write_test_footer(tc_name):
""" Display message at end of test case"""
count = 21
logger.info("="*(len(tc_name)+count))
- logger.info("PASSED -> Testcase : %s", tc_name)
+ logger.info("Testcase : %s -> PASSED", tc_name)
logger.info("="*(len(tc_name)+count))
+def interface_status(tgen, topo, input_dict):
+ """
+ Set the status (up / down) of interfaces on the given routers
+
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : routers with the interface lists and status to be set
+
+ Usage
+ -----
+ input_dict = {
+ "r3": {
+ "interface_list": ['eth1-r1-r2', 'eth2-r1-r3'],
+ "status": "down"
+ }
+ }
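+ # Illustrative call using the input_dict above
+ result = interface_status(tgen, topo, input_dict)
+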
+ Returns
+ -------
+ errormsg(str) or True
+ """
+ logger.debug("Entering lib API: interface_status()")
+
+ try:
+ global frr_cfg
+ for router in input_dict.keys():
+
+ interface_list = input_dict[router]['interface_list']
+ status = input_dict[router].setdefault('status', 'up')
+ for intf in interface_list:
+ rnode = tgen.routers()[router]
+ interface_set_status(rnode, intf, status)
+
+ # Load config to router
+ load_config_to_router(tgen, router)
+
+ except Exception as e:
+ # handle any exception
+ logger.error("Error %s occured. Arguments %s.", e.message, e.args)
+
+ # Traceback
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: interface_status()")
+ return True
+
+
+def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0):
+ """
+ Retries function execution, if return is an errormsg or exception
+
+ * `attempts`: Number of attempts to make
+ * `wait`: Number of seconds to wait between each attempt
+ * `return_is_str`: Return val is an errormsg in case of failure
+ * `initial_wait`: Sleeps for this many seconds before executing the function
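+
+ Usage
+ -----
+ # Illustrative sketch; verify_foo is a placeholder verification API
+ @retry(attempts=3, wait=2, return_is_str=True)
+ def verify_foo(tgen, topo, input_dict):
+ ...
+ # Per-call overrides are popped from kwargs by the wrapper, e.g.
+ # verify_foo(tgen, topo, input_dict, attempts=5, expected=False)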
+
+ """
+
+ def _retry(func):
+
+ @wraps(func)
+ def func_retry(*args, **kwargs):
+ _wait = kwargs.pop('wait', wait)
+ _attempts = kwargs.pop('attempts', attempts)
+ _attempts = int(_attempts)
+ if _attempts < 0:
+ raise ValueError("attempts must be 0 or greater")
+
+ if initial_wait > 0:
+ logger.info("Waiting for [%s]s as initial delay", initial_wait)
+ sleep(initial_wait)
+
+ _return_is_str = kwargs.pop('return_is_str', return_is_str)
+ for i in range(1, _attempts + 1):
+ try:
+ _expected = kwargs.setdefault('expected', True)
+ kwargs.pop('expected')
+ ret = func(*args, **kwargs)
+ logger.debug("Function returned %s" % ret)
+ if return_is_str and isinstance(ret, bool):
+ return ret
+ elif return_is_str and _expected is False:
+ return ret
+
+ if _attempts == i:
+ return ret
+ except Exception as err:
+ if _attempts == i:
+ logger.info("Max number of attempts (%r) reached",
+ _attempts)
+ raise
+ else:
+ logger.info("Function returned %s", err)
+ if i < _attempts:
+ logger.info("Retry [#%r] after sleeping for %ss"
+ % (i, _wait))
+ sleep(_wait)
+ func_retry._original = func
+ return func_retry
+ return _retry
+
+
+def disable_v6_link_local(tgen, router, intf_name=None):
+ """
+ Disables ipv6 link local addresses for a particular interface or
+ all interfaces
+
+ * `tgen`: tgen object
+ * `router` : router for which ipv6 link local addresses should
+ be disabled
+ * `intf_name` : Interface name for which v6 link local needs to
+ be disabled
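+
+ Usage
+ -----
+ # Illustrative example; router and interface names are placeholders
+ disable_v6_link_local(tgen, "r1", intf_name="r1-r2-eth0")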
+ """
+
+ router_list = tgen.routers()
+ for rname, rnode in router_list.iteritems():
+ if rname != router:
+ continue
+
+ linklocal = []
+
+ ifaces = router_list[router].run('ip -6 address')
+
+ # Fix newlines (make them all the same)
+ ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+
+ interface = None
+ ll_per_if_count = 0
+ for line in ifaces:
+ # Interface name
+ m = re.search('[0-9]+: ([^:]+)[@if0-9:]+ <', line)
+ if m:
+ interface = m.group(1).split("@")[0]
+ ll_per_if_count = 0
+
+ # Interface ip
+ m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+'
+ ':[0-9a-f]+[/0-9]*) scope link', line)
+ if m:
+ local = m.group(1)
+ ll_per_if_count += 1
+ if ll_per_if_count > 1:
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
+ else:
+ linklocal += [[interface, local]]
+
+ if len(linklocal[0]) > 1:
+ link_local_dict = {item[0]: item[1] for item in linklocal}
+
+ for lname, laddr in link_local_dict.items():
+
+ if intf_name is not None and lname != intf_name:
+ continue
+
+ cmd = "ip addr del {} dev {}".format(laddr, lname)
+ router_list[router].run(cmd)
+
+
#############################################
# These APIs will be used by testcases
#############################################
@@ -589,19 +821,22 @@ def create_interfaces_cfg(tgen, topo, build=False):
interface_name = destRouterLink
else:
interface_name = data["interface"]
- interface_data.append("interface {}\n".format(
+ if "ipv6" in data:
+ disable_v6_link_local(tgen, c_router, interface_name)
+ interface_data.append("interface {}".format(
str(interface_name)
))
if "ipv4" in data:
intf_addr = c_data["links"][destRouterLink]["ipv4"]
- interface_data.append("ip address {}\n".format(
+ interface_data.append("ip address {}".format(
intf_addr
))
if "ipv6" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6"]
- interface_data.append("ipv6 address {}\n".format(
+ interface_data.append("ipv6 address {}".format(
intf_addr
))
+
result = create_common_configuration(tgen, c_router,
interface_data,
"interface_config",
@@ -662,7 +897,7 @@ def create_static_routes(tgen, input_dict, build=False):
for router in input_dict.keys():
if "static_routes" not in input_dict[router]:
errormsg = "static_routes not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
static_routes_list = []
@@ -768,7 +1003,7 @@ def create_prefix_lists(tgen, input_dict, build=False):
for router in input_dict.keys():
if "prefix_lists" not in input_dict[router]:
errormsg = "prefix_lists not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
config_data = []
@@ -922,7 +1157,7 @@ def create_route_maps(tgen, input_dict, build=False):
for router in input_dict.keys():
if "route_maps" not in input_dict[router]:
errormsg = "route_maps not present in input_dict"
- logger.info(errormsg)
+ logger.debug(errormsg)
continue
rmap_data = []
for rmap_name, rmap_value in \
@@ -1014,7 +1249,7 @@ def create_route_maps(tgen, input_dict, build=False):
# Weight
if weight:
- rmap_data.append("set weight {} \n".format(
+ rmap_data.append("set weight {}".format(
weight))
# Adding MATCH and SET sequence to RMAP if defined
@@ -1092,7 +1327,8 @@ def create_route_maps(tgen, input_dict, build=False):
#############################################
# Verification APIs
#############################################
-def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
+@retry(attempts=10, return_is_str=True, initial_wait=2)
+def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
"""
Data will be read from input_dict or input JSON file, API will generate
same prefixes, which were redistributed by either create_static_routes() or
@@ -1140,7 +1376,7 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_rib()")
+ logger.debug("Entering lib API: verify_rib()")
router_list = tgen.routers()
for routerInput in input_dict.keys():
@@ -1160,9 +1396,8 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
else:
command = "show ipv6 route json"
- sleep(10)
logger.info("Checking router %s RIB:", router)
- rib_routes_json = rnode.vtysh_cmd(command, isjson=True)
+ rib_routes_json = run_frr_cmd(rnode, command, isjson=True)
# Verifying output dictionary rib_routes_json is not empty
if bool(rib_routes_json) is False:
@@ -1181,7 +1416,7 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
if "no_of_ip" in static_route:
no_of_ip = static_route["no_of_ip"]
else:
- no_of_ip = 0
+ no_of_ip = 1
# Generating IPs for verification
ip_list = generate_ips(network, no_of_ip)
@@ -1199,9 +1434,9 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
found_hops = [rib_r["ip"] for rib_r in
rib_routes_json[st_rt][0][
"nexthops"]]
- for nh in next_hop:
+ for nh in found_hops:
nh_found = False
- if nh and nh in found_hops:
+ if nh and nh in next_hop:
nh_found = True
else:
errormsg = ("Nexthop {} is Missing for {}"
@@ -1257,30 +1492,10 @@ def _verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None):
logger.info("Verified routes in router %s RIB, found routes"
" are: %s", dut, found_routes)
- logger.info("Exiting lib API: verify_rib()")
+ logger.debug("Exiting lib API: verify_rib()")
return True
-def verify_rib(tgen, addr_type, dut, input_dict, next_hop=None, protocol=None, expected=True):
- """
- Wrapper function for `_verify_rib` that tries multiple time to get results.
-
- When the expected result is `False` we actually should expect for an string instead.
- """
-
- # Use currying to hide the parameters and create a test function.
- test_func = partial(_verify_rib, tgen, addr_type, dut, input_dict, next_hop, protocol)
-
- # Call the test function and expect it to return True, otherwise try it again.
- if expected is True:
- _, result = topotest.run_and_expect(test_func, True, count=20, wait=6)
- else:
- _, result = topotest.run_and_expect_type(test_func, str, count=20, wait=6)
-
- # Return as normal.
- return result
-
-
def verify_admin_distance_for_static_routes(tgen, input_dict):
"""
API to verify admin distance for static routes as defined in input_dict/
@@ -1311,7 +1526,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Entering lib API: verify_admin_distance_for_static_routes()")
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1326,7 +1541,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
command = "show ip route json"
else:
command = "show ipv6 route json"
- show_ip_route_json = rnode.vtysh_cmd(command, isjson=True)
+ show_ip_route_json = run_frr_cmd(rnode, command, isjson=True)
logger.info("Verifying admin distance for static route %s"
" under dut %s:", static_route, router)
@@ -1356,7 +1571,7 @@ def verify_admin_distance_for_static_routes(tgen, input_dict):
format(network, router))
return errormsg
- logger.info("Exiting lib API: verify_admin_distance_for_static_routes()")
+ logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()")
return True
@@ -1384,7 +1599,7 @@ def verify_prefix_lists(tgen, input_dict):
errormsg(str) or True
"""
- logger.info("Entering lib API: verify_prefix_lists()")
+ logger.debug("Entering lib API: verify_prefix_lists()")
for router in input_dict.keys():
if router not in tgen.routers():
@@ -1393,7 +1608,7 @@ def verify_prefix_lists(tgen, input_dict):
rnode = tgen.routers()[router]
# Show ip prefix list
- show_prefix_list = rnode.vtysh_cmd("show ip prefix-list")
+ show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
# Verify Prefix list is deleted
prefix_lists_addr = input_dict[router]["prefix_lists"]
@@ -1403,12 +1618,12 @@ def verify_prefix_lists(tgen, input_dict):
for prefix_list in prefix_lists_addr[addr_type].keys():
if prefix_list in show_prefix_list:
- errormsg = ("Prefix list {} is not deleted from router"
+ errormsg = ("Prefix list {} is/are present in the router"
" {}".format(prefix_list, router))
return errormsg
- logger.info("Prefix list %s is/are deleted successfully"
+ logger.info("Prefix list %s is/are not present in the router"
" from router %s", prefix_list, router)
- logger.info("Exiting lib API: verify_prefix_lissts()")
+ logger.debug("Exiting lib API: verify_prefix_lissts()")
return True
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
index 4130451d2e..7a00fe4c50 100644
--- a/tests/topotests/lib/topojson.py
+++ b/tests/topotests/lib/topojson.py
@@ -20,6 +20,7 @@
from collections import OrderedDict
from json import dumps as json_dumps
+from re import search as re_search
import ipaddr
import pytest
@@ -38,6 +39,9 @@ from lib.common_config import (
from lib.bgp import create_router_bgp
+ROUTER_LIST = []
+
+
def build_topo_from_json(tgen, topo):
"""
Reads configuration from JSON file. Adds routers, creates interface
@@ -48,13 +52,15 @@ def build_topo_from_json(tgen, topo):
* `topo`: json file data
"""
- listRouters = []
- for routerN in sorted(topo['routers'].iteritems()):
- logger.info('Topo: Add router {}'.format(routerN[0]))
- tgen.add_router(routerN[0])
- listRouters.append(routerN[0])
+ ROUTER_LIST = sorted(topo['routers'].keys(),
+ key=lambda x: int(re_search('\d+', x).group(0)))
+
+ listRouters = ROUTER_LIST[:]
+ for routerN in ROUTER_LIST:
+ logger.info('Topo: Add router {}'.format(routerN))
+ tgen.add_router(routerN)
- listRouters.sort()
if 'ipv4base' in topo:
ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4'])
ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask'])
@@ -78,7 +84,7 @@ def build_topo_from_json(tgen, topo):
elif 'link' in x:
return int(x.split('-link')[1])
else:
- return int(x.split('r')[1])
+ return int(re_search('\d+', x).group(0))
for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \
iteritems(),
key=lambda x: link_sort(x[0])):
@@ -179,12 +185,13 @@ def build_config_from_json(tgen, topo, save_bkup=True):
data = topo["routers"]
for func_type in func_dict.keys():
- logger.info('Building configuration for {}'.format(func_type))
+ logger.info('Checking for {} configuration in input data'.format(
+ func_type))
func_dict.get(func_type)(tgen, data, build=True)
for router in sorted(topo['routers'].keys()):
- logger.info('Configuring router {}...'.format(router))
+ logger.debug('Configuring router {}...'.format(router))
result = load_config_to_router(tgen, router, save_bkup)
if not result:
diff --git a/tests/topotests/ospf-sr-topo1/r1/zebra_mpls.json b/tests/topotests/ospf-sr-topo1/r1/zebra_mpls.json
index 254c137acd..6b1fe76b6e 100644
--- a/tests/topotests/ospf-sr-topo1/r1/zebra_mpls.json
+++ b/tests/topotests/ospf-sr-topo1/r1/zebra_mpls.json
@@ -4,7 +4,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -17,7 +17,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -30,7 +30,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":8300,
"distance":150,
"installed":true,
@@ -43,7 +43,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":8400,
"distance":150,
"installed":true,
@@ -56,7 +56,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -69,7 +69,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
diff --git a/tests/topotests/ospf-sr-topo1/r2/zebra_mpls.json b/tests/topotests/ospf-sr-topo1/r2/zebra_mpls.json
index 0d73a409ed..79965d280a 100644
--- a/tests/topotests/ospf-sr-topo1/r2/zebra_mpls.json
+++ b/tests/topotests/ospf-sr-topo1/r2/zebra_mpls.json
@@ -4,7 +4,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":20100,
"distance":150,
"installed":true,
@@ -17,7 +17,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -30,7 +30,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":10400,
"distance":150,
"installed":true,
@@ -43,7 +43,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -56,7 +56,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -69,7 +69,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -82,7 +82,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -95,7 +95,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -108,7 +108,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
diff --git a/tests/topotests/ospf-sr-topo1/r3/zebra_mpls.json b/tests/topotests/ospf-sr-topo1/r3/zebra_mpls.json
index b15f90afd1..ceb2f7a0e5 100644
--- a/tests/topotests/ospf-sr-topo1/r3/zebra_mpls.json
+++ b/tests/topotests/ospf-sr-topo1/r3/zebra_mpls.json
@@ -4,7 +4,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":8100,
"distance":150,
"installed":true,
@@ -17,7 +17,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -30,7 +30,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":8400,
"distance":150,
"installed":true,
@@ -43,7 +43,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -56,7 +56,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
diff --git a/tests/topotests/ospf-sr-topo1/r4/zebra_mpls.json b/tests/topotests/ospf-sr-topo1/r4/zebra_mpls.json
index d1238517f5..d7f54b224d 100644
--- a/tests/topotests/ospf-sr-topo1/r4/zebra_mpls.json
+++ b/tests/topotests/ospf-sr-topo1/r4/zebra_mpls.json
@@ -4,7 +4,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":8100,
"distance":150,
"installed":true,
@@ -17,7 +17,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -30,7 +30,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":8300,
"distance":150,
"installed":true,
@@ -43,7 +43,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -56,7 +56,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
@@ -69,7 +69,7 @@
"installed":true,
"nexthops":[
{
- "type":"SR",
+ "type":"SR (OSPF)",
"outLabel":3,
"distance":150,
"installed":true,
diff --git a/tests/topotests/pytest.ini b/tests/topotests/pytest.ini
index 7ea38491d8..b65f93856f 100644
--- a/tests/topotests/pytest.ini
+++ b/tests/topotests/pytest.ini
@@ -1,6 +1,6 @@
# Skip pytests example directory
[pytest]
-norecursedirs = .git example-test lib docker
+norecursedirs = .git example-test example-topojson-test lib docker bgp-ecmp-topo2
[topogen]
# Default configuration values
@@ -15,7 +15,7 @@ norecursedirs = .git example-test lib docker
# Display router current configuration during test execution,
# by default configuration will not be shown
-show_router_config = True
+# show_router_config = True
# Default daemons binaries path.
#frrdir = /usr/lib/frr
diff --git a/tools/checkpatch.pl b/tools/checkpatch.pl
index 1b48d10a09..c0624d933e 100755
--- a/tools/checkpatch.pl
+++ b/tools/checkpatch.pl
@@ -3351,7 +3351,7 @@ sub process {
# if/while/etc brace do not go on next line, unless defining a do while loop,
# or if that brace on the next line is for something else
- if ($line =~ /(.*)\b((?:if|while|for|switch|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
+ if ($line =~ /(.*)\b((?:if|while|for|switch|(?:[a-z_]+|)frr_(each|with)[a-z_]+)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
my $pre_ctx = "$1$2";
my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
@@ -3397,7 +3397,7 @@ sub process {
}
# Check relative indent for conditionals and blocks.
- if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+ if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)frr_(each|with)[a-z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0)
if (!defined $stat);
@@ -5177,6 +5177,31 @@ sub process {
}
}
+ if (!defined $suppress_ifbraces{$linenr - 1} &&
+ $line =~ /\b(frr_with_)/) {
+ my ($level, $endln, @chunks) =
+ ctx_statement_full($linenr, $realcnt, $-[0]);
+
+ # Check the condition.
+ my ($cond, $block) = @{$chunks[0]};
+ #print "CHECKING<$linenr> cond<$cond> block<$block>\n";
+ if (defined $cond) {
+ substr($block, 0, length($cond), '');
+ }
+
+ if ($level == 0 && $block !~ /^\s*\{/) {
+ my $herectx = $here . "\n";
+ my $cnt = statement_rawlines($block);
+
+ for (my $n = 0; $n < $cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
+ }
+
+ WARN("BRACES",
+ "braces {} are mandatory for frr_with_* blocks\n" . $herectx);
+ }
+ }
+
# check for single line unbalanced braces
if ($sline =~ /^.\s*\}\s*else\s*$/ ||
$sline =~ /^.\s*else\s*\{\s*$/) {
diff --git a/tools/coccinelle/frr_with_mutex.cocci b/tools/coccinelle/frr_with_mutex.cocci
new file mode 100644
index 0000000000..ec8b73917c
--- /dev/null
+++ b/tools/coccinelle/frr_with_mutex.cocci
@@ -0,0 +1,23 @@
+@@
+expression E;
+iterator name frr_with_mutex;
+@@
+
+- pthread_mutex_lock(E);
++ frr_with_mutex(E) {
+- {
+ ...
+- }
+- pthread_mutex_unlock(E);
++ }
+
+
+@@
+expression E;
+@@
+
+- pthread_mutex_lock(E);
++ frr_with_mutex(E) {
+ ...
+- pthread_mutex_unlock(E);
++ }
diff --git a/tools/coccinelle/zprivs.cocci b/tools/coccinelle/zprivs.cocci
index 76d13c3f0d..11628a7eae 100644
--- a/tools/coccinelle/zprivs.cocci
+++ b/tools/coccinelle/zprivs.cocci
@@ -2,12 +2,12 @@
identifier change;
identifier end;
expression E, f, g;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
- if (E.change(ZPRIVS_RAISE))
- f;
-+ frr_elevate_privs(&E) {
++ frr_with_privs(&E) {
<+...
- goto end;
+ break;
@@ -20,7 +20,7 @@ iterator name frr_elevate_privs;
@@
identifier change, errno, safe_strerror, exit;
expression E, f1, f2, f3, ret, fn;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
if (E.change(ZPRIVS_RAISE))
@@ -44,7 +44,7 @@ iterator name frr_elevate_privs;
@@
identifier change;
expression E, f1, f2, f3, ret;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
if (E.change(ZPRIVS_RAISE))
@@ -64,12 +64,12 @@ iterator name frr_elevate_privs;
@@
identifier change;
expression E, f, g;
-iterator name frr_elevate_privs;
+iterator name frr_with_privs;
@@
- if (E.change(ZPRIVS_RAISE))
- f;
-+ frr_elevate_privs(&E) {
++ frr_with_privs(&E) {
...
- if (E.change(ZPRIVS_LOWER))
- g;
diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf
index d52824ff07..8845df5fc7 100644
--- a/tools/etc/frr/support_bundle_commands.conf
+++ b/tools/etc/frr/support_bundle_commands.conf
@@ -28,6 +28,7 @@ show bgp ipv6 update-groups advertised-routes
show bgp ipv6 update-groups packet-queue
show bgp ipv6 update-groups statistics
show ip bgp statistics
+show bgp martian next-hop
show bgp evpn route
CMD_LIST_END
@@ -37,14 +38,17 @@ PROC_NAME:zebra
CMD_LIST_START
show zebra
show zebra client summary
-show ip route
-
+show ip zebra route dump json
+show ipv6 zebra route dump json
+show ip nht vrf all
show route-map
show memory
-show interface
+show interface vrf all
show vrf
+show zebra fpm stats
show error all
show work-queues
+show debugging hashtable
show running-config
show thread cpu
show thread poll
diff --git a/tools/generate_support_bundle.py b/tools/generate_support_bundle.py
index 118ca113a5..c9ca9c3d0d 100644..100755
--- a/tools/generate_support_bundle.py
+++ b/tools/generate_support_bundle.py
@@ -1,3 +1,5 @@
+#!/usr/bin/python
+
########################################################
### Python Script to generate the FRR support bundle ###
########################################################
diff --git a/tools/subdir.am b/tools/subdir.am
index 7713bb1ade..c637db6eb1 100644
--- a/tools/subdir.am
+++ b/tools/subdir.am
@@ -16,6 +16,7 @@ sbin_SCRIPTS += \
\
tools/frrcommon.sh \
tools/frrinit.sh \
+ tools/generate_support_bundle.py \
tools/watchfrr.sh \
# end
@@ -35,6 +36,7 @@ EXTRA_DIST += \
tools/frr-reload \
tools/frr-reload.py \
tools/frr.service \
+ tools/generate_support_bundle.py \
tools/multiple-bgpd.sh \
tools/rrcheck.pl \
tools/rrlookup.pl \
diff --git a/vrrpd/vrrp.c b/vrrpd/vrrp.c
index 951ad3f58f..b4049b55eb 100644
--- a/vrrpd/vrrp.c
+++ b/vrrpd/vrrp.c
@@ -1065,8 +1065,7 @@ static int vrrp_socket(struct vrrp_router *r)
int ret;
bool failed = false;
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
r->sock_rx = socket(r->family, SOCK_RAW, IPPROTO_VRRP);
r->sock_tx = socket(r->family, SOCK_RAW, IPPROTO_VRRP);
}
@@ -1102,8 +1101,7 @@ static int vrrp_socket(struct vrrp_router *r)
setsockopt_ipv4_multicast_loop(r->sock_tx, 0);
/* Bind Rx socket to exact interface */
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
ret = setsockopt(r->sock_rx, SOL_SOCKET,
SO_BINDTODEVICE, r->vr->ifp->name,
strlen(r->vr->ifp->name));
@@ -1213,8 +1211,7 @@ static int vrrp_socket(struct vrrp_router *r)
setsockopt_ipv6_multicast_loop(r->sock_tx, 0);
/* Bind Rx socket to exact interface */
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
ret = setsockopt(r->sock_rx, SOL_SOCKET,
SO_BINDTODEVICE, r->vr->ifp->name,
strlen(r->vr->ifp->name));
diff --git a/vrrpd/vrrp_arp.c b/vrrpd/vrrp_arp.c
index 78e153a082..750050e8c3 100644
--- a/vrrpd/vrrp_arp.c
+++ b/vrrpd/vrrp_arp.c
@@ -188,7 +188,7 @@ void vrrp_garp_init(void)
/* Create the socket descriptor */
/* FIXME: why ETH_P_RARP? */
errno = 0;
- frr_elevate_privs(&vrrp_privs) {
+ frr_with_privs(&vrrp_privs) {
garp_fd = socket(PF_PACKET, SOCK_RAW | SOCK_CLOEXEC,
htons(ETH_P_RARP));
}
diff --git a/vrrpd/vrrp_ndisc.c b/vrrpd/vrrp_ndisc.c
index 348958509a..dc546b09a2 100644
--- a/vrrpd/vrrp_ndisc.c
+++ b/vrrpd/vrrp_ndisc.c
@@ -214,8 +214,7 @@ int vrrp_ndisc_una_send_all(struct vrrp_router *r)
void vrrp_ndisc_init(void)
{
- frr_elevate_privs(&vrrp_privs)
- {
+ frr_with_privs(&vrrp_privs) {
ndisc_fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_IPV6));
}
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index b053392bff..a762e9555c 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -1260,6 +1260,8 @@ static struct cmd_node bgp_vrf_policy_node = {BGP_VRF_POLICY_NODE,
static struct cmd_node bgp_vnc_l2_group_node = {
BGP_VNC_L2_GROUP_NODE, "%s(config-router-vnc-l2-group)# "};
+static struct cmd_node bmp_node = {BMP_NODE, "%s(config-bgp-bmp)# "};
+
static struct cmd_node ospf_node = {OSPF_NODE, "%s(config-router)# "};
static struct cmd_node eigrp_node = {EIGRP_NODE, "%s(config-router)# "};
@@ -1335,7 +1337,7 @@ DEFUNSH(VTYSH_REALLYALL, vtysh_end_all, vtysh_end_all_cmd, "end",
}
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
- "router bgp [(1-4294967295) [<view|vrf> WORD]]",
+ "router bgp [(1-4294967295)$instasn [<view|vrf> WORD]]",
ROUTER_STR BGP_STR AS_STR
"BGP view\nBGP VRF\n"
"View/VRF name\n")
@@ -1478,6 +1480,18 @@ DEFUNSH(VTYSH_BGPD,
return CMD_SUCCESS;
}
+DEFUNSH(VTYSH_BGPD,
+ bmp_targets,
+ bmp_targets_cmd,
+ "bmp targets BMPTARGETS",
+ "BGP Monitoring Protocol\n"
+ "Create BMP target group\n"
+ "Name of the BMP target group\n")
+{
+ vty->node = BMP_NODE;
+ return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
@@ -1585,10 +1599,11 @@ DEFUNSH(VTYSH_OSPFD, router_ospf, router_ospf_cmd,
return CMD_SUCCESS;
}
-DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535)",
+DEFUNSH(VTYSH_EIGRPD, router_eigrp, router_eigrp_cmd, "router eigrp (1-65535) [vrf NAME]",
"Enable a routing process\n"
"Start EIGRP configuration\n"
- "AS number to use\n")
+ "AS number to use\n"
+ VRF_CMD_HELP_STR)
{
vty->node = EIGRP_NODE;
return CMD_SUCCESS;
@@ -1842,6 +1857,7 @@ static int vtysh_exit(struct vty *vty)
case BGP_VNC_DEFAULTS_NODE:
case BGP_VNC_NVE_GROUP_NODE:
case BGP_VNC_L2_GROUP_NODE:
+ case BMP_NODE:
vty->node = BGP_NODE;
break;
case BGP_EVPN_VNI_NODE:
@@ -1932,6 +1948,19 @@ DEFUNSH(VTYSH_BGPD, rpki_quit, rpki_quit_cmd, "quit",
return rpki_exit(self, vty, argc, argv);
}
+DEFUNSH(VTYSH_BGPD, bmp_exit, bmp_exit_cmd, "exit",
+ "Exit current mode and down to previous mode\n")
+{
+ vtysh_exit(vty);
+ return CMD_SUCCESS;
+}
+
+DEFUNSH(VTYSH_BGPD, bmp_quit, bmp_quit_cmd, "quit",
+ "Exit current mode and down to previous mode\n")
+{
+ return bmp_exit(self, vty, argc, argv);
+}
+
DEFUNSH(VTYSH_VRF, exit_vrf_config, exit_vrf_config_cmd, "exit-vrf",
"Exit from VRF configuration mode\n")
{
@@ -3259,15 +3288,66 @@ DEFUN (no_vtysh_output_file,
DEFUN(find,
find_cmd,
- "find COMMAND...",
- "Find CLI command containing text\n"
- "Text to search for\n")
+ "find REGEX",
+ "Find CLI command matching a regular expression\n"
+ "Search pattern (POSIX regex)\n")
{
- char *text = argv_concat(argv, argc, 1);
+ char *pattern = argv[1]->arg;
const struct cmd_node *node;
const struct cmd_element *cli;
vector clis;
+ regex_t exp = {};
+
+ int cr = regcomp(&exp, pattern, REG_NOSUB | REG_EXTENDED);
+
+ if (cr != 0) {
+ switch (cr) {
+ case REG_BADBR:
+ vty_out(vty, "%% Invalid \\{...\\} expression\n");
+ break;
+ case REG_BADRPT:
+ vty_out(vty, "%% Bad repetition operator\n");
+ break;
+ case REG_BADPAT:
+ vty_out(vty, "%% Regex syntax error\n");
+ break;
+ case REG_ECOLLATE:
+ vty_out(vty, "%% Invalid collating element\n");
+ break;
+ case REG_ECTYPE:
+ vty_out(vty, "%% Invalid character class name\n");
+ break;
+ case REG_EESCAPE:
+ vty_out(vty,
+ "%% Regex ended with escape character (\\)\n");
+ break;
+ case REG_ESUBREG:
+ vty_out(vty,
+ "%% Invalid number in \\digit construction\n");
+ break;
+ case REG_EBRACK:
+ vty_out(vty, "%% Unbalanced square brackets\n");
+ break;
+ case REG_EPAREN:
+ vty_out(vty, "%% Unbalanced parentheses\n");
+ break;
+ case REG_EBRACE:
+ vty_out(vty, "%% Unbalanced braces\n");
+ break;
+ case REG_ERANGE:
+ vty_out(vty,
+ "%% Invalid endpoint in range expression\n");
+ break;
+ case REG_ESPACE:
+ vty_out(vty, "%% Failed to compile (out of memory)\n");
+ break;
+ }
+
+ goto done;
+ }
+
+
for (unsigned int i = 0; i < vector_active(cmdvec); i++) {
node = vector_slot(cmdvec, i);
if (!node)
@@ -3275,14 +3355,15 @@ DEFUN(find,
clis = node->cmd_vector;
for (unsigned int j = 0; j < vector_active(clis); j++) {
cli = vector_slot(clis, j);
- if (strcasestr(cli->string, text))
+
+ if (regexec(&exp, cli->string, 0, NULL, 0) == 0)
vty_out(vty, " (%s) %s\n",
node_names[node->node], cli->string);
}
}
- XFREE(MTYPE_TMP, text);
-
+done:
+ regfree(&exp);
return CMD_SUCCESS;
}
@@ -3620,6 +3701,7 @@ void vtysh_init_vty(void)
install_node(&openfabric_node, NULL);
install_node(&vty_node, NULL);
install_node(&rpki_node, NULL);
+ install_node(&bmp_node, NULL);
#if HAVE_BFDD > 0
install_node(&bfd_node, NULL);
install_node(&bfd_peer_node, NULL);
@@ -3853,6 +3935,11 @@ void vtysh_init_vty(void)
install_element(BGP_FLOWSPECV4_NODE, &exit_address_family_cmd);
install_element(BGP_FLOWSPECV6_NODE, &exit_address_family_cmd);
+ install_element(BGP_NODE, &bmp_targets_cmd);
+ install_element(BMP_NODE, &bmp_exit_cmd);
+ install_element(BMP_NODE, &bmp_quit_cmd);
+ install_element(BMP_NODE, &vtysh_end_all_cmd);
+
install_element(CONFIG_NODE, &rpki_cmd);
install_element(RPKI_NODE, &rpki_exit_cmd);
install_element(RPKI_NODE, &rpki_quit_cmd);
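
The reworked find command above compiles its single argument once with REG_EXTENDED | REG_NOSUB and then runs regexec() over every installed command string, printing the node and command on each match. The same compile-once / match-many / regfree idiom in isolation (the candidate strings below are purely illustrative, not vtysh internals):

	#include <regex.h>
	#include <stdio.h>

	int main(void)
	{
		/* stand-ins for cli->string entries */
		static const char *candidates[] = {
			"show ip route", "show bgp summary", "clear ip bgp *",
		};
		regex_t exp;
		unsigned int i;

		if (regcomp(&exp, "^show", REG_EXTENDED | REG_NOSUB) != 0)
			return 1;

		for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
			if (regexec(&exp, candidates[i], 0, NULL, 0) == 0)
				printf("match: %s\n", candidates[i]);

		regfree(&exp);
		return 0;
	}
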
diff --git a/yang/frr-eigrpd.yang b/yang/frr-eigrpd.yang
index 853d823880..0c62954570 100644
--- a/yang/frr-eigrpd.yang
+++ b/yang/frr-eigrpd.yang
@@ -23,6 +23,11 @@ module frr-eigrpd {
description
"This module defines a model for managing FRR eigrpd daemon.";
+ revision 2019-09-09 {
+ description
+ "Changed interface references to use
+ frr-interface:interface-ref typedef";
+ }
revision 2019-06-19 {
description "Initial revision.";
reference
@@ -94,9 +99,7 @@ module frr-eigrpd {
leaf-list passive-interface {
description "List of suppressed interfaces";
- type string {
- length "1..16";
- }
+ type frr-interface:interface-ref;
}
leaf active-time {
diff --git a/yang/frr-interface.yang b/yang/frr-interface.yang
index d3cc66dfaa..4f7f3beebd 100644
--- a/yang/frr-interface.yang
+++ b/yang/frr-interface.yang
@@ -11,6 +11,10 @@ module frr-interface {
description
"This module defines a model for managing FRR interfaces.";
+ revision 2019-09-09 {
+ description
+ "Added interface-ref typedef";
+ }
revision 2018-03-28 {
description
"Initial revision.";
@@ -43,4 +47,13 @@ module frr-interface {
}
}
}
+
+ typedef interface-ref {
+ type leafref {
+ require-instance false;
+ path "/frr-interface:lib/frr-interface:interface/frr-interface:name";
+ }
+ description
+ "Reference to an interface";
+ }
}
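
The interface-ref typedef added here is a leafref into /frr-interface:lib/interface/name with require-instance false, so (per standard YANG leafref semantics) configuration may name an interface that does not yet exist in the data tree. That preserves the pre-provisioning behaviour the old free-form strings allowed, which is why eigrpd, isisd, ripd and ripngd below can switch to the typedef without tightening their validation.
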
diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang
index 9180b0c5f3..3313dc2f20 100644
--- a/yang/frr-isisd.yang
+++ b/yang/frr-isisd.yang
@@ -27,6 +27,11 @@ module frr-isisd {
description
"This module defines a model for managing FRR isisd daemon.";
+ revision 2019-09-09 {
+ description
+ "Changed interface references to use
+ frr-interface:interface-ref typedef";
+ }
revision 2018-07-26 {
description
"Initial revision.";
@@ -296,7 +301,7 @@ module frr-isisd {
description
"Interface specific IS-IS notification data grouping";
leaf interface-name {
- type string;
+ type frr-interface:interface-ref;
description
"IS-IS interface name";
}
diff --git a/yang/frr-ripd.yang b/yang/frr-ripd.yang
index 07690793f0..94a9ebf3e1 100644
--- a/yang/frr-ripd.yang
+++ b/yang/frr-ripd.yang
@@ -24,6 +24,11 @@ module frr-ripd {
description
"This module defines a model for managing FRR ripd daemon.";
+ revision 2019-09-09 {
+ description
+ "Changed interface references to use
+ frr-interface:interface-ref typedef";
+ }
revision 2017-12-06 {
description
"Initial revision.";
@@ -113,9 +118,7 @@ module frr-ripd {
"Enable RIP on the specified IP network.";
}
leaf-list interface {
- type string {
- length "1..16";
- }
+ type frr-interface:interface-ref;
description
"Enable RIP on the specified interface.";
}
@@ -124,7 +127,15 @@ module frr-ripd {
description
"Offset-list to modify route metric.";
leaf interface {
- type string;
+ type union {
+ type frr-interface:interface-ref;
+ type enumeration {
+ enum '*' {
+ description
+ "Match all interfaces.";
+ }
+ }
+ }
description
"Interface to match. Use '*' to match all interfaces.";
}
@@ -168,18 +179,14 @@ module frr-ripd {
}
leaf-list passive-interface {
when "../passive-default = 'false'";
- type string {
- length "1..16";
- }
+ type frr-interface:interface-ref;
description
"A list of interfaces where the sending of RIP packets
is disabled.";
}
leaf-list non-passive-interface {
when "../passive-default = 'true'";
- type string {
- length "1..16";
- }
+ type frr-interface:interface-ref;
description
"A list of interfaces where the sending of RIP packets
is enabled.";
diff --git a/yang/frr-ripngd.yang b/yang/frr-ripngd.yang
index b341b438a4..831758af86 100644
--- a/yang/frr-ripngd.yang
+++ b/yang/frr-ripngd.yang
@@ -24,6 +24,11 @@ module frr-ripngd {
description
"This module defines a model for managing FRR ripngd daemon.";
+ revision 2019-09-09 {
+ description
+ "Changed interface references to use
+ frr-interface:interface-ref typedef";
+ }
revision 2018-11-27 {
description
"Initial revision.";
@@ -71,9 +76,7 @@ module frr-ripngd {
"Enable RIPng on the specified IPv6 network.";
}
leaf-list interface {
- type string {
- length "1..16";
- }
+ type frr-interface:interface-ref;
description
"Enable RIPng on the specified interface.";
}
@@ -82,7 +85,15 @@ module frr-ripngd {
description
"Offset-list to modify route metric.";
leaf interface {
- type string;
+ type union {
+ type frr-interface:interface-ref;
+ type enumeration {
+ enum '*' {
+ description
+ "Match all interfaces.";
+ }
+ }
+ }
description
"Interface to match. Use '*' to match all interfaces.";
}
@@ -118,9 +129,7 @@ module frr-ripngd {
}
}
leaf-list passive-interface {
- type string {
- length "1..16";
- }
+ type frr-interface:interface-ref;
description
"A list of interfaces where the sending of RIPng packets
is disabled.";
diff --git a/yang/libyang_plugins/frr_user_types.c b/yang/libyang_plugins/frr_user_types.c
index 4814f5bc1d..a293c3a883 100644
--- a/yang/libyang_plugins/frr_user_types.c
+++ b/yang/libyang_plugins/frr_user_types.c
@@ -20,6 +20,7 @@
#include <zebra.h>
#include "prefix.h"
+#include "ipaddr.h"
#include <libyang/user_types.h>
@@ -53,6 +54,21 @@ static int ipv6_address_store_clb(const char *type_name, const char *value_str,
return 0;
}
+static int ip_address_store_clb(const char *type_name, const char *value_str,
+ lyd_val *value, char **err_msg)
+{
+ value->ptr = malloc(sizeof(struct ipaddr));
+ if (!value->ptr)
+ return 1;
+
+ if (str2ipaddr(value_str, value->ptr)) {
+ free(value->ptr);
+ return 1;
+ }
+
+ return 0;
+}
+
static int ipv4_prefix_store_clb(const char *type_name, const char *value_str,
lyd_val *value, char **err_msg)
{
@@ -92,6 +108,8 @@ struct lytype_plugin_list frr_user_types[] = {
ipv6_address_store_clb, free},
{"ietf-inet-types", "2013-07-15", "ipv6-address-no-zone",
ipv6_address_store_clb, free},
+ {"ietf-inet-types", "2013-07-15", "ip-address", ip_address_store_clb,
+ free},
{"ietf-inet-types", "2013-07-15", "ipv4-prefix", ipv4_prefix_store_clb,
free},
{"ietf-inet-types", "2013-07-15", "ipv6-prefix", ipv6_prefix_store_clb,
diff --git a/zebra/connected.c b/zebra/connected.c
index ffc991861c..6b92945c63 100644
--- a/zebra/connected.c
+++ b/zebra/connected.c
@@ -235,7 +235,7 @@ void connected_up(struct interface *ifp, struct connected *ifc)
return;
break;
case AFI_IP6:
-#ifndef LINUX
+#ifndef GNU_LINUX
/* XXX: It is already done by rib_bogus_ipv6 within rib_add */
if (IN6_IS_ADDR_UNSPECIFIED(&p.u.prefix6))
return;
diff --git a/zebra/if_ioctl_solaris.c b/zebra/if_ioctl_solaris.c
index 8b539a9049..2a2504ebf8 100644
--- a/zebra/if_ioctl_solaris.c
+++ b/zebra/if_ioctl_solaris.c
@@ -60,7 +60,7 @@ static int interface_list_ioctl(int af)
size_t needed, lastneeded = 0;
char *buf = NULL;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(af, SOCK_DGRAM, 0);
}
@@ -72,7 +72,7 @@ static int interface_list_ioctl(int af)
}
calculate_lifc_len:
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
lifn.lifn_family = af;
lifn.lifn_flags = LIFC_NOXMIT;
/* we want NOXMIT interfaces too */
@@ -107,7 +107,7 @@ calculate_lifc_len:
lifconf.lifc_len = needed;
lifconf.lifc_buf = buf;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = ioctl(sock, SIOCGLIFCONF, &lifconf);
}
diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c
index e157c2d70a..c71b95f753 100644
--- a/zebra/if_netlink.c
+++ b/zebra/if_netlink.c
@@ -385,7 +385,7 @@ static int get_iflink_speed(struct interface *interface)
ifdata.ifr_data = (caddr_t)&ecmd;
/* use ioctl to get IP address of an interface */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sd = vrf_socket(PF_INET, SOCK_DGRAM, IPPROTO_IP,
interface->vrf_id,
NULL);
diff --git a/zebra/ioctl.c b/zebra/ioctl.c
index 8202e076af..b461a08881 100644
--- a/zebra/ioctl.c
+++ b/zebra/ioctl.c
@@ -57,7 +57,7 @@ int if_ioctl(unsigned long request, caddr_t buffer)
int ret;
int err = 0;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
zlog_err("Cannot create UDP socket: %s",
@@ -83,7 +83,7 @@ int vrf_if_ioctl(unsigned long request, caddr_t buffer, vrf_id_t vrf_id)
int ret;
int err = 0;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = vrf_socket(AF_INET, SOCK_DGRAM, 0, vrf_id, NULL);
if (sock < 0) {
zlog_err("Cannot create UDP socket: %s",
@@ -110,7 +110,7 @@ static int if_ioctl_ipv6(unsigned long request, caddr_t buffer)
int ret;
int err = 0;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock < 0) {
zlog_err("Cannot create IPv6 datagram socket: %s",
diff --git a/zebra/ioctl_solaris.c b/zebra/ioctl_solaris.c
index 1f96fa23ea..2c71d949f7 100644
--- a/zebra/ioctl_solaris.c
+++ b/zebra/ioctl_solaris.c
@@ -66,7 +66,7 @@ int if_ioctl(unsigned long request, caddr_t buffer)
int ret;
int err;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
@@ -96,7 +96,7 @@ int if_ioctl_ipv6(unsigned long request, caddr_t buffer)
int ret;
int err;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET6, SOCK_DGRAM, 0);
if (sock < 0) {
diff --git a/zebra/ipforward_proc.c b/zebra/ipforward_proc.c
index 8f44c377b3..709d2176aa 100644
--- a/zebra/ipforward_proc.c
+++ b/zebra/ipforward_proc.c
@@ -76,7 +76,7 @@ int ipforward_on(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv4_forwarding, "w");
@@ -97,7 +97,7 @@ int ipforward_off(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv4_forwarding, "w");
@@ -143,7 +143,7 @@ int ipforward_ipv6_on(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv6_forwarding, "w");
@@ -165,7 +165,7 @@ int ipforward_ipv6_off(void)
{
FILE *fp;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fp = fopen(proc_ipv6_forwarding, "w");
diff --git a/zebra/ipforward_solaris.c b/zebra/ipforward_solaris.c
index 1bb743059c..1a45328248 100644
--- a/zebra/ipforward_solaris.c
+++ b/zebra/ipforward_solaris.c
@@ -83,7 +83,7 @@ static int solaris_nd(const int cmd, const char *parameter, const int value)
strioctl.ic_len = ND_BUFFER_SIZE;
strioctl.ic_dp = nd_buf;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if ((fd = open(device, O_RDWR)) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"failed to open device %s - %s", device,
diff --git a/zebra/ipforward_sysctl.c b/zebra/ipforward_sysctl.c
index cc9421c275..ac8f537075 100644
--- a/zebra/ipforward_sysctl.c
+++ b/zebra/ipforward_sysctl.c
@@ -56,7 +56,7 @@ int ipforward_on(void)
int ipforwarding = 1;
len = sizeof ipforwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib, MIB_SIZ, NULL, NULL, &ipforwarding, len) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"Can't set ipforwarding on");
@@ -72,7 +72,7 @@ int ipforward_off(void)
int ipforwarding = 0;
len = sizeof ipforwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib, MIB_SIZ, NULL, NULL, &ipforwarding, len) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"Can't set ipforwarding on");
@@ -97,7 +97,7 @@ int ipforward_ipv6(void)
int ip6forwarding = 0;
len = sizeof ip6forwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib_ipv6, MIB_SIZ, &ip6forwarding, &len, 0, 0) < 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
"can't get ip6forwarding value");
@@ -113,7 +113,7 @@ int ipforward_ipv6_on(void)
int ip6forwarding = 1;
len = sizeof ip6forwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib_ipv6, MIB_SIZ, NULL, NULL, &ip6forwarding, len)
< 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
@@ -130,7 +130,7 @@ int ipforward_ipv6_off(void)
int ip6forwarding = 0;
len = sizeof ip6forwarding;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (sysctl(mib_ipv6, MIB_SIZ, NULL, NULL, &ip6forwarding, len)
< 0) {
flog_err_sys(EC_LIB_SYSTEM_CALL,
diff --git a/zebra/irdp_main.c b/zebra/irdp_main.c
index 38d241eaa5..0de618625d 100644
--- a/zebra/irdp_main.c
+++ b/zebra/irdp_main.c
@@ -82,7 +82,7 @@ int irdp_sock_init(void)
int save_errno;
int sock;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
save_errno = errno;
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index 2c306434a3..f52b4746ae 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -183,7 +183,7 @@ static int netlink_recvbuf(struct nlsock *nl, uint32_t newsize)
}
/* Try force option (linux >= 2.6.14) and fall back to normal set */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = setsockopt(nl->sock, SOL_SOCKET, SO_RCVBUFFORCE,
&nl_rcvbufsize,
sizeof(nl_rcvbufsize));
@@ -220,7 +220,7 @@ static int netlink_socket(struct nlsock *nl, unsigned long groups,
int sock;
int namelen;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = ns_socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE, ns_id);
if (sock < 0) {
zlog_err("Can't open %s socket: %s", nl->name,
@@ -352,7 +352,7 @@ static void netlink_write_incoming(const char *buf, const unsigned int size,
FILE *f;
snprintf(fname, MAXPATHLEN, "%s/%s_%u", frr_vtydir, "netlink", counter);
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
f = fopen(fname, "w");
}
if (f) {
@@ -373,7 +373,7 @@ static long netlink_read_file(char *buf, const char *fname)
FILE *f;
long file_bytes = -1;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
f = fopen(fname, "r");
}
if (f) {
@@ -989,7 +989,7 @@ int netlink_talk_info(int (*filter)(struct nlmsghdr *, ns_id_t, int startup),
n->nlmsg_flags);
/* Send message to netlink interface. */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
status = sendmsg(nl->sock, &msg, 0);
save_errno = errno;
}
@@ -1056,7 +1056,7 @@ int netlink_request(struct nlsock *nl, struct nlmsghdr *n)
snl.nl_family = AF_NETLINK;
/* Raise capabilities and send message, then lower capabilities. */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = sendto(nl->sock, (void *)n, n->nlmsg_len, 0,
(struct sockaddr *)&snl, sizeof snl);
}
diff --git a/zebra/kernel_socket.c b/zebra/kernel_socket.c
index 156ce50725..60fbbcc059 100644
--- a/zebra/kernel_socket.c
+++ b/zebra/kernel_socket.c
@@ -1426,7 +1426,7 @@ static int kernel_read(struct thread *thread)
/* Make routing socket. */
static void routing_socket(struct zebra_ns *zns)
{
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
routing_sock = ns_socket(AF_ROUTE, SOCK_RAW, 0, zns->ns_id);
dplane_routing_sock =
diff --git a/zebra/rt.h b/zebra/rt.h
index 727d2d0c55..59b42fed18 100644
--- a/zebra/rt.h
+++ b/zebra/rt.h
@@ -57,6 +57,8 @@ enum zebra_dplane_result kernel_address_update_ctx(
enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx);
+enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx);
+
extern int kernel_neigh_update(int cmd, int ifindex, uint32_t addr, char *lla,
int llalen, ns_id_t ns_id);
extern int kernel_interface_set_master(struct interface *master,
@@ -66,15 +68,6 @@ extern int mpls_kernel_init(void);
extern uint32_t kernel_get_speed(struct interface *ifp);
extern int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *mroute);
-extern int kernel_add_vtep(vni_t vni, struct interface *ifp,
- struct in_addr *vtep_ip);
-extern int kernel_del_vtep(vni_t vni, struct interface *ifp,
- struct in_addr *vtep_ip);
-extern int kernel_add_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags);
-extern int kernel_del_neigh(struct interface *ifp, struct ipaddr *ip);
-extern int kernel_upd_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags, uint16_t state);
/*
* Southbound Initialization routines to get initial starting
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 3655763164..91a3024038 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -92,6 +92,41 @@ void rt_netlink_init(void)
inet_pton(AF_INET, ipv4_ll_buf, &ipv4_ll);
}
+/*
+ * Mapping from dataplane neighbor flags to netlink flags
+ */
+static uint8_t neigh_flags_to_netlink(uint8_t dplane_flags)
+{
+ uint8_t flags = 0;
+
+ if (dplane_flags & DPLANE_NTF_EXT_LEARNED)
+ flags |= NTF_EXT_LEARNED;
+ if (dplane_flags & DPLANE_NTF_ROUTER)
+ flags |= NTF_ROUTER;
+
+ return flags;
+}
+
+/*
+ * Mapping from dataplane neighbor state to netlink state
+ */
+static uint16_t neigh_state_to_netlink(uint16_t dplane_state)
+{
+ uint16_t state = 0;
+
+ if (dplane_state & DPLANE_NUD_REACHABLE)
+ state |= NUD_REACHABLE;
+ if (dplane_state & DPLANE_NUD_STALE)
+ state |= NUD_STALE;
+ if (dplane_state & DPLANE_NUD_NOARP)
+ state |= NUD_NOARP;
+ if (dplane_state & DPLANE_NUD_PROBE)
+ state |= NUD_PROBE;
+
+ return state;
+}
+
+
static inline int is_selfroute(int proto)
{
if ((proto == RTPROT_BGP) || (proto == RTPROT_OSPF)
@@ -1019,33 +1054,28 @@ static void _netlink_route_build_singlepath(const char *routedesc, int bytelen,
label_buf[0] = '\0';
assert(nexthop);
- for (const struct nexthop *nh = nexthop; nh; nh = nh->rparent) {
- char label_buf1[20];
+ char label_buf1[20];
- nh_label = nh->nh_label;
- if (!nh_label || !nh_label->num_labels)
- continue;
+ nh_label = nexthop->nh_label;
- for (int i = 0; i < nh_label->num_labels; i++) {
- if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
- continue;
+ for (int i = 0; nh_label && i < nh_label->num_labels; i++) {
+ if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
+ continue;
- if (IS_ZEBRA_DEBUG_KERNEL) {
- if (!num_labels)
- sprintf(label_buf, "label %u",
- nh_label->label[i]);
- else {
- sprintf(label_buf1, "/%u",
- nh_label->label[i]);
- strlcat(label_buf, label_buf1,
- sizeof(label_buf));
- }
+ if (IS_ZEBRA_DEBUG_KERNEL) {
+ if (!num_labels)
+ sprintf(label_buf, "label %u",
+ nh_label->label[i]);
+ else {
+ sprintf(label_buf1, "/%u", nh_label->label[i]);
+ strlcat(label_buf, label_buf1,
+ sizeof(label_buf));
}
-
- out_lse[num_labels] =
- mpls_lse_encode(nh_label->label[i], 0, 0, 0);
- num_labels++;
}
+
+ out_lse[num_labels] =
+ mpls_lse_encode(nh_label->label[i], 0, 0, 0);
+ num_labels++;
}
if (num_labels) {
@@ -1210,33 +1240,28 @@ static void _netlink_route_build_multipath(const char *routedesc, int bytelen,
label_buf[0] = '\0';
assert(nexthop);
- for (const struct nexthop *nh = nexthop; nh; nh = nh->rparent) {
- char label_buf1[20];
+ char label_buf1[20];
- nh_label = nh->nh_label;
- if (!nh_label || !nh_label->num_labels)
- continue;
+ nh_label = nexthop->nh_label;
- for (int i = 0; i < nh_label->num_labels; i++) {
- if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
- continue;
+ for (int i = 0; nh_label && i < nh_label->num_labels; i++) {
+ if (nh_label->label[i] == MPLS_LABEL_IMPLICIT_NULL)
+ continue;
- if (IS_ZEBRA_DEBUG_KERNEL) {
- if (!num_labels)
- sprintf(label_buf, "label %u",
- nh_label->label[i]);
- else {
- sprintf(label_buf1, "/%u",
- nh_label->label[i]);
- strlcat(label_buf, label_buf1,
- sizeof(label_buf));
- }
+ if (IS_ZEBRA_DEBUG_KERNEL) {
+ if (!num_labels)
+ sprintf(label_buf, "label %u",
+ nh_label->label[i]);
+ else {
+ sprintf(label_buf1, "/%u", nh_label->label[i]);
+ strlcat(label_buf, label_buf1,
+ sizeof(label_buf));
}
-
- out_lse[num_labels] =
- mpls_lse_encode(nh_label->label[i], 0, 0, 0);
- num_labels++;
}
+
+ out_lse[num_labels] =
+ mpls_lse_encode(nh_label->label[i], 0, 0, 0);
+ num_labels++;
}
if (num_labels) {
@@ -1902,19 +1927,17 @@ int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
* Add remote VTEP to the flood list for this VxLAN interface (VNI). This
* is done by adding an FDB entry with a MAC of 00:00:00:00:00:00.
*/
-static int netlink_vxlan_flood_list_update(struct interface *ifp,
- struct in_addr *vtep_ip, int cmd)
+static int netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx,
+ int cmd)
{
- struct zebra_ns *zns;
struct {
struct nlmsghdr n;
struct ndmsg ndm;
char buf[256];
} req;
uint8_t dst_mac[6] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
- struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(ifp->vrf_id);
+ const struct ipaddr *addr;
- zns = zvrf->zns;
memset(&req, 0, sizeof(req));
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
@@ -1928,39 +1951,14 @@ static int netlink_vxlan_flood_list_update(struct interface *ifp,
addattr_l(&req.n, sizeof(req), NDA_LLADDR, &dst_mac, 6);
- req.ndm.ndm_ifindex = ifp->ifindex;
- addattr_l(&req.n, sizeof(req), NDA_DST, &vtep_ip->s_addr, 4);
+ req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx);
- return netlink_talk(netlink_talk_filter, &req.n, &zns->netlink_cmd, zns,
- 0);
-}
-
-/*
- * Add remote VTEP for this VxLAN interface (VNI). In Linux, this involves
- * adding
- * a "flood" MAC FDB entry.
- */
-int kernel_add_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
-{
- if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
- inet_ntoa(*vtep_ip), vni, ifp->name, ifp->ifindex);
+ addr = dplane_ctx_neigh_get_ipaddr(ctx);
- return netlink_vxlan_flood_list_update(ifp, vtep_ip, RTM_NEWNEIGH);
-}
+ addattr_l(&req.n, sizeof(req), NDA_DST, &(addr->ipaddr_v4), 4);
-/*
- * Remove remote VTEP for this VxLAN interface (VNI). In Linux, this involves
- * deleting the "flood" MAC FDB entry.
- */
-int kernel_del_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
-{
- if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug(
- "Uninstall %s from flood list for VNI %u intf %s(%u)",
- inet_ntoa(*vtep_ip), vni, ifp->name, ifp->ifindex);
-
- return netlink_vxlan_flood_list_update(ifp, vtep_ip, RTM_DELNEIGH);
+ return netlink_talk_info(netlink_talk_filter, &req.n,
+ dplane_ctx_get_ns(ctx), 0);
}
#ifndef NDA_RTA
@@ -2306,13 +2304,7 @@ netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx)
} req;
int ret;
int dst_alen;
- struct zebra_if *zif;
- struct interface *br_if;
- struct zebra_if *br_zif;
int vid_present = 0;
- char vid_buf[20];
- struct zebra_ns *zns;
- struct interface *ifp;
int cmd;
struct in_addr vtep_ip;
vlanid_t vid;
@@ -2322,42 +2314,6 @@ netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx)
else
cmd = RTM_DELNEIGH;
- /* Locate zebra ns and interface objects from context data */
- zns = zebra_ns_lookup(dplane_ctx_get_ns(ctx)->ns_id);
- if (zns == NULL) {
- /* Nothing to be done */
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("MAC %s on IF %s(%u) - zebra ns unknown",
- (cmd == RTM_NEWNEIGH) ? "add" : "del",
- dplane_ctx_get_ifname(ctx),
- dplane_ctx_get_ifindex(ctx));
-
- return ZEBRA_DPLANE_REQUEST_FAILURE;
- }
-
- ifp = if_lookup_by_index_per_ns(zns, dplane_ctx_get_ifindex(ctx));
- if (ifp == NULL) {
- /* Nothing to be done */
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("MAC %s on IF %s(%u) - interface unknown",
- (cmd == RTM_NEWNEIGH) ? "add" : "del",
- dplane_ctx_get_ifname(ctx),
- dplane_ctx_get_ifindex(ctx));
- return ZEBRA_DPLANE_REQUEST_FAILURE;
- }
-
- vid = dplane_ctx_mac_get_vlan(ctx);
-
- zif = ifp->info;
- if ((br_if = zif->brslave_info.br_if) == NULL) {
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("MAC %s on IF %s(%u) - no mapping to bridge",
- (cmd == RTM_NEWNEIGH) ? "add" : "del",
- ifp->name, ifp->ifindex);
- return ZEBRA_DPLANE_REQUEST_FAILURE;
- }
-
memset(&req, 0, sizeof(req));
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
@@ -2376,24 +2332,31 @@ netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx)
addattr_l(&req.n, sizeof(req), NDA_LLADDR,
dplane_ctx_mac_get_addr(ctx), 6);
- req.ndm.ndm_ifindex = ifp->ifindex;
+ req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx);
dst_alen = 4; // TODO: hardcoded
vtep_ip = *(dplane_ctx_mac_get_vtep_ip(ctx));
addattr_l(&req.n, sizeof(req), NDA_DST, &vtep_ip, dst_alen);
- br_zif = (struct zebra_if *)br_if->info;
- if (IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif) && vid > 0) {
+ vid = dplane_ctx_mac_get_vlan(ctx);
+
+ if (vid > 0) {
addattr16(&req.n, sizeof(req), NDA_VLAN, vid);
vid_present = 1;
- sprintf(vid_buf, " VLAN %u", vid);
}
- addattr32(&req.n, sizeof(req), NDA_MASTER, br_if->ifindex);
+ addattr32(&req.n, sizeof(req), NDA_MASTER,
+ dplane_ctx_mac_get_br_ifindex(ctx));
if (IS_ZEBRA_DEBUG_KERNEL) {
char ipbuf[PREFIX_STRLEN];
char buf[ETHER_ADDR_STRLEN];
char dst_buf[PREFIX_STRLEN + 10];
+ char vid_buf[20];
+
+ if (vid_present)
+ snprintf(vid_buf, sizeof(vid_buf), " VLAN %u", vid);
+ else
+ vid_buf[0] = '\0';
inet_ntop(AF_INET, &vtep_ip, ipbuf, sizeof(ipbuf));
snprintf(dst_buf, sizeof(dst_buf), " dst %s", ipbuf);
@@ -2401,8 +2364,9 @@ netlink_macfdb_update_ctx(struct zebra_dplane_ctx *ctx)
zlog_debug("Tx %s family %s IF %s(%u)%s %sMAC %s%s",
nl_msg_type_to_str(cmd),
- nl_family_to_str(req.ndm.ndm_family), ifp->name,
- ifp->ifindex, vid_present ? vid_buf : "",
+ nl_family_to_str(req.ndm.ndm_family),
+ dplane_ctx_get_ifname(ctx),
+ dplane_ctx_get_ifindex(ctx), vid_buf,
dplane_ctx_mac_is_sticky(ctx) ? "sticky " : "",
buf, dst_buf);
}
@@ -2778,9 +2742,11 @@ int netlink_neigh_change(struct nlmsghdr *h, ns_id_t ns_id)
return 0;
}
-static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags,
- uint16_t state, int cmd)
+/*
+ * Utility neighbor-update function, using info from dplane context.
+ */
+static int netlink_neigh_update_ctx(const struct zebra_dplane_ctx *ctx,
+ int cmd)
{
struct {
struct nlmsghdr n;
@@ -2788,15 +2754,23 @@ static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
char buf[256];
} req;
int ipa_len;
-
- struct zebra_ns *zns;
char buf[INET6_ADDRSTRLEN];
char buf2[ETHER_ADDR_STRLEN];
- struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(ifp->vrf_id);
+ const struct ipaddr *ip;
+ const struct ethaddr *mac;
+ uint8_t flags;
+ uint16_t state;
- zns = zvrf->zns;
memset(&req, 0, sizeof(req));
+ ip = dplane_ctx_neigh_get_ipaddr(ctx);
+ mac = dplane_ctx_neigh_get_mac(ctx);
+ if (is_zero_mac(mac))
+ mac = NULL;
+
+ flags = neigh_flags_to_netlink(dplane_ctx_neigh_get_flags(ctx));
+ state = neigh_state_to_netlink(dplane_ctx_neigh_get_state(ctx));
+
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
req.n.nlmsg_flags = NLM_F_REQUEST;
if (cmd == RTM_NEWNEIGH)
@@ -2804,7 +2778,7 @@ static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
req.n.nlmsg_type = cmd; // RTM_NEWNEIGH or RTM_DELNEIGH
req.ndm.ndm_family = IS_IPADDR_V4(ip) ? AF_INET : AF_INET6;
req.ndm.ndm_state = state;
- req.ndm.ndm_ifindex = ifp->ifindex;
+ req.ndm.ndm_ifindex = dplane_ctx_get_ifindex(ctx);
req.ndm.ndm_type = RTN_UNICAST;
req.ndm.ndm_flags = flags;
@@ -2816,13 +2790,16 @@ static int netlink_neigh_update2(struct interface *ifp, struct ipaddr *ip,
if (IS_ZEBRA_DEBUG_KERNEL)
zlog_debug("Tx %s family %s IF %s(%u) Neigh %s MAC %s flags 0x%x state 0x%x",
nl_msg_type_to_str(cmd),
- nl_family_to_str(req.ndm.ndm_family), ifp->name,
- ifp->ifindex, ipaddr2str(ip, buf, sizeof(buf)),
+ nl_family_to_str(req.ndm.ndm_family),
+ dplane_ctx_get_ifname(ctx),
+ dplane_ctx_get_ifindex(ctx),
+ ipaddr2str(ip, buf, sizeof(buf)),
mac ? prefix_mac2str(mac, buf2, sizeof(buf2))
- : "null", flags, state);
+ : "null",
+ flags, state);
- return netlink_talk(netlink_talk_filter, &req.n, &zns->netlink_cmd, zns,
- 0);
+ return netlink_talk_info(netlink_talk_filter, &req.n,
+ dplane_ctx_get_ns(ctx), 0);
}
/*
@@ -2833,23 +2810,30 @@ enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx)
return netlink_macfdb_update_ctx(ctx);
}
-int kernel_add_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags)
+enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx)
{
- return netlink_neigh_update2(ifp, ip, mac, flags,
- NUD_NOARP, RTM_NEWNEIGH);
-}
+ int ret = -1;
-int kernel_del_neigh(struct interface *ifp, struct ipaddr *ip)
-{
- return netlink_neigh_update2(ifp, ip, NULL, 0, 0, RTM_DELNEIGH);
-}
+ switch (dplane_ctx_get_op(ctx)) {
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ ret = netlink_neigh_update_ctx(ctx, RTM_NEWNEIGH);
+ break;
+ case DPLANE_OP_NEIGH_DELETE:
+ ret = netlink_neigh_update_ctx(ctx, RTM_DELNEIGH);
+ break;
+ case DPLANE_OP_VTEP_ADD:
+ ret = netlink_vxlan_flood_update_ctx(ctx, RTM_NEWNEIGH);
+ break;
+ case DPLANE_OP_VTEP_DELETE:
+ ret = netlink_vxlan_flood_update_ctx(ctx, RTM_DELNEIGH);
+ break;
+ default:
+ break;
+ }
-int kernel_upd_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags, uint16_t state)
-{
- return netlink_neigh_update2(ifp, ip, mac, flags,
- state, RTM_NEWNEIGH);
+ return (ret == 0 ?
+ ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE);
}
/*
diff --git a/zebra/rt_socket.c b/zebra/rt_socket.c
index 7e9a42a617..dc0f29bdbc 100644
--- a/zebra/rt_socket.c
+++ b/zebra/rt_socket.c
@@ -314,7 +314,7 @@ enum zebra_dplane_result kernel_route_update(struct zebra_dplane_ctx *ctx)
type = dplane_ctx_get_type(ctx);
old_type = dplane_ctx_get_old_type(ctx);
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
if (dplane_ctx_get_op(ctx) == DPLANE_OP_ROUTE_DELETE) {
if (!RSYSTEM_ROUTE(type))
@@ -371,17 +371,13 @@ int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
return 0;
}
-extern int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *mroute)
-{
- return 0;
-}
-
-int kernel_add_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
+/* NYI on routing-socket platforms, but we've always returned 'success'... */
+enum zebra_dplane_result kernel_neigh_update_ctx(struct zebra_dplane_ctx *ctx)
{
- return 0;
+ return ZEBRA_DPLANE_REQUEST_SUCCESS;
}
-int kernel_del_vtep(vni_t vni, struct interface *ifp, struct in_addr *vtep_ip)
+extern int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *mroute)
{
return 0;
}
@@ -394,17 +390,6 @@ enum zebra_dplane_result kernel_mac_update_ctx(struct zebra_dplane_ctx *ctx)
return ZEBRA_DPLANE_REQUEST_SUCCESS;
}
-int kernel_add_neigh(struct interface *ifp, struct ipaddr *ip,
- struct ethaddr *mac, uint8_t flags)
-{
- return 0;
-}
-
-int kernel_del_neigh(struct interface *ifp, struct ipaddr *ip)
-{
- return 0;
-}
-
extern int kernel_interface_set_master(struct interface *master,
struct interface *slave)
{
diff --git a/zebra/rtadv.c b/zebra/rtadv.c
index 5841c44b03..b084fb99ca 100644
--- a/zebra/rtadv.c
+++ b/zebra/rtadv.c
@@ -760,7 +760,7 @@ static int rtadv_make_socket(ns_id_t ns_id)
int ret = 0;
struct icmp6_filter filter;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
sock = ns_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, ns_id);
diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c
index b6a8ee950c..826d31ef37 100644
--- a/zebra/zapi_msg.c
+++ b/zebra/zapi_msg.c
@@ -1749,88 +1749,146 @@ static void zread_vrf_unregister(ZAPI_HANDLER_ARGS)
vrf_bitmap_unset(client->ridinfo, zvrf_id(zvrf));
}
-static void zread_mpls_labels(ZAPI_HANDLER_ARGS)
+/*
+ * Handle request to create an MPLS LSP.
+ *
+ * A single message can fully specify an LSP with multiple nexthops.
+ *
+ * When the optional ZAPI_LABELS_FTN flag is set, the specified FEC (route) is
+ * updated to use the received label(s).
+ */
+static void zread_mpls_labels_add(ZAPI_HANDLER_ARGS)
{
struct stream *s;
- enum lsp_types_t type;
- struct prefix prefix;
- enum nexthop_types_t gtype;
- union g_addr gate;
- ifindex_t ifindex;
- mpls_label_t in_label, out_label;
- uint8_t distance;
+ struct zapi_labels zl;
/* Get input stream. */
s = msg;
+ if (zapi_labels_decode(s, &zl) < 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: Unable to decode zapi_labels sent",
+ __PRETTY_FUNCTION__);
+ return;
+ }
- /* Get data. */
- STREAM_GETC(s, type);
- STREAM_GETL(s, prefix.family);
- switch (prefix.family) {
- case AF_INET:
- STREAM_GET(&prefix.u.prefix4.s_addr, s, IPV4_MAX_BYTELEN);
- STREAM_GETC(s, prefix.prefixlen);
- if (prefix.prefixlen > IPV4_MAX_BITLEN) {
- zlog_debug(
- "%s: Specified prefix length %d is greater than a v4 address can support",
- __PRETTY_FUNCTION__, prefix.prefixlen);
- return;
- }
- STREAM_GET(&gate.ipv4.s_addr, s, IPV4_MAX_BYTELEN);
- break;
- case AF_INET6:
- STREAM_GET(&prefix.u.prefix6, s, 16);
- STREAM_GETC(s, prefix.prefixlen);
- if (prefix.prefixlen > IPV6_MAX_BITLEN) {
- zlog_debug(
- "%s: Specified prefix length %d is greater than a v6 address can support",
- __PRETTY_FUNCTION__, prefix.prefixlen);
- return;
- }
- STREAM_GET(&gate.ipv6, s, 16);
- break;
- default:
- zlog_debug("%s: Specified AF %d is not supported for this call",
- __PRETTY_FUNCTION__, prefix.family);
+ if (!mpls_enabled)
return;
+
+ for (int i = 0; i < zl.nexthop_num; i++) {
+ struct zapi_nexthop_label *znh;
+
+ znh = &zl.nexthops[i];
+ mpls_lsp_install(zvrf, zl.type, zl.local_label, znh->label,
+ znh->type, &znh->address, znh->ifindex);
+
+ if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
+ mpls_ftn_update(1, zvrf, zl.type, &zl.route.prefix,
+ znh->type, &znh->address, znh->ifindex,
+ zl.route.type, zl.route.instance,
+ znh->label);
}
- STREAM_GETL(s, ifindex);
- STREAM_GETC(s, distance);
- STREAM_GETL(s, in_label);
- STREAM_GETL(s, out_label);
+}
- switch (prefix.family) {
- case AF_INET:
- if (ifindex)
- gtype = NEXTHOP_TYPE_IPV4_IFINDEX;
- else
- gtype = NEXTHOP_TYPE_IPV4;
- break;
- case AF_INET6:
- if (ifindex)
- gtype = NEXTHOP_TYPE_IPV6_IFINDEX;
- else
- gtype = NEXTHOP_TYPE_IPV6;
- break;
- default:
+/*
+ * Handle request to delete an MPLS LSP.
+ *
+ * An LSP is identified by its type and local label. When the received message
+ * doesn't contain any nexthop, the whole LSP is deleted. Otherwise, only the
+ * listed LSP nexthops (aka NHLFEs) are deleted.
+ *
+ * When the optional ZAPI_LABELS_FTN flag is set, the labels of the specified
+ * FEC (route) nexthops are deleted.
+ */
+static void zread_mpls_labels_delete(ZAPI_HANDLER_ARGS)
+{
+ struct stream *s;
+ struct zapi_labels zl;
+
+ /* Get input stream. */
+ s = msg;
+ if (zapi_labels_decode(s, &zl) < 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: Unable to decode zapi_labels sent",
+ __PRETTY_FUNCTION__);
return;
}
if (!mpls_enabled)
return;
- if (hdr->command == ZEBRA_MPLS_LABELS_ADD) {
- mpls_lsp_install(zvrf, type, in_label, out_label, gtype, &gate,
- ifindex);
- mpls_ftn_update(1, zvrf, type, &prefix, gtype, &gate, ifindex,
- distance, out_label);
- } else if (hdr->command == ZEBRA_MPLS_LABELS_DELETE) {
- mpls_lsp_uninstall(zvrf, type, in_label, gtype, &gate, ifindex);
- mpls_ftn_update(0, zvrf, type, &prefix, gtype, &gate, ifindex,
- distance, out_label);
+ if (zl.nexthop_num > 0) {
+ for (int i = 0; i < zl.nexthop_num; i++) {
+ struct zapi_nexthop_label *znh;
+
+ znh = &zl.nexthops[i];
+ mpls_lsp_uninstall(zvrf, zl.type, zl.local_label,
+ znh->type, &znh->address,
+ znh->ifindex);
+
+ if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
+ mpls_ftn_update(0, zvrf, zl.type,
+ &zl.route.prefix, znh->type,
+ &znh->address, znh->ifindex,
+ zl.route.type,
+ zl.route.instance, znh->label);
+ }
+ } else {
+ mpls_lsp_uninstall_all_vrf(zvrf, zl.type, zl.local_label);
+
+ if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
+ mpls_ftn_uninstall(zvrf, zl.type, &zl.route.prefix,
+ zl.route.type, zl.route.instance);
+ }
+}
+
+/*
+ * Handle request to add an MPLS LSP or change an existing one.
+ *
+ * A single message can fully specify an LSP with multiple nexthops.
+ *
+ * When the optional ZAPI_LABELS_FTN flag is set, the specified FEC (route) is
+ * updated to use the received label(s).
+ *
+ * NOTE: zebra will use route replace semantics (make-before-break) to update
+ * the LSP in the forwarding plane if that's supported by the underlying
+ * platform.
+ */
+static void zread_mpls_labels_replace(ZAPI_HANDLER_ARGS)
+{
+ struct stream *s;
+ struct zapi_labels zl;
+
+ /* Get input stream. */
+ s = msg;
+ if (zapi_labels_decode(s, &zl) < 0) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: Unable to decode zapi_labels sent",
+ __PRETTY_FUNCTION__);
+ return;
+ }
+
+ if (!mpls_enabled)
+ return;
+
+ mpls_lsp_uninstall_all_vrf(zvrf, zl.type, zl.local_label);
+ if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN))
+ mpls_ftn_uninstall(zvrf, zl.type, &zl.route.prefix,
+ zl.route.type, zl.route.instance);
+
+ for (int i = 0; i < zl.nexthop_num; i++) {
+ struct zapi_nexthop_label *znh;
+
+ znh = &zl.nexthops[i];
+ mpls_lsp_install(zvrf, zl.type, zl.local_label, znh->label,
+ znh->type, &znh->address, znh->ifindex);
+
+ if (CHECK_FLAG(zl.message, ZAPI_LABELS_FTN)) {
+ mpls_ftn_update(1, zvrf, zl.type, &zl.route.prefix,
+ znh->type, &znh->address, znh->ifindex,
+ zl.route.type, zl.route.instance,
+ znh->label);
+ }
}
-stream_failure:
- return;
}
/* Send response to a table manager connect request to client */
@@ -2455,8 +2513,9 @@ void (*zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
[ZEBRA_INTERFACE_ENABLE_RADV] = NULL,
[ZEBRA_INTERFACE_DISABLE_RADV] = NULL,
#endif
- [ZEBRA_MPLS_LABELS_ADD] = zread_mpls_labels,
- [ZEBRA_MPLS_LABELS_DELETE] = zread_mpls_labels,
+ [ZEBRA_MPLS_LABELS_ADD] = zread_mpls_labels_add,
+ [ZEBRA_MPLS_LABELS_DELETE] = zread_mpls_labels_delete,
+ [ZEBRA_MPLS_LABELS_REPLACE] = zread_mpls_labels_replace,
[ZEBRA_IPMR_ROUTE_STATS] = zebra_ipmr_route_stats,
[ZEBRA_LABEL_MANAGER_CONNECT] = zread_label_manager_request,
[ZEBRA_LABEL_MANAGER_CONNECT_ASYNC] = zread_label_manager_request,
@@ -2507,7 +2566,7 @@ static void zserv_write_incoming(struct stream *orig, uint16_t command)
snprintf(fname, MAXPATHLEN, "%s/%u", frr_vtydir, command);
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
fd = open(fname, O_CREAT | O_WRONLY | O_EXCL, 0644);
}
stream_flush(copy, fd);
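
For reference, the decode side above expects the same struct zapi_labels layout that clients fill in. A hedged client-side sketch of an add with the FTN flag set, using only the fields visible in the handlers (zebra_send_mpls_labels() from lib/zclient is assumed to encode and send the message; the label values and prefixes are illustrative):

	#include <zebra.h>
	#include "lib/zclient.h"

	static void example_send_labels(struct zclient *zclient)
	{
		struct zapi_labels zl = {};

		zl.type = ZEBRA_LSP_STATIC;		/* assumed LSP type */
		zl.local_label = 100;

		/* also bind the label(s) to the matching FEC/route */
		SET_FLAG(zl.message, ZAPI_LABELS_FTN);
		str2prefix("192.0.2.0/24", &zl.route.prefix);
		zl.route.type = ZEBRA_ROUTE_STATIC;	/* assumed route type */
		zl.route.instance = 0;

		zl.nexthop_num = 1;
		zl.nexthops[0].type = NEXTHOP_TYPE_IPV4;
		inet_pton(AF_INET, "192.0.2.1", &zl.nexthops[0].address.ipv4);
		zl.nexthops[0].label = 200;

		zebra_send_mpls_labels(zclient, ZEBRA_MPLS_LABELS_ADD, &zl);
	}
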
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 512a9b4021..12f8a1ae3d 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -148,10 +148,11 @@ struct dplane_intf_info {
};
/*
- * MAC address info for the dataplane.
+ * EVPN MAC address info for the dataplane.
*/
struct dplane_mac_info {
vlanid_t vid;
+ ifindex_t br_ifindex;
struct ethaddr mac;
struct in_addr vtep_ip;
bool is_sticky;
@@ -159,6 +160,16 @@ struct dplane_mac_info {
};
/*
+ * EVPN neighbor info for the dataplane
+ */
+struct dplane_neigh_info {
+ struct ipaddr ip_addr;
+ struct ethaddr mac;
+ uint32_t flags;
+ uint16_t state;
+};
+
+/*
* The context block used to exchange info about route updates across
* the boundary between the zebra main context (and pthread) and the
* dataplane layer (and pthread).
@@ -204,6 +215,7 @@ struct zebra_dplane_ctx {
struct dplane_pw_info pw;
struct dplane_intf_info intf;
struct dplane_mac_info macinfo;
+ struct dplane_neigh_info neigh;
} u;
/* Namespace info, used especially for netlink kernel communication */
@@ -321,6 +333,9 @@ static struct zebra_dplane_globals {
_Atomic uint32_t dg_macs_in;
_Atomic uint32_t dg_mac_errors;
+ _Atomic uint32_t dg_neighs_in;
+ _Atomic uint32_t dg_neigh_errors;
+
_Atomic uint32_t dg_update_yields;
/* Dataplane pthread */
@@ -363,8 +378,15 @@ static enum zebra_dplane_result intf_addr_update_internal(
enum dplane_op_e op);
static enum zebra_dplane_result mac_update_internal(
enum dplane_op_e op, const struct interface *ifp,
+ const struct interface *br_ifp,
vlanid_t vid, const struct ethaddr *mac,
struct in_addr vtep_ip, bool sticky);
+static enum zebra_dplane_result neigh_update_internal(
+ enum dplane_op_e op,
+ const struct interface *ifp,
+ const struct ethaddr *mac,
+ const struct ipaddr *ip,
+ uint32_t flags, uint16_t state);
/*
* Public APIs
@@ -485,6 +507,11 @@ static void dplane_ctx_free(struct zebra_dplane_ctx **pctx)
case DPLANE_OP_MAC_INSTALL:
case DPLANE_OP_MAC_DELETE:
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
case DPLANE_OP_NONE:
break;
}
@@ -651,6 +678,22 @@ const char *dplane_op2str(enum dplane_op_e op)
case DPLANE_OP_MAC_DELETE:
ret = "MAC_DELETE";
break;
+
+ case DPLANE_OP_NEIGH_INSTALL:
+ ret = "NEIGH_INSTALL";
+ break;
+ case DPLANE_OP_NEIGH_UPDATE:
+ ret = "NEIGH_UPDATE";
+ break;
+ case DPLANE_OP_NEIGH_DELETE:
+ ret = "NEIGH_DELETE";
+ break;
+ case DPLANE_OP_VTEP_ADD:
+ ret = "VTEP_ADD";
+ break;
+ case DPLANE_OP_VTEP_DELETE:
+ ret = "VTEP_DELETE";
+ break;
}
return ret;
@@ -1231,6 +1274,39 @@ const struct in_addr *dplane_ctx_mac_get_vtep_ip(
return &(ctx->u.macinfo.vtep_ip);
}
+ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->u.macinfo.br_ifindex;
+}
+
+/* Accessors for neighbor information */
+const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return &(ctx->u.neigh.ip_addr);
+}
+
+const struct ethaddr *dplane_ctx_neigh_get_mac(
+ const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return &(ctx->u.neigh.mac);
+}
+
+uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->u.neigh.flags;
+}
+
+uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+ return ctx->u.neigh.state;
+}
+
/*
* End of dplane context accessors
*/
@@ -2066,6 +2142,7 @@ static enum zebra_dplane_result intf_addr_update_internal(
* Enqueue vxlan/evpn mac add (or update).
*/
enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
+ const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip,
@@ -2074,8 +2151,8 @@ enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
enum zebra_dplane_result result;
/* Use common helper api */
- result = mac_update_internal(DPLANE_OP_MAC_INSTALL, ifp, vid,
- mac, vtep_ip, sticky);
+ result = mac_update_internal(DPLANE_OP_MAC_INSTALL, ifp, bridge_ifp,
+ vid, mac, vtep_ip, sticky);
return result;
}
@@ -2083,6 +2160,7 @@ enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
* Enqueue vxlan/evpn mac delete.
*/
enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
+ const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip)
@@ -2090,8 +2168,8 @@ enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
enum zebra_dplane_result result;
/* Use common helper api */
- result = mac_update_internal(DPLANE_OP_MAC_DELETE, ifp, vid, mac,
- vtep_ip, false);
+ result = mac_update_internal(DPLANE_OP_MAC_DELETE, ifp, bridge_ifp,
+ vid, mac, vtep_ip, false);
return result;
}
@@ -2101,6 +2179,7 @@ enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
static enum zebra_dplane_result
mac_update_internal(enum dplane_op_e op,
const struct interface *ifp,
+ const struct interface *br_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip,
@@ -2136,6 +2215,7 @@ mac_update_internal(enum dplane_op_e op,
/* Init the mac-specific data area */
memset(&ctx->u.macinfo, 0, sizeof(ctx->u.macinfo));
+ ctx->u.macinfo.br_ifindex = br_ifp->ifindex;
ctx->u.macinfo.vtep_ip = vtep_ip;
ctx->u.macinfo.mac = *mac;
ctx->u.macinfo.vid = vid;
@@ -2161,6 +2241,165 @@ mac_update_internal(enum dplane_op_e op,
}
/*
+ * Enqueue evpn neighbor add for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac,
+ uint32_t flags)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+
+ result = neigh_update_internal(DPLANE_OP_NEIGH_INSTALL,
+ ifp, mac, ip, flags, 0);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn neighbor update for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac)
+{
+ enum zebra_dplane_result result;
+
+ result = neigh_update_internal(DPLANE_OP_NEIGH_UPDATE,
+ ifp, mac, ip, 0, DPLANE_NUD_PROBE);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn neighbor delete for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
+ const struct ipaddr *ip)
+{
+ enum zebra_dplane_result result;
+
+ result = neigh_update_internal(DPLANE_OP_NEIGH_DELETE,
+ ifp, NULL, ip, 0, 0);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn VTEP add for the dataplane.
+ */
+enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni)
+{
+ enum zebra_dplane_result result;
+ struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
+ struct ipaddr addr;
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("Install %s into flood list for VNI %u intf %s(%u)",
+ inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
+
+ SET_IPADDR_V4(&addr);
+ addr.ipaddr_v4 = *ip;
+
+ result = neigh_update_internal(DPLANE_OP_VTEP_ADD,
+ ifp, &mac, &addr, 0, 0);
+
+ return result;
+}
+
+/*
+ * Enqueue evpn VTEP delete for the dataplane.
+ */
+enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni)
+{
+ enum zebra_dplane_result result;
+ struct ethaddr mac = { {0, 0, 0, 0, 0, 0} };
+ struct ipaddr addr;
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "Uninstall %s from flood list for VNI %u intf %s(%u)",
+ inet_ntoa(*ip), vni, ifp->name, ifp->ifindex);
+
+ SET_IPADDR_V4(&addr);
+ addr.ipaddr_v4 = *ip;
+
+ result = neigh_update_internal(DPLANE_OP_VTEP_DELETE,
+ ifp, &mac, &addr, 0, 0);
+
+ return result;
+}
+
+/*
+ * Common helper api for evpn neighbor updates
+ */
+static enum zebra_dplane_result
+neigh_update_internal(enum dplane_op_e op,
+ const struct interface *ifp,
+ const struct ethaddr *mac,
+ const struct ipaddr *ip,
+ uint32_t flags, uint16_t state)
+{
+ enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+ int ret;
+ struct zebra_dplane_ctx *ctx = NULL;
+ struct zebra_ns *zns;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+ char buf1[ETHER_ADDR_STRLEN], buf2[PREFIX_STRLEN];
+
+ zlog_debug("init neigh ctx %s: ifp %s, mac %s, ip %s",
+ dplane_op2str(op),
+ prefix_mac2str(mac, buf1, sizeof(buf1)),
+ ifp->name,
+ ipaddr2str(ip, buf2, sizeof(buf2)));
+ }
+
+ ctx = dplane_ctx_alloc();
+
+ ctx->zd_op = op;
+ ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+ ctx->zd_vrf_id = ifp->vrf_id;
+
+ zns = zebra_ns_lookup(ifp->vrf_id);
+ dplane_ctx_ns_init(ctx, zns, false);
+
+ strlcpy(ctx->zd_ifname, ifp->name, sizeof(ctx->zd_ifname));
+ ctx->zd_ifindex = ifp->ifindex;
+
+ /* Init the neighbor-specific data area */
+ memset(&ctx->u.neigh, 0, sizeof(ctx->u.neigh));
+
+ ctx->u.neigh.ip_addr = *ip;
+ if (mac)
+ ctx->u.neigh.mac = *mac;
+ ctx->u.neigh.flags = flags;
+ ctx->u.neigh.state = state;
+
+ /* Enqueue for processing on the dplane pthread */
+ ret = dplane_update_enqueue(ctx);
+
+ /* Increment counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_neighs_in, 1,
+ memory_order_relaxed);
+
+ if (ret == AOK)
+ result = ZEBRA_DPLANE_REQUEST_QUEUED;
+ else {
+ /* Error counter */
+ atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors, 1,
+ memory_order_relaxed);
+ dplane_ctx_free(&ctx);
+ }
+
+ return result;
+}
+
+/*
* Handler for 'show dplane'
*/
int dplane_show_helper(struct vty *vty, bool detailed)
@@ -2223,6 +2462,13 @@ int dplane_show_helper(struct vty *vty, bool detailed)
vty_out(vty, "EVPN MAC updates: %"PRIu64"\n", incoming);
vty_out(vty, "EVPN MAC errors: %"PRIu64"\n", errs);
+ incoming = atomic_load_explicit(&zdplane_info.dg_neighs_in,
+ memory_order_relaxed);
+ errs = atomic_load_explicit(&zdplane_info.dg_neigh_errors,
+ memory_order_relaxed);
+ vty_out(vty, "EVPN neigh updates: %"PRIu64"\n", incoming);
+ vty_out(vty, "EVPN neigh errors: %"PRIu64"\n", errs);
+
return CMD_SUCCESS;
}
@@ -2616,7 +2862,7 @@ kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
}
/*
- * Handler for kernel-facing MAC address updates
+ * Handler for kernel-facing EVPN MAC address updates
*/
static enum zebra_dplane_result
kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
@@ -2644,6 +2890,34 @@ kernel_dplane_mac_update(struct zebra_dplane_ctx *ctx)
}
/*
+ * Handler for kernel-facing EVPN neighbor updates
+ */
+static enum zebra_dplane_result
+kernel_dplane_neigh_update(struct zebra_dplane_ctx *ctx)
+{
+ enum zebra_dplane_result res;
+
+ if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+ char buf[PREFIX_STRLEN];
+
+ ipaddr2str(dplane_ctx_neigh_get_ipaddr(ctx), buf,
+ sizeof(buf));
+
+ zlog_debug("Dplane %s, ip %s, ifindex %u",
+ dplane_op2str(dplane_ctx_get_op(ctx)),
+ buf, dplane_ctx_get_ifindex(ctx));
+ }
+
+ res = kernel_neigh_update_ctx(ctx);
+
+ if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+ atomic_fetch_add_explicit(&zdplane_info.dg_neigh_errors,
+ 1, memory_order_relaxed);
+
+ return res;
+}
+
+/*
* Kernel provider callback
*/
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
@@ -2702,6 +2976,14 @@ static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
res = kernel_dplane_mac_update(ctx);
break;
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
+ res = kernel_dplane_neigh_update(ctx);
+ break;
+
/* Ignore 'notifications' - no-op */
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h
index 912fda45d3..30dfdafdf5 100644
--- a/zebra/zebra_dplane.h
+++ b/zebra/zebra_dplane.h
@@ -129,8 +129,33 @@ enum dplane_op_e {
/* MAC address update */
DPLANE_OP_MAC_INSTALL,
DPLANE_OP_MAC_DELETE,
+
+ /* EVPN neighbor updates */
+ DPLANE_OP_NEIGH_INSTALL,
+ DPLANE_OP_NEIGH_UPDATE,
+ DPLANE_OP_NEIGH_DELETE,
+
+ /* EVPN VTEP updates */
+ DPLANE_OP_VTEP_ADD,
+ DPLANE_OP_VTEP_DELETE,
};
+/*
+ * The vxlan/evpn neighbor management code needs some values to use
+ * when programming neighbor changes. Offer some platform-neutral values
+ * here for use within the dplane apis and plugins.
+ */
+
+/* Neighbor cache flags */
+#define DPLANE_NTF_EXT_LEARNED 0x01
+#define DPLANE_NTF_ROUTER 0x02
+
+/* Neighbor cache states */
+#define DPLANE_NUD_REACHABLE 0x01
+#define DPLANE_NUD_STALE 0x02
+#define DPLANE_NUD_NOARP 0x04
+#define DPLANE_NUD_PROBE 0x08
+
/* Enable system route notifications */
void dplane_enable_sys_route_notifs(void);
@@ -303,6 +328,15 @@ const struct ethaddr *dplane_ctx_mac_get_addr(
const struct zebra_dplane_ctx *ctx);
const struct in_addr *dplane_ctx_mac_get_vtep_ip(
const struct zebra_dplane_ctx *ctx);
+ifindex_t dplane_ctx_mac_get_br_ifindex(const struct zebra_dplane_ctx *ctx);
+
+/* Accessors for neighbor information */
+const struct ipaddr *dplane_ctx_neigh_get_ipaddr(
+ const struct zebra_dplane_ctx *ctx);
+const struct ethaddr *dplane_ctx_neigh_get_mac(
+ const struct zebra_dplane_ctx *ctx);
+uint32_t dplane_ctx_neigh_get_flags(const struct zebra_dplane_ctx *ctx);
+uint16_t dplane_ctx_neigh_get_state(const struct zebra_dplane_ctx *ctx);
/* Namespace info - esp. for netlink communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
@@ -369,16 +403,42 @@ enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
* Enqueue evpn mac operations for the dataplane.
*/
enum zebra_dplane_result dplane_mac_add(const struct interface *ifp,
+ const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip,
bool sticky);
enum zebra_dplane_result dplane_mac_del(const struct interface *ifp,
+ const struct interface *bridge_ifp,
vlanid_t vid,
const struct ethaddr *mac,
struct in_addr vtep_ip);
+/*
+ * Enqueue evpn neighbor updates for the dataplane.
+ */
+enum zebra_dplane_result dplane_neigh_add(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac,
+ uint32_t flags);
+enum zebra_dplane_result dplane_neigh_update(const struct interface *ifp,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac);
+enum zebra_dplane_result dplane_neigh_delete(const struct interface *ifp,
+ const struct ipaddr *ip);
+
+/*
+ * Enqueue evpn VTEP updates for the dataplane.
+ */
+enum zebra_dplane_result dplane_vtep_add(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni);
+enum zebra_dplane_result dplane_vtep_delete(const struct interface *ifp,
+ const struct in_addr *ip,
+ vni_t vni);
+
+
/* Retrieve the limit on the number of pending, unprocessed updates. */
uint32_t dplane_get_in_queue_limit(void);
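
These declarations give zebra's EVPN code an asynchronous path into the dataplane pthread: instead of calling the removed kernel_add_neigh()/kernel_add_vtep() helpers directly, it enqueues a context and lets kernel_neigh_update_ctx() do the netlink work later. A minimal caller sketch using only the APIs and flags declared above (the wrapper function and its error handling are illustrative):

	static void example_enqueue_neigh(const struct interface *ifp,
					  const struct ipaddr *ip,
					  const struct ethaddr *mac)
	{
		enum zebra_dplane_result res;

		/* enqueue an install; programming happens on the dplane pthread */
		res = dplane_neigh_add(ifp, ip, mac, DPLANE_NTF_EXT_LEARNED);

		if (res == ZEBRA_DPLANE_REQUEST_FAILURE)
			zlog_debug("failed to enqueue neigh install on %s",
				   ifp->name);
	}
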
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index eaf43095bc..4144c0afe0 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -1927,6 +1927,9 @@ static inline void zfpm_init_message_format(const char *format)
"FPM protobuf message format is not available");
return;
}
+ flog_warn(EC_ZEBRA_PROTOBUF_NOT_AVAILABLE,
+ "FPM protobuf message format is deprecated and scheduled to be removed. "
+ "Please convert to using netlink format or contact dev@lists.frrouting.org with your use case.");
zfpm_g->message_format = ZFPM_MSG_FORMAT_PROTOBUF;
return;
}
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 5214f1f22d..3c4497ebd2 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -34,6 +34,7 @@
#include "routemap.h"
#include "stream.h"
#include "nexthop.h"
+#include "termtable.h"
#include "lib/json.h"
#include "zebra/rib.h"
@@ -124,6 +125,9 @@ static zebra_snhlfe_t *snhlfe_add(zebra_slsp_t *slsp,
static int snhlfe_del(zebra_snhlfe_t *snhlfe);
static int snhlfe_del_all(zebra_slsp_t *slsp);
static char *snhlfe2str(zebra_snhlfe_t *snhlfe, char *buf, int size);
+static void mpls_lsp_uninstall_all_type(struct hash_bucket *bucket, void *ctxt);
+static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
+ int afi, enum lsp_types_t lsp_type);
/* Static functions */
@@ -1111,6 +1115,7 @@ static char *nhlfe2str(zebra_nhlfe_t *nhlfe, char *buf, int size)
inet_ntop(AF_INET, &nexthop->gate.ipv4, buf, size);
break;
case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf, size);
break;
case NEXTHOP_TYPE_IFINDEX:
@@ -2294,6 +2299,40 @@ static int zebra_mpls_cleanup_fecs_for_client(struct zserv *client)
return 0;
}
+struct lsp_uninstall_args {
+ struct hash *lsp_table;
+ enum lsp_types_t type;
+};
+
+/*
+ * Cleanup MPLS labels registered by this client.
+ */
+static int zebra_mpls_cleanup_zclient_labels(struct zserv *client)
+{
+ struct vrf *vrf;
+ struct zebra_vrf *zvrf;
+
+ RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) {
+ struct lsp_uninstall_args args;
+
+ zvrf = vrf->info;
+ if (!zvrf)
+ continue;
+
+ /* Cleanup LSPs. */
+ args.lsp_table = zvrf->lsp_table;
+ args.type = lsp_type_from_re_type(client->proto);
+ hash_iterate(zvrf->lsp_table, mpls_lsp_uninstall_all_type,
+ &args);
+
+ /* Cleanup FTNs. */
+ mpls_ftn_uninstall_all(zvrf, AFI_IP, client->proto);
+ mpls_ftn_uninstall_all(zvrf, AFI_IP6, client->proto);
+ }
+
+ return 0;
+}
+
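The cleanup above drives a per-bucket callback through hash_iterate(), passing both the LSP table and the LSP type in a small context struct; a condensed sketch of the callback side of that pattern, modelled on mpls_lsp_uninstall_all_type() below (the bucket->data recovery shown here is an assumption, as that line falls outside the visible hunks):

	/* Illustrative only: hash_iterate() hands each bucket plus the ctxt
	 * pointer to the callback, which recovers the context struct and acts
	 * on the bucket's LSP. */
	static void lsp_walk_cb(struct hash_bucket *bucket, void *ctxt)
	{
		struct lsp_uninstall_args *args = ctxt;
		zebra_lsp_t *lsp = bucket->data;

		if (lsp->nhlfe_list)
			mpls_lsp_uninstall_all(args->lsp_table, lsp, args->type);
	}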
/*
* Return FEC (if any) to which this label is bound.
* Note: Only works for per-prefix binding and when the label is not
@@ -2542,8 +2581,8 @@ static bool mpls_ftn_update_nexthop(int add, struct nexthop *nexthop,
*/
int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
struct prefix *prefix, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex, uint8_t distance,
- mpls_label_t out_label)
+ union g_addr *gate, ifindex_t ifindex, uint8_t route_type,
+ unsigned short route_instance, mpls_label_t out_label)
{
struct route_table *table;
struct route_node *rn;
@@ -2562,7 +2601,7 @@ int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
RNODE_FOREACH_RE (rn, re) {
if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
continue;
- if (re->distance == distance)
+ if (re->type == route_type && re->instance == route_instance)
break;
}
@@ -2617,6 +2656,42 @@ int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
return 0;
}
+int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ unsigned short route_instance)
+{
+ struct route_table *table;
+ struct route_node *rn;
+ struct route_entry *re;
+ struct nexthop *nexthop;
+
+ /* Lookup table. */
+ table = zebra_vrf_table(family2afi(prefix->family), SAFI_UNICAST,
+ zvrf_id(zvrf));
+ if (!table)
+ return -1;
+
+ /* Lookup existing route */
+ rn = route_node_get(table, prefix);
+ RNODE_FOREACH_RE (rn, re) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_REMOVED))
+ continue;
+ if (re->type == route_type && re->instance == route_instance)
+ break;
+ }
+ if (re == NULL)
+ return -1;
+
+ for (nexthop = re->ng.nexthop; nexthop; nexthop = nexthop->next)
+ nexthop_del_labels(nexthop);
+
+ SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
+ SET_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED);
+ rib_queue_add(rn);
+
+ return 0;
+}
+
/*
* Install/update a NHLFE for an LSP in the forwarding table. This may be
* a new LSP entry or a new NHLFE for an existing in-label or an update of
@@ -2749,12 +2824,34 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
return 0;
}
+int mpls_lsp_uninstall_all_vrf(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ mpls_label_t in_label)
+{
+ struct hash *lsp_table;
+ zebra_ile_t tmp_ile;
+ zebra_lsp_t *lsp;
+
+ /* Lookup table. */
+ lsp_table = zvrf->lsp_table;
+ if (!lsp_table)
+ return -1;
+
+ /* If entry is not present, exit. */
+ tmp_ile.in_label = in_label;
+ lsp = hash_lookup(lsp_table, &tmp_ile);
+ if (!lsp)
+ return 0;
+
+ return mpls_lsp_uninstall_all(lsp_table, lsp, type);
+}
+
/*
- * Uninstall all LDP NHLFEs for a particular LSP forwarding entry.
+ * Uninstall all NHLFEs for a particular LSP forwarding entry.
* If no other NHLFEs exist, the entry would be deleted.
*/
-void mpls_ldp_lsp_uninstall_all(struct hash_bucket *bucket, void *ctxt)
+static void mpls_lsp_uninstall_all_type(struct hash_bucket *bucket, void *ctxt)
{
+ struct lsp_uninstall_args *args = ctxt;
zebra_lsp_t *lsp;
struct hash *lsp_table;
@@ -2762,17 +2859,19 @@ void mpls_ldp_lsp_uninstall_all(struct hash_bucket *bucket, void *ctxt)
if (!lsp->nhlfe_list)
return;
- lsp_table = ctxt;
+ lsp_table = args->lsp_table;
if (!lsp_table)
return;
- mpls_lsp_uninstall_all(lsp_table, lsp, ZEBRA_LSP_LDP);
+ mpls_lsp_uninstall_all(lsp_table, lsp, args->type);
}
/*
- * Uninstall all LDP FEC-To-NHLFE (FTN) bindings of the given address-family.
+ * Uninstall all FEC-To-NHLFE (FTN) bindings of the given address-family and
+ * LSP type.
*/
-void mpls_ldp_ftn_uninstall_all(struct zebra_vrf *zvrf, int afi)
+static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
+ int afi, enum lsp_types_t lsp_type)
{
struct route_table *table;
struct route_node *rn;
@@ -2790,7 +2889,7 @@ void mpls_ldp_ftn_uninstall_all(struct zebra_vrf *zvrf, int afi)
RNODE_FOREACH_RE (rn, re) {
for (nexthop = re->ng.nexthop; nexthop;
nexthop = nexthop->next) {
- if (nexthop->nh_label_type != ZEBRA_LSP_LDP)
+ if (nexthop->nh_label_type != lsp_type)
continue;
nexthop_del_labels(nexthop);
@@ -3047,7 +3146,6 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf,
json_object *json = NULL;
zebra_lsp_t *lsp = NULL;
zebra_nhlfe_t *nhlfe = NULL;
- struct nexthop *nexthop = NULL;
struct listnode *node = NULL;
struct list *lsp_list = hash_get_sorted_list(zvrf->lsp_table, lsp_cmp);
@@ -3063,15 +3161,23 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf,
json, JSON_C_TO_STRING_PRETTY));
json_object_free(json);
} else {
- vty_out(vty, " Inbound Outbound\n");
- vty_out(vty, " Label Type Nexthop Label\n");
- vty_out(vty, "-------- ------- --------------- --------\n");
+ struct ttable *tt;
+
+ /* Prepare table. */
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "Inbound Label|Type|Nexthop|Outbound Label");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = '+';
+ ttable_restyle(tt);
+ ttable_rowseps(tt, 0, BOTTOM, true, '-');
for (ALL_LIST_ELEMENTS_RO(lsp_list, node, lsp)) {
for (nhlfe = lsp->nhlfe_list; nhlfe;
nhlfe = nhlfe->next) {
- vty_out(vty, "%8d %7s ", lsp->ile.in_label,
- nhlfe_type2str(nhlfe->type));
+ struct nexthop *nexthop;
+ const char *out_label_str;
+ char nh_buf[NEXTHOP_STRLEN];
+
nexthop = nhlfe->nexthop;
switch (nexthop->type) {
@@ -3081,45 +3187,47 @@ void zebra_mpls_print_lsp_table(struct vty *vty, struct zebra_vrf *zvrf,
zns = zebra_ns_lookup(NS_DEFAULT);
ifp = if_lookup_by_index_per_ns(
- zns,
- nexthop->ifindex);
- if (ifp)
- vty_out(vty, "%15s", ifp->name);
- else
- vty_out(vty, "%15s", "Null");
-
+ zns, nexthop->ifindex);
+ snprintf(nh_buf, sizeof(nh_buf), "%s",
+ ifp ? ifp->name : "Null");
break;
}
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
- vty_out(vty, "%15s",
- inet_ntoa(nexthop->gate.ipv4));
+ inet_ntop(AF_INET, &nexthop->gate.ipv4,
+ nh_buf, sizeof(nh_buf));
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
- vty_out(vty, "%15s",
- inet_ntop(AF_INET6,
- &nexthop->gate.ipv6,
- buf, BUFSIZ));
+ inet_ntop(AF_INET6, &nexthop->gate.ipv6,
+ nh_buf, sizeof(nh_buf));
break;
default:
break;
}
if (nexthop->type != NEXTHOP_TYPE_IFINDEX)
- vty_out(vty, " %8s\n",
- mpls_label2str(
- nexthop->nh_label
- ->num_labels,
- &nexthop->nh_label
- ->label[0],
- buf, BUFSIZ, 1));
+ out_label_str = mpls_label2str(
+ nexthop->nh_label->num_labels,
+ &nexthop->nh_label->label[0],
+ buf, BUFSIZ, 1);
else
- vty_out(vty, "\n");
+ out_label_str = "-";
+
+ ttable_add_row(tt, "%u|%s|%s|%s",
+ lsp->ile.in_label,
+ nhlfe_type2str(nhlfe->type),
+ nh_buf, out_label_str);
}
}
- vty_out(vty, "\n");
+ /* Dump the generated table. */
+ if (tt->nrows > 1) {
+ char *table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ }
+ ttable_del(tt);
}
list_delete(&lsp_list);
@@ -3289,4 +3397,5 @@ void zebra_mpls_init(void)
mpls_enabled = 1;
hook_register(zserv_client_close, zebra_mpls_cleanup_fecs_for_client);
+ hook_register(zserv_client_close, zebra_mpls_cleanup_zclient_labels);
}
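The tabular output above replaces hand-aligned vty_out() columns with the termtable helper; a condensed sketch of the pattern, using the same calls as the hunk above with placeholder row values, could look like this:

	struct ttable *tt;
	char *out;

	/* Header row uses '|' as the column separator. */
	tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
	ttable_add_row(tt, "Inbound Label|Type|Nexthop|Outbound Label");
	tt->style.cell.rpad = 2;
	tt->style.corner = '+';
	ttable_restyle(tt);
	ttable_rowseps(tt, 0, BOTTOM, true, '-');

	/* One data row per NHLFE; the values here are placeholders. */
	ttable_add_row(tt, "%u|%s|%s|%s", 16, "LDP", "10.0.0.1", "implicit-null");

	/* Render only if at least one data row was added, then free. */
	if (tt->nrows > 1) {
		out = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", out);
		XFREE(MTYPE_TMP, out);
	}
	ttable_del(tt);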
diff --git a/zebra/zebra_mpls.h b/zebra/zebra_mpls.h
index d983221cb5..157f43ca98 100644
--- a/zebra/zebra_mpls.h
+++ b/zebra/zebra_mpls.h
@@ -269,8 +269,15 @@ void zebra_mpls_print_fec(struct vty *vty, struct zebra_vrf *zvrf,
*/
int mpls_ftn_update(int add, struct zebra_vrf *zvrf, enum lsp_types_t type,
struct prefix *prefix, enum nexthop_types_t gtype,
- union g_addr *gate, ifindex_t ifindex, uint8_t distance,
- mpls_label_t out_label);
+ union g_addr *gate, ifindex_t ifindex, uint8_t route_type,
+ unsigned short route_instance, mpls_label_t out_label);
+
+/*
+ * Uninstall all NHLFEs bound to a single FEC.
+ */
+int mpls_ftn_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ struct prefix *prefix, uint8_t route_type,
+ unsigned short route_instance);
/*
* Install/update a NHLFE for an LSP in the forwarding table. This may be
@@ -291,15 +298,10 @@ int mpls_lsp_uninstall(struct zebra_vrf *zvrf, enum lsp_types_t type,
union g_addr *gate, ifindex_t ifindex);
/*
- * Uninstall all LDP NHLFEs for a particular LSP forwarding entry.
- * If no other NHLFEs exist, the entry would be deleted.
- */
-void mpls_ldp_lsp_uninstall_all(struct hash_bucket *bucket, void *ctxt);
-
-/*
- * Uninstall all LDP FEC-To-NHLFE (FTN) bindings of the given address-family.
+ * Uninstall all NHLFEs for a particular LSP forwarding entry.
*/
-void mpls_ldp_ftn_uninstall_all(struct zebra_vrf *zvrf, int afi);
+int mpls_lsp_uninstall_all_vrf(struct zebra_vrf *zvrf, enum lsp_types_t type,
+ mpls_label_t in_label);
/*
* Uninstall all Segment Routing NHLFEs for a particular LSP forwarding entry.
@@ -426,7 +428,7 @@ static inline uint8_t lsp_distance(enum lsp_types_t type)
return (route_distance(ZEBRA_ROUTE_BGP));
case ZEBRA_LSP_NONE:
case ZEBRA_LSP_SHARP:
- case ZEBRA_LSP_SR:
+ case ZEBRA_LSP_OSPF_SR:
return 150;
}
@@ -448,8 +450,12 @@ static inline enum lsp_types_t lsp_type_from_re_type(int re_type)
switch (re_type) {
case ZEBRA_ROUTE_STATIC:
return ZEBRA_LSP_STATIC;
+ case ZEBRA_ROUTE_LDP:
+ return ZEBRA_LSP_LDP;
case ZEBRA_ROUTE_BGP:
return ZEBRA_LSP_BGP;
+ case ZEBRA_ROUTE_OSPF:
+ return ZEBRA_LSP_OSPF_SR;
case ZEBRA_ROUTE_SHARP:
return ZEBRA_LSP_SHARP;
default:
@@ -469,7 +475,7 @@ static inline int re_type_from_lsp_type(enum lsp_types_t lsp_type)
return ZEBRA_ROUTE_LDP;
case ZEBRA_LSP_BGP:
return ZEBRA_ROUTE_BGP;
- case ZEBRA_LSP_SR:
+ case ZEBRA_LSP_OSPF_SR:
return ZEBRA_ROUTE_OSPF;
case ZEBRA_LSP_NONE:
return ZEBRA_ROUTE_KERNEL;
@@ -496,8 +502,8 @@ static inline const char *nhlfe_type2str(enum lsp_types_t lsp_type)
return "LDP";
case ZEBRA_LSP_BGP:
return "BGP";
- case ZEBRA_LSP_SR:
- return "SR";
+ case ZEBRA_LSP_OSPF_SR:
+ return "SR (OSPF)";
case ZEBRA_LSP_SHARP:
return "SHARP";
case ZEBRA_LSP_NONE:
diff --git a/zebra/zebra_mpls_openbsd.c b/zebra/zebra_mpls_openbsd.c
index 9f3ea70c77..fcd476dc2c 100644
--- a/zebra/zebra_mpls_openbsd.c
+++ b/zebra/zebra_mpls_openbsd.c
@@ -119,7 +119,7 @@ static int kernel_send_rtmsg_v4(int action, mpls_label_t in_label,
hdr.rtm_mpls = MPLS_OP_SWAP;
}
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = writev(kr_state.fd, iov, iovcnt);
}
@@ -226,7 +226,7 @@ static int kernel_send_rtmsg_v6(int action, mpls_label_t in_label,
hdr.rtm_mpls = MPLS_OP_SWAP;
}
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = writev(kr_state.fd, iov, iovcnt);
}
diff --git a/zebra/zebra_netns_notify.c b/zebra/zebra_netns_notify.c
index 476638591b..d42cf3d60a 100644
--- a/zebra/zebra_netns_notify.c
+++ b/zebra/zebra_netns_notify.c
@@ -77,7 +77,7 @@ static void zebra_ns_notify_create_context_from_entry_name(const char *name)
if (netnspath == NULL)
return;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ns_id = zebra_ns_id_get(netnspath);
}
if (ns_id == NS_UNKNOWN)
@@ -97,7 +97,7 @@ static void zebra_ns_notify_create_context_from_entry_name(const char *name)
ns_map_nsid_with_external(ns_id, false);
return;
}
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ret = vrf_netns_handler_create(NULL, vrf, netnspath,
ns_id_external, ns_id);
}
@@ -202,14 +202,14 @@ static int zebra_ns_ready_read(struct thread *t)
netnspath = zns_info->netnspath;
if (--zns_info->retries == 0)
stop_retry = 1;
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
err = ns_switch_to_netns(netnspath);
}
if (err < 0)
return zebra_ns_continue_read(zns_info, stop_retry);
/* go back to default ns */
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
err = ns_switchback_to_initial();
}
if (err < 0)
diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c
index f4b86f3cfe..35df02a19a 100644
--- a/zebra/zebra_nhg.c
+++ b/zebra/zebra_nhg.c
@@ -25,6 +25,7 @@
#include "lib/nexthop.h"
#include "lib/nexthop_group_private.h"
#include "lib/routemap.h"
+#include "lib/mpls.h"
#include "zebra/connected.h"
#include "zebra/debug.h"
@@ -38,6 +39,10 @@ static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
struct nexthop *nexthop)
{
struct nexthop *resolved_hop;
+ uint8_t num_labels = 0;
+ mpls_label_t labels[MPLS_MAX_LABELS];
+ enum lsp_types_t label_type = ZEBRA_LSP_NONE;
+ int i = 0;
resolved_hop = nexthop_new();
SET_FLAG(resolved_hop->flags, NEXTHOP_FLAG_ACTIVE);
@@ -94,16 +99,56 @@ static void nexthop_set_resolved(afi_t afi, const struct nexthop *newhop,
if (newhop->flags & NEXTHOP_FLAG_ONLINK)
resolved_hop->flags |= NEXTHOP_FLAG_ONLINK;
- /* Copy labels of the resolved route */
- if (newhop->nh_label)
- nexthop_add_labels(resolved_hop, newhop->nh_label_type,
- newhop->nh_label->num_labels,
- &newhop->nh_label->label[0]);
+ /* Copy labels of the resolved route and the parent resolving to it */
+ if (newhop->nh_label) {
+ for (i = 0; i < newhop->nh_label->num_labels; i++)
+ labels[num_labels++] = newhop->nh_label->label[i];
+ label_type = newhop->nh_label_type;
+ }
+
+ if (nexthop->nh_label) {
+ for (i = 0; i < nexthop->nh_label->num_labels; i++)
+ labels[num_labels++] = nexthop->nh_label->label[i];
+
+ /* If the parent has labels, use its type */
+ label_type = nexthop->nh_label_type;
+ }
+
+ if (num_labels)
+ nexthop_add_labels(resolved_hop, label_type, num_labels,
+ labels);
resolved_hop->rparent = nexthop;
_nexthop_add(&nexthop->resolved, resolved_hop);
}
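When a recursive nexthop resolves, the code above now builds the label array from the resolved route's labels first and then appends the parent nexthop's own labels; a small illustrative example of the resulting order (the label values and type are assumptions, not taken from the patch):

	/* Example: a BGP nexthop carrying a VPN label (100) resolves over an
	 * LDP route whose nexthop carries a transport label (200). */
	mpls_label_t labels[MPLS_MAX_LABELS];
	uint8_t num_labels = 0;
	enum lsp_types_t label_type = ZEBRA_LSP_BGP;

	labels[num_labels++] = 200;	/* resolving route's label is copied first */
	labels[num_labels++] = 100;	/* parent (recursive) nexthop's label follows */

	/* resolved_hop ends up with the array { 200, 100 } and, because the
	 * parent had labels of its own, with the parent's label type. */
	nexthop_add_labels(resolved_hop, label_type, num_labels, labels);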
+/* Checks if nexthop we are trying to resolve to is valid */
+static bool nexthop_valid_resolve(const struct nexthop *nexthop,
+ const struct nexthop *resolved)
+{
+ /* Can't resolve to a recursive nexthop */
+ if (CHECK_FLAG(resolved->flags, NEXTHOP_FLAG_RECURSIVE))
+ return false;
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ /* If the nexthop we are resolving to does not match the
+ * ifindex for the nexthop the route wanted, it's not valid.
+ */
+ if (nexthop->ifindex != resolved->ifindex)
+ return false;
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IFINDEX:
+ case NEXTHOP_TYPE_BLACKHOLE:
+ break;
+ }
+
+ return true;
+}
+
/*
* Given a nexthop we need to properly recursively resolve
* the route. As such, do a table lookup to find and match
@@ -120,6 +165,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
struct nexthop *newhop;
struct interface *ifp;
rib_dest_t *dest;
+ struct zebra_vrf *zvrf;
if ((nexthop->type == NEXTHOP_TYPE_IPV4)
|| nexthop->type == NEXTHOP_TYPE_IPV6)
@@ -194,7 +240,9 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
}
/* Lookup table. */
table = zebra_vrf_table(afi, SAFI_UNICAST, nexthop->vrf_id);
- if (!table) {
+ /* get zvrf */
+ zvrf = zebra_vrf_lookup_by_id(nexthop->vrf_id);
+ if (!table || !zvrf) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("\t%s: Table not found",
__PRETTY_FUNCTION__);
@@ -224,7 +272,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
/* However, do not resolve over default route unless explicitly
* allowed. */
if (is_default_prefix(&rn->p)
- && !rnh_resolve_via_default(p.family)) {
+ && !rnh_resolve_via_default(zvrf, p.family)) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
"\t:%s: Resolved against default route",
@@ -266,8 +314,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
if (!CHECK_FLAG(match->status,
ROUTE_ENTRY_INSTALLED))
continue;
- if (CHECK_FLAG(newhop->flags,
- NEXTHOP_FLAG_RECURSIVE))
+ if (!nexthop_valid_resolve(nexthop, newhop))
continue;
SET_FLAG(nexthop->flags,
@@ -287,8 +334,7 @@ static int nexthop_active(afi_t afi, struct route_entry *re,
if (!CHECK_FLAG(match->status,
ROUTE_ENTRY_INSTALLED))
continue;
- if (CHECK_FLAG(newhop->flags,
- NEXTHOP_FLAG_RECURSIVE))
+ if (!nexthop_valid_resolve(nexthop, newhop))
continue;
SET_FLAG(nexthop->flags,
diff --git a/zebra/zebra_ns.c b/zebra/zebra_ns.c
index 94918365a3..37f53bf911 100644
--- a/zebra/zebra_ns.c
+++ b/zebra/zebra_ns.c
@@ -180,7 +180,7 @@ int zebra_ns_init(const char *optional_default_name)
dzns = zebra_ns_alloc();
- frr_elevate_privs(&zserv_privs) {
+ frr_with_privs(&zserv_privs) {
ns_id = zebra_ns_id_get_default();
}
ns_id_external = ns_map_nsid_with_external(ns_id, true);
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 335cc8294c..d8fb9ae3cf 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -37,6 +37,7 @@
#include "vrf.h"
#include "workqueue.h"
#include "nexthop_group_private.h"
+#include "frr_pthread.h"
#include "zebra/zebra_router.h"
#include "zebra/connected.h"
@@ -2087,14 +2088,6 @@ static unsigned int process_subq(struct list *subq, uint8_t qindex)
return 1;
}
-
-/*
- * Perform next-hop tracking processing after RIB updates.
- */
-static void do_nht_processing(void)
-{
-}
-
/* Dispatch the meta queue by picking, processing and unlocking the next RN from
* a non-empty sub-queue with lowest priority. wq is equal to zebra->ribq and
* data
@@ -3204,12 +3197,10 @@ static int rib_process_dplane_results(struct thread *thread)
TAILQ_INIT(&ctxlist);
/* Take lock controlling queue of results */
- pthread_mutex_lock(&dplane_mutex);
- {
+ frr_with_mutex(&dplane_mutex) {
/* Dequeue list of context structs */
dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
}
- pthread_mutex_unlock(&dplane_mutex);
/* Dequeue context block */
ctx = dplane_ctx_dequeue(&ctxlist);
@@ -3275,10 +3266,19 @@ static int rib_process_dplane_results(struct thread *thread)
zebra_vxlan_handle_result(ctx);
break;
- default:
+ /* Some op codes not handled here */
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ case DPLANE_OP_NEIGH_INSTALL:
+ case DPLANE_OP_NEIGH_UPDATE:
+ case DPLANE_OP_NEIGH_DELETE:
+ case DPLANE_OP_VTEP_ADD:
+ case DPLANE_OP_VTEP_DELETE:
+ case DPLANE_OP_NONE:
/* Don't expect this: just return the struct? */
dplane_ctx_fini(&ctx);
break;
+
} /* Dispatch by op code */
ctx = dplane_ctx_dequeue(&ctxlist);
@@ -3286,9 +3286,6 @@ static int rib_process_dplane_results(struct thread *thread)
} while (1);
- /* Check for nexthop tracking processing after finishing with results */
- do_nht_processing();
-
return 0;
}
@@ -3300,12 +3297,10 @@ static int rib_process_dplane_results(struct thread *thread)
static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
{
/* Take lock controlling queue of results */
- pthread_mutex_lock(&dplane_mutex);
- {
+ frr_with_mutex(&dplane_mutex) {
/* Enqueue context blocks */
dplane_ctx_list_append(&rib_dplane_q, ctxlist);
}
- pthread_mutex_unlock(&dplane_mutex);
/* Ensure event is signalled to zebra main pthread */
thread_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
diff --git a/zebra/zebra_rnh.c b/zebra/zebra_rnh.c
index da2fe4a30c..bcaf1b5204 100644
--- a/zebra/zebra_rnh.c
+++ b/zebra/zebra_rnh.c
@@ -62,9 +62,6 @@ static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type,
static void print_rnh(struct route_node *rn, struct vty *vty);
static int zebra_client_cleanup_rnh(struct zserv *client);
-int zebra_rnh_ip_default_route = 0;
-int zebra_rnh_ipv6_default_route = 0;
-
void zebra_rnh_init(void)
{
hook_register(zserv_client_close, zebra_client_cleanup_rnh);
@@ -656,7 +653,7 @@ zebra_rnh_resolve_nexthop_entry(struct zebra_vrf *zvrf, afi_t afi,
* match route to be exact if so specified
*/
if (is_default_prefix(&rn->p)
- && !rnh_resolve_via_default(rn->p.family)) {
+ && !rnh_resolve_via_default(zvrf, rn->p.family)) {
if (IS_ZEBRA_DEBUG_NHT_DETAILED)
zlog_debug(
"\tNot allowed to resolve through default prefix");
@@ -1201,15 +1198,17 @@ static int zebra_client_cleanup_rnh(struct zserv *client)
RNH_IMPORT_CHECK_TYPE);
zebra_cleanup_rnh_client(zvrf_id(zvrf), AFI_IP6, client,
RNH_IMPORT_CHECK_TYPE);
- if (client->proto == ZEBRA_ROUTE_LDP) {
- hash_iterate(zvrf->lsp_table,
- mpls_ldp_lsp_uninstall_all,
- zvrf->lsp_table);
- mpls_ldp_ftn_uninstall_all(zvrf, AFI_IP);
- mpls_ldp_ftn_uninstall_all(zvrf, AFI_IP6);
- }
}
}
return 0;
}
+
+int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family)
+{
+ if (((family == AF_INET) && zvrf->zebra_rnh_ip_default_route)
+ || ((family == AF_INET6) && zvrf->zebra_rnh_ipv6_default_route))
+ return 1;
+ else
+ return 0;
+}
diff --git a/zebra/zebra_rnh.h b/zebra/zebra_rnh.h
index c7d2c0d298..6e2dab8d9f 100644
--- a/zebra/zebra_rnh.h
+++ b/zebra/zebra_rnh.h
@@ -29,20 +29,8 @@
extern "C" {
#endif
-extern int zebra_rnh_ip_default_route;
-extern int zebra_rnh_ipv6_default_route;
-
extern void zebra_rnh_init(void);
-static inline int rnh_resolve_via_default(int family)
-{
- if (((family == AF_INET) && zebra_rnh_ip_default_route)
- || ((family == AF_INET6) && zebra_rnh_ipv6_default_route))
- return 1;
- else
- return 0;
-}
-
static inline const char *rnh_type2str(rnh_type_t type)
{
switch (type) {
@@ -72,6 +60,8 @@ extern void zebra_print_rnh_table(vrf_id_t vrfid, afi_t afi, struct vty *vty,
rnh_type_t type, struct prefix *p);
extern char *rnh_str(struct rnh *rnh, char *buf, int size);
+extern int rnh_resolve_via_default(struct zebra_vrf *zvrf, int family);
+
#ifdef __cplusplus
}
#endif
diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c
index cee2c8980f..88d2091394 100644
--- a/zebra/zebra_routemap.c
+++ b/zebra/zebra_routemap.c
@@ -64,7 +64,7 @@ static int zebra_route_match_add(struct vty *vty, const char *command,
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
ret = route_map_add_match(index, command, arg, type);
@@ -82,6 +82,11 @@ static int zebra_route_match_add(struct vty *vty, const char *command,
route_map_upd8_dependency(type, arg, index->map->name);
}
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
return retval;
@@ -92,7 +97,7 @@ static int zebra_route_match_delete(struct vty *vty, const char *command,
const char *arg, route_map_event_t type)
{
VTY_DECLVAR_CONTEXT(route_map_index, index);
- int ret;
+ enum rmap_compile_rets ret;
int retval = CMD_SUCCESS;
char *dep_name = NULL;
const char *tmpstr;
@@ -125,6 +130,11 @@ static int zebra_route_match_delete(struct vty *vty, const char *command,
if (type != RMAP_EVENT_MATCH_DELETED && dep_name)
route_map_upd8_dependency(type, dep_name, rmap_name);
break;
+ case RMAP_DUPLICATE_RULE:
+ /*
+ * Nothing to do here
+ */
+ break;
}
XFREE(MTYPE_ROUTE_MAP_RULE, dep_name);
diff --git a/zebra/zebra_vrf.c b/zebra/zebra_vrf.c
index fcc94a7be9..72d0b6866d 100644
--- a/zebra/zebra_vrf.c
+++ b/zebra/zebra_vrf.c
@@ -488,6 +488,11 @@ static int vrf_config_write(struct vty *vty)
if (zvrf_id(zvrf) == VRF_DEFAULT) {
if (zvrf->l3vni)
vty_out(vty, "vni %u\n", zvrf->l3vni);
+ if (zvrf->zebra_rnh_ip_default_route)
+ vty_out(vty, "ip nht resolve-via-default\n");
+
+ if (zvrf->zebra_rnh_ipv6_default_route)
+ vty_out(vty, "ipv6 nht resolve-via-default\n");
} else {
vty_frame(vty, "vrf %s\n", zvrf_name(zvrf));
if (zvrf->l3vni)
@@ -497,8 +502,14 @@ static int vrf_config_write(struct vty *vty)
? " prefix-routes-only"
: "");
zebra_ns_config_write(vty, (struct ns *)vrf->ns_ctxt);
+ if (zvrf->zebra_rnh_ip_default_route)
+ vty_out(vty, " ip nht resolve-via-default\n");
+
+ if (zvrf->zebra_rnh_ipv6_default_route)
+ vty_out(vty, " ipv6 nht resolve-via-default\n");
}
+
zebra_routemap_config_write_protocol(vty, zvrf);
if (zvrf_id(zvrf) != VRF_DEFAULT)
diff --git a/zebra/zebra_vrf.h b/zebra/zebra_vrf.h
index f92e1a010b..6c80f9bcb4 100644
--- a/zebra/zebra_vrf.h
+++ b/zebra/zebra_vrf.h
@@ -174,6 +174,9 @@ struct zebra_vrf {
#if defined(HAVE_RTADV)
struct rtadv rtadv;
#endif /* HAVE_RTADV */
+
+ int zebra_rnh_ip_default_route;
+ int zebra_rnh_ipv6_default_route;
};
#define PROTO_RM_NAME(zvrf, afi, rtype) zvrf->proto_rm[afi][rtype].name
#define NHT_RM_NAME(zvrf, afi, rtype) zvrf->nht_rm[afi][rtype].name
diff --git a/zebra/zebra_vty.c b/zebra/zebra_vty.c
index 5c0dc27380..7984481093 100644
--- a/zebra/zebra_vty.c
+++ b/zebra/zebra_vty.c
@@ -455,6 +455,10 @@ static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
re->status);
json_object_int_add(json_route, "internalFlags",
re->flags);
+ json_object_int_add(json_route, "internalNextHopNum",
+ re->nexthop_num);
+ json_object_int_add(json_route, "internalNextHopActiveNum",
+ re->nexthop_active_num);
if (uptime < ONE_DAY_SECOND)
sprintf(buf, "%02d:%02d:%02d", tm->tm_hour, tm->tm_min,
tm->tm_sec);
@@ -1086,10 +1090,10 @@ DEFUN (ip_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (zebra_rnh_ip_default_route)
+ if (zvrf->zebra_rnh_ip_default_route)
return CMD_SUCCESS;
- zebra_rnh_ip_default_route = 1;
+ zvrf->zebra_rnh_ip_default_route = 1;
zebra_evaluate_rnh(zvrf, AFI_IP, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
@@ -1108,10 +1112,10 @@ DEFUN (no_ip_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (!zebra_rnh_ip_default_route)
+ if (!zvrf->zebra_rnh_ip_default_route)
return CMD_SUCCESS;
- zebra_rnh_ip_default_route = 0;
+ zvrf->zebra_rnh_ip_default_route = 0;
zebra_evaluate_rnh(zvrf, AFI_IP, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
}
@@ -1128,10 +1132,10 @@ DEFUN (ipv6_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (zebra_rnh_ipv6_default_route)
+ if (zvrf->zebra_rnh_ipv6_default_route)
return CMD_SUCCESS;
- zebra_rnh_ipv6_default_route = 1;
+ zvrf->zebra_rnh_ipv6_default_route = 1;
zebra_evaluate_rnh(zvrf, AFI_IP6, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
}
@@ -1150,10 +1154,10 @@ DEFUN (no_ipv6_nht_default_route,
if (!zvrf)
return CMD_WARNING;
- if (!zebra_rnh_ipv6_default_route)
+ if (!zvrf->zebra_rnh_ipv6_default_route)
return CMD_SUCCESS;
- zebra_rnh_ipv6_default_route = 0;
+ zvrf->zebra_rnh_ipv6_default_route = 0;
zebra_evaluate_rnh(zvrf, AFI_IP6, 1, RNH_NEXTHOP_TYPE, NULL);
return CMD_SUCCESS;
}
@@ -1380,35 +1384,37 @@ DEFPY (show_route_detail,
DEFPY (show_route_summary,
show_route_summary_cmd,
- "show\
- <\
- ip$ipv4 route [vrf <NAME$vrf_name|all$vrf_all>]\
- summary [prefix$prefix]\
- |ipv6$ipv6 route [vrf <NAME$vrf_name|all$vrf_all>]\
- summary [prefix$prefix]\
- >",
+ "show <ip$ipv4|ipv6$ipv6> route [vrf <NAME$vrf_name|all$vrf_all>] \
+ summary [table (1-4294967295)$table_id] [prefix$prefix]",
SHOW_STR
IP_STR
- "IP routing table\n"
- VRF_FULL_CMD_HELP_STR
- "Summary of all routes\n"
- "Prefix routes\n"
IP6_STR
"IP routing table\n"
VRF_FULL_CMD_HELP_STR
"Summary of all routes\n"
+ "Table to display summary for\n"
+ "The table number\n"
"Prefix routes\n")
{
afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
struct route_table *table;
+ if (table_id == 0)
+ table_id = RT_TABLE_MAIN;
+
if (vrf_all) {
struct vrf *vrf;
struct zebra_vrf *zvrf;
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if ((zvrf = vrf->info) == NULL
- || (table = zvrf->table[afi][SAFI_UNICAST]) == NULL)
+ if ((zvrf = vrf->info) == NULL)
+ continue;
+
+ table = zebra_vrf_table_with_table_id(afi,
+ SAFI_UNICAST,
+ zvrf->vrf->vrf_id,
+ table_id);
+ if (!table)
continue;
if (prefix)
@@ -1422,7 +1428,9 @@ DEFPY (show_route_summary,
if (vrf_name)
VRF_GET_ID(vrf_id, vrf_name, false);
- table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id);
+ table = zebra_vrf_table_with_table_id(afi,
+ SAFI_UNICAST,
+ vrf_id, table_id);
if (!table)
return CMD_SUCCESS;
@@ -2625,12 +2633,6 @@ static int config_write_protocol(struct vty *vty)
if (allow_delete)
vty_out(vty, "allow-external-route-update\n");
- if (zebra_rnh_ip_default_route)
- vty_out(vty, "ip nht resolve-via-default\n");
-
- if (zebra_rnh_ipv6_default_route)
- vty_out(vty, "ipv6 nht resolve-via-default\n");
-
if (zrouter.ribq->spec.hold != ZEBRA_RIB_PROCESS_HOLD_TIME)
vty_out(vty, "zebra work-queue %u\n", zrouter.ribq->spec.hold);
diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c
index bb31247b6a..c7f2976ace 100644
--- a/zebra/zebra_vxlan.c
+++ b/zebra/zebra_vxlan.c
@@ -2515,9 +2515,7 @@ static int zvni_neigh_install(zebra_vni_t *zvni, zebra_neigh_t *n)
struct zebra_if *zif;
struct zebra_l2info_vxlan *vxl;
struct interface *vlan_if;
-#ifdef GNU_LINUX
- uint8_t flags;
-#endif
+ int flags;
int ret = 0;
if (!(n->flags & ZEBRA_NEIGH_REMOTE))
@@ -2531,13 +2529,14 @@ static int zvni_neigh_install(zebra_vni_t *zvni, zebra_neigh_t *n)
vlan_if = zvni_map_to_svi(vxl->access_vlan, zif->brslave_info.br_if);
if (!vlan_if)
return -1;
-#ifdef GNU_LINUX
- flags = NTF_EXT_LEARNED;
+
+ flags = DPLANE_NTF_EXT_LEARNED;
if (n->flags & ZEBRA_NEIGH_ROUTER_FLAG)
- flags |= NTF_ROUTER;
+ flags |= DPLANE_NTF_ROUTER;
ZEBRA_NEIGH_SET_ACTIVE(n);
- ret = kernel_add_neigh(vlan_if, &n->ip, &n->emac, flags);
-#endif
+
+ dplane_neigh_add(vlan_if, &n->ip, &n->emac, flags);
+
return ret;
}
@@ -2554,8 +2553,9 @@ static int zvni_neigh_uninstall(zebra_vni_t *zvni, zebra_neigh_t *n)
return 0;
if (!zvni->vxlan_if) {
- zlog_debug("VNI %u hash %p couldn't be uninstalled - no intf",
- zvni->vni, zvni);
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("VNI %u hash %p couldn't be uninstalled - no intf",
+ zvni->vni, zvni);
return -1;
}
@@ -2569,7 +2569,10 @@ static int zvni_neigh_uninstall(zebra_vni_t *zvni, zebra_neigh_t *n)
ZEBRA_NEIGH_SET_INACTIVE(n);
n->loc_seq = 0;
- return kernel_del_neigh(vlan_if, &n->ip);
+
+ dplane_neigh_delete(vlan_if, &n->ip);
+
+ return 0;
}
/*
@@ -2590,12 +2593,9 @@ static int zvni_neigh_probe(zebra_vni_t *zvni, zebra_neigh_t *n)
if (!vlan_if)
return -1;
-#ifdef GNU_LINUX
- return kernel_upd_neigh(vlan_if, &n->ip, &n->emac,
- 0, NUD_PROBE);
-#else
+ dplane_neigh_update(vlan_if, &n->ip, &n->emac);
+
return 0;
-#endif
}
/*
@@ -2844,9 +2844,12 @@ static int zvni_gw_macip_del(struct interface *ifp, zebra_vni_t *zvni,
/* mac entry should be present */
mac = zvni_mac_lookup(zvni, &n->emac);
if (!mac) {
- zlog_debug("MAC %s doesn't exist for neigh %s on VNI %u",
- prefix_mac2str(&n->emac, buf1, sizeof(buf1)),
- ipaddr2str(ip, buf2, sizeof(buf2)), zvni->vni);
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("MAC %s doesn't exist for neigh %s on VNI %u",
+ prefix_mac2str(&n->emac,
+ buf1, sizeof(buf1)),
+ ipaddr2str(ip, buf2, sizeof(buf2)),
+ zvni->vni);
return -1;
}
@@ -3737,10 +3740,12 @@ static struct interface *zvni_map_to_svi(vlanid_t vid, struct interface *br_if)
*/
static int zvni_mac_install(zebra_vni_t *zvni, zebra_mac_t *mac)
{
- struct zebra_if *zif;
- struct zebra_l2info_vxlan *vxl;
+ const struct zebra_if *zif, *br_zif;
+ const struct zebra_l2info_vxlan *vxl;
bool sticky;
enum zebra_dplane_result res;
+ const struct interface *br_ifp;
+ vlanid_t vid;
if (!(mac->flags & ZEBRA_MAC_REMOTE))
return 0;
@@ -3748,13 +3753,25 @@ static int zvni_mac_install(zebra_vni_t *zvni, zebra_mac_t *mac)
zif = zvni->vxlan_if->info;
if (!zif)
return -1;
+
+ br_ifp = zif->brslave_info.br_if;
+ if (br_ifp == NULL)
+ return -1;
+
vxl = &zif->l2info.vxl;
sticky = !!CHECK_FLAG(mac->flags,
(ZEBRA_MAC_STICKY | ZEBRA_MAC_REMOTE_DEF_GW));
- res = dplane_mac_add(zvni->vxlan_if, vxl->access_vlan, &mac->macaddr,
- mac->fwd_info.r_vtep_ip, sticky);
+ br_zif = (const struct zebra_if *)(br_ifp->info);
+
+ if (IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif))
+ vid = vxl->access_vlan;
+ else
+ vid = 0;
+
+ res = dplane_mac_add(zvni->vxlan_if, br_ifp, vid,
+ &mac->macaddr, mac->fwd_info.r_vtep_ip, sticky);
if (res != ZEBRA_DPLANE_REQUEST_FAILURE)
return 0;
else
@@ -3766,30 +3783,44 @@ static int zvni_mac_install(zebra_vni_t *zvni, zebra_mac_t *mac)
*/
static int zvni_mac_uninstall(zebra_vni_t *zvni, zebra_mac_t *mac)
{
- struct zebra_if *zif;
- struct zebra_l2info_vxlan *vxl;
+ const struct zebra_if *zif, *br_zif;
+ const struct zebra_l2info_vxlan *vxl;
struct in_addr vtep_ip;
- struct interface *ifp;
+ const struct interface *ifp, *br_ifp;
+ vlanid_t vid;
enum zebra_dplane_result res;
if (!(mac->flags & ZEBRA_MAC_REMOTE))
return 0;
if (!zvni->vxlan_if) {
- zlog_debug("VNI %u hash %p couldn't be uninstalled - no intf",
- zvni->vni, zvni);
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("VNI %u hash %p couldn't be uninstalled - no intf",
+ zvni->vni, zvni);
return -1;
}
zif = zvni->vxlan_if->info;
if (!zif)
return -1;
+
+ br_ifp = zif->brslave_info.br_if;
+ if (br_ifp == NULL)
+ return -1;
+
vxl = &zif->l2info.vxl;
+ br_zif = (const struct zebra_if *)br_ifp->info;
+
+ if (IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif))
+ vid = vxl->access_vlan;
+ else
+ vid = 0;
+
ifp = zvni->vxlan_if;
vtep_ip = mac->fwd_info.r_vtep_ip;
- res = dplane_mac_del(ifp, vxl->access_vlan, &mac->macaddr, vtep_ip);
+ res = dplane_mac_del(ifp, br_ifp, vid, &mac->macaddr, vtep_ip);
if (res != ZEBRA_DPLANE_REQUEST_FAILURE)
return 0;
else
@@ -4291,9 +4322,13 @@ static int zvni_vtep_del_all(zebra_vni_t *zvni, int uninstall)
static int zvni_vtep_install(zebra_vni_t *zvni, zebra_vtep_t *zvtep)
{
if (is_vxlan_flooding_head_end() &&
- (zvtep->flood_control == VXLAN_FLOOD_HEAD_END_REPL))
- return kernel_add_vtep(zvni->vni, zvni->vxlan_if,
- &zvtep->vtep_ip);
+ (zvtep->flood_control == VXLAN_FLOOD_HEAD_END_REPL)) {
+ if (ZEBRA_DPLANE_REQUEST_FAILURE ==
+ dplane_vtep_add(zvni->vxlan_if,
+ &zvtep->vtep_ip, zvni->vni))
+ return -1;
+ }
+
return 0;
}
@@ -4308,7 +4343,11 @@ static int zvni_vtep_uninstall(zebra_vni_t *zvni, struct in_addr *vtep_ip)
return -1;
}
- return kernel_del_vtep(zvni->vni, zvni->vxlan_if, vtep_ip);
+ if (ZEBRA_DPLANE_REQUEST_FAILURE ==
+ dplane_vtep_delete(zvni->vxlan_if, vtep_ip, zvni->vni))
+ return -1;
+
+ return 0;
}
/*
@@ -4484,9 +4523,11 @@ static int zl3vni_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
*/
static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
{
- struct zebra_if *zif = NULL;
- struct zebra_l2info_vxlan *vxl = NULL;
+ const struct zebra_if *zif = NULL, *br_zif = NULL;
+ const struct zebra_l2info_vxlan *vxl = NULL;
+ const struct interface *br_ifp;
enum zebra_dplane_result res;
+ vlanid_t vid;
if (!(CHECK_FLAG(zrmac->flags, ZEBRA_MAC_REMOTE))
|| !(CHECK_FLAG(zrmac->flags, ZEBRA_MAC_REMOTE_RMAC)))
@@ -4496,9 +4537,20 @@ static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
if (!zif)
return -1;
+ br_ifp = zif->brslave_info.br_if;
+ if (br_ifp == NULL)
+ return -1;
+
vxl = &zif->l2info.vxl;
- res = dplane_mac_add(zl3vni->vxlan_if, vxl->access_vlan,
+ br_zif = (const struct zebra_if *)br_ifp->info;
+
+ if (IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif))
+ vid = vxl->access_vlan;
+ else
+ vid = 0;
+
+ res = dplane_mac_add(zl3vni->vxlan_if, br_ifp, vid,
&zrmac->macaddr, zrmac->fwd_info.r_vtep_ip, 0);
if (res != ZEBRA_DPLANE_REQUEST_FAILURE)
return 0;
@@ -4512,8 +4564,10 @@ static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
{
char buf[ETHER_ADDR_STRLEN];
- struct zebra_if *zif = NULL;
- struct zebra_l2info_vxlan *vxl = NULL;
+ const struct zebra_if *zif = NULL, *br_zif;
+ const struct zebra_l2info_vxlan *vxl = NULL;
+ const struct interface *br_ifp;
+ vlanid_t vid;
enum zebra_dplane_result res;
if (!(CHECK_FLAG(zrmac->flags, ZEBRA_MAC_REMOTE))
@@ -4521,10 +4575,12 @@ static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
return 0;
if (!zl3vni->vxlan_if) {
- zlog_debug(
- "RMAC %s on L3-VNI %u hash %p couldn't be uninstalled - no vxlan_if",
- prefix_mac2str(&zrmac->macaddr, buf, sizeof(buf)),
- zl3vni->vni, zl3vni);
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "RMAC %s on L3-VNI %u hash %p couldn't be uninstalled - no vxlan_if",
+ prefix_mac2str(&zrmac->macaddr,
+ buf, sizeof(buf)),
+ zl3vni->vni, zl3vni);
return -1;
}
@@ -4532,9 +4588,19 @@ static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
if (!zif)
return -1;
+ br_ifp = zif->brslave_info.br_if;
+ if (br_ifp == NULL)
+ return -1;
+
vxl = &zif->l2info.vxl;
- res = dplane_mac_del(zl3vni->vxlan_if, vxl->access_vlan,
+ br_zif = (const struct zebra_if *)br_ifp->info;
+ if (IS_ZEBRA_IF_BRIDGE_VLAN_AWARE(br_zif))
+ vid = vxl->access_vlan;
+ else
+ vid = 0;
+
+ res = dplane_mac_del(zl3vni->vxlan_if, br_ifp, vid,
&zrmac->macaddr, zrmac->fwd_info.r_vtep_ip);
if (res != ZEBRA_DPLANE_REQUEST_FAILURE)
return 0;
@@ -4678,9 +4744,7 @@ static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
*/
static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
{
-#ifdef GNU_LINUX
uint8_t flags;
-#endif
int ret = 0;
if (!is_l3vni_oper_up(zl3vni))
@@ -4689,12 +4753,13 @@ static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
if (!(n->flags & ZEBRA_NEIGH_REMOTE)
|| !(n->flags & ZEBRA_NEIGH_REMOTE_NH))
return 0;
-#ifdef GNU_LINUX
- flags = NTF_EXT_LEARNED;
+
+ flags = DPLANE_NTF_EXT_LEARNED;
if (n->flags & ZEBRA_NEIGH_ROUTER_FLAG)
- flags |= NTF_ROUTER;
- ret = kernel_add_neigh(zl3vni->svi_if, &n->ip, &n->emac, flags);
-#endif
+ flags |= DPLANE_NTF_ROUTER;
+
+ dplane_neigh_add(zl3vni->svi_if, &n->ip, &n->emac, flags);
+
return ret;
}
@@ -4710,7 +4775,9 @@ static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
if (!zl3vni->svi_if || !if_is_operative(zl3vni->svi_if))
return 0;
- return kernel_del_neigh(zl3vni->svi_if, &n->ip);
+ dplane_neigh_delete(zl3vni->svi_if, &n->ip);
+
+ return 0;
}
/* add remote vtep as a neigh entry */
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 70b4594813..c008441d6a 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -231,13 +231,11 @@ static int zserv_write(struct thread *thread)
cache = stream_fifo_new();
- pthread_mutex_lock(&client->obuf_mtx);
- {
+ frr_with_mutex(&client->obuf_mtx) {
while (stream_fifo_head(client->obuf_fifo))
stream_fifo_push(cache,
stream_fifo_pop(client->obuf_fifo));
}
- pthread_mutex_unlock(&client->obuf_mtx);
if (cache->tail) {
msg = cache->tail;
@@ -427,13 +425,11 @@ static int zserv_read(struct thread *thread)
memory_order_relaxed);
/* publish read packets on client's input queue */
- pthread_mutex_lock(&client->ibuf_mtx);
- {
+ frr_with_mutex(&client->ibuf_mtx) {
while (cache->head)
stream_fifo_push(client->ibuf_fifo,
stream_fifo_pop(cache));
}
- pthread_mutex_unlock(&client->ibuf_mtx);
/* Schedule job to process those packets */
zserv_event(client, ZSERV_PROCESS_MESSAGES);
@@ -499,8 +495,7 @@ static int zserv_process_messages(struct thread *thread)
uint32_t p2p = zrouter.packets_to_process;
bool need_resched = false;
- pthread_mutex_lock(&client->ibuf_mtx);
- {
+ frr_with_mutex(&client->ibuf_mtx) {
uint32_t i;
for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
++i) {
@@ -516,7 +511,6 @@ static int zserv_process_messages(struct thread *thread)
if (stream_fifo_head(client->ibuf_fifo))
need_resched = true;
}
- pthread_mutex_unlock(&client->ibuf_mtx);
while (stream_fifo_head(cache)) {
msg = stream_fifo_pop(cache);
@@ -535,11 +529,9 @@ static int zserv_process_messages(struct thread *thread)
int zserv_send_message(struct zserv *client, struct stream *msg)
{
- pthread_mutex_lock(&client->obuf_mtx);
- {
+ frr_with_mutex(&client->obuf_mtx) {
stream_fifo_push(client->obuf_fifo, msg);
}
- pthread_mutex_unlock(&client->obuf_mtx);
zserv_client_event(client, ZSERV_CLIENT_WRITE);
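The conversions in this file (and in zebra_rib.c above) replace explicit pthread_mutex_lock()/pthread_mutex_unlock() pairs with the frr_with_mutex() block form, where the mutex is taken on entry to the braced block and released when the block is left; a minimal before/after sketch using the obuf queue, illustrative only:

	/* before: explicit lock/unlock bracketing the critical section */
	pthread_mutex_lock(&client->obuf_mtx);
	{
		stream_fifo_push(client->obuf_fifo, msg);
	}
	pthread_mutex_unlock(&client->obuf_mtx);

	/* after: the macro scopes the lock to the block */
	frr_with_mutex(&client->obuf_mtx) {
		stream_fifo_push(client->obuf_fifo, msg);
	}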
@@ -790,7 +782,7 @@ void zserv_start(char *path)
setsockopt_so_recvbuf(zsock, 1048576);
setsockopt_so_sendbuf(zsock, 1048576);
- frr_elevate_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) {
+ frr_with_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) {
ret = bind(zsock, (struct sockaddr *)&sa, sa_len);
}
if (ret < 0) {