-rw-r--r--  bgpd/bgp_evpn_mh.c | 11
-rw-r--r--  bgpd/bgp_fsm.c | 1
-rw-r--r--  bgpd/bgp_network.c | 17
-rw-r--r--  bgpd/bgp_pbr.c | 29
-rw-r--r--  bgpd/bgp_route.c | 15
-rw-r--r--  bgpd/bgpd.h | 1
-rw-r--r--  configure.ac | 2
-rw-r--r--  doc/developer/sbfd.rst | 5
-rw-r--r--  doc/user/bgp.rst | 6
-rw-r--r--  doc/user/sbfd.rst | 14
-rw-r--r--  doc/user/static.rst | 14
-rw-r--r--  doc/user/zebra.rst | 2
-rw-r--r--  fpm/fpm_pb.h | 1
-rw-r--r--  isisd/isis_cli.c | 28
-rw-r--r--  lib/nexthop.c | 87
-rw-r--r--  lib/nexthop.h | 104
-rwxr-xr-x  lib/route_types.pl | 15
-rw-r--r--  lib/zclient.c | 22
-rw-r--r--  pbrd/pbr_nht.c | 8
-rw-r--r--  pbrd/pbr_vty.c | 2
-rw-r--r--  staticd/static_vty.c | 24
-rw-r--r--  tests/topotests/bgp_evpn_route_map_match/r1/frr.conf | 9
-rw-r--r--  tests/topotests/bgp_evpn_route_map_match/r2/frr.conf | 1
-rw-r--r--  tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py | 102
-rw-r--r--  tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf | 3
-rw-r--r--  tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf | 3
-rw-r--r--  tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf | 5
-rw-r--r--  tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf | 4
-rw-r--r--  tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py | 29
-rw-r--r--  tests/topotests/bgp_oad/r3/frr.conf | 6
-rw-r--r--  tests/topotests/bgp_oad/test_bgp_oad.py | 33
-rw-r--r--  tests/topotests/fpm_testing_topo1/r1/routes_summ.json | 6
-rw-r--r--  tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json | 4
-rw-r--r--  tests/topotests/fpm_testing_topo1/test_fpm_topo1.py | 7
-rw-r--r--  tests/topotests/srv6_static_route_ipv4/__init__.py | 0
-rw-r--r--  tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json | 28
-rw-r--r--  tests/topotests/srv6_static_route_ipv4/r1/frr.conf | 7
-rw-r--r--  tests/topotests/srv6_static_route_ipv4/r1/setup.sh | 2
-rwxr-xr-x  tests/topotests/srv6_static_route_ipv4/test_srv6_route.py | 82
-rw-r--r--  yang/frr-isisd.yang | 2
-rw-r--r--  zebra/dplane_fpm_nl.c | 23
-rw-r--r--  zebra/fpm_listener.c | 6
-rw-r--r--  zebra/rt_netlink.c | 6
-rw-r--r--  zebra/zebra_cli.c | 4
-rw-r--r--  zebra/zebra_dplane.c | 15
-rw-r--r--  zebra/zebra_rib.c | 62
-rw-r--r--  zebra/zebra_routemap.c | 7
47 files changed, 627 insertions(+), 237 deletions(-)
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index b6ec8341a7..b9861acad2 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -1201,6 +1201,7 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
mpls_label_t label;
struct in_addr vtep_ip;
struct prefix_evpn p;
+ uint8_t num_labels = 0;
if (psize != BGP_EVPN_TYPE1_PSIZE) {
flog_err(EC_BGP_EVPN_ROUTE_INVALID,
@@ -1225,6 +1226,7 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
pfx += EVPN_ETH_TAG_BYTES;
memcpy(&label, pfx, BGP_LABEL_BYTES);
+ num_labels++;
/* EAD route prefix doesn't include the nexthop in the global
* table
@@ -1233,12 +1235,11 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
build_evpn_type1_prefix(&p, eth_tag, &esi, vtep_ip);
/* Process the route. */
if (attr) {
- bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
- safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL,
- 0, 0, NULL);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi, safi, ZEBRA_ROUTE_BGP,
+ BGP_ROUTE_NORMAL, &prd, &label, num_labels, 0, NULL);
} else {
- bgp_withdraw(peer, (struct prefix *)&p, addpath_id, afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL, 0);
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, afi, safi, ZEBRA_ROUTE_BGP,
+ BGP_ROUTE_NORMAL, &prd, &label, num_labels);
}
return 0;
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 1a30cb37f4..c7b7f9e284 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -607,6 +607,7 @@ const char *const peer_down_str[] = {
"Admin. shutdown (RTT)",
"Suppress Fib Turned On or Off",
"Password config change",
+ "Router ID is missing",
};
static void bgp_graceful_restart_timer_off(struct peer_connection *connection,
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index af5d815d30..3df4aa286e 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -571,7 +571,7 @@ static void bgp_accept(struct event *thread)
/* Do not try to reconnect if the peer reached maximum
* prefixes, restart timer is still running or the peer
- * is shutdown.
+ * is shutdown, or BGP identifier is not set (0.0.0.0).
*/
if (BGP_PEER_START_SUPPRESSED(peer1)) {
if (bgp_debug_neighbor_events(peer1)) {
@@ -588,6 +588,14 @@ static void bgp_accept(struct event *thread)
return;
}
+ if (peer1->bgp->router_id.s_addr == INADDR_ANY) {
+ zlog_warn("[Event] Incoming BGP connection rejected from %s due missing BGP identifier, set it with `bgp router-id`",
+ peer1->host);
+ peer1->last_reset = PEER_DOWN_ROUTER_ID_ZERO;
+ close(bgp_sock);
+ return;
+ }
+
if (bgp_debug_neighbor_events(peer1))
zlog_debug("[Event] connection from %s fd %d, active peer status %d fd %d",
inet_sutop(&su, buf), bgp_sock, connection1->status,
@@ -776,6 +784,13 @@ enum connect_result bgp_connect(struct peer_connection *connection)
assert(!CHECK_FLAG(connection->thread_flags, PEER_THREAD_READS_ON));
ifindex_t ifindex = 0;
+ if (peer->bgp->router_id.s_addr == INADDR_ANY) {
+ peer->last_reset = PEER_DOWN_ROUTER_ID_ZERO;
+ zlog_warn("%s: BGP identifier is missing for peer %s, set it with `bgp router-id`",
+ __func__, peer->host);
+ return connect_error;
+ }
+
if (peer->conf_if && BGP_CONNECTION_SU_UNSPEC(connection)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug("Peer address not learnt: Returning from connect");
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index 2d61c0f00a..b85a8e2254 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -2624,7 +2624,6 @@ static void bgp_pbr_policyroute_add_to_zebra(struct bgp *bgp,
static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
struct bgp_pbr_entry_main *api, bool add)
{
- struct nexthop nh;
int i = 0;
int continue_loop = 1;
float rate = 0;
@@ -2639,7 +2638,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
struct bgp_pbr_val_mask bpvm;
memset(&range, 0, sizeof(range));
- memset(&nh, 0, sizeof(nh));
memset(&bpf, 0, sizeof(bpf));
memset(&bpof, 0, sizeof(bpof));
if (CHECK_FLAG(api->match_bitmask, PREFIX_SRC_PRESENT) ||
@@ -2652,8 +2650,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
dst = &api->dst_prefix;
if (api->type == BGP_PBR_IPRULE)
bpf.type = api->type;
- memset(&nh, 0, sizeof(nh));
- nh.vrf_id = VRF_UNKNOWN;
if (api->match_protocol_num) {
proto = (uint8_t)api->protocol[0].value;
if (api->afi == AF_INET6 && proto == IPPROTO_ICMPV6)
@@ -2778,8 +2774,10 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
case ACTION_TRAFFICRATE:
/* drop packet */
if (api->actions[i].u.r.rate == 0) {
- nh.vrf_id = api->vrf_id;
- nh.type = NEXTHOP_TYPE_BLACKHOLE;
+ struct nexthop nh = {
+ .vrf_id = api->vrf_id,
+ .type = NEXTHOP_TYPE_BLACKHOLE,
+ };
bgp_pbr_policyroute_add_to_zebra(
bgp, path, &bpf, &bpof, &nh, &rate);
} else {
@@ -2802,18 +2800,15 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
/* terminate action: run other filters
*/
break;
- case ACTION_REDIRECT_IP:
- nh.vrf_id = api->vrf_id;
+ case ACTION_REDIRECT_IP: {
+ struct nexthop nh = { .vrf_id = api->vrf_id };
+
if (api->afi == AFI_IP) {
nh.type = NEXTHOP_TYPE_IPV4;
- nh.gate.ipv4.s_addr =
- api->actions[i].u.zr.
- redirect_ip_v4.s_addr;
+ nh.gate.ipv4 = api->actions[i].u.zr.redirect_ip_v4;
} else {
nh.type = NEXTHOP_TYPE_IPV6;
- memcpy(&nh.gate.ipv6,
- &api->actions[i].u.zr.redirect_ip_v6,
- sizeof(struct in6_addr));
+ nh.gate.ipv6 = api->actions[i].u.zr.redirect_ip_v6;
}
bgp_pbr_policyroute_add_to_zebra(bgp, path, &bpf, &bpof,
&nh, &rate);
@@ -2822,7 +2817,10 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
*/
continue_loop = 0;
break;
- case ACTION_REDIRECT:
+ }
+ case ACTION_REDIRECT: {
+ struct nexthop nh = {};
+
if (api->afi == AFI_IP)
nh.type = NEXTHOP_TYPE_IPV4;
else
@@ -2832,6 +2830,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
&nh, &rate);
continue_loop = 0;
break;
+ }
case ACTION_MARKING:
if (BGP_DEBUG(pbr, PBR)) {
bgp_pbr_print_policy_route(api);
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index e932738cd4..f46977af69 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2851,8 +2851,17 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
* If the extended community is non-transitive, strip it off,
* unless it's a locally originated route (static, aggregate,
* redistributed, etc.).
+ * draft-uttaro-idr-bgp-oad says:
+ * Extended communities which are non-transitive across an AS
+ * boundary MAY be advertised over an EBGP-OAD session if allowed
+ * by explicit policy configuration. If allowed, all the members
+ * of the OAD SHOULD be configured to use the same criteria.
+ * For example, the Origin Validation State Extended Community,
+ * defined as non-transitive in [RFC8097], can be advertised to
+ * peers in the same OAD.
*/
- if (from->sort == BGP_PEER_EBGP && peer->sort == BGP_PEER_EBGP &&
+ if (from->sort == BGP_PEER_EBGP && from->sub_sort != BGP_PEER_EBGP_OAD &&
+ peer->sort == BGP_PEER_EBGP && peer->sub_sort != BGP_PEER_EBGP_OAD &&
pi->sub_type == BGP_ROUTE_NORMAL) {
struct ecommunity *new_ecomm;
struct ecommunity *old_ecomm;
@@ -4120,6 +4129,9 @@ static void process_eoiu_marker(struct bgp_dest *dest)
subqueue2str(META_QUEUE_EOIU_MARKER));
bgp_process_main_one(info->bgp, NULL, 0, 0);
+
+ XFREE(MTYPE_BGP_EOIU_MARKER_INFO, info);
+ XFREE(MTYPE_BGP_NODE, dest);
}
/*
@@ -4310,6 +4322,7 @@ static void eoiu_marker_queue_free(struct meta_queue *mq, struct bgp_dest_queue
XFREE(MTYPE_BGP_EOIU_MARKER_INFO, dest->info);
STAILQ_REMOVE_HEAD(l, pq);
STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+ XFREE(MTYPE_BGP_NODE, dest);
mq->size--;
}
}
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 96a78e6662..ee904391c1 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -1863,6 +1863,7 @@ struct peer {
#define PEER_DOWN_RTT_SHUTDOWN 35U /* Automatically shutdown due to RTT */
#define PEER_DOWN_SUPPRESS_FIB_PENDING 36U /* Suppress fib pending changed */
#define PEER_DOWN_PASSWORD_CHANGE 37U /* neighbor password command */
+#define PEER_DOWN_ROUTER_ID_ZERO 38U /* router-id is 0.0.0.0 */
/*
* Remember to update peer_down_str in bgp_fsm.c when you add
* a new value to the last_reset reason
diff --git a/configure.ac b/configure.ac
index e04c0b6d46..09e2d20c3a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ([2.69])
-AC_INIT([frr], [10.3-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [10.4-dev], [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST([PACKAGE_URL])
PACKAGE_FULLNAME="FRRouting"
diff --git a/doc/developer/sbfd.rst b/doc/developer/sbfd.rst
index 7bbd2428dd..66a3b48dba 100644
--- a/doc/developer/sbfd.rst
+++ b/doc/developer/sbfd.rst
@@ -27,7 +27,7 @@ SBFD takes the same data packet format as BFD, but with a much simpler state mac
According to RFC7880, SBFD has a stateless SBFDReflector and a stateful SBFDInitiator with the state machine as below:
::
-
+
+--+
ADMIN DOWN, | |
TIMER | V
@@ -78,6 +78,7 @@ A bfd-name is always associated with a TE path, for example if we use the sbfd s
Meanwhile bfdd will notify the sbfd status to the Pathd, we should add the bfd-name field in PTM bfd notify message ZEBRA_BFD_DEST_REPLAY:
::
+
* Message format:
* - header: command, vrf
* - l: interface index
@@ -113,6 +114,7 @@ According to RFC7881, SBFD Control packet dst port should be 7784, src port can
::
+
UDP(sport=4784, dport=7784)/BFD() or UDP(sport=3784, dport=7784)/BFD()
if "multihop" is specified for sbfd initiator we choose the 4784 as the source port, so the reflected packet will take 4784 as the dst port, this is a local BFD_MULTI_HOP_PORT so the reflected packet can be handled by the existing bfd_recv_cb function.
@@ -122,6 +124,7 @@ if "multihop" is not specified for sbfd initiator we choose the 3784 as the sour
For echo SBFD with SRv6 encapsulation case, we re-use the BFD Echo port, the UDP ports in packet are set as:
::
+
UDP(sport=3785, dport=3785)/BFD()
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 1493c2fb98..5add30b6f4 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -282,7 +282,9 @@ internal or external.
interface and address information. In that case default router ID value is
selected as the largest IP Address of the interfaces. When `router zebra` is
not enabled *bgpd* can't get interface information so `router-id` is set to
- 0.0.0.0. So please set router-id by hand.
+ 0.0.0.0, which is invalid, and the BGP session can't be established.
+
+ So please set the router-id manually.
.. _bgp-multiple-autonomous-systems:
@@ -2938,7 +2940,7 @@ BGP Extended Communities in Route Map
``CO:COLOR``
This is a format to define colors value. ``CO`` part is always 00 (default),
- it can be used to support the requirements of Color-Only steering when using
+ it can be used to support the requirements of Color-Only steering when using
a Null Endpoint in the SR-TE Policy as specified in Section 8.8 of [RFC9256].
The below shows in detail what the different combinations of ``CO`` bits can
match on to for the purpose of determining what type of SR-TE Policy Tunnel
diff --git a/doc/user/sbfd.rst b/doc/user/sbfd.rst
index 390d82a6c0..d26bffe07b 100644
--- a/doc/user/sbfd.rst
+++ b/doc/user/sbfd.rst
@@ -27,7 +27,7 @@ SBFD takes the same data packet format as BFD, but with a much simpler state mac
According to RFC7880, SBFD has a stateless SBFDReflector and a stateful SBFDInitiator with the state machine as below:
::
-
+
+--+
ADMIN DOWN, | |
TIMER | V
@@ -73,24 +73,28 @@ In the following example, we set up a sbfd session to monitor the path A-B-D (al
A is the SBFDInitiator, and D is the SBFDReflector. A will transmit the SBFD packet to B in the following format:
::
+
IPv6(src="200::A", dst="100::B", nh=43)/IPv6ExtHdrSegmentRouting(addresses=["100::D"], nh=41, segleft=1)/IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
Upon receiving the packet, B will take the Srv6 End action since the dst ip 100::B is the End address. B will then shift the dst address according to the Srv6 spec and transmit the SBFD packet to D in the following format:
::
+
IPv6(src="200::A", dst="100::D", nh=41)/IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
After D receives the packet, it will decap the outer IPv6 header since the dst ip 100::D is the End address. The decapped packet is:
::
+
IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
This packet will be routed to kernel stack of D since its dst is 200::D. Then the SBFDReflector service on D will get the packet and Reflect it. The response packet will be:
::
+
IPv6(src="200::D", dst="200::A")/UDP(sport=7784)/BFD(my_dis=456, your_disc=123, state=UP)
@@ -132,18 +136,21 @@ For example, we use Echo SBFD session to protect Srv6 path: A-B-D
A is also the SBFDInitiator, and B, C, D are Srv6-ready nodes. A will transmit the SBFD packet to B in the following format:
::
+
IPv6(src="200::A", dst="100::B", nh=43)/IPv6ExtHdrSegmentRouting(addresses=["100::D"], nh=41, segleft=1)/IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
Upon receiving the packet, B will take the Srv6 End action since the dst ip 100::B is the End address. B will then shift the dst address according to the Srv6 spec and transmit the SBFD packet to D in the following format:
::
+
IPv6(src="200::A", dst="100::D", nh=41)/IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
After D receives the packet, it will decap the outer IPv6 header since the dst ip 100::D is the End address. The decapped packet is:
::
+
IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
@@ -181,6 +188,7 @@ We can also configure a SBFD Initiator-Reflector session based on simple IPv6/IP
A is the SBFDInitiator, and D is the SBFDReflector. A will transmit the SBFD packet to B or C in the following format:
::
+
IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
@@ -189,6 +197,7 @@ Upon receiving the packet, B/C will route the packet to D according to the dst i
After D receives the packet, it will be sent to the kernel stack of D since its dst is 200::D. Then the SBFDReflector service on D will get the packet and reflect it. The response packet will be:
::
+
IPv6(src="200::D", dst="200::A")/UDP(sport=7784)/BFD(my_dis=456, your_disc=123, state=UP)
@@ -226,6 +235,7 @@ This command will show all the BFD and SBFD sessions in the bfdd:
::
+
BFD Peers:
peer 200::D bfd-mode sbfd-init bfd-name a-d multihop local-address 200::A vrf default remote-discr 456
ID: 1421669725
@@ -254,6 +264,7 @@ This command will show all the BFD and SBFD session packet counters:
.. clicmd:: show bfd peers counters
::
+
BFD Peers:
peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A vrf default srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
Control packet input: 0 packets
@@ -281,6 +292,7 @@ we also implemented a new show command to display BFD sessions with a bfd-name,
.. clicmd:: show bfd bfd-name a-b-d
::
+
BFD Peers:
peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A vrf default srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
ID: 123
diff --git a/doc/user/static.rst b/doc/user/static.rst
index 5bf5004a66..0ce6e2107e 100644
--- a/doc/user/static.rst
+++ b/doc/user/static.rst
@@ -177,6 +177,20 @@ multiple segments instructions.
[..]
S>* 2005::/64 [1/0] is directly connected, ens3, seg6 2001:db8:aaaa::7,2002::4,2002::3,2002::2, weight 1, 00:00:06
+STATIC also supports steering of IPv4 traffic over an SRv6 SID list, as shown in the example below.
+
+.. code-block:: frr
+
+ ip route A.B.C.D <A.B.C.D|nexthop> segments U:U::U:U/Y:Y::Y:Y/Z:Z::Z:Z
+
+::
+
+ router(config)# ip route 10.0.0.0/24 sr0 segments fcbb:bbbb:1:2:3:fe00::
+
+ router# show ip route
+ [..]
+ S>* 10.0.0.0/24 [1/0] is directly connected, sr0, seg6 fcbb:bbbb:1:2:3:fe00::, weight 1, 00:00:06
+
SRv6 Static SIDs Commands
=========================
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index ef3a619853..f700d36086 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -1133,10 +1133,8 @@ kernel.
- any,
- babel,
- bgp,
- - connected,
- eigrp,
- isis,
- - kernel,
- nhrp,
- openfabric,
- ospf,
diff --git a/fpm/fpm_pb.h b/fpm/fpm_pb.h
index 23d7e43993..8847365a37 100644
--- a/fpm/fpm_pb.h
+++ b/fpm/fpm_pb.h
@@ -111,6 +111,7 @@ static inline int fpm__nexthop__get(const Fpm__Nexthop *nh,
nexthop->vrf_id = VRF_DEFAULT;
nexthop->type = NEXTHOP_TYPE_IPV4;
+ memset(&nexthop->gate, 0, sizeof(nexthop->gate));
nexthop->gate.ipv4 = ipv4;
if (ifindex) {
nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index 735e39a377..c86d929903 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -2365,31 +2365,27 @@ DEFPY_YANG (isis_frr_lfa_tiebreaker,
if (!level || strmatch(level, "level-1")) {
if (no) {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-1/lfa/tiebreaker[index='%s']",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-1/lfa/tiebreaker[index='%s'][type='%s']",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
} else {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-1/lfa/tiebreaker[index='%s']/type",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-1/lfa/tiebreaker[index='%s'][type='%s']/type",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, type);
}
}
if (!level || strmatch(level, "level-2")) {
if (no) {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-2/lfa/tiebreaker[index='%s']",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-2/lfa/tiebreaker[index='%s'][type='%s']",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
} else {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-2/lfa/tiebreaker[index='%s']/type",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-2/lfa/tiebreaker[index='%s'][type='%s']/type",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, type);
}
}
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 332581fbd8..ee6c2b7ec0 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -772,68 +772,30 @@ unsigned int nexthop_level(const struct nexthop *nexthop)
return rv;
}
-/* Only hash word-sized things, let cmp do the rest. */
-uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
+uint32_t nexthop_hash(const struct nexthop *nexthop)
{
uint32_t key = 0x45afe398;
- int i;
- key = jhash_3words(nexthop->type, nexthop->vrf_id,
- nexthop->nh_label_type, key);
-
- if (nexthop->nh_label) {
- int labels = nexthop->nh_label->num_labels;
+ /* type, vrf, ifindex, ip addresses - see nexthop.h */
+ key = _nexthop_hash_bytes(nexthop, key);
- i = 0;
+ key = jhash_1word(nexthop->flags & NEXTHOP_FLAGS_HASHED, key);
- while (labels >= 3) {
- key = jhash_3words(nexthop->nh_label->label[i],
- nexthop->nh_label->label[i + 1],
- nexthop->nh_label->label[i + 2],
- key);
- labels -= 3;
- i += 3;
- }
-
- if (labels >= 2) {
- key = jhash_2words(nexthop->nh_label->label[i],
- nexthop->nh_label->label[i + 1],
- key);
- labels -= 2;
- i += 2;
- }
+ if (nexthop->nh_label) {
+ const struct mpls_label_stack *ls = nexthop->nh_label;
- if (labels >= 1)
- key = jhash_1word(nexthop->nh_label->label[i], key);
+ /* num_labels itself isn't useful to hash; if the number of
+ * labels is different, the hash value will already change
+ * just due to that.
+ */
+ key = jhash(ls->label, sizeof(ls->label[0]) * ls->num_labels, key);
}
- key = jhash_2words(nexthop->ifindex,
- CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK),
- key);
-
/* Include backup nexthops, if present */
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
int backups = nexthop->backup_num;
- i = 0;
-
- while (backups >= 3) {
- key = jhash_3words(nexthop->backup_idx[i],
- nexthop->backup_idx[i + 1],
- nexthop->backup_idx[i + 2], key);
- backups -= 3;
- i += 3;
- }
-
- while (backups >= 2) {
- key = jhash_2words(nexthop->backup_idx[i],
- nexthop->backup_idx[i + 1], key);
- backups -= 2;
- i += 2;
- }
-
- if (backups >= 1)
- key = jhash_1word(nexthop->backup_idx[i], key);
+ key = jhash(nexthop->backup_idx, sizeof(nexthop->backup_idx[0]) * backups, key);
}
if (nexthop->nh_srv6) {
@@ -868,31 +830,6 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
return key;
}
-
-#define GATE_SIZE 4 /* Number of uint32_t words in struct g_addr */
-
-/* For a more granular hash */
-uint32_t nexthop_hash(const struct nexthop *nexthop)
-{
- uint32_t gate_src_rmap_raw[GATE_SIZE * 3] = {};
- /* Get all the quick stuff */
- uint32_t key = nexthop_hash_quick(nexthop);
-
- assert(((sizeof(nexthop->gate) + sizeof(nexthop->src)
- + sizeof(nexthop->rmap_src))
- / 3)
- == (GATE_SIZE * sizeof(uint32_t)));
-
- memcpy(gate_src_rmap_raw, &nexthop->gate, GATE_SIZE);
- memcpy(gate_src_rmap_raw + GATE_SIZE, &nexthop->src, GATE_SIZE);
- memcpy(gate_src_rmap_raw + (2 * GATE_SIZE), &nexthop->rmap_src,
- GATE_SIZE);
-
- key = jhash2(gate_src_rmap_raw, (GATE_SIZE * 3), key);
-
- return key;
-}
-
void nexthop_copy_no_recurse(struct nexthop *copy,
const struct nexthop *nexthop,
struct nexthop *rparent)
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 5dfb58d846..cea7c77e3e 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -8,6 +8,7 @@
#ifndef _LIB_NEXTHOP_H
#define _LIB_NEXTHOP_H
+#include "jhash.h"
#include "prefix.h"
#include "mpls.h"
#include "vxlan.h"
@@ -56,15 +57,48 @@ struct nexthop {
struct nexthop *next;
struct nexthop *prev;
- /*
- * What vrf is this nexthop associated with?
+
+ /* begin of hashed data - all fields from here onwards are given to
+ * jhash() as one consecutive chunk. DO NOT create "padding holes".
+ * DO NOT insert pointers that need to be deep-hashed.
+ *
+ * static_assert() below needs to be updated when fields are added
*/
+ char _hash_begin[0];
+
+ /* see above */
+ enum nexthop_types_t type;
+
+ /* What vrf is this nexthop associated with? */
vrf_id_t vrf_id;
/* Interface index. */
ifindex_t ifindex;
- enum nexthop_types_t type;
+ /* Type of label(s), if any */
+ enum lsp_types_t nh_label_type;
+
+ /* padding: keep 16 byte alignment here */
+
+ /* Nexthop address
+ * make sure all 16 bytes for IPv6 are zeroed when putting in an IPv4
+ * address since the entire thing is hashed as-is
+ */
+ union {
+ union g_addr gate;
+ enum blackhole_type bh_type;
+ };
+ union g_addr src;
+ union g_addr rmap_src; /* Src is set via routemap */
+
+ /* end of hashed data - remaining fields in this struct are not
+ * directly fed into jhash(). Most of them are actually part of the
+ * hash but have special rules or handling attached.
+ */
+ char _hash_end[0];
+
+ /* Weight of the nexthop ( for unequal cost ECMP ) */
+ uint8_t weight;
uint16_t flags;
#define NEXTHOP_FLAG_ACTIVE (1 << 0) /* This nexthop is alive. */
@@ -82,18 +116,15 @@ struct nexthop {
#define NEXTHOP_FLAG_EVPN (1 << 8) /* nexthop is EVPN */
#define NEXTHOP_FLAG_LINKDOWN (1 << 9) /* is not removed on link down */
+ /* which flags are part of nexthop_hash(). Should probably be split
+ * off into a separate field...
+ */
+#define NEXTHOP_FLAGS_HASHED NEXTHOP_FLAG_ONLINK
+
#define NEXTHOP_IS_ACTIVE(flags) \
(CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \
&& !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE))
- /* Nexthop address */
- union {
- union g_addr gate;
- enum blackhole_type bh_type;
- };
- union g_addr src;
- union g_addr rmap_src; /* Src is set via routemap */
-
/* Nexthops obtained by recursive resolution.
*
* If the nexthop struct needs to be resolved recursively,
@@ -104,15 +135,9 @@ struct nexthop {
/* Recursive parent */
struct nexthop *rparent;
- /* Type of label(s), if any */
- enum lsp_types_t nh_label_type;
-
/* Label(s) associated with this nexthop. */
struct mpls_label_stack *nh_label;
- /* Weight of the nexthop ( for unequal cost ECMP ) */
- uint8_t weight;
-
/* Count and index of corresponding backup nexthop(s) in a backup list;
* only meaningful if the HAS_BACKUP flag is set.
*/
@@ -138,6 +163,29 @@ struct nexthop {
struct nexthop_srv6 *nh_srv6;
};
+/* all hashed fields (including padding, if it is necessary to add) need to
+ * be listed in the static_assert below
+ */
+
+#define S(field) sizeof(((struct nexthop *)NULL)->field)
+
+static_assert(
+ offsetof(struct nexthop, _hash_end) - offsetof(struct nexthop, _hash_begin) ==
+ S(type) + S(vrf_id) + S(ifindex) + S(nh_label_type) + S(gate) + S(src) + S(rmap_src),
+ "struct nexthop contains padding, this can break things. insert _pad fields at appropriate places");
+
+#undef S
+
+/* this is here to show exactly what is meant by the comments above about
+ * the hashing
+ */
+static inline uint32_t _nexthop_hash_bytes(const struct nexthop *nh, uint32_t seed)
+{
+ return jhash(&nh->_hash_begin,
+ offsetof(struct nexthop, _hash_end) - offsetof(struct nexthop, _hash_begin),
+ seed);
+}
+
/* Utility to append one nexthop to another. */
#define NEXTHOP_APPEND(to, new) \
do { \
@@ -183,27 +231,11 @@ struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type,
/*
* Hash a nexthop. Suitable for use with hash tables.
*
- * This function uses the following values when computing the hash:
- * - vrf_id
- * - ifindex
- * - type
- * - gate
- *
- * nexthop
- * The nexthop to hash
- *
- * Returns:
- * 32-bit hash of nexthop
+ * Please double check the code on what is included in the hash, there was
+ * documentation here but it got outdated and the only thing worse than no
+ * doc is incorrect doc.
*/
uint32_t nexthop_hash(const struct nexthop *nexthop);
-/*
- * Hash a nexthop only on word-sized attributes:
- * - vrf_id
- * - ifindex
- * - type
- * - (some) flags
- */
-uint32_t nexthop_hash_quick(const struct nexthop *nexthop);
extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
extern bool nexthop_same_no_labels(const struct nexthop *nh1,
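
The lib/nexthop.{c,h} change above replaces the word-by-word jhash calls with a single jhash() over a contiguous byte range of struct nexthop, bracketed by the zero-length _hash_begin/_hash_end markers and guarded by a static_assert against padding holes. The following standalone sketch of that layout pattern is illustrative only (not FRR code): the struct is a simplified stand-in for struct nexthop, and a toy FNV-1a stands in for jhash()::

   /* Sketch of the "contiguous hashed region" pattern: zero-length marker
    * members bracket the fields hashed as one byte range; the static_assert
    * catches padding holes that would feed indeterminate bytes to the hash.
    */
   #include <assert.h>
   #include <stddef.h>
   #include <stdint.h>
   #include <stdio.h>

   struct demo_nexthop {
           char _hash_begin[0];    /* start of hashed byte range */
           uint32_t type;
           uint32_t vrf_id;
           int32_t ifindex;
           uint32_t label_type;
           uint8_t gate[16];       /* unused bytes must stay zeroed (IPv4 case) */
           char _hash_end[0];      /* end of hashed byte range */
           uint8_t weight;         /* not part of the hashed range */
   };

   #define S(f) sizeof(((struct demo_nexthop *)NULL)->f)
   static_assert(offsetof(struct demo_nexthop, _hash_end) -
                         offsetof(struct demo_nexthop, _hash_begin) ==
                 S(type) + S(vrf_id) + S(ifindex) + S(label_type) + S(gate),
                 "padding hole inside hashed region");
   #undef S

   static uint32_t hash_bytes(const void *p, size_t len, uint32_t seed)
   {
           const uint8_t *b = p;
           uint32_t h = seed ^ 2166136261u;        /* seed folded into FNV-1a basis */

           for (size_t i = 0; i < len; i++)
                   h = (h ^ b[i]) * 16777619u;
           return h;
   }

   int main(void)
   {
           struct demo_nexthop nh = { .type = 2, .vrf_id = 0, .ifindex = 3 };
           size_t len = offsetof(struct demo_nexthop, _hash_end) -
                        offsetof(struct demo_nexthop, _hash_begin);

           printf("hash key: 0x%08x\n", hash_bytes(nh._hash_begin, len, 0x45afe398));
           return 0;
   }

Because the whole range is hashed as raw bytes, every writer must keep unused union bytes zeroed, which is exactly what the lib/zclient.c and fpm/fpm_pb.h hunks in this diff do.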
diff --git a/lib/route_types.pl b/lib/route_types.pl
index c75a866964..834cb822d2 100755
--- a/lib/route_types.pl
+++ b/lib/route_types.pl
@@ -127,9 +127,12 @@ printf "#define SHOW_ROUTE_V6_HEADER \\\n%s\n", codelist(@protosv6);
print "\n";
sub collect {
- my ($daemon, $ipv4, $ipv6, $any) = @_;
+ my ($daemon, $ipv4, $ipv6, $any, $ip_prot) = @_;
my (@names, @help) = ((), ());
for my $p (@protos) {
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "kernel");
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "connected");
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "local");
next if ($protodetail{$p}->{"daemon"} eq $daemon && $daemon ne "zebra");
next if ($protodetail{$p}->{"restrict2"} ne "" &&
$protodetail{$p}->{"restrict2"} ne $daemon);
@@ -151,24 +154,24 @@ for my $daemon (sort keys %daemons) {
next unless ($daemons{$daemon}->{"ipv4"} || $daemons{$daemon}->{"ipv6"});
printf "/* %s */\n", $daemon;
if ($daemons{$daemon}->{"ipv4"} && $daemons{$daemon}->{"ipv6"}) {
- my ($names, $help) = collect($daemon, 1, 1, 0);
+ my ($names, $help) = collect($daemon, 1, 1, 0, 0);
printf "#define FRR_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 1, 0, 0);
+ ($names, $help) = collect($daemon, 1, 0, 0, 0);
printf "#define FRR_IP_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 0, 1, 0);
+ ($names, $help) = collect($daemon, 0, 1, 0, 0);
printf "#define FRR_IP6_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP6_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
if ($daemon eq "zebra") {
- ($names, $help) = collect($daemon, 1, 0, 1);
+ ($names, $help) = collect($daemon, 1, 0, 1, 1);
printf "#define FRR_IP_PROTOCOL_MAP_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP_PROTOCOL_MAP_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 0, 1, 1);
+ ($names, $help) = collect($daemon, 0, 1, 1, 1);
printf "#define FRR_IP6_PROTOCOL_MAP_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP6_PROTOCOL_MAP_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
}
diff --git a/lib/zclient.c b/lib/zclient.c
index d8c75c9029..5deea8f0cf 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -2300,7 +2300,27 @@ struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh)
n->type = znh->type;
n->vrf_id = znh->vrf_id;
n->ifindex = znh->ifindex;
- n->gate = znh->gate;
+
+ /* only copy values that have meaning - make sure "spare bytes" are
+ * left zeroed for hashing (look at _nexthop_hash_bytes)
+ */
+ switch (znh->type) {
+ case NEXTHOP_TYPE_BLACKHOLE:
+ n->bh_type = znh->bh_type;
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ n->gate.ipv4 = znh->gate.ipv4;
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ n->gate.ipv6 = znh->gate.ipv6;
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ /* nothing, ifindex is always copied */
+ break;
+ }
+
n->srte_color = znh->srte_color;
n->weight = znh->weight;
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index ff252f8505..d5cee5f3e4 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -493,7 +493,7 @@ void pbr_nht_change_group(const char *name)
}
for (ALL_NEXTHOPS(nhgc->nhg, nhop)) {
- struct pbr_nexthop_cache lookup;
+ struct pbr_nexthop_cache lookup = {};
struct pbr_nexthop_cache *pnhc;
lookup.nexthop = *nhop;
@@ -565,7 +565,7 @@ void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms,
struct pbr_nexthop_group_cache *pnhgc;
struct pbr_nexthop_group_cache find;
struct pbr_nexthop_cache *pnhc;
- struct pbr_nexthop_cache lookup;
+ struct pbr_nexthop_cache lookup = {};
struct nexthop *nh;
char buf[PBR_NHC_NAMELEN];
@@ -624,7 +624,7 @@ static void pbr_nht_release_individual_nexthop(struct pbr_map_sequence *pbrms)
struct pbr_nexthop_group_cache *pnhgc;
struct pbr_nexthop_group_cache find;
struct pbr_nexthop_cache *pnhc;
- struct pbr_nexthop_cache lup;
+ struct pbr_nexthop_cache lup = {};
struct nexthop *nh;
enum nexthop_types_t nh_type = 0;
@@ -690,7 +690,7 @@ struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name)
DEBUGD(&pbr_dbg_nht, "%s: Retrieved NHGC @ %p", __func__, pnhgc);
for (ALL_NEXTHOPS(nhgc->nhg, nhop)) {
- struct pbr_nexthop_cache lookupc;
+ struct pbr_nexthop_cache lookupc = {};
struct pbr_nexthop_cache *pnhc;
lookupc.nexthop = *nhop;
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 08fe56c7bb..aa98913571 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -1488,7 +1488,7 @@ pbrms_nexthop_group_write_individual_nexthop(
{
struct pbr_nexthop_group_cache find;
struct pbr_nexthop_group_cache *pnhgc;
- struct pbr_nexthop_cache lookup;
+ struct pbr_nexthop_cache lookup = {};
struct pbr_nexthop_cache *pnhc;
memset(&find, 0, sizeof(find));
diff --git a/staticd/static_vty.c b/staticd/static_vty.c
index f93e81e8dc..895846a1c7 100644
--- a/staticd/static_vty.c
+++ b/staticd/static_vty.c
@@ -564,6 +564,7 @@ DEFPY_YANG(ip_route_address_interface,
|onlink$onlink \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -589,7 +590,9 @@ DEFPY_YANG(ip_route_address_interface,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -611,6 +614,7 @@ DEFPY_YANG(ip_route_address_interface,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
@@ -631,6 +635,7 @@ DEFPY_YANG(ip_route_address_interface_vrf,
|onlink$onlink \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -655,7 +660,9 @@ DEFPY_YANG(ip_route_address_interface_vrf,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -677,6 +684,7 @@ DEFPY_YANG(ip_route_address_interface_vrf,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
@@ -696,6 +704,7 @@ DEFPY_YANG(ip_route,
|nexthop-vrf NAME \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -720,7 +729,9 @@ DEFPY_YANG(ip_route,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -741,6 +752,7 @@ DEFPY_YANG(ip_route,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
@@ -759,6 +771,7 @@ DEFPY_YANG(ip_route_vrf,
|nexthop-vrf NAME \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -782,7 +795,9 @@ DEFPY_YANG(ip_route_vrf,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -803,6 +818,7 @@ DEFPY_YANG(ip_route_vrf,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
diff --git a/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf b/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf
index 4347052c5e..2390733cc8 100644
--- a/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf
+++ b/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf
@@ -24,19 +24,16 @@ router bgp 65001
!
address-family l2vpn evpn
neighbor 192.168.1.2 activate
- neighbor 192.168.1.2 route-map r2 out
+ neighbor 192.168.1.2 route-map rt5 out
advertise-all-vni
advertise ipv4 unicast
exit-address-family
!
-route-map r2 deny 10
- match evpn route-type macip
-!
-route-map r2 deny 20
+route-map rt5 deny 20
match ip address prefix-list pl
match evpn route-type prefix
!
-route-map r2 permit 30
+route-map rt5 permit 30
!
ip prefix-list pl seq 5 permit 192.168.1.0/24
ip prefix-list pl seq 10 permit 10.10.10.1/32
diff --git a/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf b/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf
index 9ed298d8fe..1c91a3e254 100644
--- a/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf
+++ b/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf
@@ -7,6 +7,7 @@ int lo
int r2-eth0
ip address 192.168.1.2/24
!
+vni 10
router bgp 65002
no bgp ebgp-requires-policy
neighbor 192.168.1.1 remote-as external
diff --git a/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py b/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py
index 36c79d6b2b..925ae1fce8 100644
--- a/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py
+++ b/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py
@@ -23,6 +23,7 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
def setup_module(mod):
@@ -63,7 +64,7 @@ def teardown_module(mod):
tgen.stop_topology()
-def test_bgp_evpn_route_map_match_route_type():
+def test_bgp_evpn_route_map_match_route_type5():
tgen = get_topogen()
if tgen.routers_have_failure():
@@ -84,16 +85,12 @@ def test_bgp_evpn_route_map_match_route_type():
"valid": True,
}
},
- "10.10.10.2:2": {
- "[3]:[0]:[32]:[10.10.10.2]": {
- "valid": True,
- }
- },
},
- "totalPrefixCounter": 2,
+ "totalPrefixCounter": 1,
}
return topotest.json_cmp(output, expected)
+ logger.info("Check route type-5 filtering")
test_func = functools.partial(
_bgp_converge,
)
@@ -101,6 +98,97 @@ def test_bgp_evpn_route_map_match_route_type():
assert result is None, "Filtered EVPN routes should not be advertised"
+def test_bgp_evpn_route_map_match_route_type2():
+ tgen = get_topogen()
+
+ # Change to L2VNI
+ for machine in [tgen.gears["r1"], tgen.gears["r2"]]:
+ machine.vtysh_cmd("configure terminal\nno vni 10")
+
+ def _check_l2vni():
+ for machine in [tgen.gears["r1"], tgen.gears["r2"]]:
+ output = json.loads(machine.vtysh_cmd("show evpn vni json"))
+
+ expected = {"10": {"vni": 10, "type": "L2"}}
+ return topotest.json_cmp(output, expected)
+
+ logger.info("Check L2VNI setup")
+ test_func = functools.partial(_check_l2vni)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "L2VNI setup failed."
+
+ c2_mac = (
+ tgen.gears["c2"]
+ .cmd("ip link show c2-eth0 | awk '/link\/ether/ {print $2}'")
+ .rstrip()
+ )
+ tgen.gears["r1"].vtysh_cmd(
+ "\n".join(
+ [
+ "configure",
+ "route-map rt2 deny 30",
+ "match mac address %s" % c2_mac,
+ "exit",
+ "router bgp 65001",
+ "address-family l2vpn evpn",
+ "neighbor 192.168.1.2 route-map rt2 in",
+ ]
+ )
+ )
+
+ def _check_filter_mac():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp l2vpn evpn neighbors 192.168.1.2 advertised-routes json"
+ )
+ )
+
+ if (
+ output["advertisedRoutes"]
+ .get("10.10.10.2:2", {})
+ .get("[2]:[0]:[48]:[%s]" % c2_mac)
+ ):
+ return False
+
+ return True
+
+ logger.info("check mac filter in, on c2 interface: %s" % c2_mac)
+ test_func = functools.partial(_check_filter_mac)
+ _, result = topotest.run_and_expect(test_func, True, count=60, wait=1)
+ assert result is True, "%s is not filtered" % c2_mac
+
+ tgen.gears["r1"].vtysh_cmd(
+ "\n".join(
+ [
+ "configure",
+ "route-map rt2 deny 30",
+ "no match mac address %s" % c2_mac,
+ "match evpn route-type macip" "exit",
+ "router bgp 65001",
+ "address-family l2vpn evpn",
+ "neighbor 192.168.1.2 route-map rt2 out",
+ ]
+ )
+ )
+
+ def _check_filter_type2():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp l2vpn evpn neighbors 192.168.1.2 advertised-routes json"
+ )
+ )
+
+ if output["totalPrefixCounter"] == 0:
+ return True
+
+ return False
+
+ logger.info("check route type-2 filter out")
+ test_func = functools.partial(_check_filter_type2)
+ _, result = topotest.run_and_expect(test_func, True, count=60, wait=1)
+ assert result is True, "EVPN routes type-2 are not filtered."
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf b/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf
index 724cbf84ab..a26efb4c4f 100644
--- a/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf
+++ b/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf
@@ -4,3 +4,6 @@ router bgp 65001
neighbor fe80:1::2 remote-as external
neighbor fe80:1::2 timers 3 10
neighbor fe80:1::2 interface r1-eth0
+ neighbor fe80:1::3 remote-as external
+ neighbor fe80:1::3 timers 3 10
+ neighbor fe80:1::3 interface r1-eth1
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf b/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf
index 4e93d4f4e5..f1bbff2e44 100644
--- a/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf
+++ b/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf
@@ -2,3 +2,6 @@
interface r1-eth0
ipv6 address fe80:1::1/64
!
+interface r1-eth1
+ ipv6 address fe80:1::1/64
+!
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf b/tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf
new file mode 100644
index 0000000000..f1684880b3
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf
@@ -0,0 +1,5 @@
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor fe80:1::1 remote-as external
+ neighbor fe80:1::1 timers 3 10
+ neighbor fe80:1::1 interface r3-eth0
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf b/tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf
new file mode 100644
index 0000000000..71053cd2c3
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf
@@ -0,0 +1,4 @@
+!
+interface r3-eth0
+ ipv6 address fe80:1::3/64
+!
diff --git a/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py b/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py
index aaa68ea340..fbd4097605 100644
--- a/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py
+++ b/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py
@@ -27,13 +27,17 @@ pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
- for routern in range(1, 3):
+ for routern in range(1, 4):
tgen.add_router("r{}".format(routern))
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
@@ -64,6 +68,7 @@ def test_bgp_ipv6_link_local_peering():
pytest.skip(tgen.errors)
r1 = tgen.gears["r1"]
+ r3 = tgen.gears["r3"]
def _bgp_converge():
output = json.loads(r1.vtysh_cmd("show bgp summary json"))
@@ -82,6 +87,28 @@ def test_bgp_ipv6_link_local_peering():
_, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Failed to see BGP convergence on R2"
+ def _bgp_router_id_missing():
+ output = json.loads(r3.vtysh_cmd("show bgp summary failed json"))
+ expected = {
+ "ipv4Unicast": {
+ "routerId": "0.0.0.0",
+ "as": 65003,
+ "peers": {
+ "fe80:1::1": {
+ "connectionsEstablished": 0,
+ "connectionsDropped": 0,
+ "peerUptime": "never",
+ "lastResetDueTo": "Router ID is missing",
+ }
+ },
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_router_id_missing)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "r3 should stay down due to missing router ID"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/bgp_oad/r3/frr.conf b/tests/topotests/bgp_oad/r3/frr.conf
index 02dd5adfe1..164267d74d 100644
--- a/tests/topotests/bgp_oad/r3/frr.conf
+++ b/tests/topotests/bgp_oad/r3/frr.conf
@@ -7,12 +7,14 @@ int r3-eth0
!
router bgp 65003
no bgp ebgp-requires-policy
+ no bgp network import-check
neighbor 192.168.2.2 remote-as external
neighbor 192.168.2.2 timers 1 3
neighbor 192.168.2.2 timers connect 1
neighbor 192.168.2.2 oad
!
address-family ipv4 unicast
+ network 10.10.10.20/32 route-map static
redistribute connected route-map connected
exit-address-family
!
@@ -20,3 +22,7 @@ route-map connected permit 10
set local-preference 123
set metric 123
!
+route-map static permit 10
+ set extcommunity bandwidth 100 non-transitive
+exit
+!
diff --git a/tests/topotests/bgp_oad/test_bgp_oad.py b/tests/topotests/bgp_oad/test_bgp_oad.py
index b2ea7e0f19..b397bc6372 100644
--- a/tests/topotests/bgp_oad/test_bgp_oad.py
+++ b/tests/topotests/bgp_oad/test_bgp_oad.py
@@ -56,6 +56,7 @@ def test_bgp_oad():
r2 = tgen.gears["r2"]
r3 = tgen.gears["r3"]
r4 = tgen.gears["r4"]
+ r5 = tgen.gears["r5"]
def _bgp_converge():
output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast 10.10.10.10/32 json"))
@@ -121,6 +122,38 @@ def test_bgp_oad():
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "10.10.10.1/32 should not be advertised to r4 (not OAD peer)"
+ def _bgp_check_non_transitive_extended_community(
+ router, arg={"string": "LB:65003:12500000 (100.000 Mbps)"}
+ ):
+ output = json.loads(
+ router.vtysh_cmd("show bgp ipv4 unicast 10.10.10.20/32 json")
+ )
+ expected = {
+ "paths": [
+ {
+ "extendedCommunity": arg,
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_check_non_transitive_extended_community,
+ r4,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert (
+ result is None
+ ), "10.10.10.20/32 should be received at r4 with non-transitive extended community"
+
+ test_func = functools.partial(
+ _bgp_check_non_transitive_extended_community, r5, None
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert (
+ result is None
+ ), "10.10.10.20/32 should NOT be received at r5 with non-transitive extended community"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/fpm_testing_topo1/r1/routes_summ.json b/tests/topotests/fpm_testing_topo1/r1/routes_summ.json
index e9157bc664..12fe32cab3 100644
--- a/tests/topotests/fpm_testing_topo1/r1/routes_summ.json
+++ b/tests/topotests/fpm_testing_topo1/r1/routes_summ.json
@@ -3,21 +3,21 @@
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"connected"
},
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"local"
},
{
"fib":10000,
"rib":10000,
- "fibOffLoaded":0,
+ "fibOffLoaded":10000,
"fibTrapped":0,
"type":"sharp"
}
diff --git a/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json b/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json
index 8585b2bb6b..15d3f71077 100644
--- a/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json
+++ b/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json
@@ -3,14 +3,14 @@
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"connected"
},
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"local"
}
diff --git a/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py b/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py
index 66cefcc2a0..b3c375549a 100644
--- a/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py
+++ b/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py
@@ -57,7 +57,7 @@ def setup_module(module):
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format(rname)),
- "-M dplane_fpm_nl",
+ "-M dplane_fpm_nl --asic-offload=notify_on_offload",
)
router.load_config(
TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
@@ -65,6 +65,7 @@ def setup_module(module):
router.load_config(
TopoRouter.RD_FPM_LISTENER,
os.path.join(CWD, "{}/fpm_stub.conf".format(rname)),
+ "-r",
)
tgen.start_router()
@@ -111,7 +112,7 @@ def test_fpm_install_routes():
topotest.router_json_cmp, router, "show ip route summ json", expected
)
- success, result = topotest.run_and_expect(test_func, None, 60, 1)
+ success, result = topotest.run_and_expect(test_func, None, 120, 1)
assert success, "Unable to successfully install 10000 routes: {}".format(result)
# Let's remove 10000 routes
@@ -124,7 +125,7 @@ def test_fpm_install_routes():
topotest.router_json_cmp, router, "show ip route summ json", expected
)
- success, result = topotest.run_and_expect(test_func, None, 60, 1)
+ success, result = topotest.run_and_expect(test_func, None, 120, 1)
assert success, "Unable to remove 10000 routes: {}".format(result)
diff --git a/tests/topotests/srv6_static_route_ipv4/__init__.py b/tests/topotests/srv6_static_route_ipv4/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/__init__.py
diff --git a/tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json b/tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json
new file mode 100644
index 0000000000..57f4c4488d
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json
@@ -0,0 +1,28 @@
+{
+ "192.0.2.0/24": [
+ {
+ "prefix": "192.0.2.0/24",
+ "prefixLen": 24,
+ "protocol": "static",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "nexthops": [
+ {
+ "directlyConnected": true,
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "unspec"
+ },
+ "seg6": [
+ "fcbb:bbbb:1:2:3:4:5:6",
+ "fcbb:bbbb:7:8:fe00::"
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_static_route_ipv4/r1/frr.conf b/tests/topotests/srv6_static_route_ipv4/r1/frr.conf
new file mode 100644
index 0000000000..8ff23ec1b8
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/r1/frr.conf
@@ -0,0 +1,7 @@
+hostname r1
+!
+log stdout notifications
+log commands
+!
+ipv6 route fcbb:bbbb:1::/48 sr0
+ip route 192.0.2.0/24 sr0 segments fcbb:bbbb:1:2:3:4:5:6/fcbb:bbbb:7:8:fe00::
diff --git a/tests/topotests/srv6_static_route_ipv4/r1/setup.sh b/tests/topotests/srv6_static_route_ipv4/r1/setup.sh
new file mode 100644
index 0000000000..4b6cce89f8
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/r1/setup.sh
@@ -0,0 +1,2 @@
+ip link add sr0 type dummy
+ip link set sr0 up
diff --git a/tests/topotests/srv6_static_route_ipv4/test_srv6_route.py b/tests/topotests/srv6_static_route_ipv4/test_srv6_route.py
new file mode 100755
index 0000000000..b49a9cec89
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/test_srv6_route.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_srv6_static_route_ipv4.py
+#
+# Copyright 2025
+# Carmine Scarpitta <cscarpit.@cisco.com>
+#
+
+"""
+test_srv6_static_route_ipv4.py:
+Test for SRv6 static route on zebra
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.staticd]
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def setup_module(mod):
+ tgen = Topogen({None: "r1"}, mod.__name__)
+ tgen.start_topology()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_frr_config("frr.conf")
+ tgen.start_router()
+
+
+def teardown_module():
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_srv6_static_route():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ def _check_srv6_static_route(router, expected_route_file):
+ logger.info("checking zebra srv6 static route with multiple segs status")
+ output = json.loads(router.vtysh_cmd("show ip route static json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_route_file))
+ return topotest.json_cmp(output, expected)
+
+ def check_srv6_static_route(router, expected_file):
+ func = functools.partial(_check_srv6_static_route, router, expected_file)
+ _, result = topotest.run_and_expect(func, None, count=15, wait=1)
+ assert result is None, "Failed"
+
+ # FOR DEVELOPER:
+ # If you want to stop some specific line and start interactive shell,
+ # please use tgen.mininet_cli() to start it.
+
+ logger.info("Test for srv6 route configuration")
+ check_srv6_static_route(router, "expected_srv6_route.json")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang
index a3e073f626..228faa4f10 100644
--- a/yang/frr-isisd.yang
+++ b/yang/frr-isisd.yang
@@ -403,7 +403,7 @@ module frr-isisd {
"Limit backup computation up to the prefix priority.";
}
list tiebreaker {
- key "index";
+ key "index type";
unique "type";
description
"Configure tiebreaker for multiple backups.";
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index b8dbabb60d..9f26852d1f 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -587,7 +587,6 @@ static void fpm_read(struct event *t)
struct zebra_dplane_ctx *ctx;
size_t available_bytes;
size_t hdr_available_bytes;
- int ival;
/* Let's ignore the input at the moment. */
rv = stream_read_try(fnc->ibuf, fnc->socket,
@@ -724,12 +723,18 @@ static void fpm_read(struct event *t)
NULL);
if (netlink_route_notify_read_ctx(hdr, 0, ctx) >= 0) {
- /* In the FPM encoding, the vrfid is present */
- ival = dplane_ctx_get_table(ctx);
- dplane_ctx_set_vrf(ctx, ival);
- dplane_ctx_set_table(ctx,
- ZEBRA_ROUTE_TABLE_UNKNOWN);
-
+ /*
+ * Receiving back a netlink message from
+ * the fpm. Currently the netlink messages
+ * do not have a way to specify the vrf
+ * so it must be unknown. I'm looking
+ * at you sonic. If you are reading this
+ * and wondering why it's not working
+ * you must extend your patch to translate
+ * the tableid to the vrfid and set the
+ * tableid to 0 in order for this to work.
+ */
+ dplane_ctx_set_vrf(ctx, VRF_UNKNOWN);
dplane_provider_enqueue_to_zebra(ctx);
} else {
/*
@@ -946,8 +951,6 @@ static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
nl_buf_len = 0;
- frr_mutex_lock_autounlock(&fnc->obuf_mutex);
-
/*
* If route replace is enabled then directly encode the install which
* is going to use `NLM_F_REPLACE` (instead of delete/add operations).
@@ -1100,6 +1103,8 @@ static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
/* We must know if someday a message goes beyond 65KiB. */
assert((nl_buf_len + FPM_HEADER_SIZE) <= UINT16_MAX);
+ frr_mutex_lock_autounlock(&fnc->obuf_mutex);
+
/* Check if we have enough buffer space. */
if (STREAM_WRITEABLE(fnc->obuf) < (nl_buf_len + FPM_HEADER_SIZE)) {
atomic_fetch_add_explicit(&fnc->counters.buffer_full, 1,
diff --git a/zebra/fpm_listener.c b/zebra/fpm_listener.c
index 7d84c706d4..ed0842a3b1 100644
--- a/zebra/fpm_listener.c
+++ b/zebra/fpm_listener.c
@@ -756,8 +756,10 @@ static void fpm_serve(void)
while (1) {
hdr = read_fpm_msg(buf, sizeof(buf));
- if (!hdr)
+ if (!hdr) {
+ close(glob->sock);
return;
+ }
process_fpm_msg(hdr);
}
@@ -769,6 +771,8 @@ int main(int argc, char **argv)
int r;
bool fork_daemon = false;
+ setbuf(stdout, NULL);
+
memset(glob, 0, sizeof(*glob));
while ((r = getopt(argc, argv, "rdv")) != -1) {
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index d696b19859..9a60e32b65 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -1295,6 +1295,12 @@ static int netlink_route_change_read_unicast_internal(struct nlmsghdr *h,
flags, &p,
(struct prefix_ipv6 *)&src_p, &nh, 0,
table, metric, distance, true);
+
+ if (nh.nh_label)
+ nexthop_del_labels(&nh);
+
+ if (nh.nh_srv6)
+ nexthop_del_srv6_seg6(&nh);
} else {
/* XXX: need to compare the entire list of
* nexthops here for NLM_F_APPEND stupidity */
diff --git a/zebra/zebra_cli.c b/zebra/zebra_cli.c
index ca53eb2eb3..bb79928326 100644
--- a/zebra/zebra_cli.c
+++ b/zebra/zebra_cli.c
@@ -1983,6 +1983,10 @@ static void lib_vrf_zebra_ipv6_router_id_cli_write(struct vty *vty,
vty_out(vty, "ipv6 router-id %s\n", id);
}
+/*
+ * Both the v4 and v6 versions of this command now prevent System
+ * route types from being considered here at all
+ */
DEFPY_YANG (ip_protocol,
ip_protocol_cmd,
"[no] ip protocol " FRR_IP_PROTOCOL_MAP_STR_ZEBRA
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index b57c930154..9acdb4b2f8 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -7528,6 +7528,16 @@ static void dplane_thread_loop(struct event *event)
if (!zdplane_info.dg_run)
break;
+ /*
+ * The yield should only happen after a bit of work has been
+ * done but before we pull any new work off any provider
+ * queue to continue looping. This is a safe spot to
+ * do so.
+ */
+ if (event_should_yield(event)) {
+ reschedule = true;
+ break;
+ }
/* Locate next provider */
next_prov = dplane_prov_list_next(&zdplane_info.dg_providers,
prov);
@@ -7592,11 +7602,6 @@ static void dplane_thread_loop(struct event *event)
zlog_debug("dplane dequeues %d completed work from provider %s",
counter, dplane_provider_get_name(prov));
- if (event_should_yield(event)) {
- reschedule = true;
- break;
- }
-
/* Locate next provider */
prov = next_prov;
}
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 2881192eb7..a1c8cd3059 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -1891,20 +1891,18 @@ struct route_node *rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx)
struct route_table *table = NULL;
struct route_node *rn = NULL;
const struct prefix *dest_pfx, *src_pfx;
+ uint32_t tableid = dplane_ctx_get_table(ctx);
+ vrf_id_t vrf_id = dplane_ctx_get_vrf(ctx);
/* Locate rn and re(s) from ctx */
+ table = zebra_vrf_lookup_table_with_table_id(dplane_ctx_get_afi(ctx),
+ dplane_ctx_get_safi(ctx), vrf_id, tableid);
- table = zebra_vrf_lookup_table_with_table_id(
- dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx),
- dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx));
if (table == NULL) {
if (IS_ZEBRA_DEBUG_DPLANE) {
- zlog_debug(
- "Failed to find route for ctx: no table for afi %d, safi %d, vrf %s(%u)",
- dplane_ctx_get_afi(ctx),
- dplane_ctx_get_safi(ctx),
- vrf_id_to_name(dplane_ctx_get_vrf(ctx)),
- dplane_ctx_get_vrf(ctx));
+ zlog_debug("Failed to find route for ctx: no table for afi %d, safi %d, vrf %s(%u) table %u",
+ dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx),
+ vrf_id_to_name(vrf_id), vrf_id, tableid);
}
goto done;
}
@@ -2214,26 +2212,13 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
{
struct route_node *rn = NULL;
struct route_entry *re = NULL;
- struct vrf *vrf;
+ struct vrf *vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx));
struct nexthop *nexthop;
rib_dest_t *dest;
bool fib_changed = false;
bool debug_p = IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_RIB;
int start_count, end_count;
- vrf_id_t vrf_id;
- int tableid;
-
- /* Locate vrf and route table - we must have one or the other */
- tableid = dplane_ctx_get_table(ctx);
- vrf_id = dplane_ctx_get_vrf(ctx);
- if (vrf_id == VRF_UNKNOWN)
- vrf_id = zebra_vrf_lookup_by_table(tableid,
- dplane_ctx_get_ns_id(ctx));
- else if (tableid == ZEBRA_ROUTE_TABLE_UNKNOWN)
- tableid = zebra_vrf_lookup_tableid(vrf_id,
- dplane_ctx_get_ns_id(ctx));
-
- vrf = vrf_lookup_by_id(vrf_id);
+ uint32_t tableid = dplane_ctx_get_table(ctx);
/* Locate rn and re(s) from ctx */
rn = rib_find_rn_from_ctx(ctx);
@@ -4863,6 +4848,33 @@ void rib_close_table(struct route_table *table)
}
/*
+ * The context sent up from the dplane may be a context
+ * that has been generated by the zebra master pthread,
+ * or it may be a context generated from an event in
+ * either the kernel dplane code or the fpm dplane
+ * code, in which case the tableid and vrfid may
+ * not be fully known and we have to figure them out
+ * when the context hits the master pthread.
+ * Since this is the *starter* spot for that, let
+ * us do a bit of work on each one to see if any
+ * massaging is needed.
+ */
+static inline void zebra_rib_translate_ctx_from_dplane(struct zebra_dplane_ctx *ctx)
+{
+ uint32_t tableid = dplane_ctx_get_table(ctx);
+ vrf_id_t vrfid = dplane_ctx_get_vrf(ctx);
+ uint32_t nsid = dplane_ctx_get_ns_id(ctx);
+ enum dplane_op_e op = dplane_ctx_get_op(ctx);
+
+ if (vrfid == VRF_UNKNOWN)
+ dplane_ctx_set_vrf(ctx, zebra_vrf_lookup_by_table(tableid, nsid));
+ else if ((op == DPLANE_OP_ROUTE_INSTALL || op == DPLANE_OP_ROUTE_UPDATE ||
+ op == DPLANE_OP_ROUTE_DELETE) &&
+ tableid == ZEBRA_ROUTE_TABLE_UNKNOWN)
+ dplane_ctx_set_table(ctx, zebra_vrf_lookup_tableid(vrfid, nsid));
+}
+
+/*
* Handle results from the dataplane system. Dequeue update context
* structs, dispatch to appropriate internal handlers.
*/
@@ -4921,6 +4933,8 @@ static void rib_process_dplane_results(struct event *thread)
}
while (ctx) {
+ zebra_rib_translate_ctx_from_dplane(ctx);
+
#ifdef HAVE_SCRIPTING
if (ret == 0)
frrscript_call(fs,
diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c
index 73ffa09c16..2813f037a2 100644
--- a/zebra/zebra_routemap.c
+++ b/zebra/zebra_routemap.c
@@ -959,10 +959,11 @@ route_set_src(void *rule, const struct prefix *prefix, void *object)
/* set src compilation. */
static void *route_set_src_compile(const char *arg)
{
- union g_addr src, *psrc;
+ union g_addr src = {}, *psrc;
- if ((inet_pton(AF_INET6, arg, &src.ipv6) == 1)
- || (inet_pton(AF_INET, arg, &src.ipv4) == 1)) {
+ /* IPv4 first, to ensure no garbage in the 12 unused bytes */
+ if ((inet_pton(AF_INET, arg, &src.ipv4) == 1) ||
+ (inet_pton(AF_INET6, arg, &src.ipv6) == 1)) {
psrc = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(union g_addr));
*psrc = src;
return psrc;
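
The route_set_src_compile() hunk above zero-initializes the g_addr union and parses AF_INET before AF_INET6 because inet_pton() for an IPv4 address fills only 4 of the union's 16 bytes; with the byte-wise nexthop hashing introduced in lib/nexthop.h, leftover stack bytes would otherwise perturb the hash. A minimal standalone demonstration (not FRR code; the union and names here are illustrative)::

   /* Shows why the union must be zeroed before inet_pton(AF_INET, ...):
    * only the first 4 bytes are written, the other 12 keep prior contents.
    */
   #include <arpa/inet.h>
   #include <stdio.h>
   #include <string.h>

   union demo_g_addr {
           struct in_addr ipv4;
           struct in6_addr ipv6;
   };

   static void dump_tail(const char *what, const union demo_g_addr *a)
   {
           const unsigned char *b = (const unsigned char *)a;

           printf("%s: bytes 4..7 = %02x %02x %02x %02x\n",
                  what, b[4], b[5], b[6], b[7]);
   }

   int main(void)
   {
           union demo_g_addr dirty;
           union demo_g_addr clean = {};   /* zero-initialized, as in the fix */

           memset(&dirty, 0xab, sizeof(dirty));    /* simulate stack garbage */

           inet_pton(AF_INET, "192.0.2.1", &dirty.ipv4);
           inet_pton(AF_INET, "192.0.2.1", &clean.ipv4);

           dump_tail("garbage-filled union", &dirty);     /* ab ab ab ab */
           dump_tail("zero-initialized    ", &clean);     /* 00 00 00 00 */
           return 0;
   }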