summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--babeld/babeld.c14
-rw-r--r--bgpd/bgp_attr.c4
-rw-r--r--bgpd/bgp_debug.c1
-rw-r--r--bgpd/bgp_ecommunity.c25
-rw-r--r--bgpd/bgp_ecommunity.h6
-rw-r--r--bgpd/bgp_evpn_mh.c11
-rw-r--r--bgpd/bgp_fsm.c6
-rw-r--r--bgpd/bgp_network.c17
-rw-r--r--bgpd/bgp_nht.c80
-rw-r--r--bgpd/bgp_open.c16
-rw-r--r--bgpd/bgp_open.h2
-rw-r--r--bgpd/bgp_packet.c13
-rw-r--r--bgpd/bgp_pbr.c29
-rw-r--r--bgpd/bgp_route.c32
-rw-r--r--bgpd/bgp_vty.c97
-rw-r--r--bgpd/bgpd.c14
-rw-r--r--bgpd/bgpd.h6
-rw-r--r--bgpd/rfapi/rfapi_import.c12
-rw-r--r--configure.ac2
-rw-r--r--doc/developer/sbfd.rst5
-rw-r--r--doc/user/bgp.rst15
-rw-r--r--doc/user/sbfd.rst14
-rw-r--r--doc/user/static.rst14
-rw-r--r--doc/user/zebra.rst2
-rw-r--r--fpm/fpm_pb.h1
-rw-r--r--isisd/isis_circuit.c2
-rw-r--r--isisd/isis_cli.c28
-rw-r--r--isisd/isis_events.c3
-rw-r--r--lib/nexthop.c87
-rw-r--r--lib/nexthop.h104
-rwxr-xr-xlib/route_types.pl15
-rw-r--r--lib/zclient.c22
-rw-r--r--lib/zlog_5424.c12
-rw-r--r--ospfd/ospf_api.c18
-rw-r--r--pbrd/pbr_nht.c8
-rw-r--r--pbrd/pbr_vty.c2
-rw-r--r--pimd/pim_ifchannel.c12
-rw-r--r--pimd/pim_memory.c1
-rw-r--r--pimd/pim_memory.h1
-rw-r--r--pimd/pim_msdp.c2
-rw-r--r--pimd/pim_nb_config.c21
-rw-r--r--pimd/pim_upstream.c2
-rw-r--r--staticd/static_vty.c24
-rw-r--r--tests/topotests/bgp_evpn_route_map_match/r1/frr.conf9
-rw-r--r--tests/topotests/bgp_evpn_route_map_match/r2/frr.conf1
-rw-r--r--tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py102
-rw-r--r--tests/topotests/bgp_ipv6_link_local_capability/__init__.py0
-rw-r--r--tests/topotests/bgp_ipv6_link_local_capability/r1/frr.conf15
-rw-r--r--tests/topotests/bgp_ipv6_link_local_capability/r2/frr.conf13
-rw-r--r--tests/topotests/bgp_ipv6_link_local_capability/test_bgp_ipv6_link_local_capability.py110
-rw-r--r--tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf3
-rw-r--r--tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf3
-rw-r--r--tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf5
-rw-r--r--tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf4
-rw-r--r--tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py29
-rw-r--r--tests/topotests/bgp_oad/r3/frr.conf6
-rw-r--r--tests/topotests/bgp_oad/test_bgp_oad.py33
-rw-r--r--tests/topotests/fpm_testing_topo1/r1/routes_summ.json6
-rw-r--r--tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json4
-rw-r--r--tests/topotests/fpm_testing_topo1/test_fpm_topo1.py7
-rw-r--r--tests/topotests/sbfd_topo1/test_sbfd_topo1.py2
-rw-r--r--tests/topotests/srv6_static_route_ipv4/__init__.py0
-rw-r--r--tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json28
-rw-r--r--tests/topotests/srv6_static_route_ipv4/r1/frr.conf7
-rw-r--r--tests/topotests/srv6_static_route_ipv4/r1/setup.sh2
-rwxr-xr-xtests/topotests/srv6_static_route_ipv4/test_srv6_route.py82
-rw-r--r--tools/etc/frr/support_bundle_commands.conf4
-rw-r--r--yang/frr-isisd.yang2
-rw-r--r--zebra/dplane_fpm_nl.c23
-rw-r--r--zebra/fpm_listener.c6
-rw-r--r--zebra/rt_netlink.c6
-rw-r--r--zebra/zebra_cli.c4
-rw-r--r--zebra/zebra_dplane.c20
-rw-r--r--zebra/zebra_rib.c62
-rw-r--r--zebra/zebra_routemap.c7
-rw-r--r--zebra/zserv.c4
-rw-r--r--zebra/zserv.h3
77 files changed, 1109 insertions, 305 deletions
diff --git a/babeld/babeld.c b/babeld/babeld.c
index 1d2f60e3ad..4e68f05df4 100644
--- a/babeld/babeld.c
+++ b/babeld/babeld.c
@@ -538,7 +538,7 @@ resize_receive_buffer(int size)
}
static void
-babel_distribute_update (struct distribute_ctx *ctx, struct distribute *dist)
+babel_distribute_update (struct distribute_ctx *ctx __attribute__((__unused__)), struct distribute *dist)
{
struct interface *ifp;
babel_interface_nfo *babel_ifp;
@@ -593,7 +593,7 @@ babel_distribute_update_all (struct prefix_list *notused)
}
static void
-babel_distribute_update_all_wrapper (struct access_list *notused)
+babel_distribute_update_all_wrapper (struct access_list *notused __attribute__((__unused__)))
{
babel_distribute_update_all(NULL);
}
@@ -872,16 +872,18 @@ babeld_quagga_init(void)
/* Stubs to adapt Babel's filtering calls to Quagga's infrastructure. */
int
-input_filter(const unsigned char *id,
+input_filter(const unsigned char *id __attribute__((__unused__)),
const unsigned char *prefix, unsigned short plen,
- const unsigned char *neigh, unsigned int ifindex)
+ const unsigned char *neigh __attribute__((__unused__)),
+ unsigned int ifindex)
{
return babel_filter(0, prefix, plen, ifindex);
}
int
-output_filter(const unsigned char *id, const unsigned char *prefix,
- unsigned short plen, unsigned int ifindex)
+output_filter(const unsigned char *id __attribute__((__unused__)),
+ const unsigned char *prefix, unsigned short plen,
+ unsigned int ifindex)
{
return babel_filter(1, prefix, plen, ifindex);
}
diff --git a/bgpd/bgp_attr.c b/bgpd/bgp_attr.c
index d349922c52..c15dada9c1 100644
--- a/bgpd/bgp_attr.c
+++ b/bgpd/bgp_attr.c
@@ -2450,6 +2450,10 @@ int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
if (!peer->nexthop.ifp) {
zlog_warn("%s sent a v6 global attribute but address is a V6 LL and there's no peer interface information. Hence, withdrawing",
peer->host);
+ if (CHECK_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_ADV) &&
+ CHECK_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_RCV))
+ bgp_notify_send(peer->connection, BGP_NOTIFY_UPDATE_ERR,
+ BGP_NOTIFY_UPDATE_UNREACH_NEXT_HOP);
return BGP_ATTR_PARSE_WITHDRAW;
}
attr->nh_ifindex = peer->nexthop.ifp->ifindex;
diff --git a/bgpd/bgp_debug.c b/bgpd/bgp_debug.c
index 319638e412..dfebc00e0a 100644
--- a/bgpd/bgp_debug.c
+++ b/bgpd/bgp_debug.c
@@ -149,6 +149,7 @@ static const struct message bgp_notify_update_msg[] = {
{BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, "/Optional Attribute Error"},
{BGP_NOTIFY_UPDATE_INVAL_NETWORK, "/Invalid Network Field"},
{BGP_NOTIFY_UPDATE_MAL_AS_PATH, "/Malformed AS_PATH"},
+ {BGP_NOTIFY_UPDATE_UNREACH_NEXT_HOP, "/Unreachable Link-Local Address"},
{0}};
static const struct message bgp_notify_cease_msg[] = {
diff --git a/bgpd/bgp_ecommunity.c b/bgpd/bgp_ecommunity.c
index e794ccb308..2c6ae65f85 100644
--- a/bgpd/bgp_ecommunity.c
+++ b/bgpd/bgp_ecommunity.c
@@ -1337,6 +1337,31 @@ char *ecommunity_ecom2str(struct ecommunity *ecom, int format, int filter)
snprintf(encbuf, sizeof(encbuf),
"DF: (alg: %u, pref: %u)", alg,
pref);
+ } else if (*pnt == ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR) {
+ uint16_t flags, l2mtu;
+
+ ++pnt;
+ memcpy(&flags, pnt, 2);
+ ++pnt;
+ ++pnt;
+
+ memcpy(&l2mtu, pnt, 2);
+
+ snprintf(encbuf, sizeof(encbuf),
+ "L2: P flag:%c, B Flag %c, C word %c, MTU %d",
+ CHECK_FLAG(flags,
+ ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR_PRIMARY_PE_FLAG)
+ ? 'Y'
+ : 'N',
+ CHECK_FLAG(flags,
+ ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR_BACKUP_PE_FLAG)
+ ? 'Y'
+ : 'N',
+ CHECK_FLAG(flags,
+ ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR_CONTROL_WORD_FLAG)
+ ? 'Y'
+ : 'N',
+ l2mtu);
} else
unk_ecom = true;
} else if (type == ECOMMUNITY_ENCODE_REDIRECT_IP_NH) {
diff --git a/bgpd/bgp_ecommunity.h b/bgpd/bgp_ecommunity.h
index af9d481c19..0e68b15807 100644
--- a/bgpd/bgp_ecommunity.h
+++ b/bgpd/bgp_ecommunity.h
@@ -68,12 +68,18 @@
#define ECOMMUNITY_EVPN_SUBTYPE_ESI_LABEL 0x01
#define ECOMMUNITY_EVPN_SUBTYPE_ES_IMPORT_RT 0x02
#define ECOMMUNITY_EVPN_SUBTYPE_ROUTERMAC 0x03
+#define ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR 0x04
#define ECOMMUNITY_EVPN_SUBTYPE_DF_ELECTION 0x06
#define ECOMMUNITY_EVPN_SUBTYPE_DEF_GW 0x0d
#define ECOMMUNITY_EVPN_SUBTYPE_ND 0x08
#define ECOMMUNITY_EVPN_SUBTYPE_MACMOBILITY_FLAG_STICKY 0x01
+/* Layer2 Attributes: RFC8214 */
+#define ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR_PRIMARY_PE_FLAG 0x01
+#define ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR_BACKUP_PE_FLAG 0x02
+#define ECOMMUNITY_EVPN_SUBTYPE_LAYER2_ATTR_CONTROL_WORD_FLAG 0x04
+
/* DF alg bits - only lower 5 bits are applicable */
#define ECOMMUNITY_EVPN_SUBTYPE_DF_ALG_BITS 0x1f
diff --git a/bgpd/bgp_evpn_mh.c b/bgpd/bgp_evpn_mh.c
index b6ec8341a7..b9861acad2 100644
--- a/bgpd/bgp_evpn_mh.c
+++ b/bgpd/bgp_evpn_mh.c
@@ -1201,6 +1201,7 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
mpls_label_t label;
struct in_addr vtep_ip;
struct prefix_evpn p;
+ uint8_t num_labels = 0;
if (psize != BGP_EVPN_TYPE1_PSIZE) {
flog_err(EC_BGP_EVPN_ROUTE_INVALID,
@@ -1225,6 +1226,7 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
pfx += EVPN_ETH_TAG_BYTES;
memcpy(&label, pfx, BGP_LABEL_BYTES);
+ num_labels++;
/* EAD route prefix doesn't include the nexthop in the global
* table
@@ -1233,12 +1235,11 @@ int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
build_evpn_type1_prefix(&p, eth_tag, &esi, vtep_ip);
/* Process the route. */
if (attr) {
- bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
- safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL,
- 0, 0, NULL);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi, safi, ZEBRA_ROUTE_BGP,
+ BGP_ROUTE_NORMAL, &prd, &label, num_labels, 0, NULL);
} else {
- bgp_withdraw(peer, (struct prefix *)&p, addpath_id, afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL, 0);
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, afi, safi, ZEBRA_ROUTE_BGP,
+ BGP_ROUTE_NORMAL, &prd, &label, num_labels);
}
return 0;
}
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 1a30cb37f4..c7a4c6928a 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -525,8 +525,9 @@ static void bgp_holdtime_timer(struct event *thread)
* for systems where we are heavily loaded for one
* reason or another.
*/
- inq_count = atomic_load_explicit(&connection->ibuf->count,
- memory_order_relaxed);
+ frr_with_mutex (&connection->io_mtx) {
+ inq_count = atomic_load_explicit(&connection->ibuf->count, memory_order_relaxed);
+ }
if (inq_count)
BGP_TIMER_ON(connection->t_holdtime, bgp_holdtime_timer,
peer->v_holdtime);
@@ -607,6 +608,7 @@ const char *const peer_down_str[] = {
"Admin. shutdown (RTT)",
"Suppress Fib Turned On or Off",
"Password config change",
+ "Router ID is missing",
};
static void bgp_graceful_restart_timer_off(struct peer_connection *connection,
diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c
index af5d815d30..3df4aa286e 100644
--- a/bgpd/bgp_network.c
+++ b/bgpd/bgp_network.c
@@ -571,7 +571,7 @@ static void bgp_accept(struct event *thread)
/* Do not try to reconnect if the peer reached maximum
* prefixes, restart timer is still running or the peer
- * is shutdown.
+ * is shutdown, or BGP identifier is not set (0.0.0.0).
*/
if (BGP_PEER_START_SUPPRESSED(peer1)) {
if (bgp_debug_neighbor_events(peer1)) {
@@ -588,6 +588,14 @@ static void bgp_accept(struct event *thread)
return;
}
+ if (peer1->bgp->router_id.s_addr == INADDR_ANY) {
+ zlog_warn("[Event] Incoming BGP connection rejected from %s due to a missing BGP identifier, set it with `bgp router-id`",
+ peer1->host);
+ peer1->last_reset = PEER_DOWN_ROUTER_ID_ZERO;
+ close(bgp_sock);
+ return;
+ }
+
if (bgp_debug_neighbor_events(peer1))
zlog_debug("[Event] connection from %s fd %d, active peer status %d fd %d",
inet_sutop(&su, buf), bgp_sock, connection1->status,
@@ -776,6 +784,13 @@ enum connect_result bgp_connect(struct peer_connection *connection)
assert(!CHECK_FLAG(connection->thread_flags, PEER_THREAD_READS_ON));
ifindex_t ifindex = 0;
+ if (peer->bgp->router_id.s_addr == INADDR_ANY) {
+ peer->last_reset = PEER_DOWN_ROUTER_ID_ZERO;
+ zlog_warn("%s: BGP identifier is missing for peer %s, set it with `bgp router-id`",
+ __func__, peer->host);
+ return connect_error;
+ }
+
if (peer->conf_if && BGP_CONNECTION_SU_UNSPEC(connection)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug("Peer address not learnt: Returning from connect");
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c
index 2ef7ec97e3..c8e15372b6 100644
--- a/bgpd/bgp_nht.c
+++ b/bgpd/bgp_nht.c
@@ -38,7 +38,7 @@ extern struct zclient *zclient;
static void register_zebra_rnh(struct bgp_nexthop_cache *bnc);
static void unregister_zebra_rnh(struct bgp_nexthop_cache *bnc);
-static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
+static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p);
static void bgp_nht_ifp_initial(struct event *thread);
DEFINE_HOOK(bgp_nht_path_update, (struct bgp *bgp, struct bgp_path_info *pi, bool valid),
@@ -330,7 +330,7 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop,
/* This will return true if the global IPv6 NH is a link local
* addr */
- if (make_prefix(afi, pi, &p) < 0)
+ if (!make_prefix(afi, pi, &p))
return 1;
/*
@@ -1026,7 +1026,7 @@ void bgp_cleanup_nexthops(struct bgp *bgp)
* make_prefix - make a prefix structure from the path (essentially
* path's node.
*/
-static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
+static bool make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
{
int is_bgp_static = ((pi->type == ZEBRA_ROUTE_BGP)
@@ -1036,12 +1036,13 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
struct bgp_dest *net = pi->net;
const struct prefix *p_orig = bgp_dest_get_prefix(net);
struct in_addr ipv4;
+ struct peer *peer = pi->peer;
+ struct attr *attr = pi->attr;
if (p_orig->family == AF_FLOWSPEC) {
- if (!pi->peer)
- return -1;
- return bgp_flowspec_get_first_nh(pi->peer->bgp,
- pi, p, afi);
+ if (!peer)
+ return false;
+ return bgp_flowspec_get_first_nh(peer->bgp, pi, p, afi);
}
memset(p, 0, sizeof(struct prefix));
switch (afi) {
@@ -1051,34 +1052,32 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
p->u.prefix4 = p_orig->u.prefix4;
p->prefixlen = p_orig->prefixlen;
} else {
- if (IS_MAPPED_IPV6(&pi->attr->mp_nexthop_global)) {
- ipv4_mapped_ipv6_to_ipv4(
- &pi->attr->mp_nexthop_global, &ipv4);
+ if (IS_MAPPED_IPV6(&attr->mp_nexthop_global)) {
+ ipv4_mapped_ipv6_to_ipv4(&attr->mp_nexthop_global, &ipv4);
p->u.prefix4 = ipv4;
p->prefixlen = IPV4_MAX_BITLEN;
} else {
if (p_orig->family == AF_EVPN)
- p->u.prefix4 =
- pi->attr->mp_nexthop_global_in;
+ p->u.prefix4 = attr->mp_nexthop_global_in;
else
- p->u.prefix4 = pi->attr->nexthop;
+ p->u.prefix4 = attr->nexthop;
p->prefixlen = IPV4_MAX_BITLEN;
}
}
break;
case AFI_IP6:
p->family = AF_INET6;
- if (pi->attr->srv6_l3vpn) {
+ if (attr->srv6_l3vpn) {
p->prefixlen = IPV6_MAX_BITLEN;
- if (pi->attr->srv6_l3vpn->transposition_len != 0 &&
+ if (attr->srv6_l3vpn->transposition_len != 0 &&
BGP_PATH_INFO_NUM_LABELS(pi)) {
- IPV6_ADDR_COPY(&p->u.prefix6, &pi->attr->srv6_l3vpn->sid);
+ IPV6_ADDR_COPY(&p->u.prefix6, &attr->srv6_l3vpn->sid);
transpose_sid(&p->u.prefix6,
decode_label(&pi->extra->labels->label[0]),
- pi->attr->srv6_l3vpn->transposition_offset,
- pi->attr->srv6_l3vpn->transposition_len);
+ attr->srv6_l3vpn->transposition_offset,
+ attr->srv6_l3vpn->transposition_len);
} else
- IPV6_ADDR_COPY(&(p->u.prefix6), &(pi->attr->srv6_l3vpn->sid));
+ IPV6_ADDR_COPY(&(p->u.prefix6), &(attr->srv6_l3vpn->sid));
} else if (is_bgp_static) {
p->u.prefix6 = p_orig->u.prefix6;
p->prefixlen = p_orig->prefixlen;
@@ -1086,28 +1085,35 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
/* If we receive MP_REACH nexthop with ::(LL)
* or LL(LL), use LL address as nexthop cache.
*/
- if (pi->attr &&
- pi->attr->mp_nexthop_len ==
- BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL &&
- (IN6_IS_ADDR_UNSPECIFIED(
- &pi->attr->mp_nexthop_global) ||
- IN6_IS_ADDR_LINKLOCAL(&pi->attr->mp_nexthop_global)))
- p->u.prefix6 = pi->attr->mp_nexthop_local;
+ if (attr && attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL &&
+ (IN6_IS_ADDR_UNSPECIFIED(&attr->mp_nexthop_global) ||
+ IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)))
+ p->u.prefix6 = attr->mp_nexthop_local;
/* If we receive MR_REACH with (GA)::(LL)
* then check for route-map to choose GA or LL
*/
- else if (pi->attr &&
- pi->attr->mp_nexthop_len ==
- BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
- if (CHECK_FLAG(pi->attr->nh_flags,
- BGP_ATTR_NH_MP_PREFER_GLOBAL))
- p->u.prefix6 =
- pi->attr->mp_nexthop_global;
+ else if (attr && attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
+ if (CHECK_FLAG(attr->nh_flags, BGP_ATTR_NH_MP_PREFER_GLOBAL))
+ p->u.prefix6 = attr->mp_nexthop_global;
else
- p->u.prefix6 =
- pi->attr->mp_nexthop_local;
+ p->u.prefix6 = attr->mp_nexthop_local;
+ } else if (attr && attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV6_GLOBAL &&
+ IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) {
+ /* If we receive MP_REACH with GUA as LL, we should
+ * check if we have Link-Local Next Hop capability also.
+ */
+ if (!(CHECK_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_ADV) &&
+ CHECK_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_RCV))) {
+ zlog_warn("%s: received IPv6 global next-hop as Link-Local, but no capability exchanged",
+ __func__);
+ p->u.prefix6 = attr->mp_nexthop_global;
+ } else {
+ p->u.prefix6 = attr->mp_nexthop_global;
+ p->prefixlen = IPV6_MAX_BITLEN;
+ return false;
+ }
} else
- p->u.prefix6 = pi->attr->mp_nexthop_global;
+ p->u.prefix6 = attr->mp_nexthop_global;
p->prefixlen = IPV6_MAX_BITLEN;
}
break;
@@ -1119,7 +1125,7 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p)
}
break;
}
- return 0;
+ return true;
}
/**
diff --git a/bgpd/bgp_open.c b/bgpd/bgp_open.c
index 6451c7cf38..be04d87b74 100644
--- a/bgpd/bgp_open.c
+++ b/bgpd/bgp_open.c
@@ -43,6 +43,7 @@ const struct message capcode_str[] = {
{ CAPABILITY_CODE_ROLE, "Role" },
{ CAPABILITY_CODE_SOFT_VERSION, "Software Version" },
{ CAPABILITY_CODE_PATHS_LIMIT, "Paths-Limit" },
+ { CAPABILITY_CODE_LINK_LOCAL, "Link-Local Next Hop" },
{ 0 }
};
@@ -63,6 +64,7 @@ const size_t cap_minsizes[] = {
[CAPABILITY_CODE_ROLE] = CAPABILITY_CODE_ROLE_LEN,
[CAPABILITY_CODE_SOFT_VERSION] = CAPABILITY_CODE_SOFT_VERSION_LEN,
[CAPABILITY_CODE_PATHS_LIMIT] = CAPABILITY_CODE_PATHS_LIMIT_LEN,
+ [CAPABILITY_CODE_LINK_LOCAL] = CAPABILITY_CODE_LINK_LOCAL_LEN,
};
/* value the capability must be a multiple of.
@@ -1067,6 +1069,7 @@ static int bgp_capability_parse(struct peer *peer, size_t length,
case CAPABILITY_CODE_ROLE:
case CAPABILITY_CODE_SOFT_VERSION:
case CAPABILITY_CODE_PATHS_LIMIT:
+ case CAPABILITY_CODE_LINK_LOCAL:
/* Check length. */
if (caphdr.length < cap_minsizes[caphdr.code]) {
zlog_info(
@@ -1168,6 +1171,9 @@ static int bgp_capability_parse(struct peer *peer, size_t length,
case CAPABILITY_CODE_SOFT_VERSION:
ret = bgp_capability_software_version(peer, &caphdr);
break;
+ case CAPABILITY_CODE_LINK_LOCAL:
+ SET_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_RCV);
+ break;
case CAPABILITY_CODE_PATHS_LIMIT:
ret = bgp_capability_paths_limit(peer, &caphdr);
break;
@@ -1968,6 +1974,16 @@ uint16_t bgp_open_capability(struct stream *s, struct peer *peer,
stream_putc(s, CAPABILITY_CODE_DYNAMIC_LEN);
}
+ /* Link-Local Next Hop capability. */
+ if (peergroup_flag_check(peer, PEER_FLAG_CAPABILITY_LINK_LOCAL)) {
+ SET_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_ADV);
+ stream_putc(s, BGP_OPEN_OPT_CAP);
+ ext_opt_params ? stream_putw(s, CAPABILITY_CODE_LINK_LOCAL_LEN + 2)
+ : stream_putc(s, CAPABILITY_CODE_LINK_LOCAL_LEN + 2);
+ stream_putc(s, CAPABILITY_CODE_LINK_LOCAL);
+ stream_putc(s, CAPABILITY_CODE_LINK_LOCAL_LEN);
+ }
+
/* FQDN capability */
if (CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_FQDN)
&& cmd_hostname_get()) {
diff --git a/bgpd/bgp_open.h b/bgpd/bgp_open.h
index 3a8cba9b7d..abe3b51f5d 100644
--- a/bgpd/bgp_open.h
+++ b/bgpd/bgp_open.h
@@ -54,6 +54,7 @@ struct graceful_restart_af {
#define CAPABILITY_CODE_EXT_MESSAGE 6 /* Extended Message Support */
#define CAPABILITY_CODE_ROLE 9 /* Role Capability */
#define CAPABILITY_CODE_PATHS_LIMIT 76 /* Paths Limit Capability */
+#define CAPABILITY_CODE_LINK_LOCAL 77 /* draft-white-linklocal-capability */
/* Capability Length */
#define CAPABILITY_CODE_MP_LEN 4
@@ -71,6 +72,7 @@ struct graceful_restart_af {
#define CAPABILITY_CODE_EXT_MESSAGE_LEN 0 /* Extended Message Support */
#define CAPABILITY_CODE_ROLE_LEN 1
#define CAPABILITY_CODE_SOFT_VERSION_LEN 1
+#define CAPABILITY_CODE_LINK_LOCAL_LEN 0
/* Cooperative Route Filtering Capability. */
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index f8726ffff9..3e90d7881c 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -1245,6 +1245,18 @@ void bgp_capability_send(struct peer *peer, afi_t afi, safi_t safi,
/* Encode MP_EXT capability. */
switch (capability_code) {
+ case CAPABILITY_CODE_LINK_LOCAL:
+ stream_putc(s, action);
+ stream_putc(s, CAPABILITY_CODE_LINK_LOCAL);
+ stream_putc(s, 0);
+
+ if (bgp_debug_neighbor_events(peer))
+ zlog_debug("%pBP sending CAPABILITY has %s %s for afi/safi: %s/%s", peer,
+ action == CAPABILITY_ACTION_SET ? "Advertising" : "Removing",
+ capability, iana_afi2str(pkt_afi), iana_safi2str(pkt_safi));
+
+ COND_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_ADV, action == CAPABILITY_ACTION_SET);
+ break;
case CAPABILITY_CODE_SOFT_VERSION:
stream_putc(s, action);
stream_putc(s, CAPABILITY_CODE_SOFT_VERSION);
@@ -3769,6 +3781,7 @@ static int bgp_capability_msg_parse(struct peer *peer, uint8_t *pnt,
case CAPABILITY_CODE_ROLE:
case CAPABILITY_CODE_SOFT_VERSION:
case CAPABILITY_CODE_PATHS_LIMIT:
+ case CAPABILITY_CODE_LINK_LOCAL:
if (hdr->length < cap_minsizes[hdr->code]) {
zlog_info("%pBP: %s Capability length error: got %u, expected at least %u",
peer, capability, hdr->length,
diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c
index 2d61c0f00a..b85a8e2254 100644
--- a/bgpd/bgp_pbr.c
+++ b/bgpd/bgp_pbr.c
@@ -2624,7 +2624,6 @@ static void bgp_pbr_policyroute_add_to_zebra(struct bgp *bgp,
static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
struct bgp_pbr_entry_main *api, bool add)
{
- struct nexthop nh;
int i = 0;
int continue_loop = 1;
float rate = 0;
@@ -2639,7 +2638,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
struct bgp_pbr_val_mask bpvm;
memset(&range, 0, sizeof(range));
- memset(&nh, 0, sizeof(nh));
memset(&bpf, 0, sizeof(bpf));
memset(&bpof, 0, sizeof(bpof));
if (CHECK_FLAG(api->match_bitmask, PREFIX_SRC_PRESENT) ||
@@ -2652,8 +2650,6 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
dst = &api->dst_prefix;
if (api->type == BGP_PBR_IPRULE)
bpf.type = api->type;
- memset(&nh, 0, sizeof(nh));
- nh.vrf_id = VRF_UNKNOWN;
if (api->match_protocol_num) {
proto = (uint8_t)api->protocol[0].value;
if (api->afi == AF_INET6 && proto == IPPROTO_ICMPV6)
@@ -2778,8 +2774,10 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
case ACTION_TRAFFICRATE:
/* drop packet */
if (api->actions[i].u.r.rate == 0) {
- nh.vrf_id = api->vrf_id;
- nh.type = NEXTHOP_TYPE_BLACKHOLE;
+ struct nexthop nh = {
+ .vrf_id = api->vrf_id,
+ .type = NEXTHOP_TYPE_BLACKHOLE,
+ };
bgp_pbr_policyroute_add_to_zebra(
bgp, path, &bpf, &bpof, &nh, &rate);
} else {
@@ -2802,18 +2800,15 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
/* terminate action: run other filters
*/
break;
- case ACTION_REDIRECT_IP:
- nh.vrf_id = api->vrf_id;
+ case ACTION_REDIRECT_IP: {
+ struct nexthop nh = { .vrf_id = api->vrf_id };
+
if (api->afi == AFI_IP) {
nh.type = NEXTHOP_TYPE_IPV4;
- nh.gate.ipv4.s_addr =
- api->actions[i].u.zr.
- redirect_ip_v4.s_addr;
+ nh.gate.ipv4 = api->actions[i].u.zr.redirect_ip_v4;
} else {
nh.type = NEXTHOP_TYPE_IPV6;
- memcpy(&nh.gate.ipv6,
- &api->actions[i].u.zr.redirect_ip_v6,
- sizeof(struct in6_addr));
+ nh.gate.ipv6 = api->actions[i].u.zr.redirect_ip_v6;
}
bgp_pbr_policyroute_add_to_zebra(bgp, path, &bpf, &bpof,
&nh, &rate);
@@ -2822,7 +2817,10 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
*/
continue_loop = 0;
break;
- case ACTION_REDIRECT:
+ }
+ case ACTION_REDIRECT: {
+ struct nexthop nh = {};
+
if (api->afi == AFI_IP)
nh.type = NEXTHOP_TYPE_IPV4;
else
@@ -2832,6 +2830,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path,
&nh, &rate);
continue_loop = 0;
break;
+ }
case ACTION_MARKING:
if (BGP_DEBUG(pbr, PBR)) {
bgp_pbr_print_policy_route(api);
diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c
index e932738cd4..a1a5068a7f 100644
--- a/bgpd/bgp_route.c
+++ b/bgpd/bgp_route.c
@@ -2506,8 +2506,16 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
} else if (!ibgp_to_ibgp && !transparent &&
!CHECK_FLAG(from->af_flags[afi][safi], PEER_FLAG_REFLECTOR_CLIENT) &&
IN6_IS_ADDR_LINKLOCAL(&peer->nexthop.v6_local) && peer->shared_network &&
- (from == bgp->peer_self || peer->sort == BGP_PEER_EBGP))
- global_and_ll = true;
+ (from == bgp->peer_self || peer->sort == BGP_PEER_EBGP)) {
+ /* If an implementation intends to send a single link-local forwarding
+ * address in the Next Hop field of the MP_REACH_NLRI, it MUST set the
+ * length of the Next Hop field to 16 and include only the IPv6 link-local
+ * address in the Next Hop field.
+ */
+ if (!(CHECK_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_ADV) &&
+ CHECK_FLAG(peer->cap, PEER_CAP_LINK_LOCAL_RCV)))
+ global_and_ll = true;
+ }
if (global_and_ll) {
if (safi == SAFI_MPLS_VPN)
@@ -2851,8 +2859,17 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi,
* If the extended community is non-transitive, strip it off,
* unless it's a locally originated route (static, aggregate,
* redistributed, etc.).
+ * draft-uttaro-idr-bgp-oad says:
+ * Extended communities which are non-transitive across an AS
+ * boundary MAY be advertised over an EBGP-OAD session if allowed
+ * by explicit policy configuration. If allowed, all the members
+ * of the OAD SHOULD be configured to use the same criteria.
+ * For example, the Origin Validation State Extended Community,
+ * defined as non-transitive in [RFC8097], can be advertised to
+ * peers in the same OAD.
*/
- if (from->sort == BGP_PEER_EBGP && peer->sort == BGP_PEER_EBGP &&
+ if (from->sort == BGP_PEER_EBGP && from->sub_sort != BGP_PEER_EBGP_OAD &&
+ peer->sort == BGP_PEER_EBGP && peer->sub_sort != BGP_PEER_EBGP_OAD &&
pi->sub_type == BGP_ROUTE_NORMAL) {
struct ecommunity *new_ecomm;
struct ecommunity *old_ecomm;
@@ -4120,6 +4137,9 @@ static void process_eoiu_marker(struct bgp_dest *dest)
subqueue2str(META_QUEUE_EOIU_MARKER));
bgp_process_main_one(info->bgp, NULL, 0, 0);
+
+ XFREE(MTYPE_BGP_EOIU_MARKER_INFO, info);
+ XFREE(MTYPE_BGP_NODE, dest);
}
/*
@@ -4310,6 +4330,7 @@ static void eoiu_marker_queue_free(struct meta_queue *mq, struct bgp_dest_queue
XFREE(MTYPE_BGP_EOIU_MARKER_INFO, dest->info);
STAILQ_REMOVE_HEAD(l, pq);
STAILQ_NEXT(dest, pq) = NULL; /* complete unlink */
+ XFREE(MTYPE_BGP_NODE, dest);
mq->size--;
}
}
@@ -9933,6 +9954,7 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
"ipv6");
json_object_string_add(json_nexthop_global, "scope",
"global");
+ json_object_int_add(json_nexthop_global, "length", attr->mp_nexthop_len);
/* We display both LL & GL if both have been
* received */
@@ -9956,6 +9978,8 @@ void route_vty_out(struct vty *vty, const struct prefix *p,
"ipv6");
json_object_string_add(json_nexthop_ll, "scope",
"link-local");
+ json_object_int_add(json_nexthop_global, "length",
+ attr->mp_nexthop_len);
if ((IPV6_ADDR_CMP(&attr->mp_nexthop_global,
&attr->mp_nexthop_local) !=
@@ -11077,6 +11101,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
"ipv6");
json_object_string_add(json_nexthop_global, "scope",
"global");
+ json_object_int_add(json_nexthop_global, "length", attr->mp_nexthop_len);
} else {
if (nexthop_hostname)
vty_out(vty, " %pI6(%s)",
@@ -11264,6 +11289,7 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn,
json_object_string_add(json_nexthop_ll, "afi", "ipv6");
json_object_string_add(json_nexthop_ll, "scope",
"link-local");
+ json_object_int_add(json_nexthop_ll, "length", attr->mp_nexthop_len);
json_object_boolean_true_add(json_nexthop_ll,
"accessible");
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index 98021f7ef4..fd4fd8afd6 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -114,6 +114,10 @@ FRR_CFG_DEFAULT_BOOL(BGP_SOFT_VERSION_CAPABILITY,
{ .val_bool = true, .match_profile = "datacenter", },
{ .val_bool = false },
);
+FRR_CFG_DEFAULT_BOOL(BGP_LINK_LOCAL_CAPABILITY,
+ { .val_bool = true, .match_profile = "datacenter", },
+ { .val_bool = false },
+);
FRR_CFG_DEFAULT_BOOL(BGP_DYNAMIC_CAPABILITY,
{ .val_bool = true, .match_profile = "datacenter", },
{ .val_bool = false },
@@ -623,6 +627,8 @@ int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name,
if (DFLT_BGP_SOFT_VERSION_CAPABILITY)
SET_FLAG((*bgp)->flags,
BGP_FLAG_SOFT_VERSION_CAPABILITY);
+ if (DFLT_BGP_LINK_LOCAL_CAPABILITY)
+ SET_FLAG((*bgp)->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY);
if (DFLT_BGP_DYNAMIC_CAPABILITY)
SET_FLAG((*bgp)->flags,
BGP_FLAG_DYNAMIC_CAPABILITY);
@@ -4436,6 +4442,24 @@ DEFPY (bgp_default_software_version_capability,
return CMD_SUCCESS;
}
+DEFPY (bgp_default_link_local_capability,
+ bgp_default_link_local_capability_cmd,
+ "[no] bgp default link-local-capability",
+ NO_STR
+ BGP_STR
+ "Configure BGP defaults\n"
+ "Advertise Link-Local Next Hop capability for all neighbors\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+ if (no)
+ UNSET_FLAG(bgp->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY);
+ else
+ SET_FLAG(bgp->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY);
+
+ return CMD_SUCCESS;
+}
+
DEFPY (bgp_default_dynamic_capability,
bgp_default_dynamic_capability_cmd,
"[no] bgp default dynamic-capability",
@@ -6065,6 +6089,34 @@ DEFPY(neighbor_capability_software_version,
return ret;
}
+/* neighbor capability link-local */
+DEFPY(neighbor_capability_link_local,
+ neighbor_capability_link_local_cmd,
+ "[no$no] neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor capability link-local",
+ NO_STR
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Advertise capability to the peer\n"
+ "Advertise Link-Local Next Hop capability to the peer\n")
+{
+ struct peer *peer;
+ int ret;
+
+ peer = peer_and_group_lookup_vty(vty, neighbor);
+ if (!peer)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ if (no)
+ ret = peer_flag_unset_vty(vty, neighbor, PEER_FLAG_CAPABILITY_LINK_LOCAL);
+ else
+ ret = peer_flag_set_vty(vty, neighbor, PEER_FLAG_CAPABILITY_LINK_LOCAL);
+
+ bgp_capability_send(peer, AFI_IP, SAFI_UNICAST, CAPABILITY_CODE_LINK_LOCAL,
+ no ? CAPABILITY_ACTION_UNSET : CAPABILITY_ACTION_SET);
+
+ return ret;
+}
+
static int peer_af_flag_modify_vty(struct vty *vty, const char *peer_str,
afi_t afi, safi_t safi, uint64_t flag,
int set)
@@ -14942,6 +14994,16 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json,
json_object_object_add(json_cap, "softwareVersion",
json_soft_version);
+ /* Link-Local Next Hop capability */
+ json_object *json_link_local = NULL;
+
+ json_link_local = json_object_new_object();
+ json_object_boolean_add(json_link_local, "advertised",
+ !!CHECK_FLAG(p->cap, PEER_CAP_LINK_LOCAL_ADV));
+ json_object_boolean_add(json_link_local, "received",
+ !!CHECK_FLAG(p->cap, PEER_CAP_LINK_LOCAL_RCV));
+ json_object_object_add(json_cap, "linkLocalNextHop", json_link_local);
+
/* Graceful Restart */
if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV) ||
CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) {
@@ -15369,6 +15431,21 @@ CPP_NOTICE("Remove `gracefulRestartCapability` JSON field")
vty_out(vty, "\n");
+ /* Link-Local Next Hop capability */
+ vty_out(vty, " Link-Local Next Hop Capability:");
+
+ if (CHECK_FLAG(p->cap, PEER_CAP_LINK_LOCAL_ADV))
+ vty_out(vty, " advertised link-local");
+ else
+ vty_out(vty, " not advertised");
+
+ if (CHECK_FLAG(p->cap, PEER_CAP_LINK_LOCAL_RCV))
+ vty_out(vty, " received link-local");
+ else
+ vty_out(vty, " not received");
+
+ vty_out(vty, "\n");
+
/* Graceful Restart */
if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV) ||
CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) {
@@ -18913,6 +18990,15 @@ static void bgp_config_write_peer_global(struct vty *vty, struct bgp *bgp,
addr);
}
+ /* capability link-local */
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY)) {
+ if (!peergroup_flag_check(peer, PEER_FLAG_CAPABILITY_LINK_LOCAL))
+ vty_out(vty, " no neighbor %s capability link-local\n", addr);
+ } else {
+ if (peergroup_flag_check(peer, PEER_FLAG_CAPABILITY_LINK_LOCAL))
+ vty_out(vty, " neighbor %s capability link-local\n", addr);
+ }
+
/* dont-capability-negotiation */
if (peergroup_flag_check(peer, PEER_FLAG_DONT_CAPABILITY))
vty_out(vty, " neighbor %s dont-capability-negotiate\n", addr);
@@ -19622,6 +19708,11 @@ int bgp_config_write(struct vty *vty)
? ""
: "no ");
+ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY) !=
+ SAVE_BGP_LINK_LOCAL_CAPABILITY)
+ vty_out(vty, " %sbgp default link-local-capability\n",
+ CHECK_FLAG(bgp->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY) ? "" : "no ");
+
if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_DYNAMIC_CAPABILITY) !=
SAVE_BGP_DYNAMIC_CAPABILITY)
vty_out(vty,
@@ -20728,6 +20819,9 @@ void bgp_vty_init(void)
/* bgp default software-version-capability */
install_element(BGP_NODE, &bgp_default_software_version_capability_cmd);
+ /* bgp default link-local-capability */
+ install_element(BGP_NODE, &bgp_default_link_local_capability_cmd);
+
/* bgp default dynamic-capability */
install_element(BGP_NODE, &bgp_default_dynamic_capability_cmd);
@@ -21383,6 +21477,9 @@ void bgp_vty_init(void)
/* "neighbor capability software-version" commands.*/
install_element(BGP_NODE, &neighbor_capability_software_version_cmd);
+ /* "neighbor capability link-local" commands.*/
+ install_element(BGP_NODE, &neighbor_capability_link_local_cmd);
+
/* "neighbor capability orf prefix-list" commands.*/
install_element(BGP_NODE, &neighbor_capability_orf_prefix_hidden_cmd);
install_element(BGP_NODE,
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index efb2c00fa5..1034e49bae 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -1575,6 +1575,9 @@ struct peer *peer_new(struct bgp *bgp)
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SOFT_VERSION_CAPABILITY))
peer_flag_set(peer, PEER_FLAG_CAPABILITY_SOFT_VERSION);
+ if (CHECK_FLAG(bgp->flags, BGP_FLAG_LINK_LOCAL_CAPABILITY))
+ peer_flag_set(peer, PEER_FLAG_CAPABILITY_LINK_LOCAL);
+
if (CHECK_FLAG(bgp->flags, BGP_FLAG_DYNAMIC_CAPABILITY))
peer_flag_set(peer, PEER_FLAG_DYNAMIC_CAPABILITY);
@@ -2964,6 +2967,11 @@ static void peer_group2peer_config_copy(struct peer_group *group,
SET_FLAG(peer->flags,
PEER_FLAG_DYNAMIC_CAPABILITY);
+ /* capability link-local apply */
+ if (!CHECK_FLAG(peer->flags_override, PEER_FLAG_CAPABILITY_LINK_LOCAL))
+ if (CHECK_FLAG(conf->flags, PEER_FLAG_CAPABILITY_LINK_LOCAL))
+ SET_FLAG(peer->flags, PEER_FLAG_CAPABILITY_LINK_LOCAL);
+
/* password apply */
if (!CHECK_FLAG(peer->flags_override, PEER_FLAG_PASSWORD))
PEER_STR_ATTR_INHERIT(peer, group, password,
@@ -4834,6 +4842,7 @@ static const struct peer_flag_action peer_flag_action_list[] = {
{PEER_FLAG_EXTENDED_LINK_BANDWIDTH, 0, peer_change_none},
{PEER_FLAG_LONESOUL, 0, peer_change_reset_out},
{PEER_FLAG_TCP_MSS, 0, peer_change_none},
+ {PEER_FLAG_CAPABILITY_LINK_LOCAL, 0, peer_change_none},
{0, 0, 0}};
static const struct peer_flag_action peer_af_flag_action_list[] = {
@@ -4921,7 +4930,10 @@ static int peer_flag_action_set(const struct peer_flag_action *action_list,
static void peer_flag_modify_action(struct peer *peer, uint64_t flag)
{
- if (flag == PEER_FLAG_DYNAMIC_CAPABILITY)
+ if (flag == PEER_FLAG_DYNAMIC_CAPABILITY || flag == PEER_FLAG_CAPABILITY_ENHE ||
+ flag == PEER_FLAG_CAPABILITY_FQDN || flag == PEER_FLAG_CAPABILITY_SOFT_VERSION ||
+ flag == PEER_FLAG_DONT_CAPABILITY || flag == PEER_FLAG_OVERRIDE_CAPABILITY ||
+ flag == PEER_FLAG_STRICT_CAP_MATCH || flag == PEER_FLAG_CAPABILITY_LINK_LOCAL)
peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE;
else if (flag == PEER_FLAG_PASSIVE)
peer->last_reset = PEER_DOWN_PASSIVE_CHANGE;
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 96a78e6662..d02c1c924d 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -566,6 +566,7 @@ struct bgp {
#define BGP_FLAG_IPV6_NO_AUTO_RA (1ULL << 40)
#define BGP_FLAG_L3VNI_SCHEDULE_FOR_INSTALL (1ULL << 41)
#define BGP_FLAG_L3VNI_SCHEDULE_FOR_DELETE (1ULL << 42)
+#define BGP_FLAG_LINK_LOCAL_CAPABILITY (1ULL << 43)
/* BGP default address-families.
* New peers inherit enabled afi/safis from bgp instance.
@@ -1410,6 +1411,8 @@ struct peer {
#define PEER_CAP_SOFT_VERSION_RCV (1ULL << 28)
#define PEER_CAP_PATHS_LIMIT_ADV (1U << 29)
#define PEER_CAP_PATHS_LIMIT_RCV (1U << 30)
+#define PEER_CAP_LINK_LOCAL_ADV (1ULL << 31)
+#define PEER_CAP_LINK_LOCAL_RCV (1ULL << 32)
/* Capability flags (reset in bgp_stop) */
uint32_t af_cap[AFI_MAX][SAFI_MAX];
@@ -1543,6 +1546,7 @@ struct peer {
#define PEER_FLAG_AS_LOOP_DETECTION (1ULL << 38) /* as path loop detection */
#define PEER_FLAG_EXTENDED_LINK_BANDWIDTH (1ULL << 39)
#define PEER_FLAG_DUAL_AS (1ULL << 40)
+#define PEER_FLAG_CAPABILITY_LINK_LOCAL (1ULL << 41)
/*
*GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART
@@ -1863,6 +1867,7 @@ struct peer {
#define PEER_DOWN_RTT_SHUTDOWN 35U /* Automatically shutdown due to RTT */
#define PEER_DOWN_SUPPRESS_FIB_PENDING 36U /* Suppress fib pending changed */
#define PEER_DOWN_PASSWORD_CHANGE 37U /* neighbor password command */
+#define PEER_DOWN_ROUTER_ID_ZERO 38U /* router-id is 0.0.0.0 */
/*
* Remember to update peer_down_str in bgp_fsm.c when you add
* a new value to the last_reset reason
@@ -2099,6 +2104,7 @@ struct bgp_nlri {
#define BGP_NOTIFY_UPDATE_OPT_ATTR_ERR 9
#define BGP_NOTIFY_UPDATE_INVAL_NETWORK 10
#define BGP_NOTIFY_UPDATE_MAL_AS_PATH 11
+#define BGP_NOTIFY_UPDATE_UNREACH_NEXT_HOP 12 /* draft-white-linklocal-capability */
/* BGP_NOTIFY_CEASE sub codes (RFC 4486). */
#define BGP_NOTIFY_CEASE_MAX_PREFIX 1
diff --git a/bgpd/rfapi/rfapi_import.c b/bgpd/rfapi/rfapi_import.c
index 99d8bcfce4..d9f63700f0 100644
--- a/bgpd/rfapi/rfapi_import.c
+++ b/bgpd/rfapi/rfapi_import.c
@@ -4067,9 +4067,15 @@ static void rfapiProcessPeerDownRt(struct peer *peer,
bpi, import_table, afi, -1);
import_table->holddown_count[afi] += 1;
}
- rfapiBiStartWithdrawTimer(import_table, rn, bpi,
- afi, safi,
- timer_service_func);
+ if (bm->terminating) {
+ if (safi == SAFI_MPLS_VPN)
+ rfapiExpireVpnNow(import_table, rn, bpi, 1);
+ else
+ rfapiExpireEncapNow(import_table, rn, bpi);
+
+ } else
+ rfapiBiStartWithdrawTimer(import_table, rn, bpi, afi, safi,
+ timer_service_func);
}
}
}
diff --git a/configure.ac b/configure.ac
index e04c0b6d46..09e2d20c3a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -7,7 +7,7 @@
##
AC_PREREQ([2.69])
-AC_INIT([frr], [10.3-dev], [https://github.com/frrouting/frr/issues])
+AC_INIT([frr], [10.4-dev], [https://github.com/frrouting/frr/issues])
PACKAGE_URL="https://frrouting.org/"
AC_SUBST([PACKAGE_URL])
PACKAGE_FULLNAME="FRRouting"
diff --git a/doc/developer/sbfd.rst b/doc/developer/sbfd.rst
index 7bbd2428dd..66a3b48dba 100644
--- a/doc/developer/sbfd.rst
+++ b/doc/developer/sbfd.rst
@@ -27,7 +27,7 @@ SBFD takes the same data packet format as BFD, but with a much simpler state mac
According to RFC7880, SBFD has a stateless SBFDReflector and a stateful SBFDInitiator with the state machine as below:
::
-
+
+--+
ADMIN DOWN, | |
TIMER | V
@@ -78,6 +78,7 @@ A bfd-name is always associated with a TE path, for example if we use the sbfd s
Meanwhile bfdd will notify the sbfd status to the Pathd, we should add the bfd-name field in PTM bfd notify message ZEBRA_BFD_DEST_REPLAY:
::
+
* Message format:
* - header: command, vrf
* - l: interface index
@@ -113,6 +114,7 @@ According to RFC7881, SBFD Control packet dst port should be 7784, src port can
::
+
UDP(sport=4784, dport=7784)/BFD() or UDP(sport=3784, dport=7784)/BFD()
if "multihop" is specified for sbfd initiator we choose the 4784 as the source port, so the reflected packet will take 4784 as the dst port, this is a local BFD_MULTI_HOP_PORT so the reflected packet can be handled by the existing bfd_recv_cb function.
@@ -122,6 +124,7 @@ if "multihop" is not specified for sbfd initiator we choose the 3784 as the sour
For echo SBFD with SRv6 encapsulation case, we re-use the BFD Echo port, the UDP ports in packet are set as:
::
+
UDP(sport=3785, dport=3785)/BFD()
diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst
index 1493c2fb98..42f7ca84dd 100644
--- a/doc/user/bgp.rst
+++ b/doc/user/bgp.rst
@@ -282,7 +282,9 @@ internal or external.
interface and address information. In that case default router ID value is
selected as the largest IP Address of the interfaces. When `router zebra` is
not enabled *bgpd* can't get interface information so `router-id` is set to
- 0.0.0.0. So please set router-id by hand.
+ 0.0.0.0, which is invalid, and the BGP session can't be established.
+
+ So please set the router-id manually.
.. _bgp-multiple-autonomous-systems:
@@ -1951,6 +1953,15 @@ Configuring Peers
are not supporting this capability or supporting BGP Capabilities
Negotiation RFC 2842.
+.. clicmd:: neighbor PEER capability link-local
+
+ Send the Link-Local Next Hop capability in the BGP OPEN message to the neighbor.
+ This is useful in data center environments where point-to-point (unnumbered) links
+ are utilized. This capability standardizes the operation of BGP over
+ point-to-point links using link-local IPv6 addressing only.
+
+ Enabled by default for the ``datacenter`` profile.
+
.. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> accept-own
Enable handling of self-originated VPN routes containing ``accept-own`` community.
@@ -2938,7 +2949,7 @@ BGP Extended Communities in Route Map
``CO:COLOR``
This is a format to define colors value. ``CO`` part is always 00 (default),
- it can be used to support the requirements of Color-Only steering when using
+ it can be used to support the requirements of Color-Only steering when using
a Null Endpoint in the SR-TE Policy as specified in Section 8.8 of [RFC9256].
The below shows in detail what the different combinations of ``CO`` bits can
match on to for the purpose of determining what type of SR-TE Policy Tunnel
diff --git a/doc/user/sbfd.rst b/doc/user/sbfd.rst
index 390d82a6c0..d26bffe07b 100644
--- a/doc/user/sbfd.rst
+++ b/doc/user/sbfd.rst
@@ -27,7 +27,7 @@ SBFD takes the same data packet format as BFD, but with a much simpler state mac
According to RFC7880, SBFD has a stateless SBFDReflector and a stateful SBFDInitiator with the state machine as below:
::
-
+
+--+
ADMIN DOWN, | |
TIMER | V
@@ -73,24 +73,28 @@ In the following example, we set up a sbfd session to monitor the path A-B-D (al
A is the SBFDInitiator, and D is the SBFDReflector, A will trasmit the SBFD packet to B as the format:
::
+
IPv6(src="200::A", dst="100::B", nh=43)/IPv6ExtHdrSegmentRouting(addresses=["100::D"], nh=41, segleft=1)/IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
Upon receiving the packet, B will take the Srv6 End action since the dst ip 100::B is the End address, B will the shift the dst address according to Srv6 spec, then trasmit the SBFD packet to D as the format:
::
+
IPv6(src="200::A", dst="100::D", nh=41)/IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
After D receive the packet, It will decap the outer IPv6 header since the dst ip 100::D is the End address, the decapped packet is:
::
+
IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
This packet will be routed to kernel stack of D since its dst is 200::D. Then the SBFDReflector service on D will get the packet and Reflect it. The response packet will be:
::
+
IPv6(src="200::D", dst="200::A")/UDP(sport=7784)/BFD(my_dis=456, your_disc=123, state=UP)
@@ -132,18 +136,21 @@ For example, we use Echo SBFD session to protect Srv6 path: A-B-D
A is also the SBFDInitiator, and B, C, D is Srv6 ready nodes, A will trasmit the SBFD packet to B as the format:
::
+
IPv6(src="200::A", dst="100::B", nh=43)/IPv6ExtHdrSegmentRouting(addresses=["100::D"], nh=41, segleft=1)/IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
Upon receiving the packet, B will take the Srv6 End action since the dst ip 100::B is the End address, B will the shift the dst address according to Srv6 spec, then trasmit the SBFD packet to D as the format:
::
+
IPv6(src="200::A", dst="100::D", nh=41)/IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
After D receive the packet, It will decap the outer IPv6 header since the dst ip 100::D is the End address, the decapped packet is:
::
+
IPv6(src="200::A", dst="200::A")/UDP(dport=3785)/BFD(my_dis=123, your_disc=123, state=UP)
@@ -181,6 +188,7 @@ We can also configure a SBFD Initiator-Reflector session based on simple IPv6/IP
A is the SBFDInitiator, and D is the SBFDReflector, A will trasmit the SBFD packet to B or C as the format:
::
+
IPv6(src="200::A", dst="200::D")/UDP(dport=7784)/BFD(my_dis=123, your_disc=456, state=UP)
@@ -189,6 +197,7 @@ Upon receiving the packet, B/C will route the packet to D according to the dst i
After D receive the packet, packet will be sent to kernel stack of D since its dst is 200::D. Then the SBFDReflector service on D will get the packet and reflect it. The response packet will be:
::
+
IPv6(src="200::D", dst="200::A")/UDP(sport=7784)/BFD(my_dis=456, your_disc=123, state=UP)
@@ -226,6 +235,7 @@ This command will show all the BFD and SBFD sessions in the bfdd:
::
+
BFD Peers:
peer 200::D bfd-mode sbfd-init bfd-name a-d multihop local-address 200::A vrf default remote-discr 456
ID: 1421669725
@@ -254,6 +264,7 @@ This command will show all the BFD and SBFD session packet counters:
.. clicmd:: show bfd peers counters
::
+
BFD Peers:
peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A vrf default srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
Control packet input: 0 packets
@@ -281,6 +292,7 @@ we also implemented a new show command to display BFD sessions with a bfd-name,
.. clicmd:: show bfd bfd-name a-b-d
::
+
BFD Peers:
peer 200::A bfd-mode sbfd-echo bfd-name a-b-d local-address 200::A vrf default srv6-source-ipv6 200::A srv6-encap-data 100::B 100::D
ID: 123
diff --git a/doc/user/static.rst b/doc/user/static.rst
index 5bf5004a66..0ce6e2107e 100644
--- a/doc/user/static.rst
+++ b/doc/user/static.rst
@@ -177,6 +177,20 @@ multiple segments instructions.
[..]
S>* 2005::/64 [1/0] is directly connected, ens3, seg6 2001:db8:aaaa::7,2002::4,2002::3,2002::2, weight 1, 00:00:06
+STATIC also supports steering of IPv4 traffic over an SRv6 SID list, as shown in the example below.
+
+.. code-block:: frr
+
+ ip route A.B.C.D <A.B.C.D|nexthop> segments U:U::U:U/Y:Y::Y:Y/Z:Z::Z:Z
+
+::
+
+ router(config)# ip route 10.0.0.0/24 sr0 segments fcbb:bbbb:1:2:3:fe00::
+
+ router# show ip route
+ [..]
+ S>* 10.0.0.0/24 [1/0] is directly connected, sr0, seg6 fcbb:bbbb:1:2:3:fe00::, weight 1, 00:00:06
+
SRv6 Static SIDs Commands
=========================
diff --git a/doc/user/zebra.rst b/doc/user/zebra.rst
index ef3a619853..f700d36086 100644
--- a/doc/user/zebra.rst
+++ b/doc/user/zebra.rst
@@ -1133,10 +1133,8 @@ kernel.
- any,
- babel,
- bgp,
- - connected,
- eigrp,
- isis,
- - kernel,
- nhrp,
- openfabric,
- ospf,
diff --git a/fpm/fpm_pb.h b/fpm/fpm_pb.h
index 23d7e43993..8847365a37 100644
--- a/fpm/fpm_pb.h
+++ b/fpm/fpm_pb.h
@@ -111,6 +111,7 @@ static inline int fpm__nexthop__get(const Fpm__Nexthop *nh,
nexthop->vrf_id = VRF_DEFAULT;
nexthop->type = NEXTHOP_TYPE_IPV4;
+ memset(&nexthop->gate, 0, sizeof(nexthop->gate));
nexthop->gate.ipv4 = ipv4;
if (ifindex) {
nexthop->type = NEXTHOP_TYPE_IPV4_IFINDEX;
diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c
index 9ea2cfd0a1..5b62d3c518 100644
--- a/isisd/isis_circuit.c
+++ b/isisd/isis_circuit.c
@@ -840,12 +840,10 @@ void isis_circuit_down(struct isis_circuit *circuit)
if (circuit->u.bc.adjdb[0]) {
circuit->u.bc.adjdb[0]->del = isis_delete_adj;
list_delete(&circuit->u.bc.adjdb[0]);
- circuit->u.bc.adjdb[0] = NULL;
}
if (circuit->u.bc.adjdb[1]) {
circuit->u.bc.adjdb[1]->del = isis_delete_adj;
list_delete(&circuit->u.bc.adjdb[1]);
- circuit->u.bc.adjdb[1] = NULL;
}
if (circuit->u.bc.is_dr[0]) {
isis_dr_resign(circuit, 1);
diff --git a/isisd/isis_cli.c b/isisd/isis_cli.c
index 735e39a377..c86d929903 100644
--- a/isisd/isis_cli.c
+++ b/isisd/isis_cli.c
@@ -2365,31 +2365,27 @@ DEFPY_YANG (isis_frr_lfa_tiebreaker,
if (!level || strmatch(level, "level-1")) {
if (no) {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-1/lfa/tiebreaker[index='%s']",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-1/lfa/tiebreaker[index='%s'][type='%s']",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
} else {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-1/lfa/tiebreaker[index='%s']/type",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-1/lfa/tiebreaker[index='%s'][type='%s']/type",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, type);
}
}
if (!level || strmatch(level, "level-2")) {
if (no) {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-2/lfa/tiebreaker[index='%s']",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-2/lfa/tiebreaker[index='%s'][type='%s']",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
} else {
- snprintf(
- xpath, XPATH_MAXLEN,
- "./fast-reroute/level-2/lfa/tiebreaker[index='%s']/type",
- index_str);
+ snprintf(xpath, XPATH_MAXLEN,
+ "./fast-reroute/level-2/lfa/tiebreaker[index='%s'][type='%s']/type",
+ index_str, type);
nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, type);
}
}
diff --git a/isisd/isis_events.c b/isisd/isis_events.c
index 5574bbc50f..5237e99999 100644
--- a/isisd/isis_events.c
+++ b/isisd/isis_events.c
@@ -83,7 +83,8 @@ static void circuit_commence_level(struct isis_circuit *circuit, int level)
send_hello_sched(circuit, level, TRIGGERED_IIH_DELAY);
circuit->u.bc.lan_neighs[level - 1] = list_new();
- circuit->u.bc.adjdb[level - 1] = list_new();
+ if (!circuit->u.bc.adjdb[level - 1])
+ circuit->u.bc.adjdb[level - 1] = list_new();
}
}
diff --git a/lib/nexthop.c b/lib/nexthop.c
index 332581fbd8..ee6c2b7ec0 100644
--- a/lib/nexthop.c
+++ b/lib/nexthop.c
@@ -772,68 +772,30 @@ unsigned int nexthop_level(const struct nexthop *nexthop)
return rv;
}
-/* Only hash word-sized things, let cmp do the rest. */
-uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
+uint32_t nexthop_hash(const struct nexthop *nexthop)
{
uint32_t key = 0x45afe398;
- int i;
- key = jhash_3words(nexthop->type, nexthop->vrf_id,
- nexthop->nh_label_type, key);
-
- if (nexthop->nh_label) {
- int labels = nexthop->nh_label->num_labels;
+ /* type, vrf, ifindex, ip addresses - see nexthop.h */
+ key = _nexthop_hash_bytes(nexthop, key);
- i = 0;
+ key = jhash_1word(nexthop->flags & NEXTHOP_FLAGS_HASHED, key);
- while (labels >= 3) {
- key = jhash_3words(nexthop->nh_label->label[i],
- nexthop->nh_label->label[i + 1],
- nexthop->nh_label->label[i + 2],
- key);
- labels -= 3;
- i += 3;
- }
-
- if (labels >= 2) {
- key = jhash_2words(nexthop->nh_label->label[i],
- nexthop->nh_label->label[i + 1],
- key);
- labels -= 2;
- i += 2;
- }
+ if (nexthop->nh_label) {
+ const struct mpls_label_stack *ls = nexthop->nh_label;
- if (labels >= 1)
- key = jhash_1word(nexthop->nh_label->label[i], key);
+ /* num_labels itself isn't useful to hash, if the number of
+ * labels is different, the hash value will change just due to
+ * that already.
+ */
+ key = jhash(ls->label, sizeof(ls->label[0]) * ls->num_labels, key);
}
- key = jhash_2words(nexthop->ifindex,
- CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK),
- key);
-
/* Include backup nexthops, if present */
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
int backups = nexthop->backup_num;
- i = 0;
-
- while (backups >= 3) {
- key = jhash_3words(nexthop->backup_idx[i],
- nexthop->backup_idx[i + 1],
- nexthop->backup_idx[i + 2], key);
- backups -= 3;
- i += 3;
- }
-
- while (backups >= 2) {
- key = jhash_2words(nexthop->backup_idx[i],
- nexthop->backup_idx[i + 1], key);
- backups -= 2;
- i += 2;
- }
-
- if (backups >= 1)
- key = jhash_1word(nexthop->backup_idx[i], key);
+ key = jhash(nexthop->backup_idx, sizeof(nexthop->backup_idx[0]) * backups, key);
}
if (nexthop->nh_srv6) {
@@ -868,31 +830,6 @@ uint32_t nexthop_hash_quick(const struct nexthop *nexthop)
return key;
}
-
-#define GATE_SIZE 4 /* Number of uint32_t words in struct g_addr */
-
-/* For a more granular hash */
-uint32_t nexthop_hash(const struct nexthop *nexthop)
-{
- uint32_t gate_src_rmap_raw[GATE_SIZE * 3] = {};
- /* Get all the quick stuff */
- uint32_t key = nexthop_hash_quick(nexthop);
-
- assert(((sizeof(nexthop->gate) + sizeof(nexthop->src)
- + sizeof(nexthop->rmap_src))
- / 3)
- == (GATE_SIZE * sizeof(uint32_t)));
-
- memcpy(gate_src_rmap_raw, &nexthop->gate, GATE_SIZE);
- memcpy(gate_src_rmap_raw + GATE_SIZE, &nexthop->src, GATE_SIZE);
- memcpy(gate_src_rmap_raw + (2 * GATE_SIZE), &nexthop->rmap_src,
- GATE_SIZE);
-
- key = jhash2(gate_src_rmap_raw, (GATE_SIZE * 3), key);
-
- return key;
-}
-
void nexthop_copy_no_recurse(struct nexthop *copy,
const struct nexthop *nexthop,
struct nexthop *rparent)
diff --git a/lib/nexthop.h b/lib/nexthop.h
index 5dfb58d846..cea7c77e3e 100644
--- a/lib/nexthop.h
+++ b/lib/nexthop.h
@@ -8,6 +8,7 @@
#ifndef _LIB_NEXTHOP_H
#define _LIB_NEXTHOP_H
+#include "jhash.h"
#include "prefix.h"
#include "mpls.h"
#include "vxlan.h"
@@ -56,15 +57,48 @@ struct nexthop {
struct nexthop *next;
struct nexthop *prev;
- /*
- * What vrf is this nexthop associated with?
+
+ /* begin of hashed data - all fields from here onwards are given to
+ * jhash() as one consecutive chunk. DO NOT create "padding holes".
+ * DO NOT insert pointers that need to be deep-hashed.
+ *
+ * static_assert() below needs to be updated when fields are added
*/
+ char _hash_begin[0];
+
+ /* see above */
+ enum nexthop_types_t type;
+
+ /* What vrf is this nexthop associated with? */
vrf_id_t vrf_id;
/* Interface index. */
ifindex_t ifindex;
- enum nexthop_types_t type;
+ /* Type of label(s), if any */
+ enum lsp_types_t nh_label_type;
+
+ /* padding: keep 16 byte alignment here */
+
+ /* Nexthop address
+ * make sure all 16 bytes for IPv6 are zeroed when putting in an IPv4
+ * address since the entire thing is hashed as-is
+ */
+ union {
+ union g_addr gate;
+ enum blackhole_type bh_type;
+ };
+ union g_addr src;
+ union g_addr rmap_src; /* Src is set via routemap */
+
+ /* end of hashed data - remaining fields in this struct are not
+ * directly fed into jhash(). Most of them are actually part of the
+ * hash but have special rules or handling attached.
+ */
+ char _hash_end[0];
+
+ /* Weight of the nexthop ( for unequal cost ECMP ) */
+ uint8_t weight;
uint16_t flags;
#define NEXTHOP_FLAG_ACTIVE (1 << 0) /* This nexthop is alive. */
@@ -82,18 +116,15 @@ struct nexthop {
#define NEXTHOP_FLAG_EVPN (1 << 8) /* nexthop is EVPN */
#define NEXTHOP_FLAG_LINKDOWN (1 << 9) /* is not removed on link down */
+ /* which flags are part of nexthop_hash(). Should probably be split
+ * off into a separate field...
+ */
+#define NEXTHOP_FLAGS_HASHED NEXTHOP_FLAG_ONLINK
+
#define NEXTHOP_IS_ACTIVE(flags) \
(CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \
&& !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE))
- /* Nexthop address */
- union {
- union g_addr gate;
- enum blackhole_type bh_type;
- };
- union g_addr src;
- union g_addr rmap_src; /* Src is set via routemap */
-
/* Nexthops obtained by recursive resolution.
*
* If the nexthop struct needs to be resolved recursively,
@@ -104,15 +135,9 @@ struct nexthop {
/* Recursive parent */
struct nexthop *rparent;
- /* Type of label(s), if any */
- enum lsp_types_t nh_label_type;
-
/* Label(s) associated with this nexthop. */
struct mpls_label_stack *nh_label;
- /* Weight of the nexthop ( for unequal cost ECMP ) */
- uint8_t weight;
-
/* Count and index of corresponding backup nexthop(s) in a backup list;
* only meaningful if the HAS_BACKUP flag is set.
*/
@@ -138,6 +163,29 @@ struct nexthop {
struct nexthop_srv6 *nh_srv6;
};
+/* all hashed fields (including padding, if it is necessary to add) need to
+ * be listed in the static_assert below
+ */
+
+#define S(field) sizeof(((struct nexthop *)NULL)->field)
+
+static_assert(
+ offsetof(struct nexthop, _hash_end) - offsetof(struct nexthop, _hash_begin) ==
+ S(type) + S(vrf_id) + S(ifindex) + S(nh_label_type) + S(gate) + S(src) + S(rmap_src),
+ "struct nexthop contains padding, this can break things. insert _pad fields at appropriate places");
+
+#undef S
+
+/* this is here to show exactly what is meant by the comments above about
+ * the hashing
+ */
+static inline uint32_t _nexthop_hash_bytes(const struct nexthop *nh, uint32_t seed)
+{
+ return jhash(&nh->_hash_begin,
+ offsetof(struct nexthop, _hash_end) - offsetof(struct nexthop, _hash_begin),
+ seed);
+}
+
/* Utility to append one nexthop to another. */
#define NEXTHOP_APPEND(to, new) \
do { \
@@ -183,27 +231,11 @@ struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type,
/*
* Hash a nexthop. Suitable for use with hash tables.
*
- * This function uses the following values when computing the hash:
- * - vrf_id
- * - ifindex
- * - type
- * - gate
- *
- * nexthop
- * The nexthop to hash
- *
- * Returns:
- * 32-bit hash of nexthop
+ * Please double check the code on what is included in the hash, there was
+ * documentation here but it got outdated and the only thing worse than no
+ * doc is incorrect doc.
*/
uint32_t nexthop_hash(const struct nexthop *nexthop);
-/*
- * Hash a nexthop only on word-sized attributes:
- * - vrf_id
- * - ifindex
- * - type
- * - (some) flags
- */
-uint32_t nexthop_hash_quick(const struct nexthop *nexthop);
extern bool nexthop_same(const struct nexthop *nh1, const struct nexthop *nh2);
extern bool nexthop_same_no_labels(const struct nexthop *nh1,
diff --git a/lib/route_types.pl b/lib/route_types.pl
index c75a866964..834cb822d2 100755
--- a/lib/route_types.pl
+++ b/lib/route_types.pl
@@ -127,9 +127,12 @@ printf "#define SHOW_ROUTE_V6_HEADER \\\n%s\n", codelist(@protosv6);
print "\n";
sub collect {
- my ($daemon, $ipv4, $ipv6, $any) = @_;
+ my ($daemon, $ipv4, $ipv6, $any, $ip_prot) = @_;
my (@names, @help) = ((), ());
for my $p (@protos) {
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "kernel");
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "connected");
+ next if ($ip_prot == 1 && $daemon eq "zebra" && $protodetail{$p}->{"cname"} eq "local");
next if ($protodetail{$p}->{"daemon"} eq $daemon && $daemon ne "zebra");
next if ($protodetail{$p}->{"restrict2"} ne "" &&
$protodetail{$p}->{"restrict2"} ne $daemon);
@@ -151,24 +154,24 @@ for my $daemon (sort keys %daemons) {
next unless ($daemons{$daemon}->{"ipv4"} || $daemons{$daemon}->{"ipv6"});
printf "/* %s */\n", $daemon;
if ($daemons{$daemon}->{"ipv4"} && $daemons{$daemon}->{"ipv6"}) {
- my ($names, $help) = collect($daemon, 1, 1, 0);
+ my ($names, $help) = collect($daemon, 1, 1, 0, 0);
printf "#define FRR_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 1, 0, 0);
+ ($names, $help) = collect($daemon, 1, 0, 0, 0);
printf "#define FRR_IP_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 0, 1, 0);
+ ($names, $help) = collect($daemon, 0, 1, 0, 0);
printf "#define FRR_IP6_REDIST_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP6_REDIST_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
if ($daemon eq "zebra") {
- ($names, $help) = collect($daemon, 1, 0, 1);
+ ($names, $help) = collect($daemon, 1, 0, 1, 1);
printf "#define FRR_IP_PROTOCOL_MAP_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP_PROTOCOL_MAP_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
- ($names, $help) = collect($daemon, 0, 1, 1);
+ ($names, $help) = collect($daemon, 0, 1, 1, 1);
printf "#define FRR_IP6_PROTOCOL_MAP_STR_%s \\\n %s\n", uc $daemon, $names;
printf "#define FRR_IP6_PROTOCOL_MAP_HELP_STR_%s \\\n%s\n", uc $daemon, $help;
}
diff --git a/lib/zclient.c b/lib/zclient.c
index d8c75c9029..5deea8f0cf 100644
--- a/lib/zclient.c
+++ b/lib/zclient.c
@@ -2300,7 +2300,27 @@ struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh)
n->type = znh->type;
n->vrf_id = znh->vrf_id;
n->ifindex = znh->ifindex;
- n->gate = znh->gate;
+
+ /* only copy values that have meaning - make sure "spare bytes" are
+ * left zeroed for hashing (look at _nexthop_hash_bytes)
+ */
+ switch (znh->type) {
+ case NEXTHOP_TYPE_BLACKHOLE:
+ n->bh_type = znh->bh_type;
+ break;
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ n->gate.ipv4 = znh->gate.ipv4;
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ n->gate.ipv6 = znh->gate.ipv6;
+ break;
+ case NEXTHOP_TYPE_IFINDEX:
+ /* nothing, ifindex is always copied */
+ break;
+ }
+
n->srte_color = znh->srte_color;
n->weight = znh->weight;
diff --git a/lib/zlog_5424.c b/lib/zlog_5424.c
index 4c60d4b405..6265ce3b1f 100644
--- a/lib/zlog_5424.c
+++ b/lib/zlog_5424.c
@@ -782,7 +782,7 @@ static void zlog_5424_cycle(struct zlog_cfg_5424 *zcf, int fd)
}
old = zcf->active ? &zcf->active->zt : NULL;
- old = zlog_target_replace(old, &zlt->zt);
+ old = zlog_target_replace(old, zlt ? &zlt->zt : NULL);
zcf->active = zlt;
/* oldt->fd == fd happens for zlog_5424_apply_meta() */
@@ -1076,9 +1076,17 @@ bool zlog_5424_apply_dst(struct zlog_cfg_5424 *zcf)
bool zlog_5424_apply_meta(struct zlog_cfg_5424 *zcf)
{
+ int fd;
+
frr_with_mutex (&zcf->cfg_mtx) {
if (zcf->active)
- zlog_5424_cycle(zcf, zcf->active->fd);
+ fd = zcf->active->fd;
+ else if (zcf->prio_min != ZLOG_DISABLED)
+ fd = zlog_5424_open(zcf, -1);
+ else
+ fd = -1;
+ if (fd >= 0)
+ zlog_5424_cycle(zcf, fd);
}
return true;
diff --git a/ospfd/ospf_api.c b/ospfd/ospf_api.c
index 213ee8c1fd..cfc13fcc53 100644
--- a/ospfd/ospf_api.c
+++ b/ospfd/ospf_api.c
@@ -514,6 +514,12 @@ struct msg *new_msg_originate_request(uint32_t seqnum, struct in_addr ifaddr,
omsglen += sizeof(struct msg_originate_request)
- sizeof(struct lsa_header);
+ if (omsglen > UINT16_MAX) {
+ zlog_warn("%s: LSA specified is bigger than maximum LSA size, something is wrong",
+ __func__);
+ omsglen = UINT16_MAX;
+ }
+
return msg_new(MSG_ORIGINATE_REQUEST, omsg, seqnum, omsglen);
}
@@ -639,6 +645,12 @@ struct msg *new_msg_lsa_change_notify(uint8_t msgtype, uint32_t seqnum,
memcpy(nmsg_data, data, len);
len += sizeof(struct msg_lsa_change_notify) - sizeof(struct lsa_header);
+ if (len > UINT16_MAX) {
+ zlog_warn("%s: LSA specified is bigger than maximum LSA size, something is wrong",
+ __func__);
+ len = UINT16_MAX;
+ }
+
return msg_new(msgtype, nmsg, seqnum, len);
}
@@ -666,6 +678,12 @@ struct msg *new_msg_reachable_change(uint32_t seqnum, uint16_t nadd,
nmsg->nremove = htons(nremove);
len = sizeof(*nmsg) + insz * (nadd + nremove);
+ if (len > UINT16_MAX) {
+ zlog_warn("%s: LSA specified is bigger than maximum LSA size, something is wrong",
+ __func__);
+ len = UINT16_MAX;
+ }
+
return msg_new(MSG_REACHABLE_CHANGE, nmsg, seqnum, len);
}
diff --git a/pbrd/pbr_nht.c b/pbrd/pbr_nht.c
index ff252f8505..d5cee5f3e4 100644
--- a/pbrd/pbr_nht.c
+++ b/pbrd/pbr_nht.c
@@ -493,7 +493,7 @@ void pbr_nht_change_group(const char *name)
}
for (ALL_NEXTHOPS(nhgc->nhg, nhop)) {
- struct pbr_nexthop_cache lookup;
+ struct pbr_nexthop_cache lookup = {};
struct pbr_nexthop_cache *pnhc;
lookup.nexthop = *nhop;
@@ -565,7 +565,7 @@ void pbr_nht_add_individual_nexthop(struct pbr_map_sequence *pbrms,
struct pbr_nexthop_group_cache *pnhgc;
struct pbr_nexthop_group_cache find;
struct pbr_nexthop_cache *pnhc;
- struct pbr_nexthop_cache lookup;
+ struct pbr_nexthop_cache lookup = {};
struct nexthop *nh;
char buf[PBR_NHC_NAMELEN];
@@ -624,7 +624,7 @@ static void pbr_nht_release_individual_nexthop(struct pbr_map_sequence *pbrms)
struct pbr_nexthop_group_cache *pnhgc;
struct pbr_nexthop_group_cache find;
struct pbr_nexthop_cache *pnhc;
- struct pbr_nexthop_cache lup;
+ struct pbr_nexthop_cache lup = {};
struct nexthop *nh;
enum nexthop_types_t nh_type = 0;
@@ -690,7 +690,7 @@ struct pbr_nexthop_group_cache *pbr_nht_add_group(const char *name)
DEBUGD(&pbr_dbg_nht, "%s: Retrieved NHGC @ %p", __func__, pnhgc);
for (ALL_NEXTHOPS(nhgc->nhg, nhop)) {
- struct pbr_nexthop_cache lookupc;
+ struct pbr_nexthop_cache lookupc = {};
struct pbr_nexthop_cache *pnhc;
lookupc.nexthop = *nhop;
diff --git a/pbrd/pbr_vty.c b/pbrd/pbr_vty.c
index 08fe56c7bb..aa98913571 100644
--- a/pbrd/pbr_vty.c
+++ b/pbrd/pbr_vty.c
@@ -1488,7 +1488,7 @@ pbrms_nexthop_group_write_individual_nexthop(
{
struct pbr_nexthop_group_cache find;
struct pbr_nexthop_group_cache *pnhgc;
- struct pbr_nexthop_cache lookup;
+ struct pbr_nexthop_cache lookup = {};
struct pbr_nexthop_cache *pnhc;
memset(&find, 0, sizeof(find));
diff --git a/pimd/pim_ifchannel.c b/pimd/pim_ifchannel.c
index 1791502b94..d55d2a958a 100644
--- a/pimd/pim_ifchannel.c
+++ b/pimd/pim_ifchannel.c
@@ -239,10 +239,16 @@ void pim_ifchannel_delete_all(struct interface *ifp)
void delete_on_noinfo(struct pim_ifchannel *ch)
{
- if (ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO
- && ch->ifjoin_state == PIM_IFJOIN_NOINFO
- && ch->t_ifjoin_expiry_timer == NULL)
+ struct pim_upstream *up = ch->upstream;
+ /*
+	 * (S,G) with no active traffic, KAT expires, PPT expires,
+ * channel state is NoInfo
+ */
+ if (ch->local_ifmembership == PIM_IFMEMBERSHIP_NOINFO &&
+ ch->ifjoin_state == PIM_IFJOIN_NOINFO &&
+ (ch->t_ifjoin_expiry_timer == NULL || (up && !pim_upstream_is_kat_running(up)))) {
pim_ifchannel_delete(ch);
+ }
}
void pim_ifchannel_ifjoin_switch(const char *caller, struct pim_ifchannel *ch,
diff --git a/pimd/pim_memory.c b/pimd/pim_memory.c
index 2c35bc6473..f918cbd146 100644
--- a/pimd/pim_memory.c
+++ b/pimd/pim_memory.c
@@ -26,6 +26,7 @@ DEFINE_MTYPE(PIMD, PIM_STATIC_ROUTE, "PIM Static Route");
DEFINE_MTYPE(PIMD, PIM_RP, "PIM RP info");
DEFINE_MTYPE(PIMD, PIM_FILTER_NAME, "PIM RP filter info");
DEFINE_MTYPE(PIMD, PIM_MSDP_PEER, "PIM MSDP peer");
+DEFINE_MTYPE(PIMD, PIM_MSDP_FILTER_NAME, "PIM MSDP peer filter name");
DEFINE_MTYPE(PIMD, PIM_MSDP_MG_NAME, "PIM MSDP mesh-group name");
DEFINE_MTYPE(PIMD, PIM_MSDP_AUTH_KEY, "PIM MSDP authentication key");
DEFINE_MTYPE(PIMD, PIM_MSDP_SA, "PIM MSDP source-active cache");
diff --git a/pimd/pim_memory.h b/pimd/pim_memory.h
index b44d3e191a..5c9bdad50c 100644
--- a/pimd/pim_memory.h
+++ b/pimd/pim_memory.h
@@ -25,6 +25,7 @@ DECLARE_MTYPE(PIM_STATIC_ROUTE);
DECLARE_MTYPE(PIM_RP);
DECLARE_MTYPE(PIM_FILTER_NAME);
DECLARE_MTYPE(PIM_MSDP_PEER);
+DECLARE_MTYPE(PIM_MSDP_FILTER_NAME);
DECLARE_MTYPE(PIM_MSDP_MG_NAME);
DECLARE_MTYPE(PIM_MSDP_SA);
DECLARE_MTYPE(PIM_MSDP_MG);
diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c
index 46d5f4881f..1fd27d383e 100644
--- a/pimd/pim_msdp.c
+++ b/pimd/pim_msdp.c
@@ -1095,6 +1095,8 @@ static void pim_msdp_peer_free(struct pim_msdp_peer *mp)
if (mp->auth_listen_sock != -1)
close(mp->auth_listen_sock);
+ XFREE(MTYPE_PIM_MSDP_FILTER_NAME, mp->acl_in);
+ XFREE(MTYPE_PIM_MSDP_FILTER_NAME, mp->acl_out);
XFREE(MTYPE_PIM_MSDP_MG_NAME, mp->mesh_group_name);
mp->pim = NULL;
diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c
index 51f0615884..5203f78b92 100644
--- a/pimd/pim_nb_config.c
+++ b/pimd/pim_nb_config.c
@@ -1564,8 +1564,8 @@ int pim_msdp_peer_sa_filter_in_modify(struct nb_cb_modify_args *args)
break;
case NB_EV_APPLY:
mp = nb_running_get_entry(args->dnode, NULL, true);
- XFREE(MTYPE_TMP, mp->acl_in);
- mp->acl_in = XSTRDUP(MTYPE_TMP,
+ XFREE(MTYPE_PIM_MSDP_FILTER_NAME, mp->acl_in);
+ mp->acl_in = XSTRDUP(MTYPE_PIM_MSDP_FILTER_NAME,
yang_dnode_get_string(args->dnode, NULL));
break;
}
@@ -1585,7 +1585,7 @@ int pim_msdp_peer_sa_filter_in_destroy(struct nb_cb_destroy_args *args)
break;
case NB_EV_APPLY:
mp = nb_running_get_entry(args->dnode, NULL, true);
- XFREE(MTYPE_TMP, mp->acl_in);
+ XFREE(MTYPE_PIM_MSDP_FILTER_NAME, mp->acl_in);
break;
}
@@ -1608,8 +1608,8 @@ int pim_msdp_peer_sa_filter_out_modify(struct nb_cb_modify_args *args)
break;
case NB_EV_APPLY:
mp = nb_running_get_entry(args->dnode, NULL, true);
- XFREE(MTYPE_TMP, mp->acl_out);
- mp->acl_out = XSTRDUP(MTYPE_TMP,
+ XFREE(MTYPE_PIM_MSDP_FILTER_NAME, mp->acl_out);
+ mp->acl_out = XSTRDUP(MTYPE_PIM_MSDP_FILTER_NAME,
yang_dnode_get_string(args->dnode, NULL));
break;
}
@@ -1629,7 +1629,7 @@ int pim_msdp_peer_sa_filter_out_destroy(struct nb_cb_destroy_args *args)
break;
case NB_EV_APPLY:
mp = nb_running_get_entry(args->dnode, NULL, true);
- XFREE(MTYPE_TMP, mp->acl_out);
+ XFREE(MTYPE_PIM_MSDP_FILTER_NAME, mp->acl_out);
break;
}
@@ -2043,6 +2043,11 @@ int lib_interface_pim_address_family_pim_enable_modify(struct nb_cb_modify_args
ifp->name);
return NB_ERR_INCONSISTENCY;
}
+
+ /* Trigger election in case it was never run before */
+ pim_ifp = ifp->info;
+ if (pim_addr_is_any(pim_ifp->pim_dr_addr))
+ pim_if_dr_election(ifp);
} else {
pim_ifp = ifp->info;
if (!pim_ifp)
@@ -2076,6 +2081,10 @@ int lib_interface_pim_address_family_pim_passive_enable_modify(
pim_ifp = ifp->info;
pim_ifp->pim_passive_enable =
yang_dnode_get_bool(args->dnode, NULL);
+
+ /* Trigger election in case it was never run before */
+ if (pim_ifp->pim_passive_enable && pim_addr_is_any(pim_ifp->pim_dr_addr))
+ pim_if_dr_election(ifp);
break;
}
diff --git a/pimd/pim_upstream.c b/pimd/pim_upstream.c
index c52119e43a..01e1321b25 100644
--- a/pimd/pim_upstream.c
+++ b/pimd/pim_upstream.c
@@ -312,7 +312,7 @@ static void on_join_timer(struct event *t)
}
/*
- * In the case of a HFR we will not ahve anyone to send this to.
+ * In the case of a FHR we will not have anyone to send this to.
*/
if (PIM_UPSTREAM_FLAG_TEST_FHR(up->flags))
return;
diff --git a/staticd/static_vty.c b/staticd/static_vty.c
index f93e81e8dc..895846a1c7 100644
--- a/staticd/static_vty.c
+++ b/staticd/static_vty.c
@@ -564,6 +564,7 @@ DEFPY_YANG(ip_route_address_interface,
|onlink$onlink \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -589,7 +590,9 @@ DEFPY_YANG(ip_route_address_interface,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -611,6 +614,7 @@ DEFPY_YANG(ip_route_address_interface,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
@@ -631,6 +635,7 @@ DEFPY_YANG(ip_route_address_interface_vrf,
|onlink$onlink \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -655,7 +660,9 @@ DEFPY_YANG(ip_route_address_interface_vrf,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -677,6 +684,7 @@ DEFPY_YANG(ip_route_address_interface_vrf,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
@@ -696,6 +704,7 @@ DEFPY_YANG(ip_route,
|nexthop-vrf NAME \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -720,7 +729,9 @@ DEFPY_YANG(ip_route,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -741,6 +752,7 @@ DEFPY_YANG(ip_route,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
@@ -759,6 +771,7 @@ DEFPY_YANG(ip_route_vrf,
|nexthop-vrf NAME \
|color (1-4294967295) \
|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
+ |segments WORD \
}]",
NO_STR IP_STR
"Establish static routes\n"
@@ -782,7 +795,9 @@ DEFPY_YANG(ip_route_vrf,
BFD_INTEGRATION_SOURCE_STR
BFD_INTEGRATION_SOURCEV4_STR
BFD_PROFILE_STR
- BFD_PROFILE_NAME_STR)
+ BFD_PROFILE_NAME_STR
+ "Steer this route over an SRv6 SID list\n"
+ "SRv6 SID list\n")
{
struct static_route_args args = {
.delete = !!no,
@@ -803,6 +818,7 @@ DEFPY_YANG(ip_route_vrf,
.bfd_multi_hop = !!bfd_multi_hop,
.bfd_source = bfd_source_str,
.bfd_profile = bfd_profile,
+ .segs = segments,
};
return static_route_nb_run(vty, &args);
diff --git a/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf b/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf
index 4347052c5e..2390733cc8 100644
--- a/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf
+++ b/tests/topotests/bgp_evpn_route_map_match/r1/frr.conf
@@ -24,19 +24,16 @@ router bgp 65001
!
address-family l2vpn evpn
neighbor 192.168.1.2 activate
- neighbor 192.168.1.2 route-map r2 out
+ neighbor 192.168.1.2 route-map rt5 out
advertise-all-vni
advertise ipv4 unicast
exit-address-family
!
-route-map r2 deny 10
- match evpn route-type macip
-!
-route-map r2 deny 20
+route-map rt5 deny 20
match ip address prefix-list pl
match evpn route-type prefix
!
-route-map r2 permit 30
+route-map rt5 permit 30
!
ip prefix-list pl seq 5 permit 192.168.1.0/24
ip prefix-list pl seq 10 permit 10.10.10.1/32
diff --git a/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf b/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf
index 9ed298d8fe..1c91a3e254 100644
--- a/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf
+++ b/tests/topotests/bgp_evpn_route_map_match/r2/frr.conf
@@ -7,6 +7,7 @@ int lo
int r2-eth0
ip address 192.168.1.2/24
!
+vni 10
router bgp 65002
no bgp ebgp-requires-policy
neighbor 192.168.1.1 remote-as external
diff --git a/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py b/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py
index 36c79d6b2b..925ae1fce8 100644
--- a/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py
+++ b/tests/topotests/bgp_evpn_route_map_match/test_bgp_evpn_route_map_match.py
@@ -23,6 +23,7 @@ sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, get_topogen
+from lib.topolog import logger
def setup_module(mod):
@@ -63,7 +64,7 @@ def teardown_module(mod):
tgen.stop_topology()
-def test_bgp_evpn_route_map_match_route_type():
+def test_bgp_evpn_route_map_match_route_type5():
tgen = get_topogen()
if tgen.routers_have_failure():
@@ -84,16 +85,12 @@ def test_bgp_evpn_route_map_match_route_type():
"valid": True,
}
},
- "10.10.10.2:2": {
- "[3]:[0]:[32]:[10.10.10.2]": {
- "valid": True,
- }
- },
},
- "totalPrefixCounter": 2,
+ "totalPrefixCounter": 1,
}
return topotest.json_cmp(output, expected)
+ logger.info("Check route type-5 filtering")
test_func = functools.partial(
_bgp_converge,
)
@@ -101,6 +98,97 @@ def test_bgp_evpn_route_map_match_route_type():
assert result is None, "Filtered EVPN routes should not be advertised"
+def test_bgp_evpn_route_map_match_route_type2():
+ tgen = get_topogen()
+
+ # Change to L2VNI
+ for machine in [tgen.gears["r1"], tgen.gears["r2"]]:
+ machine.vtysh_cmd("configure terminal\nno vni 10")
+
+ def _check_l2vni():
+ for machine in [tgen.gears["r1"], tgen.gears["r2"]]:
+ output = json.loads(machine.vtysh_cmd("show evpn vni json"))
+
+ expected = {"10": {"vni": 10, "type": "L2"}}
+ return topotest.json_cmp(output, expected)
+
+ logger.info("Check L2VNI setup")
+ test_func = functools.partial(_check_l2vni)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "L2VNI setup failed."
+
+ c2_mac = (
+ tgen.gears["c2"]
+        .cmd("ip link show c2-eth0 | awk '/link\\/ether/ {print $2}'")
+ .rstrip()
+ )
+ tgen.gears["r1"].vtysh_cmd(
+ "\n".join(
+ [
+ "configure",
+ "route-map rt2 deny 30",
+ "match mac address %s" % c2_mac,
+ "exit",
+ "router bgp 65001",
+ "address-family l2vpn evpn",
+ "neighbor 192.168.1.2 route-map rt2 in",
+ ]
+ )
+ )
+
+ def _check_filter_mac():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp l2vpn evpn neighbors 192.168.1.2 advertised-routes json"
+ )
+ )
+
+ if (
+ output["advertisedRoutes"]
+ .get("10.10.10.2:2", {})
+ .get("[2]:[0]:[48]:[%s]" % c2_mac)
+ ):
+ return False
+
+ return True
+
+ logger.info("check mac filter in, on c2 interface: %s" % c2_mac)
+ test_func = functools.partial(_check_filter_mac)
+ _, result = topotest.run_and_expect(test_func, True, count=60, wait=1)
+ assert result is True, "%s is not filtered" % c2_mac
+
+ tgen.gears["r1"].vtysh_cmd(
+ "\n".join(
+ [
+ "configure",
+ "route-map rt2 deny 30",
+ "no match mac address %s" % c2_mac,
+                "match evpn route-type macip\nexit",
+ "router bgp 65001",
+ "address-family l2vpn evpn",
+ "neighbor 192.168.1.2 route-map rt2 out",
+ ]
+ )
+ )
+
+ def _check_filter_type2():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd(
+ "show bgp l2vpn evpn neighbors 192.168.1.2 advertised-routes json"
+ )
+ )
+
+ if output["totalPrefixCounter"] == 0:
+ return True
+
+ return False
+
+ logger.info("check route type-2 filter out")
+ test_func = functools.partial(_check_filter_type2)
+ _, result = topotest.run_and_expect(test_func, True, count=60, wait=1)
+ assert result is True, "EVPN routes type-2 are not filtered."
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_ipv6_link_local_capability/__init__.py b/tests/topotests/bgp_ipv6_link_local_capability/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_link_local_capability/__init__.py
diff --git a/tests/topotests/bgp_ipv6_link_local_capability/r1/frr.conf b/tests/topotests/bgp_ipv6_link_local_capability/r1/frr.conf
new file mode 100644
index 0000000000..1cf7f3b913
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_link_local_capability/r1/frr.conf
@@ -0,0 +1,15 @@
+!
+int lo
+ ip address 10.0.0.1/32
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ bgp default link-local-capability
+ neighbor r1-eth0 interface remote-as auto
+ address-family ipv6 unicast
+ network 2001:db8::1/128
+ neighbor r1-eth0 activate
+ exit-address-family
+ !
+!
diff --git a/tests/topotests/bgp_ipv6_link_local_capability/r2/frr.conf b/tests/topotests/bgp_ipv6_link_local_capability/r2/frr.conf
new file mode 100644
index 0000000000..4af053dcf6
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_link_local_capability/r2/frr.conf
@@ -0,0 +1,13 @@
+!
+int lo
+ ip address 10.0.0.2/32
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ bgp default link-local-capability
+ neighbor r2-eth0 interface remote-as auto
+ address-family ipv6 unicast
+ neighbor r2-eth0 activate
+ exit-address-family
+ !
+!
diff --git a/tests/topotests/bgp_ipv6_link_local_capability/test_bgp_ipv6_link_local_capability.py b/tests/topotests/bgp_ipv6_link_local_capability/test_bgp_ipv6_link_local_capability.py
new file mode 100644
index 0000000000..1822f17ee4
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_link_local_capability/test_bgp_ipv6_link_local_capability.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2024 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_ipv6_link_local_capability():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r2 = tgen.gears["r2"]
+
+ def _bgp_converge():
+ output = json.loads(r2.vtysh_cmd("show bgp neighbor json"))
+ expected = {
+ "r2-eth0": {
+ "neighborCapabilities": {
+ "linkLocalNextHop": {
+ "advertised": True,
+ "received": True,
+ }
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Can't converge initially"
+
+ def _bgp_check_received_nexthops():
+ output = json.loads(r2.vtysh_cmd("show bgp 2001:db8::1/128 json"))
+ expected = {
+ "paths": [
+ {
+ "valid": True,
+ "nexthops": [
+ {
+ "hostname": "r1",
+ "afi": "ipv6",
+ "scope": "global",
+ "length": 16,
+ "accessible": True,
+ }
+ ],
+ "peer": {
+ "routerId": "10.0.0.1",
+ "hostname": "r1",
+ "interface": "r2-eth0",
+ "type": "external",
+ },
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_received_nexthops)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Can't see 2001:db8::1/128"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf b/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf
index 724cbf84ab..a26efb4c4f 100644
--- a/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf
+++ b/tests/topotests/bgp_ipv6_ll_peering/r1/bgpd.conf
@@ -4,3 +4,6 @@ router bgp 65001
neighbor fe80:1::2 remote-as external
neighbor fe80:1::2 timers 3 10
neighbor fe80:1::2 interface r1-eth0
+ neighbor fe80:1::3 remote-as external
+ neighbor fe80:1::3 timers 3 10
+ neighbor fe80:1::3 interface r1-eth1
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf b/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf
index 4e93d4f4e5..f1bbff2e44 100644
--- a/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf
+++ b/tests/topotests/bgp_ipv6_ll_peering/r1/zebra.conf
@@ -2,3 +2,6 @@
interface r1-eth0
ipv6 address fe80:1::1/64
!
+interface r1-eth1
+ ipv6 address fe80:1::1/64
+!
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf b/tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf
new file mode 100644
index 0000000000..f1684880b3
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_ll_peering/r3/bgpd.conf
@@ -0,0 +1,5 @@
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor fe80:1::1 remote-as external
+ neighbor fe80:1::1 timers 3 10
+ neighbor fe80:1::1 interface r3-eth0
diff --git a/tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf b/tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf
new file mode 100644
index 0000000000..71053cd2c3
--- /dev/null
+++ b/tests/topotests/bgp_ipv6_ll_peering/r3/zebra.conf
@@ -0,0 +1,4 @@
+!
+interface r3-eth0
+ ipv6 address fe80:1::3/64
+!
diff --git a/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py b/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py
index aaa68ea340..fbd4097605 100644
--- a/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py
+++ b/tests/topotests/bgp_ipv6_ll_peering/test_bgp_ipv6_ll_peering.py
@@ -27,13 +27,17 @@ pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
- for routern in range(1, 3):
+ for routern in range(1, 4):
tgen.add_router("r{}".format(routern))
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
@@ -64,6 +68,7 @@ def test_bgp_ipv6_link_local_peering():
pytest.skip(tgen.errors)
r1 = tgen.gears["r1"]
+ r3 = tgen.gears["r3"]
def _bgp_converge():
output = json.loads(r1.vtysh_cmd("show bgp summary json"))
@@ -82,6 +87,28 @@ def test_bgp_ipv6_link_local_peering():
_, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Failed to see BGP convergence on R2"
+ def _bgp_router_id_missing():
+ output = json.loads(r3.vtysh_cmd("show bgp summary failed json"))
+ expected = {
+ "ipv4Unicast": {
+ "routerId": "0.0.0.0",
+ "as": 65003,
+ "peers": {
+ "fe80:1::1": {
+ "connectionsEstablished": 0,
+ "connectionsDropped": 0,
+ "peerUptime": "never",
+ "lastResetDueTo": "Router ID is missing",
+ }
+ },
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_router_id_missing)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "r3 should stay down due to missing router ID"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/bgp_oad/r3/frr.conf b/tests/topotests/bgp_oad/r3/frr.conf
index 02dd5adfe1..164267d74d 100644
--- a/tests/topotests/bgp_oad/r3/frr.conf
+++ b/tests/topotests/bgp_oad/r3/frr.conf
@@ -7,12 +7,14 @@ int r3-eth0
!
router bgp 65003
no bgp ebgp-requires-policy
+ no bgp network import-check
neighbor 192.168.2.2 remote-as external
neighbor 192.168.2.2 timers 1 3
neighbor 192.168.2.2 timers connect 1
neighbor 192.168.2.2 oad
!
address-family ipv4 unicast
+ network 10.10.10.20/32 route-map static
redistribute connected route-map connected
exit-address-family
!
@@ -20,3 +22,7 @@ route-map connected permit 10
set local-preference 123
set metric 123
!
+route-map static permit 10
+ set extcommunity bandwidth 100 non-transitive
+exit
+!
diff --git a/tests/topotests/bgp_oad/test_bgp_oad.py b/tests/topotests/bgp_oad/test_bgp_oad.py
index b2ea7e0f19..b397bc6372 100644
--- a/tests/topotests/bgp_oad/test_bgp_oad.py
+++ b/tests/topotests/bgp_oad/test_bgp_oad.py
@@ -56,6 +56,7 @@ def test_bgp_oad():
r2 = tgen.gears["r2"]
r3 = tgen.gears["r3"]
r4 = tgen.gears["r4"]
+ r5 = tgen.gears["r5"]
def _bgp_converge():
output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast 10.10.10.10/32 json"))
@@ -121,6 +122,38 @@ def test_bgp_oad():
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assert result is None, "10.10.10.1/32 should not be advertised to r4 (not OAD peer)"
+ def _bgp_check_non_transitive_extended_community(
+ router, arg={"string": "LB:65003:12500000 (100.000 Mbps)"}
+ ):
+ output = json.loads(
+ router.vtysh_cmd("show bgp ipv4 unicast 10.10.10.20/32 json")
+ )
+ expected = {
+ "paths": [
+ {
+ "extendedCommunity": arg,
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(
+ _bgp_check_non_transitive_extended_community,
+ r4,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert (
+ result is None
+ ), "10.10.10.20/32 should be received at r4 with non-transitive extended community"
+
+ test_func = functools.partial(
+ _bgp_check_non_transitive_extended_community, r5, None
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert (
+ result is None
+ ), "10.10.10.20/32 should NOT be received at r5 with non-transitive extended community"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
diff --git a/tests/topotests/fpm_testing_topo1/r1/routes_summ.json b/tests/topotests/fpm_testing_topo1/r1/routes_summ.json
index e9157bc664..12fe32cab3 100644
--- a/tests/topotests/fpm_testing_topo1/r1/routes_summ.json
+++ b/tests/topotests/fpm_testing_topo1/r1/routes_summ.json
@@ -3,21 +3,21 @@
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"connected"
},
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"local"
},
{
"fib":10000,
"rib":10000,
- "fibOffLoaded":0,
+ "fibOffLoaded":10000,
"fibTrapped":0,
"type":"sharp"
}
diff --git a/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json b/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json
index 8585b2bb6b..15d3f71077 100644
--- a/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json
+++ b/tests/topotests/fpm_testing_topo1/r1/routes_summ_removed.json
@@ -3,14 +3,14 @@
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"connected"
},
{
"fib":1,
"rib":1,
- "fibOffLoaded":0,
+ "fibOffLoaded":1,
"fibTrapped":0,
"type":"local"
}
diff --git a/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py b/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py
index 66cefcc2a0..b3c375549a 100644
--- a/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py
+++ b/tests/topotests/fpm_testing_topo1/test_fpm_topo1.py
@@ -57,7 +57,7 @@ def setup_module(module):
router.load_config(
TopoRouter.RD_ZEBRA,
os.path.join(CWD, "{}/zebra.conf".format(rname)),
- "-M dplane_fpm_nl",
+ "-M dplane_fpm_nl --asic-offload=notify_on_offload",
)
router.load_config(
TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
@@ -65,6 +65,7 @@ def setup_module(module):
router.load_config(
TopoRouter.RD_FPM_LISTENER,
os.path.join(CWD, "{}/fpm_stub.conf".format(rname)),
+ "-r",
)
tgen.start_router()
@@ -111,7 +112,7 @@ def test_fpm_install_routes():
topotest.router_json_cmp, router, "show ip route summ json", expected
)
- success, result = topotest.run_and_expect(test_func, None, 60, 1)
+ success, result = topotest.run_and_expect(test_func, None, 120, 1)
assert success, "Unable to successfully install 10000 routes: {}".format(result)
# Let's remove 10000 routes
@@ -124,7 +125,7 @@ def test_fpm_install_routes():
topotest.router_json_cmp, router, "show ip route summ json", expected
)
- success, result = topotest.run_and_expect(test_func, None, 60, 1)
+ success, result = topotest.run_and_expect(test_func, None, 120, 1)
assert success, "Unable to remove 10000 routes: {}".format(result)
diff --git a/tests/topotests/sbfd_topo1/test_sbfd_topo1.py b/tests/topotests/sbfd_topo1/test_sbfd_topo1.py
index e20902ebf5..274fbe6138 100644
--- a/tests/topotests/sbfd_topo1/test_sbfd_topo1.py
+++ b/tests/topotests/sbfd_topo1/test_sbfd_topo1.py
@@ -56,7 +56,7 @@ test_sbfd_topo1.py: test simple sbfd with IPv6 encap. RT1 is sbfd Initiator, RT2
+----+----+ +----+----+
"""
-pytestmark = [pytest.mark.bfdd, pytest.mark.sbfd]
+pytestmark = [pytest.mark.bfdd]
def show_bfd_check(router, status, type='echo', encap=None):
output = router.cmd("vtysh -c 'show bfd peers'")
diff --git a/tests/topotests/srv6_static_route_ipv4/__init__.py b/tests/topotests/srv6_static_route_ipv4/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/__init__.py
diff --git a/tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json b/tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json
new file mode 100644
index 0000000000..57f4c4488d
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/expected_srv6_route.json
@@ -0,0 +1,28 @@
+{
+ "192.0.2.0/24": [
+ {
+ "prefix": "192.0.2.0/24",
+ "prefixLen": 24,
+ "protocol": "static",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "nexthops": [
+ {
+ "directlyConnected": true,
+ "active": true,
+ "weight": 1,
+ "seg6local": {
+ "action": "unspec"
+ },
+ "seg6": [
+ "fcbb:bbbb:1:2:3:4:5:6",
+ "fcbb:bbbb:7:8:fe00::"
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/tests/topotests/srv6_static_route_ipv4/r1/frr.conf b/tests/topotests/srv6_static_route_ipv4/r1/frr.conf
new file mode 100644
index 0000000000..8ff23ec1b8
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/r1/frr.conf
@@ -0,0 +1,7 @@
+hostname r1
+!
+log stdout notifications
+log commands
+!
+ipv6 route fcbb:bbbb:1::/48 sr0
+ip route 192.0.2.0/24 sr0 segments fcbb:bbbb:1:2:3:4:5:6/fcbb:bbbb:7:8:fe00::
diff --git a/tests/topotests/srv6_static_route_ipv4/r1/setup.sh b/tests/topotests/srv6_static_route_ipv4/r1/setup.sh
new file mode 100644
index 0000000000..4b6cce89f8
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/r1/setup.sh
@@ -0,0 +1,2 @@
+ip link add sr0 type dummy
+ip link set sr0 up
diff --git a/tests/topotests/srv6_static_route_ipv4/test_srv6_route.py b/tests/topotests/srv6_static_route_ipv4/test_srv6_route.py
new file mode 100755
index 0000000000..b49a9cec89
--- /dev/null
+++ b/tests/topotests/srv6_static_route_ipv4/test_srv6_route.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_srv6_static_route_ipv4.py
+#
+# Copyright 2025
+# Carmine Scarpitta <cscarpit@cisco.com>
+#
+
+"""
+test_srv6_static_route_ipv4.py:
+Test for SRv6 static route on zebra
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+pytestmark = [pytest.mark.staticd]
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def setup_module(mod):
+ tgen = Topogen({None: "r1"}, mod.__name__)
+ tgen.start_topology()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_frr_config("frr.conf")
+ tgen.start_router()
+
+
+def teardown_module():
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_srv6_static_route():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears["r1"]
+
+ def _check_srv6_static_route(router, expected_route_file):
+ logger.info("checking zebra srv6 static route with multiple segs status")
+ output = json.loads(router.vtysh_cmd("show ip route static json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_route_file))
+ return topotest.json_cmp(output, expected)
+
+ def check_srv6_static_route(router, expected_file):
+ func = functools.partial(_check_srv6_static_route, router, expected_file)
+ _, result = topotest.run_and_expect(func, None, count=15, wait=1)
+ assert result is None, "Failed"
+
+ # FOR DEVELOPER:
+ # If you want to stop some specific line and start interactive shell,
+ # please use tgen.mininet_cli() to start it.
+
+ logger.info("Test for srv6 route configuration")
+ check_srv6_static_route(router, "expected_srv6_route.json")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf
index be831a1d34..252539e29b 100644
--- a/tools/etc/frr/support_bundle_commands.conf
+++ b/tools/etc/frr/support_bundle_commands.conf
@@ -79,6 +79,10 @@ show mpls table
show mpls fec
show mpls ldp
show mpls pseudowires
+show zebra dplane detailed
+show zebra dplane provider
+show fpm counters
+show fpm status
show memory
show interface vrf all
show vrf
diff --git a/yang/frr-isisd.yang b/yang/frr-isisd.yang
index a3e073f626..228faa4f10 100644
--- a/yang/frr-isisd.yang
+++ b/yang/frr-isisd.yang
@@ -403,7 +403,7 @@ module frr-isisd {
"Limit backup computation up to the prefix priority.";
}
list tiebreaker {
- key "index";
+ key "index type";
unique "type";
description
"Configure tiebreaker for multiple backups.";
diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c
index b8dbabb60d..9f26852d1f 100644
--- a/zebra/dplane_fpm_nl.c
+++ b/zebra/dplane_fpm_nl.c
@@ -587,7 +587,6 @@ static void fpm_read(struct event *t)
struct zebra_dplane_ctx *ctx;
size_t available_bytes;
size_t hdr_available_bytes;
- int ival;
/* Let's ignore the input at the moment. */
rv = stream_read_try(fnc->ibuf, fnc->socket,
@@ -724,12 +723,18 @@ static void fpm_read(struct event *t)
NULL);
if (netlink_route_notify_read_ctx(hdr, 0, ctx) >= 0) {
- /* In the FPM encoding, the vrfid is present */
- ival = dplane_ctx_get_table(ctx);
- dplane_ctx_set_vrf(ctx, ival);
- dplane_ctx_set_table(ctx,
- ZEBRA_ROUTE_TABLE_UNKNOWN);
-
+ /*
+ * Receiving back a netlink message from
+ * the fpm. Currently the netlink messages
+ * do not have a way to specify the vrf
+ * so it must be unknown. I'm looking
+ * at you sonic. If you are reading this
+ * and wondering why it's not working
+ * you must extend your patch to translate
+ * the tableid to the vrfid and set the
+ * tableid to 0 in order for this to work.
+ */
+ dplane_ctx_set_vrf(ctx, VRF_UNKNOWN);
dplane_provider_enqueue_to_zebra(ctx);
} else {
/*
@@ -946,8 +951,6 @@ static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
nl_buf_len = 0;
- frr_mutex_lock_autounlock(&fnc->obuf_mutex);
-
/*
* If route replace is enabled then directly encode the install which
* is going to use `NLM_F_REPLACE` (instead of delete/add operations).
@@ -1100,6 +1103,8 @@ static int fpm_nl_enqueue(struct fpm_nl_ctx *fnc, struct zebra_dplane_ctx *ctx)
/* We must know if someday a message goes beyond 65KiB. */
assert((nl_buf_len + FPM_HEADER_SIZE) <= UINT16_MAX);
+ frr_mutex_lock_autounlock(&fnc->obuf_mutex);
+
/* Check if we have enough buffer space. */
if (STREAM_WRITEABLE(fnc->obuf) < (nl_buf_len + FPM_HEADER_SIZE)) {
atomic_fetch_add_explicit(&fnc->counters.buffer_full, 1,
diff --git a/zebra/fpm_listener.c b/zebra/fpm_listener.c
index 7d84c706d4..ed0842a3b1 100644
--- a/zebra/fpm_listener.c
+++ b/zebra/fpm_listener.c
@@ -756,8 +756,10 @@ static void fpm_serve(void)
while (1) {
hdr = read_fpm_msg(buf, sizeof(buf));
- if (!hdr)
+ if (!hdr) {
+ close(glob->sock);
return;
+ }
process_fpm_msg(hdr);
}
@@ -769,6 +771,8 @@ int main(int argc, char **argv)
int r;
bool fork_daemon = false;
+ setbuf(stdout, NULL);
+
memset(glob, 0, sizeof(*glob));
while ((r = getopt(argc, argv, "rdv")) != -1) {
diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c
index d696b19859..9a60e32b65 100644
--- a/zebra/rt_netlink.c
+++ b/zebra/rt_netlink.c
@@ -1295,6 +1295,12 @@ static int netlink_route_change_read_unicast_internal(struct nlmsghdr *h,
flags, &p,
(struct prefix_ipv6 *)&src_p, &nh, 0,
table, metric, distance, true);
+
+ if (nh.nh_label)
+ nexthop_del_labels(&nh);
+
+ if (nh.nh_srv6)
+ nexthop_del_srv6_seg6(&nh);
} else {
/* XXX: need to compare the entire list of
* nexthops here for NLM_F_APPEND stupidity */
diff --git a/zebra/zebra_cli.c b/zebra/zebra_cli.c
index ca53eb2eb3..bb79928326 100644
--- a/zebra/zebra_cli.c
+++ b/zebra/zebra_cli.c
@@ -1983,6 +1983,10 @@ static void lib_vrf_zebra_ipv6_router_id_cli_write(struct vty *vty,
vty_out(vty, "ipv6 router-id %s\n", id);
}
+/*
+ * Both the v4 and v6 versions of this command now prevent System
+ * route types from being considered here at all
+ */
DEFPY_YANG (ip_protocol,
ip_protocol_cmd,
"[no] ip protocol " FRR_IP_PROTOCOL_MAP_STR_ZEBRA
diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c
index b57c930154..9c252cc63c 100644
--- a/zebra/zebra_dplane.c
+++ b/zebra/zebra_dplane.c
@@ -7528,6 +7528,16 @@ static void dplane_thread_loop(struct event *event)
if (!zdplane_info.dg_run)
break;
+ /*
+ * The yield should only happen after a bit of work has been
+ * done but before we pull any new work off any provider
+ * queue to continue looping. This is a safe spot to
+ * do so.
+ */
+ if (event_should_yield(event)) {
+ reschedule = true;
+ break;
+ }
/* Locate next provider */
next_prov = dplane_prov_list_next(&zdplane_info.dg_providers,
prov);
@@ -7592,11 +7602,6 @@ static void dplane_thread_loop(struct event *event)
zlog_debug("dplane dequeues %d completed work from provider %s",
counter, dplane_provider_get_name(prov));
- if (event_should_yield(event)) {
- reschedule = true;
- break;
- }
-
/* Locate next provider */
prov = next_prov;
}
@@ -7698,7 +7703,10 @@ static void zebra_dplane_init_internal(void)
dplane_prov_list_init(&zdplane_info.dg_providers);
- dplane_ctx_list_init(&zdplane_info.dg_update_list);
+ frr_with_mutex (&zdplane_info.dg_mutex) {
+ dplane_ctx_list_init(&zdplane_info.dg_update_list);
+ }
+
zns_info_list_init(&zdplane_info.dg_zns_list);
zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 2881192eb7..a1c8cd3059 100644
--- a/zebra/zebra_rib.c
+++ b/zebra/zebra_rib.c
@@ -1891,20 +1891,18 @@ struct route_node *rib_find_rn_from_ctx(const struct zebra_dplane_ctx *ctx)
struct route_table *table = NULL;
struct route_node *rn = NULL;
const struct prefix *dest_pfx, *src_pfx;
+ uint32_t tableid = dplane_ctx_get_table(ctx);
+ vrf_id_t vrf_id = dplane_ctx_get_vrf(ctx);
/* Locate rn and re(s) from ctx */
+ table = zebra_vrf_lookup_table_with_table_id(dplane_ctx_get_afi(ctx),
+ dplane_ctx_get_safi(ctx), vrf_id, tableid);
- table = zebra_vrf_lookup_table_with_table_id(
- dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx),
- dplane_ctx_get_vrf(ctx), dplane_ctx_get_table(ctx));
if (table == NULL) {
if (IS_ZEBRA_DEBUG_DPLANE) {
- zlog_debug(
- "Failed to find route for ctx: no table for afi %d, safi %d, vrf %s(%u)",
- dplane_ctx_get_afi(ctx),
- dplane_ctx_get_safi(ctx),
- vrf_id_to_name(dplane_ctx_get_vrf(ctx)),
- dplane_ctx_get_vrf(ctx));
+ zlog_debug("Failed to find route for ctx: no table for afi %d, safi %d, vrf %s(%u) table %u",
+ dplane_ctx_get_afi(ctx), dplane_ctx_get_safi(ctx),
+ vrf_id_to_name(vrf_id), vrf_id, tableid);
}
goto done;
}
@@ -2214,26 +2212,13 @@ static void rib_process_dplane_notify(struct zebra_dplane_ctx *ctx)
{
struct route_node *rn = NULL;
struct route_entry *re = NULL;
- struct vrf *vrf;
+ struct vrf *vrf = vrf_lookup_by_id(dplane_ctx_get_vrf(ctx));
struct nexthop *nexthop;
rib_dest_t *dest;
bool fib_changed = false;
bool debug_p = IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_RIB;
int start_count, end_count;
- vrf_id_t vrf_id;
- int tableid;
-
- /* Locate vrf and route table - we must have one or the other */
- tableid = dplane_ctx_get_table(ctx);
- vrf_id = dplane_ctx_get_vrf(ctx);
- if (vrf_id == VRF_UNKNOWN)
- vrf_id = zebra_vrf_lookup_by_table(tableid,
- dplane_ctx_get_ns_id(ctx));
- else if (tableid == ZEBRA_ROUTE_TABLE_UNKNOWN)
- tableid = zebra_vrf_lookup_tableid(vrf_id,
- dplane_ctx_get_ns_id(ctx));
-
- vrf = vrf_lookup_by_id(vrf_id);
+ uint32_t tableid = dplane_ctx_get_table(ctx);
/* Locate rn and re(s) from ctx */
rn = rib_find_rn_from_ctx(ctx);
@@ -4863,6 +4848,33 @@ void rib_close_table(struct route_table *table)
}
/*
+ * The context sent up from the dplane may be a context
+ * that has been generated by the zebra master pthread
+ * or it may be a context generated from a event in
+ * either the kernel dplane code or the fpm dplane
+ * code. In which case the tableid and vrfid may
+ * not be fully known and we have to figure it out
+ * when the context hits the master pthread.
+ * since this is the *starter* spot for that let
+ * us do a bit of work on each one to see if any
+ * massaging is needed
+ */
+static inline void zebra_rib_translate_ctx_from_dplane(struct zebra_dplane_ctx *ctx)
+{
+ uint32_t tableid = dplane_ctx_get_table(ctx);
+ vrf_id_t vrfid = dplane_ctx_get_vrf(ctx);
+ uint32_t nsid = dplane_ctx_get_ns_id(ctx);
+ enum dplane_op_e op = dplane_ctx_get_op(ctx);
+
+ if (vrfid == VRF_UNKNOWN)
+ dplane_ctx_set_vrf(ctx, zebra_vrf_lookup_by_table(tableid, nsid));
+ else if ((op == DPLANE_OP_ROUTE_INSTALL || op == DPLANE_OP_ROUTE_UPDATE ||
+ op == DPLANE_OP_ROUTE_DELETE) &&
+ tableid == ZEBRA_ROUTE_TABLE_UNKNOWN)
+ dplane_ctx_set_table(ctx, zebra_vrf_lookup_tableid(vrfid, nsid));
+}
+
+/*
* Handle results from the dataplane system. Dequeue update context
* structs, dispatch to appropriate internal handlers.
*/
@@ -4921,6 +4933,8 @@ static void rib_process_dplane_results(struct event *thread)
}
while (ctx) {
+ zebra_rib_translate_ctx_from_dplane(ctx);
+
#ifdef HAVE_SCRIPTING
if (ret == 0)
frrscript_call(fs,
diff --git a/zebra/zebra_routemap.c b/zebra/zebra_routemap.c
index 73ffa09c16..2813f037a2 100644
--- a/zebra/zebra_routemap.c
+++ b/zebra/zebra_routemap.c
@@ -959,10 +959,11 @@ route_set_src(void *rule, const struct prefix *prefix, void *object)
/* set src compilation. */
static void *route_set_src_compile(const char *arg)
{
- union g_addr src, *psrc;
+ union g_addr src = {}, *psrc;
- if ((inet_pton(AF_INET6, arg, &src.ipv6) == 1)
- || (inet_pton(AF_INET, arg, &src.ipv4) == 1)) {
+ /* IPv4 first, to ensure no garbage in the 12 unused bytes */
+ if ((inet_pton(AF_INET, arg, &src.ipv4) == 1) ||
+ (inet_pton(AF_INET6, arg, &src.ipv6) == 1)) {
psrc = XMALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(union g_addr));
*psrc = src;
return psrc;
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 7ef3582329..6965c285cd 100644
--- a/zebra/zserv.c
+++ b/zebra/zserv.c
@@ -937,6 +937,10 @@ void zserv_close(void)
/*
* Open zebra's ZAPI listener socket. This is done early during startup,
* before zebra is ready to listen and accept client connections.
+ *
+ * This function should only ever be called from the startup pthread
+ * from main.c. If it is called multiple times it will cause problems
+ * because it causes the zsock global variable to be set up.
*/
void zserv_open(const char *path)
{
diff --git a/zebra/zserv.h b/zebra/zserv.h
index ce47ef19fa..1ff7ccd981 100644
--- a/zebra/zserv.h
+++ b/zebra/zserv.h
@@ -262,6 +262,9 @@ extern void zserv_close(void);
*
* path
* where to place the Unix domain socket
+ *
+ * This function *should* only ever be called from
+ * main() and only ever from 1 pthread.
*/
extern void zserv_open(const char *path);